]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - arch/mips/kernel/vpe.c
MIPS: VPE: Fix compiler warning.
[mirror_ubuntu-zesty-kernel.git] / arch / mips / kernel / vpe.c
CommitLineData
e01402b1
RB
1/*
2 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
3 *
4 * This program is free software; you can distribute it and/or modify it
5 * under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
e01402b1
RB
16 */
17
/*
 * VPE support module
 *
 * Provides support for loading a MIPS SP program on VPE1.
 * The SP environment is rather simple, no tlb's.  It needs to be relocatable
 * (or partially linked). You should initialise your stack in the startup
 * code. This loader looks for the symbol __start and sets up
 * execution to resume from there. The MIPS SDE kit contains suitable examples.
 *
 * To load and run, simply cat a SP 'program file' to /dev/vpe1.
 * i.e. cat spapp >/dev/vpe1.
 */
e01402b1 30#include <linux/kernel.h>
27a3bbaf 31#include <linux/device.h>
e01402b1
RB
32#include <linux/module.h>
33#include <linux/fs.h>
34#include <linux/init.h>
35#include <asm/uaccess.h>
36#include <linux/slab.h>
37#include <linux/list.h>
38#include <linux/vmalloc.h>
39#include <linux/elf.h>
40#include <linux/seq_file.h>
7558da94 41#include <linux/smp_lock.h>
e01402b1
RB
42#include <linux/syscalls.h>
43#include <linux/moduleloader.h>
44#include <linux/interrupt.h>
45#include <linux/poll.h>
46#include <linux/bootmem.h>
47#include <asm/mipsregs.h>
340ee4b9 48#include <asm/mipsmtregs.h>
e01402b1
RB
49#include <asm/cacheflush.h>
50#include <asm/atomic.h>
51#include <asm/cpu.h>
27a3bbaf 52#include <asm/mips_mt.h>
e01402b1
RB
53#include <asm/processor.h>
54#include <asm/system.h>
2600990e
RB
55#include <asm/vpe.h>
56#include <asm/kspd.h>
e01402b1
RB
57
58typedef void *vpe_handle;
59
e01402b1
RB
60#ifndef ARCH_SHF_SMALL
61#define ARCH_SHF_SMALL 0
62#endif
63
64/* If this is set, the section belongs in the init part of the module */
65#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
66
41790e04
RB
67/*
68 * The number of TCs and VPEs physically available on the core
69 */
70static int hw_tcs, hw_vpes;
e01402b1 71static char module_name[] = "vpe";
307bd284 72static int major;
27a3bbaf 73static const int minor = 1; /* fixed for now */
e01402b1 74
2600990e
RB
75#ifdef CONFIG_MIPS_APSP_KSPD
76 static struct kspd_notifications kspd_events;
77static int kspd_events_reqd = 0;
78#endif
79
e01402b1
RB
80/* grab the likely amount of memory we will need. */
81#ifdef CONFIG_MIPS_VPE_LOADER_TOM
82#define P_SIZE (2 * 1024 * 1024)
83#else
84/* add an overhead to the max kmalloc size for non-striped symbols/etc */
85#define P_SIZE (256 * 1024)
86#endif
87
2600990e
RB
88extern unsigned long physical_memsize;
89
e01402b1 90#define MAX_VPES 16
2600990e 91#define VPE_PATH_MAX 256
e01402b1
RB
92
93enum vpe_state {
94 VPE_STATE_UNUSED = 0,
95 VPE_STATE_INUSE,
96 VPE_STATE_RUNNING
97};
98
99enum tc_state {
100 TC_STATE_UNUSED = 0,
101 TC_STATE_INUSE,
102 TC_STATE_RUNNING,
103 TC_STATE_DYNAMIC
104};
105
307bd284 106struct vpe {
e01402b1
RB
107 enum vpe_state state;
108
109 /* (device) minor associated with this vpe */
110 int minor;
111
112 /* elfloader stuff */
113 void *load_addr;
571e0bed 114 unsigned long len;
e01402b1 115 char *pbuffer;
571e0bed 116 unsigned long plen;
2600990e
RB
117 unsigned int uid, gid;
118 char cwd[VPE_PATH_MAX];
e01402b1
RB
119
120 unsigned long __start;
121
122 /* tc's associated with this vpe */
123 struct list_head tc;
124
125 /* The list of vpe's */
126 struct list_head list;
127
128 /* shared symbol address */
129 void *shared_ptr;
2600990e
RB
130
131 /* the list of who wants to know when something major happens */
132 struct list_head notify;
41790e04
RB
133
134 unsigned int ntcs;
307bd284
RB
135};
136
137struct tc {
138 enum tc_state state;
139 int index;
140
07cc0c9e
RB
141 struct vpe *pvpe; /* parent VPE */
142 struct list_head tc; /* The list of TC's with this VPE */
143 struct list_head list; /* The global list of tc's */
307bd284 144};
e01402b1 145
9cfdf6f1 146struct {
e01402b1
RB
147 /* Virtual processing elements */
148 struct list_head vpe_list;
149
150 /* Thread contexts */
151 struct list_head tc_list;
9cfdf6f1
RB
152} vpecontrol = {
153 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
154 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
155};
e01402b1
RB
156
157static void release_progmem(void *ptr);
e01402b1
RB
158extern void save_gp_address(unsigned int secbase, unsigned int rel);
159
160/* get the vpe associated with this minor */
161struct vpe *get_vpe(int minor)
162{
163 struct vpe *v;
164
2600990e
RB
165 if (!cpu_has_mipsmt)
166 return NULL;
167
e01402b1
RB
168 list_for_each_entry(v, &vpecontrol.vpe_list, list) {
169 if (v->minor == minor)
170 return v;
171 }
172
e01402b1
RB
173 return NULL;
174}
175
176/* get the vpe associated with this minor */
177struct tc *get_tc(int index)
178{
179 struct tc *t;
180
181 list_for_each_entry(t, &vpecontrol.tc_list, list) {
182 if (t->index == index)
183 return t;
184 }
185
e01402b1
RB
186 return NULL;
187}
188
189struct tc *get_tc_unused(void)
190{
191 struct tc *t;
192
193 list_for_each_entry(t, &vpecontrol.tc_list, list) {
194 if (t->state == TC_STATE_UNUSED)
195 return t;
196 }
197
e01402b1
RB
198 return NULL;
199}
200
201/* allocate a vpe and associate it with this minor (or index) */
202struct vpe *alloc_vpe(int minor)
203{
204 struct vpe *v;
205
307bd284 206 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
e01402b1
RB
207 return NULL;
208 }
209
e01402b1
RB
210 INIT_LIST_HEAD(&v->tc);
211 list_add_tail(&v->list, &vpecontrol.vpe_list);
212
2600990e 213 INIT_LIST_HEAD(&v->notify);
e01402b1
RB
214 v->minor = minor;
215 return v;
216}
217
218/* allocate a tc. At startup only tc0 is running, all other can be halted. */
219struct tc *alloc_tc(int index)
220{
07cc0c9e 221 struct tc *tc;
e01402b1 222
07cc0c9e
RB
223 if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
224 goto out;
e01402b1 225
07cc0c9e
RB
226 INIT_LIST_HEAD(&tc->tc);
227 tc->index = index;
228 list_add_tail(&tc->list, &vpecontrol.tc_list);
e01402b1 229
07cc0c9e
RB
230out:
231 return tc;
e01402b1
RB
232}
233
234/* clean up and free everything */
235void release_vpe(struct vpe *v)
236{
237 list_del(&v->list);
238 if (v->load_addr)
239 release_progmem(v);
240 kfree(v);
241}
242
/* Dump the MT-related CP0 registers (Config3, MVPControl, MVPConf0)
   to the console for debugging. */
void dump_mtregs(void)
{
        unsigned long val;

        /* Config3.MT indicates whether the MT ASE is implemented. */
        val = read_c0_config3();
        printk("config3 0x%lx MT %ld\n", val,
               (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);

        /* MVPControl: shared TLB, config-state and master-VPE-enable bits. */
        val = read_c0_mvpcontrol();
        printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
               (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
               (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
               (val & MVPCONTROL_EVP));

        /* MVPConf0 reports how many VPEs and TCs the core implements. */
        val = read_c0_mvpconf0();
        printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
               (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
               val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
}
262
/* Find some VPE program space: either the physical memory just past
   what Linux was told to manage (TOM config) or a plain kernel
   allocation.  Returns NULL on failure (kzalloc path only). */
static void *alloc_progmem(unsigned long len)
{
        void *addr;

#ifdef CONFIG_MIPS_VPE_LOADER_TOM
        /*
         * This means you must tell Linux to use less memory than you
         * physically have, for example by passing a mem= boot argument.
         * The program is placed at the kernel address of the first pfn
         * past the memory Linux manages; no allocation can fail here,
         * but nothing guards against 'len' overrunning real RAM either.
         */
        addr = pfn_to_kaddr(max_low_pfn);
        memset(addr, 0, len);
#else
        /* simple grab some mem for now */
        addr = kzalloc(len, GFP_KERNEL);
#endif

        return addr;
}
282
/* Free memory obtained from alloc_progmem().  A no-op in the TOM
   configuration, where the program lives outside the kernel heap. */
static void release_progmem(void *ptr)
{
#ifndef CONFIG_MIPS_VPE_LOADER_TOM
        kfree(ptr);
#endif
}
289
290/* Update size with this section: return offset. */
291static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
292{
293 long ret;
294
295 ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
296 *size = ret + sechdr->sh_size;
297 return ret;
298}
299
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data. Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
                            Elf_Shdr * sechdrs, const char *secstrings)
{
        static unsigned long const masks[][2] = {
                /* NOTE: all executable code must be the first section
                 * in this array; otherwise modify the text_size
                 * finder in the two loops below */
                {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
                {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
                {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
                {ARCH_SHF_SMALL | SHF_ALLOC, 0}
        };
        unsigned int m, i;

        /* ~0UL in sh_entsize marks a section as not yet placed. */
        for (i = 0; i < hdr->e_shnum; i++)
                sechdrs[i].sh_entsize = ~0UL;

        for (m = 0; m < ARRAY_SIZE(masks); ++m) {
                for (i = 0; i < hdr->e_shnum; ++i) {
                        Elf_Shdr *s = &sechdrs[i];

                        /* Place each section in the first matching category
                         * only: masks[m][0] must all be set, masks[m][1]
                         * must all be clear, and it must be unplaced. */
                        // || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
                        if ((s->sh_flags & masks[m][0]) != masks[m][0]
                            || (s->sh_flags & masks[m][1])
                            || s->sh_entsize != ~0UL)
                                continue;
                        s->sh_entsize =
                                get_offset((unsigned long *)&mod->core_size, s);
                }

                /* Executable sections are category 0, so the running total
                 * after the first pass is exactly the text size. */
                if (m == 0)
                        mod->core_text_size = mod->core_size;

        }
}
339
340
341/* from module-elf32.c, but subverted a little */
342
/* A pending R_MIPS_HI16 relocation: queued until the matching LO16
   arrives and supplies the low 16 bits of the addend. */
struct mips_hi16 {
        struct mips_hi16 *next;         /* singly-linked list link */
        Elf32_Addr *addr;               /* instruction word to patch */
        Elf32_Addr value;               /* symbol value for this HI16 */
};

/* Head of the pending HI16 chain, drained by apply_r_mips_lo16(). */
static struct mips_hi16 *mips_hi16_list;
/* _gp address and its offset within its 64K-aligned section base,
   recorded by save_gp_address() for gp-relative relocations. */
static unsigned int gp_offs, gp_addr;
351
/* R_MIPS_NONE: nothing to patch; always succeeds. */
static int apply_r_mips_none(struct module *me, uint32_t *location,
                             Elf32_Addr v)
{
        return 0;
}
357
358static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
359 Elf32_Addr v)
360{
361 int rel;
362
363 if( !(*location & 0xffff) ) {
364 rel = (int)v - gp_addr;
365 }
366 else {
367 /* .sbss + gp(relative) + offset */
368 /* kludge! */
369 rel = (int)(short)((int)v + gp_offs +
370 (int)(short)(*location & 0xffff) - gp_addr);
371 }
372
373 if( (rel > 32768) || (rel < -32768) ) {
2600990e
RB
374 printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
375 "relative address 0x%x out of range of gp register\n",
376 rel);
e01402b1
RB
377 return -ENOEXEC;
378 }
379
380 *location = (*location & 0xffff0000) | (rel & 0xffff);
381
382 return 0;
383}
384
385static int apply_r_mips_pc16(struct module *me, uint32_t *location,
386 Elf32_Addr v)
387{
388 int rel;
389 rel = (((unsigned int)v - (unsigned int)location));
390 rel >>= 2; // because the offset is in _instructions_ not bytes.
391 rel -= 1; // and one instruction less due to the branch delay slot.
392
393 if( (rel > 32768) || (rel < -32768) ) {
2600990e
RB
394 printk(KERN_DEBUG "VPE loader: "
395 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
e01402b1
RB
396 return -ENOEXEC;
397 }
398
399 *location = (*location & 0xffff0000) | (rel & 0xffff);
400
401 return 0;
402}
403
/* R_MIPS_32: add the symbol value to the 32-bit word at location. */
static int apply_r_mips_32(struct module *me, uint32_t *location,
                           Elf32_Addr v)
{
        *location = *location + v;

        return 0;
}
411
/* R_MIPS_26: patch the 26-bit jump target field of a j/jal, adding the
   target (in instruction units) to the existing addend. */
static int apply_r_mips_26(struct module *me, uint32_t *location,
                           Elf32_Addr v)
{
        /* Jump targets must be 4-byte aligned. */
        if (v % 4) {
                printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
                       " unaligned relocation\n");
                return -ENOEXEC;
        }

/*
 * Not desperately convinced this is a good check of an overflow condition
 * anyway. But it gets in the way of handling undefined weak symbols which
 * we want to set to zero.
 * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
 * printk(KERN_ERR
 * "module %s: relocation overflow\n",
 * me->name);
 * return -ENOEXEC;
 * }
 */

        *location = (*location & ~0x03ffffff) |
                ((*location + (v >> 2)) & 0x03ffffff);
        return 0;
}
437
438static int apply_r_mips_hi16(struct module *me, uint32_t *location,
439 Elf32_Addr v)
440{
441 struct mips_hi16 *n;
442
443 /*
444 * We cannot relocate this one now because we don't know the value of
445 * the carry we need to add. Save the information, and let LO16 do the
446 * actual relocation.
447 */
448 n = kmalloc(sizeof *n, GFP_KERNEL);
449 if (!n)
450 return -ENOMEM;
451
452 n->addr = location;
453 n->value = v;
454 n->next = mips_hi16_list;
455 mips_hi16_list = n;
456
457 return 0;
458}
459
460static int apply_r_mips_lo16(struct module *me, uint32_t *location,
461 Elf32_Addr v)
462{
463 unsigned long insnlo = *location;
464 Elf32_Addr val, vallo;
465
466 /* Sign extend the addend we extract from the lo insn. */
467 vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
468
469 if (mips_hi16_list != NULL) {
470 struct mips_hi16 *l;
471
472 l = mips_hi16_list;
473 while (l != NULL) {
474 struct mips_hi16 *next;
475 unsigned long insn;
476
477 /*
478 * The value for the HI16 had best be the same.
479 */
2600990e
RB
480 if (v != l->value) {
481 printk(KERN_DEBUG "VPE loader: "
b1e3afa0 482 "apply_r_mips_lo16/hi16: \t"
2600990e
RB
483 "inconsistent value information\n");
484 return -ENOEXEC;
e01402b1
RB
485 }
486
e01402b1
RB
487 /*
488 * Do the HI16 relocation. Note that we actually don't
489 * need to know anything about the LO16 itself, except
490 * where to find the low 16 bits of the addend needed
491 * by the LO16.
492 */
493 insn = *l->addr;
494 val = ((insn & 0xffff) << 16) + vallo;
495 val += v;
496
497 /*
498 * Account for the sign extension that will happen in
499 * the low bits.
500 */
501 val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
502
503 insn = (insn & ~0xffff) | val;
504 *l->addr = insn;
505
506 next = l->next;
507 kfree(l);
508 l = next;
509 }
510
511 mips_hi16_list = NULL;
512 }
513
514 /*
515 * Ok, we're done with the HI16 relocs. Now deal with the LO16.
516 */
517 val = v + vallo;
518 insnlo = (insnlo & ~0xffff) | (val & 0xffff);
519 *location = insnlo;
520
521 return 0;
e01402b1
RB
522}
523
/* Dispatch table: one handler per supported MIPS relocation type. */
static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
                                Elf32_Addr v) = {
        [R_MIPS_NONE] = apply_r_mips_none,
        [R_MIPS_32] = apply_r_mips_32,
        [R_MIPS_26] = apply_r_mips_26,
        [R_MIPS_HI16] = apply_r_mips_hi16,
        [R_MIPS_LO16] = apply_r_mips_lo16,
        [R_MIPS_GPREL16] = apply_r_mips_gprel16,
        [R_MIPS_PC16] = apply_r_mips_pc16
};

/* Human-readable names for the relocation types, for error messages. */
static char *rstrs[] = {
        [R_MIPS_NONE] = "MIPS_NONE",
        [R_MIPS_32] = "MIPS_32",
        [R_MIPS_26] = "MIPS_26",
        [R_MIPS_HI16] = "MIPS_HI16",
        [R_MIPS_LO16] = "MIPS_LO16",
        [R_MIPS_GPREL16] = "MIPS_GPREL16",
        [R_MIPS_PC16] = "MIPS_PC16"
};

/* Apply every SHT_REL entry in section 'relsec' to the section it
   targets, dispatching on relocation type.  Returns 0 or the first
   handler's error. */
int apply_relocations(Elf32_Shdr *sechdrs,
                      const char *strtab,
                      unsigned int symindex,
                      unsigned int relsec,
                      struct module *me)
{
        Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
        Elf32_Sym *sym;
        uint32_t *location;
        unsigned int i;
        Elf32_Addr v;
        int res;

        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                Elf32_Word r_info = rel[i].r_info;

                /* This is where to make the change */
                location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;
                /* This is the symbol it is referring to */
                sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
                        + ELF32_R_SYM(r_info);

                if (!sym->st_value) {
                        printk(KERN_DEBUG "%s: undefined weak symbol %s\n",
                               me->name, strtab + sym->st_name);
                        /* just print the warning, dont barf */
                }

                v = sym->st_value;

                /* NOTE(review): ELF32_R_TYPE(r_info) is indexed into
                 * reloc_handlers without a bounds check -- assumes the
                 * input ELF only contains the types listed above; verify
                 * before feeding untrusted images through this path. */
                res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
                if( res ) {
                        char *r = rstrs[ELF32_R_TYPE(r_info)];
                        printk(KERN_WARNING "VPE loader: .text+0x%x "
                               "relocation type %s for symbol \"%s\" failed\n",
                               rel[i].r_offset, r ? r : "UNKNOWN",
                               strtab + sym->st_name);
                        return res;
                }
        }

        return 0;
}
589
/* Record the location of the _gp symbol for gp-relative relocations:
   gp_addr is its absolute address, gp_offs its offset from the
   64K-aligned base of the containing section. */
void save_gp_address(unsigned int secbase, unsigned int rel)
{
        gp_addr = secbase + rel;
        gp_offs = gp_addr - (secbase & 0xffff0000);
}
595/* end module-elf32.c */
596
597
598
/* Change all symbols so that sh_value encodes the pointer directly:
   COMMON symbols are laid out in .bss, ordinary symbols get their
   section base added, and _gp is recorded for gp-relative relocs. */
static void simplify_symbols(Elf_Shdr * sechdrs,
                             unsigned int symindex,
                             const char *strtab,
                             const char *secstrings,
                             unsigned int nsecs, struct module *mod)
{
        Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
        unsigned long secbase, bssbase = 0;
        unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
        int size;

        /* find the .bss section for COMMON symbols */
        for (i = 0; i < nsecs; i++) {
                if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
                        bssbase = sechdrs[i].sh_addr;
                        break;
                }
        }

        /* Symbol 0 is the ELF null symbol; start from 1. */
        for (i = 1; i < n; i++) {
                switch (sym[i].st_shndx) {
                case SHN_COMMON:
                        /* Allocate space for the symbol in the .bss section.
                           st_value is currently size.
                           We want it to have the address of the symbol. */

                        size = sym[i].st_value;
                        sym[i].st_value = bssbase;

                        bssbase += size;
                        break;

                case SHN_ABS:
                        /* Don't need to do anything */
                        break;

                case SHN_UNDEF:
                        /* ret = -ENOENT; */
                        break;

                case SHN_MIPS_SCOMMON:
                        printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON "
                               "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
                               sym[i].st_shndx);
                        // .sbss section
                        break;

                default:
                        /* Defined symbol: rebase onto its section's address. */
                        secbase = sechdrs[sym[i].st_shndx].sh_addr;

                        /* Remember where _gp landed for later GPREL16 fixups. */
                        if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) {
                                save_gp_address(secbase, sym[i].st_value);
                        }

                        sym[i].st_value += secbase;
                        break;
                }
        }
}
659
#ifdef DEBUG_ELFLOADER
/* Debug helper: print the name and value of every symtab entry. */
static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
                            const char *strtab, struct module *mod)
{
        Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
        unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);

        printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n);
        for (i = 1; i < n; i++) {
                printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i,
                       strtab + sym[i].st_name, sym[i].st_value);
        }
}
#endif
674
e01402b1 675/* We are prepared so configure and start the VPE... */
be6e1437 676static int vpe_run(struct vpe * v)
e01402b1 677{
07cc0c9e 678 unsigned long flags, val, dmt_flag;
2600990e 679 struct vpe_notifications *n;
07cc0c9e 680 unsigned int vpeflags;
e01402b1
RB
681 struct tc *t;
682
683 /* check we are the Master VPE */
07cc0c9e 684 local_irq_save(flags);
e01402b1
RB
685 val = read_c0_vpeconf0();
686 if (!(val & VPECONF0_MVP)) {
687 printk(KERN_WARNING
2600990e 688 "VPE loader: only Master VPE's are allowed to configure MT\n");
07cc0c9e
RB
689 local_irq_restore(flags);
690
e01402b1
RB
691 return -1;
692 }
693
07cc0c9e
RB
694 dmt_flag = dmt();
695 vpeflags = dvpe();
e01402b1 696
2600990e 697 if (!list_empty(&v->tc)) {
e0daad44 698 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
07cc0c9e
RB
699 evpe(vpeflags);
700 emt(dmt_flag);
701 local_irq_restore(flags);
702
703 printk(KERN_WARNING
704 "VPE loader: TC %d is already in use.\n",
705 t->index);
e0daad44
RB
706 return -ENOEXEC;
707 }
708 } else {
07cc0c9e
RB
709 evpe(vpeflags);
710 emt(dmt_flag);
711 local_irq_restore(flags);
712
713 printk(KERN_WARNING
714 "VPE loader: No TC's associated with VPE %d\n",
e0daad44 715 v->minor);
07cc0c9e 716
e0daad44
RB
717 return -ENOEXEC;
718 }
2600990e 719
e01402b1 720 /* Put MVPE's into 'configuration state' */
340ee4b9 721 set_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1 722
e01402b1
RB
723 settc(t->index);
724
e01402b1
RB
725 /* should check it is halted, and not activated */
726 if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
07cc0c9e
RB
727 evpe(vpeflags);
728 emt(dmt_flag);
729 local_irq_restore(flags);
730
731 printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
e01402b1 732 t->index);
07cc0c9e 733
e01402b1
RB
734 return -ENOEXEC;
735 }
736
737 /* Write the address we want it to start running from in the TCPC register. */
738 write_tc_c0_tcrestart((unsigned long)v->__start);
e01402b1 739 write_tc_c0_tccontext((unsigned long)0);
07cc0c9e 740
2600990e
RB
741 /*
742 * Mark the TC as activated, not interrupt exempt and not dynamically
743 * allocatable
744 */
e01402b1
RB
745 val = read_tc_c0_tcstatus();
746 val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
747 write_tc_c0_tcstatus(val);
748
749 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
750
e01402b1
RB
751 /*
752 * The sde-kit passes 'memsize' to __start in $a3, so set something
2600990e 753 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
e01402b1
RB
754 * DFLT_HEAP_SIZE when you compile your program
755 */
41790e04 756 mttgpr(6, v->ntcs);
07cc0c9e 757 mttgpr(7, physical_memsize);
2600990e
RB
758
759 /* set up VPE1 */
760 /*
761 * bind the TC to VPE 1 as late as possible so we only have the final
762 * VPE registers to set up, and so an EJTAG probe can trigger on it
763 */
07cc0c9e 764 write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
e01402b1 765
a94d7020
EO
766 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
767
768 back_to_back_c0_hazard();
769
e0daad44
RB
770 /* Set up the XTC bit in vpeconf0 to point at our tc */
771 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
772 | (t->index << VPECONF0_XTC_SHIFT));
e01402b1 773
a94d7020
EO
774 back_to_back_c0_hazard();
775
e0daad44
RB
776 /* enable this VPE */
777 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
e01402b1
RB
778
779 /* clear out any left overs from a previous program */
2600990e 780 write_vpe_c0_status(0);
e01402b1
RB
781 write_vpe_c0_cause(0);
782
783 /* take system out of configuration state */
340ee4b9 784 clear_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1 785
b618336a
KK
786 /*
787 * SMTC/SMVP kernels manage VPE enable independently,
788 * but uniprocessor kernels need to turn it on, even
789 * if that wasn't the pre-dvpe() state.
790 */
07cc0c9e 791#ifdef CONFIG_SMP
07cc0c9e 792 evpe(vpeflags);
b618336a
KK
793#else
794 evpe(EVPE_ENABLE);
07cc0c9e
RB
795#endif
796 emt(dmt_flag);
797 local_irq_restore(flags);
e01402b1 798
07cc0c9e
RB
799 list_for_each_entry(n, &v->notify, list)
800 n->start(minor);
2600990e 801
e01402b1
RB
802 return 0;
803}
804
2600990e 805static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
e01402b1
RB
806 unsigned int symindex, const char *strtab,
807 struct module *mod)
808{
809 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
810 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
811
812 for (i = 1; i < n; i++) {
813 if (strcmp(strtab + sym[i].st_name, "__start") == 0) {
814 v->__start = sym[i].st_value;
815 }
816
817 if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) {
818 v->shared_ptr = (void *)sym[i].st_value;
819 }
820 }
821
2600990e
RB
822 if ( (v->__start == 0) || (v->shared_ptr == NULL))
823 return -1;
824
e01402b1
RB
825 return 0;
826}
827
307bd284 828/*
2600990e
RB
829 * Allocates a VPE with some program code space(the load address), copies the
830 * contents of the program (p)buffer performing relocatations/etc, free's it
831 * when finished.
832 */
be6e1437 833static int vpe_elfload(struct vpe * v)
e01402b1
RB
834{
835 Elf_Ehdr *hdr;
836 Elf_Shdr *sechdrs;
837 long err = 0;
838 char *secstrings, *strtab = NULL;
2600990e 839 unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
e01402b1
RB
840 struct module mod; // so we can re-use the relocations code
841
842 memset(&mod, 0, sizeof(struct module));
2600990e 843 strcpy(mod.name, "VPE loader");
e01402b1
RB
844
845 hdr = (Elf_Ehdr *) v->pbuffer;
846 len = v->plen;
847
848 /* Sanity checks against insmoding binaries or wrong arch,
849 weird elf version */
d303f4a1 850 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
2600990e
RB
851 || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
852 || !elf_check_arch(hdr)
e01402b1
RB
853 || hdr->e_shentsize != sizeof(*sechdrs)) {
854 printk(KERN_WARNING
2600990e 855 "VPE loader: program wrong arch or weird elf version\n");
e01402b1
RB
856
857 return -ENOEXEC;
858 }
859
2600990e
RB
860 if (hdr->e_type == ET_REL)
861 relocate = 1;
862
e01402b1 863 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
2600990e
RB
864 printk(KERN_ERR "VPE loader: program length %u truncated\n",
865 len);
866
e01402b1
RB
867 return -ENOEXEC;
868 }
869
870 /* Convenience variables */
871 sechdrs = (void *)hdr + hdr->e_shoff;
872 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
873 sechdrs[0].sh_addr = 0;
874
875 /* And these should exist, but gcc whinges if we don't init them */
876 symindex = strindex = 0;
877
2600990e
RB
878 if (relocate) {
879 for (i = 1; i < hdr->e_shnum; i++) {
880 if (sechdrs[i].sh_type != SHT_NOBITS
881 && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
882 printk(KERN_ERR "VPE program length %u truncated\n",
883 len);
884 return -ENOEXEC;
885 }
e01402b1 886
2600990e
RB
887 /* Mark all sections sh_addr with their address in the
888 temporary image. */
889 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
e01402b1 890
2600990e
RB
891 /* Internal symbols and strings. */
892 if (sechdrs[i].sh_type == SHT_SYMTAB) {
893 symindex = i;
894 strindex = sechdrs[i].sh_link;
895 strtab = (char *)hdr + sechdrs[strindex].sh_offset;
896 }
e01402b1 897 }
2600990e 898 layout_sections(&mod, hdr, sechdrs, secstrings);
e01402b1
RB
899 }
900
e01402b1 901 v->load_addr = alloc_progmem(mod.core_size);
5408c490
RB
902 if (!v->load_addr)
903 return -ENOMEM;
e01402b1 904
5408c490 905 pr_info("VPE loader: loading to %p\n", v->load_addr);
e01402b1 906
2600990e
RB
907 if (relocate) {
908 for (i = 0; i < hdr->e_shnum; i++) {
909 void *dest;
e01402b1 910
2600990e
RB
911 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
912 continue;
e01402b1 913
2600990e 914 dest = v->load_addr + sechdrs[i].sh_entsize;
e01402b1 915
2600990e
RB
916 if (sechdrs[i].sh_type != SHT_NOBITS)
917 memcpy(dest, (void *)sechdrs[i].sh_addr,
918 sechdrs[i].sh_size);
919 /* Update sh_addr to point to copy in image. */
920 sechdrs[i].sh_addr = (unsigned long)dest;
e01402b1 921
2600990e
RB
922 printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
923 secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
924 }
e01402b1 925
2600990e
RB
926 /* Fix up syms, so that st_value is a pointer to location. */
927 simplify_symbols(sechdrs, symindex, strtab, secstrings,
928 hdr->e_shnum, &mod);
929
930 /* Now do relocations. */
931 for (i = 1; i < hdr->e_shnum; i++) {
932 const char *strtab = (char *)sechdrs[strindex].sh_addr;
933 unsigned int info = sechdrs[i].sh_info;
934
935 /* Not a valid relocation section? */
936 if (info >= hdr->e_shnum)
937 continue;
938
939 /* Don't bother with non-allocated sections */
940 if (!(sechdrs[info].sh_flags & SHF_ALLOC))
941 continue;
942
943 if (sechdrs[i].sh_type == SHT_REL)
944 err = apply_relocations(sechdrs, strtab, symindex, i,
945 &mod);
946 else if (sechdrs[i].sh_type == SHT_RELA)
947 err = apply_relocate_add(sechdrs, strtab, symindex, i,
948 &mod);
949 if (err < 0)
950 return err;
951
952 }
953 } else {
bdf5d42c 954 struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
2600990e 955
bdf5d42c 956 for (i = 0; i < hdr->e_phnum; i++) {
b618336a
KK
957 if (phdr->p_type == PT_LOAD) {
958 memcpy((void *)phdr->p_paddr,
959 (char *)hdr + phdr->p_offset,
960 phdr->p_filesz);
961 memset((void *)phdr->p_paddr + phdr->p_filesz,
962 0, phdr->p_memsz - phdr->p_filesz);
963 }
964 phdr++;
bdf5d42c
RB
965 }
966
967 for (i = 0; i < hdr->e_shnum; i++) {
2600990e
RB
968 /* Internal symbols and strings. */
969 if (sechdrs[i].sh_type == SHT_SYMTAB) {
970 symindex = i;
971 strindex = sechdrs[i].sh_link;
972 strtab = (char *)hdr + sechdrs[strindex].sh_offset;
973
974 /* mark the symtab's address for when we try to find the
975 magic symbols */
976 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
977 }
e01402b1
RB
978 }
979 }
980
981 /* make sure it's physically written out */
982 flush_icache_range((unsigned long)v->load_addr,
983 (unsigned long)v->load_addr + v->len);
984
985 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
2600990e
RB
986 if (v->__start == 0) {
987 printk(KERN_WARNING "VPE loader: program does not contain "
988 "a __start symbol\n");
989 return -ENOEXEC;
990 }
e01402b1 991
2600990e
RB
992 if (v->shared_ptr == NULL)
993 printk(KERN_WARNING "VPE loader: "
994 "program does not contain vpe_shared symbol.\n"
995 " Unable to use AMVP (AP/SP) facilities.\n");
e01402b1
RB
996 }
997
998 printk(" elf loaded\n");
2600990e 999 return 0;
e01402b1
RB
1000}
1001
2600990e
RB
/* Halt the given TC and mark it unallocated, interrupt-exempt and not
   dynamically allocatable, so it can be reused for a new program.  MT
   execution, other VPEs and local interrupts are disabled while the
   core is in MVPE configuration state. */
static void cleanup_tc(struct tc *tc)
{
        unsigned long flags;
        unsigned int mtflags, vpflags;
        int tmp;

        local_irq_save(flags);
        mtflags = dmt();
        vpflags = dvpe();
        /* Put MVPE's into 'configuration state' */
        set_c0_mvpcontrol(MVPCONTROL_VPC);

        settc(tc->index);
        tmp = read_tc_c0_tcstatus();

        /* mark not allocated and not dynamically allocatable */
        tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
        tmp |= TCSTATUS_IXMT;   /* interrupt exempt */
        write_tc_c0_tcstatus(tmp);

        write_tc_c0_tchalt(TCHALT_H);
        /* Instruction hazard barrier so the halt takes effect before we
           leave configuration state. */
        mips_ihb();

        /* bind it to anything other than VPE1 */
//      write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE

        clear_c0_mvpcontrol(MVPCONTROL_VPC);
        evpe(vpflags);
        emt(mtflags);
        local_irq_restore(flags);
}
1033
/* Fetch the current working directory of the calling process into
   'buff' by invoking sys_getcwd() with the user-space address limit
   temporarily lifted, so the syscall may write to a kernel buffer.
   Returns sys_getcwd()'s result (length or negative errno). */
static int getcwd(char *buff, int size)
{
        mm_segment_t old_fs;
        int ret;

        old_fs = get_fs();
        set_fs(KERNEL_DS);

        ret = sys_getcwd(buff, size);

        set_fs(old_fs);

        return ret;
}
1048
1049/* checks VPE is unused and gets ready to load program */
e01402b1
RB
1050static int vpe_open(struct inode *inode, struct file *filp)
1051{
c4c4018b 1052 enum vpe_state state;
2600990e 1053 struct vpe_notifications *not;
07cc0c9e 1054 struct vpe *v;
7558da94 1055 int ret, err = 0;
e01402b1 1056
7558da94 1057 lock_kernel();
07cc0c9e
RB
1058 if (minor != iminor(inode)) {
1059 /* assume only 1 device at the moment. */
2600990e 1060 printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
7558da94
JC
1061 err = -ENODEV;
1062 goto out;
e01402b1
RB
1063 }
1064
07cc0c9e 1065 if ((v = get_vpe(tclimit)) == NULL) {
2600990e 1066 printk(KERN_WARNING "VPE loader: unable to get vpe\n");
7558da94
JC
1067 err = -ENODEV;
1068 goto out;
e01402b1
RB
1069 }
1070
c4c4018b
RB
1071 state = xchg(&v->state, VPE_STATE_INUSE);
1072 if (state != VPE_STATE_UNUSED) {
2600990e 1073 printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
e01402b1 1074
2600990e 1075 list_for_each_entry(not, &v->notify, list) {
07cc0c9e 1076 not->stop(tclimit);
2600990e 1077 }
e01402b1 1078
2600990e 1079 release_progmem(v->load_addr);
07cc0c9e 1080 cleanup_tc(get_tc(tclimit));
e01402b1
RB
1081 }
1082
e01402b1
RB
1083 /* this of-course trashes what was there before... */
1084 v->pbuffer = vmalloc(P_SIZE);
1085 v->plen = P_SIZE;
1086 v->load_addr = NULL;
1087 v->len = 0;
1088
d76b0d9b
DH
1089 v->uid = filp->f_cred->fsuid;
1090 v->gid = filp->f_cred->fsgid;
2600990e
RB
1091
1092#ifdef CONFIG_MIPS_APSP_KSPD
1093 /* get kspd to tell us when a syscall_exit happens */
1094 if (!kspd_events_reqd) {
1095 kspd_notify(&kspd_events);
1096 kspd_events_reqd++;
1097 }
1098#endif
1099
1100 v->cwd[0] = 0;
1101 ret = getcwd(v->cwd, VPE_PATH_MAX);
1102 if (ret < 0)
1103 printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);
1104
1105 v->shared_ptr = NULL;
1106 v->__start = 0;
07cc0c9e 1107
7558da94
JC
1108out:
1109 unlock_kernel();
e01402b1
RB
1110 return 0;
1111}
1112
1113static int vpe_release(struct inode *inode, struct file *filp)
1114{
307bd284 1115 struct vpe *v;
e01402b1 1116 Elf_Ehdr *hdr;
07cc0c9e 1117 int ret = 0;
e01402b1 1118
07cc0c9e
RB
1119 v = get_vpe(tclimit);
1120 if (v == NULL)
e01402b1
RB
1121 return -ENODEV;
1122
e01402b1 1123 hdr = (Elf_Ehdr *) v->pbuffer;
d303f4a1 1124 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
07cc0c9e 1125 if (vpe_elfload(v) >= 0) {
e01402b1 1126 vpe_run(v);
07cc0c9e 1127 } else {
2600990e 1128 printk(KERN_WARNING "VPE loader: ELF load failed.\n");
e01402b1
RB
1129 ret = -ENOEXEC;
1130 }
1131 } else {
2600990e 1132 printk(KERN_WARNING "VPE loader: only elf files are supported\n");
e01402b1
RB
1133 ret = -ENOEXEC;
1134 }
1135
2600990e
RB
1136 /* It's good to be able to run the SP and if it chokes have a look at
1137 the /dev/rt?. But if we reset the pointer to the shared struct we
8ebcfc8b 1138 lose what has happened. So perhaps if garbage is sent to the vpe
2600990e
RB
1139 device, use it as a trigger for the reset. Hopefully a nice
1140 executable will be along shortly. */
1141 if (ret < 0)
1142 v->shared_ptr = NULL;
1143
e01402b1
RB
1144 // cleanup any temp buffers
1145 if (v->pbuffer)
1146 vfree(v->pbuffer);
1147 v->plen = 0;
1148 return ret;
1149}
1150
1151static ssize_t vpe_write(struct file *file, const char __user * buffer,
1152 size_t count, loff_t * ppos)
1153{
e01402b1 1154 size_t ret = count;
307bd284 1155 struct vpe *v;
e01402b1 1156
07cc0c9e
RB
1157 if (iminor(file->f_path.dentry->d_inode) != minor)
1158 return -ENODEV;
1159
1160 v = get_vpe(tclimit);
1161 if (v == NULL)
e01402b1
RB
1162 return -ENODEV;
1163
1164 if (v->pbuffer == NULL) {
2600990e 1165 printk(KERN_ERR "VPE loader: no buffer for program\n");
e01402b1
RB
1166 return -ENOMEM;
1167 }
1168
1169 if ((count + v->len) > v->plen) {
1170 printk(KERN_WARNING
2600990e 1171 "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
e01402b1
RB
1172 return -ENOMEM;
1173 }
1174
1175 count -= copy_from_user(v->pbuffer + v->len, buffer, count);
2600990e 1176 if (!count)
e01402b1 1177 return -EFAULT;
e01402b1
RB
1178
1179 v->len += count;
1180 return ret;
1181}
1182
/* File operations for the /dev/vpe1 loader device: open claims the VPE,
   write stages the ELF image, release loads and runs it. */
static const struct file_operations vpe_fops = {
	.owner = THIS_MODULE,
	.open = vpe_open,
	.release = vpe_release,
	.write = vpe_write
};
1189
1190/* module wrapper entry points */
1191/* give me a vpe */
1192vpe_handle vpe_alloc(void)
1193{
1194 int i;
1195 struct vpe *v;
1196
1197 /* find a vpe */
1198 for (i = 1; i < MAX_VPES; i++) {
1199 if ((v = get_vpe(i)) != NULL) {
1200 v->state = VPE_STATE_INUSE;
1201 return v;
1202 }
1203 }
1204 return NULL;
1205}
1206
1207EXPORT_SYMBOL(vpe_alloc);
1208
1209/* start running from here */
1210int vpe_start(vpe_handle vpe, unsigned long start)
1211{
1212 struct vpe *v = vpe;
1213
1214 v->__start = start;
1215 return vpe_run(v);
1216}
1217
1218EXPORT_SYMBOL(vpe_start);
1219
1220/* halt it for now */
1221int vpe_stop(vpe_handle vpe)
1222{
1223 struct vpe *v = vpe;
1224 struct tc *t;
1225 unsigned int evpe_flags;
1226
1227 evpe_flags = dvpe();
1228
1229 if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) {
1230
1231 settc(t->index);
1232 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1233 }
1234
1235 evpe(evpe_flags);
1236
1237 return 0;
1238}
1239
1240EXPORT_SYMBOL(vpe_stop);
1241
1242/* I've done with it thank you */
1243int vpe_free(vpe_handle vpe)
1244{
1245 struct vpe *v = vpe;
1246 struct tc *t;
1247 unsigned int evpe_flags;
1248
1249 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
1250 return -ENOEXEC;
1251 }
1252
1253 evpe_flags = dvpe();
1254
1255 /* Put MVPE's into 'configuration state' */
340ee4b9 1256 set_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1
RB
1257
1258 settc(t->index);
1259 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1260
7c3a622d 1261 /* halt the TC */
e01402b1 1262 write_tc_c0_tchalt(TCHALT_H);
7c3a622d
NS
1263 mips_ihb();
1264
1265 /* mark the TC unallocated */
1266 write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
e01402b1
RB
1267
1268 v->state = VPE_STATE_UNUSED;
1269
340ee4b9 1270 clear_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1
RB
1271 evpe(evpe_flags);
1272
1273 return 0;
1274}
1275
1276EXPORT_SYMBOL(vpe_free);
1277
1278void *vpe_get_shared(int index)
1279{
1280 struct vpe *v;
1281
2600990e 1282 if ((v = get_vpe(index)) == NULL)
e01402b1 1283 return NULL;
e01402b1
RB
1284
1285 return v->shared_ptr;
1286}
1287
1288EXPORT_SYMBOL(vpe_get_shared);
1289
2600990e
RB
1290int vpe_getuid(int index)
1291{
1292 struct vpe *v;
1293
1294 if ((v = get_vpe(index)) == NULL)
1295 return -1;
1296
1297 return v->uid;
1298}
1299
1300EXPORT_SYMBOL(vpe_getuid);
1301
1302int vpe_getgid(int index)
1303{
1304 struct vpe *v;
1305
1306 if ((v = get_vpe(index)) == NULL)
1307 return -1;
1308
1309 return v->gid;
1310}
1311
1312EXPORT_SYMBOL(vpe_getgid);
1313
1314int vpe_notify(int index, struct vpe_notifications *notify)
1315{
1316 struct vpe *v;
1317
1318 if ((v = get_vpe(index)) == NULL)
1319 return -1;
1320
1321 list_add(&notify->list, &v->notify);
1322 return 0;
1323}
1324
1325EXPORT_SYMBOL(vpe_notify);
1326
1327char *vpe_getcwd(int index)
1328{
1329 struct vpe *v;
1330
1331 if ((v = get_vpe(index)) == NULL)
1332 return NULL;
1333
1334 return v->cwd;
1335}
1336
1337EXPORT_SYMBOL(vpe_getcwd);
1338
#ifdef CONFIG_MIPS_APSP_KSPD
/* KSPD callback: tear down the TC when the SP program exits. */
static void kspd_sp_exit( int sp_id)
{
	cleanup_tc(get_tc(sp_id));
}
#endif
1345
736fad17
KS
/*
 * sysfs 'kill' attribute: forcibly stop the SP program on the AP/SP TC.
 * Notifies listeners first, then frees the program image, halts the TC
 * and releases the VPE.  buf/len are ignored; any write triggers the kill.
 */
static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct vpe *vpe = get_vpe(tclimit);
	struct vpe_notifications *not;

	/* give registered users a chance to shut down first */
	list_for_each_entry(not, &vpe->notify, list) {
		not->stop(tclimit);
	}

	release_progmem(vpe->load_addr);
	cleanup_tc(get_tc(tclimit));
	vpe_stop(vpe);
	vpe_free(vpe);

	return len;
}
1363
736fad17
KS
1364static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr,
1365 char *buf)
41790e04
RB
1366{
1367 struct vpe *vpe = get_vpe(tclimit);
1368
1369 return sprintf(buf, "%d\n", vpe->ntcs);
1370}
1371
736fad17
KS
1372static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr,
1373 const char *buf, size_t len)
41790e04
RB
1374{
1375 struct vpe *vpe = get_vpe(tclimit);
1376 unsigned long new;
1377 char *endp;
1378
1379 new = simple_strtoul(buf, &endp, 0);
1380 if (endp == buf)
1381 goto out_einval;
1382
1383 if (new == 0 || new > (hw_tcs - tclimit))
1384 goto out_einval;
1385
1386 vpe->ntcs = new;
1387
1388 return len;
1389
1390out_einval:
52a7a27c 1391 return -EINVAL;
41790e04
RB
1392}
1393
/* sysfs attributes exposed on the vpe1 class device: write-only 'kill'
   and read/write 'ntcs'. */
static struct device_attribute vpe_class_attributes[] = {
	__ATTR(kill, S_IWUSR, NULL, store_kill),
	__ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs),
	{}
};
1399
/*
 * Class release hook.  The only device in this class, vpe_device, is a
 * file-scope object rather than a kmalloc() allocation, so there is
 * nothing to free here.  The previous kfree(cd) handed a non-heap
 * pointer to the allocator, which corrupts the heap if ever invoked.
 */
static void vpe_device_release(struct device *cd)
{
	/* intentionally empty: vpe_device is statically allocated */
}
1404
/* "vpe" device class: attaches the kill/ntcs attributes and the release
   hook to every device registered in it (currently just vpe_device). */
struct class vpe_class = {
	.name = "vpe",
	.owner = THIS_MODULE,
	.dev_release = vpe_device_release,
	.dev_attrs = vpe_class_attributes,
};
1411
/* The single loader device instance, registered as "vpe1" in vpe_module_init(). */
struct device vpe_device;
e01402b1
RB
1414static int __init vpe_module_init(void)
1415{
07cc0c9e 1416 unsigned int mtflags, vpflags;
07cc0c9e 1417 unsigned long flags, val;
e01402b1
RB
1418 struct vpe *v = NULL;
1419 struct tc *t;
41790e04 1420 int tc, err;
e01402b1
RB
1421
1422 if (!cpu_has_mipsmt) {
1423 printk("VPE loader: not a MIPS MT capable processor\n");
1424 return -ENODEV;
1425 }
1426
07cc0c9e
RB
1427 if (vpelimit == 0) {
1428 printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
1429 "initializing VPE loader.\nPass maxvpes=<n> argument as "
1430 "kernel argument\n");
1431
1432 return -ENODEV;
1433 }
1434
1435 if (tclimit == 0) {
1436 printk(KERN_WARNING "No TCs reserved for AP/SP, not "
1437 "initializing VPE loader.\nPass maxtcs=<n> argument as "
1438 "kernel argument\n");
1439
1440 return -ENODEV;
1441 }
1442
682e852e
AD
1443 major = register_chrdev(0, module_name, &vpe_fops);
1444 if (major < 0) {
e01402b1 1445 printk("VPE loader: unable to register character device\n");
307bd284 1446 return major;
e01402b1
RB
1447 }
1448
41790e04
RB
1449 err = class_register(&vpe_class);
1450 if (err) {
1451 printk(KERN_ERR "vpe_class registration failed\n");
27a3bbaf
RB
1452 goto out_chrdev;
1453 }
41790e04 1454
736fad17 1455 device_initialize(&vpe_device);
41790e04
RB
1456 vpe_device.class = &vpe_class,
1457 vpe_device.parent = NULL,
1bb5beb4 1458 dev_set_name(&vpe_device, "vpe1");
41790e04 1459 vpe_device.devt = MKDEV(major, minor);
736fad17 1460 err = device_add(&vpe_device);
41790e04
RB
1461 if (err) {
1462 printk(KERN_ERR "Adding vpe_device failed\n");
1463 goto out_class;
1464 }
27a3bbaf 1465
07cc0c9e
RB
1466 local_irq_save(flags);
1467 mtflags = dmt();
1468 vpflags = dvpe();
e01402b1
RB
1469
1470 /* Put MVPE's into 'configuration state' */
340ee4b9 1471 set_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1
RB
1472
1473 /* dump_mtregs(); */
1474
e01402b1 1475 val = read_c0_mvpconf0();
07cc0c9e
RB
1476 hw_tcs = (val & MVPCONF0_PTC) + 1;
1477 hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
1478
1479 for (tc = tclimit; tc < hw_tcs; tc++) {
1480 /*
1481 * Must re-enable multithreading temporarily or in case we
1482 * reschedule send IPIs or similar we might hang.
1483 */
1484 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1485 evpe(vpflags);
1486 emt(mtflags);
1487 local_irq_restore(flags);
1488 t = alloc_tc(tc);
1489 if (!t) {
1490 err = -ENOMEM;
1491 goto out;
1492 }
1493
1494 local_irq_save(flags);
1495 mtflags = dmt();
1496 vpflags = dvpe();
1497 set_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1
RB
1498
1499 /* VPE's */
07cc0c9e
RB
1500 if (tc < hw_tcs) {
1501 settc(tc);
e01402b1 1502
07cc0c9e 1503 if ((v = alloc_vpe(tc)) == NULL) {
e01402b1 1504 printk(KERN_WARNING "VPE: unable to allocate VPE\n");
07cc0c9e
RB
1505
1506 goto out_reenable;
e01402b1
RB
1507 }
1508
41790e04
RB
1509 v->ntcs = hw_tcs - tclimit;
1510
2600990e
RB
1511 /* add the tc to the list of this vpe's tc's. */
1512 list_add(&t->tc, &v->tc);
e01402b1
RB
1513
1514 /* deactivate all but vpe0 */
07cc0c9e 1515 if (tc >= tclimit) {
e01402b1
RB
1516 unsigned long tmp = read_vpe_c0_vpeconf0();
1517
1518 tmp &= ~VPECONF0_VPA;
1519
1520 /* master VPE */
1521 tmp |= VPECONF0_MVP;
1522 write_vpe_c0_vpeconf0(tmp);
1523 }
1524
1525 /* disable multi-threading with TC's */
1526 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
1527
07cc0c9e 1528 if (tc >= vpelimit) {
2600990e
RB
1529 /*
1530 * Set config to be the same as vpe0,
1531 * particularly kseg0 coherency alg
1532 */
e01402b1
RB
1533 write_vpe_c0_config(read_c0_config());
1534 }
e01402b1
RB
1535 }
1536
1537 /* TC's */
1538 t->pvpe = v; /* set the parent vpe */
1539
07cc0c9e 1540 if (tc >= tclimit) {
e01402b1
RB
1541 unsigned long tmp;
1542
07cc0c9e 1543 settc(tc);
e01402b1 1544
2600990e
RB
1545 /* Any TC that is bound to VPE0 gets left as is - in case
1546 we are running SMTC on VPE0. A TC that is bound to any
1547 other VPE gets bound to VPE0, ideally I'd like to make
1548 it homeless but it doesn't appear to let me bind a TC
1549 to a non-existent VPE. Which is perfectly reasonable.
1550
1551 The (un)bound state is visible to an EJTAG probe so may
1552 notify GDB...
1553 */
1554
1555 if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
1556 /* tc is bound >vpe0 */
1557 write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
1558
1559 t->pvpe = get_vpe(0); /* set the parent vpe */
1560 }
e01402b1 1561
7c3a622d
NS
1562 /* halt the TC */
1563 write_tc_c0_tchalt(TCHALT_H);
1564 mips_ihb();
1565
e01402b1
RB
1566 tmp = read_tc_c0_tcstatus();
1567
2600990e 1568 /* mark not activated and not dynamically allocatable */
e01402b1
RB
1569 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1570 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
1571 write_tc_c0_tcstatus(tmp);
e01402b1
RB
1572 }
1573 }
1574
07cc0c9e 1575out_reenable:
e01402b1 1576 /* release config state */
340ee4b9 1577 clear_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1 1578
07cc0c9e
RB
1579 evpe(vpflags);
1580 emt(mtflags);
1581 local_irq_restore(flags);
1582
2600990e
RB
1583#ifdef CONFIG_MIPS_APSP_KSPD
1584 kspd_events.kspd_sp_exit = kspd_sp_exit;
1585#endif
e01402b1 1586 return 0;
27a3bbaf 1587
41790e04
RB
1588out_class:
1589 class_unregister(&vpe_class);
27a3bbaf
RB
1590out_chrdev:
1591 unregister_chrdev(major, module_name);
1592
07cc0c9e 1593out:
27a3bbaf 1594 return err;
e01402b1
RB
1595}
1596
1597static void __exit vpe_module_exit(void)
1598{
1599 struct vpe *v, *n;
1600
1601 list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
1602 if (v->state != VPE_STATE_UNUSED) {
1603 release_vpe(v);
1604 }
1605 }
1606
736fad17 1607 device_del(&vpe_device);
e01402b1
RB
1608 unregister_chrdev(major, module_name);
1609}
1610
/* Module glue: entry/exit points and metadata. */
module_init(vpe_module_init);
module_exit(vpe_module_exit);
MODULE_DESCRIPTION("MIPS VPE Loader");
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");