arch/mips/kernel/vpe.c
1 /*
2 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
3 *
4 * This program is free software; you can distribute it and/or modify it
5 * under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 *
17 */
18
19 /*
20 * VPE support module
21 *
22 * Provides support for loading a MIPS SP program on VPE1.
23  * The SP environment is rather simple: no TLBs. It needs to be relocatable
24 * (or partially linked). You should initialise your stack in the startup
25 * code. This loader looks for the symbol __start and sets up
26 * execution to resume from there. The MIPS SDE kit contains suitable examples.
27 *
28 * To load and run, simply cat a SP 'program file' to /dev/vpe1.
29  * e.g. cat spapp >/dev/vpe1.
30 *
31 * You'll need to have the following device files.
32 * mknod /dev/vpe0 c 63 0
33 * mknod /dev/vpe1 c 63 1
34 */
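/*
 * Usage sketch (illustrative only, not part of this driver): the userspace
 * program below does the same job as "cat spapp >/dev/vpe1", streaming an
 * SP ELF image into the device node; closing the node triggers the load
 * and run in vpe_release(). The file name "spapp" and the buffer size are
 * examples, not anything this driver requires.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int in = open("spapp", O_RDONLY);
 *		int out = open("/dev/vpe1", O_WRONLY);
 *
 *		if (in < 0 || out < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		while ((n = read(in, buf, sizeof(buf))) > 0)
 *			if (write(out, buf, n) != n) {
 *				perror("write");
 *				return 1;
 *			}
 *		close(in);
 *		close(out);
 *		return 0;
 *	}
 */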
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/fs.h>
39 #include <linux/init.h>
40 #include <asm/uaccess.h>
41 #include <linux/slab.h>
42 #include <linux/list.h>
43 #include <linux/vmalloc.h>
44 #include <linux/elf.h>
45 #include <linux/seq_file.h>
46 #include <linux/syscalls.h>
47 #include <linux/moduleloader.h>
48 #include <linux/interrupt.h>
49 #include <linux/poll.h>
50 #include <linux/bootmem.h>
51 #include <asm/mipsregs.h>
52 #include <asm/mipsmtregs.h>
53 #include <asm/cacheflush.h>
54 #include <asm/atomic.h>
55 #include <asm/cpu.h>
56 #include <asm/processor.h>
57 #include <asm/system.h>
58
59 typedef void *vpe_handle;
60
61 #ifndef ARCH_SHF_SMALL
62 #define ARCH_SHF_SMALL 0
63 #endif
64
65 /* If this is set, the section belongs in the init part of the module */
66 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
67
68 static char module_name[] = "vpe";
69 static int major;
70
71 /* grab the likely amount of memory we will need. */
72 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
73 #define P_SIZE (2 * 1024 * 1024)
74 #else
75 /* add an overhead to the max kmalloc size for non-stripped symbols/etc */
76 #define P_SIZE (256 * 1024)
77 #endif
78
79 #define MAX_VPES 16
80
81 enum vpe_state {
82 VPE_STATE_UNUSED = 0,
83 VPE_STATE_INUSE,
84 VPE_STATE_RUNNING
85 };
86
87 enum tc_state {
88 TC_STATE_UNUSED = 0,
89 TC_STATE_INUSE,
90 TC_STATE_RUNNING,
91 TC_STATE_DYNAMIC
92 };
93
94 struct vpe {
95 enum vpe_state state;
96
97 /* (device) minor associated with this vpe */
98 int minor;
99
100 /* elfloader stuff */
101 void *load_addr;
102 u32 len;
103 char *pbuffer;
104 u32 plen;
105
106 unsigned long __start;
107
108 /* tc's associated with this vpe */
109 struct list_head tc;
110
111 /* The list of vpe's */
112 struct list_head list;
113
114 /* shared symbol address */
115 void *shared_ptr;
116 };
117
118 struct tc {
119 enum tc_state state;
120 int index;
121
122 /* parent VPE */
123 struct vpe *pvpe;
124
125 /* The list of TC's with this VPE */
126 struct list_head tc;
127
128 /* The global list of tc's */
129 struct list_head list;
130 };
131
132 struct vpecontrol_ {
133 /* Virtual processing elements */
134 struct list_head vpe_list;
135
136 /* Thread contexts */
137 struct list_head tc_list;
138 } vpecontrol;
139
140 static void release_progmem(void *ptr);
141 static void dump_vpe(struct vpe * v);
142 extern void save_gp_address(unsigned int secbase, unsigned int rel);
143
144 /* get the vpe associated with this minor */
145 struct vpe *get_vpe(int minor)
146 {
147 struct vpe *v;
148
149 list_for_each_entry(v, &vpecontrol.vpe_list, list) {
150 if (v->minor == minor)
151 return v;
152 }
153
154 printk(KERN_DEBUG "VPE: get_vpe minor %d not found\n", minor);
155 return NULL;
156 }
157
158 /* get the tc associated with this index */
159 struct tc *get_tc(int index)
160 {
161 struct tc *t;
162
163 list_for_each_entry(t, &vpecontrol.tc_list, list) {
164 if (t->index == index)
165 return t;
166 }
167
168 printk(KERN_DEBUG "VPE: get_tc index %d not found\n", index);
169
170 return NULL;
171 }
172
173 struct tc *get_tc_unused(void)
174 {
175 struct tc *t;
176
177 list_for_each_entry(t, &vpecontrol.tc_list, list) {
178 if (t->state == TC_STATE_UNUSED)
179 return t;
180 }
181
182 printk(KERN_DEBUG "VPE: All TC's are in use\n");
183
184 return NULL;
185 }
186
187 /* allocate a vpe and associate it with this minor (or index) */
188 struct vpe *alloc_vpe(int minor)
189 {
190 struct vpe *v;
191
192 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
193 printk(KERN_WARNING "VPE: alloc_vpe no mem\n");
194 return NULL;
195 }
196
197 INIT_LIST_HEAD(&v->tc);
198 list_add_tail(&v->list, &vpecontrol.vpe_list);
199
200 v->minor = minor;
201 return v;
202 }
203
204 /* allocate a tc. At startup only tc0 is running, all others can be halted. */
205 struct tc *alloc_tc(int index)
206 {
207 struct tc *t;
208
209 if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) {
210 printk(KERN_WARNING "VPE: alloc_tc no mem\n");
211 return NULL;
212 }
213
214 INIT_LIST_HEAD(&t->tc);
215 list_add_tail(&t->list, &vpecontrol.tc_list);
216
217 t->index = index;
218
219 return t;
220 }
221
222 /* clean up and free everything */
223 void release_vpe(struct vpe *v)
224 {
225 list_del(&v->list);
226 if (v->load_addr)
227 release_progmem(v);
228 kfree(v);
229 }
230
231 void dump_mtregs(void)
232 {
233 unsigned long val;
234
235 val = read_c0_config3();
236 printk("config3 0x%lx MT %ld\n", val,
237 (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
238
239 val = read_c0_mvpconf0();
240 printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
241 (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
242 val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
243
244 val = read_c0_mvpcontrol();
245 printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
246 (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
247 (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
248 (val & MVPCONTROL_EVP));
249
250 val = read_c0_vpeconf0();
251 printk("VPEConf0 0x%lx MVP %ld\n", val,
252 (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT);
253 }
254
255 /* Find some VPE program space */
256 static void *alloc_progmem(u32 len)
257 {
258 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
259 /* this means you must tell linux to use less memory than you physically have */
260 return (void *)((max_pfn * PAGE_SIZE) + KSEG0);
261 #else
262         // simply grab some mem for now
263 return kmalloc(len, GFP_KERNEL);
264 #endif
265 }
266
267 static void release_progmem(void *ptr)
268 {
269 #ifndef CONFIG_MIPS_VPE_LOADER_TOM
270 kfree(ptr);
271 #endif
272 }
273
274 /* Update size with this section: return offset. */
275 static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
276 {
277 long ret;
278
279 ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
280 *size = ret + sechdr->sh_size;
281 return ret;
282 }
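/*
 * Illustrative numbers (an assumption, not taken from a real image): with
 * *size currently 0x62 and a section whose sh_addralign is 0x10 and whose
 * sh_size is 0x30, the section is placed at offset 0x70 and *size is
 * advanced to 0xa0.
 */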
283
284 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
285 might -- code, read-only data, read-write data, small data. Tally
286 sizes, and place the offsets into sh_entsize fields: high bit means it
287 belongs in init. */
288 static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
289 Elf_Shdr * sechdrs, const char *secstrings)
290 {
291 static unsigned long const masks[][2] = {
292 /* NOTE: all executable code must be the first section
293 * in this array; otherwise modify the text_size
294 * finder in the two loops below */
295 {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
296 {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
297 {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
298 {ARCH_SHF_SMALL | SHF_ALLOC, 0}
299 };
300 unsigned int m, i;
301
302 for (i = 0; i < hdr->e_shnum; i++)
303 sechdrs[i].sh_entsize = ~0UL;
304
305 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
306 for (i = 0; i < hdr->e_shnum; ++i) {
307 Elf_Shdr *s = &sechdrs[i];
308
309 // || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
310 if ((s->sh_flags & masks[m][0]) != masks[m][0]
311 || (s->sh_flags & masks[m][1])
312 || s->sh_entsize != ~0UL)
313 continue;
314 s->sh_entsize = get_offset(&mod->core_size, s);
315 }
316
317 if (m == 0)
318 mod->core_text_size = mod->core_size;
319
320 }
321 }
322
323
324 /* from module-elf32.c, but subverted a little */
325
326 struct mips_hi16 {
327 struct mips_hi16 *next;
328 Elf32_Addr *addr;
329 Elf32_Addr value;
330 };
331
332 static struct mips_hi16 *mips_hi16_list;
333 static unsigned int gp_offs, gp_addr;
334
335 static int apply_r_mips_none(struct module *me, uint32_t *location,
336 Elf32_Addr v)
337 {
338 return 0;
339 }
340
341 static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
342 Elf32_Addr v)
343 {
344 int rel;
345
346 if( !(*location & 0xffff) ) {
347 rel = (int)v - gp_addr;
348 }
349 else {
350 /* .sbss + gp(relative) + offset */
351 /* kludge! */
352 rel = (int)(short)((int)v + gp_offs +
353 (int)(short)(*location & 0xffff) - gp_addr);
354 }
355
356 if( (rel > 32768) || (rel < -32768) ) {
357 printk(KERN_ERR
358 "apply_r_mips_gprel16: relative address out of range 0x%x %d\n",
359 rel, rel);
360 return -ENOEXEC;
361 }
362
363 *location = (*location & 0xffff0000) | (rel & 0xffff);
364
365 return 0;
366 }
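/*
 * Illustrative case for the simple branch above (values are assumptions):
 * if the instruction's offset field is zero and the small-data symbol
 * relocates to v = 0x00419010 while _gp gave gp_addr = 0x00418ff0, then
 * rel = 0x20, which fits in the signed 16-bit field and is patched into
 * the low half of the instruction.
 */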
367
368 static int apply_r_mips_pc16(struct module *me, uint32_t *location,
369 Elf32_Addr v)
370 {
371 int rel;
372 rel = (((unsigned int)v - (unsigned int)location));
373 rel >>= 2; // because the offset is in _instructions_ not bytes.
374 rel -= 1; // and one instruction less due to the branch delay slot.
375
376 if( (rel > 32768) || (rel < -32768) ) {
377 printk(KERN_ERR
378 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
379 return -ENOEXEC;
380 }
381
382 *location = (*location & 0xffff0000) | (rel & 0xffff);
383
384 return 0;
385 }
386
387 static int apply_r_mips_32(struct module *me, uint32_t *location,
388 Elf32_Addr v)
389 {
390 *location += v;
391
392 return 0;
393 }
394
395 static int apply_r_mips_26(struct module *me, uint32_t *location,
396 Elf32_Addr v)
397 {
398 if (v % 4) {
399 printk(KERN_ERR "module %s: dangerous relocation mod4\n", me->name);
400 return -ENOEXEC;
401 }
402
403 /*
404 * Not desperately convinced this is a good check of an overflow condition
405 * anyway. But it gets in the way of handling undefined weak symbols which
406 * we want to set to zero.
407 * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
408 * printk(KERN_ERR
409 * "module %s: relocation overflow\n",
410 * me->name);
411 * return -ENOEXEC;
412 * }
413 */
414
415 *location = (*location & ~0x03ffffff) |
416 ((*location + (v >> 2)) & 0x03ffffff);
417 return 0;
418 }
419
420 static int apply_r_mips_hi16(struct module *me, uint32_t *location,
421 Elf32_Addr v)
422 {
423 struct mips_hi16 *n;
424
425 /*
426 * We cannot relocate this one now because we don't know the value of
427 * the carry we need to add. Save the information, and let LO16 do the
428 * actual relocation.
429 */
430 n = kmalloc(sizeof *n, GFP_KERNEL);
431 if (!n)
432 return -ENOMEM;
433
434 n->addr = location;
435 n->value = v;
436 n->next = mips_hi16_list;
437 mips_hi16_list = n;
438
439 return 0;
440 }
441
442 static int apply_r_mips_lo16(struct module *me, uint32_t *location,
443 Elf32_Addr v)
444 {
445 unsigned long insnlo = *location;
446 Elf32_Addr val, vallo;
447
448 /* Sign extend the addend we extract from the lo insn. */
449 vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
450
451 if (mips_hi16_list != NULL) {
452 struct mips_hi16 *l;
453
454 l = mips_hi16_list;
455 while (l != NULL) {
456 struct mips_hi16 *next;
457 unsigned long insn;
458
459 /*
460 * The value for the HI16 had best be the same.
461 */
462 if (v != l->value) {
463 printk("%d != %d\n", v, l->value);
464 goto out_danger;
465 }
466
467
468 /*
469 * Do the HI16 relocation. Note that we actually don't
470 * need to know anything about the LO16 itself, except
471 * where to find the low 16 bits of the addend needed
472 * by the LO16.
473 */
474 insn = *l->addr;
475 val = ((insn & 0xffff) << 16) + vallo;
476 val += v;
477
478 /*
479 * Account for the sign extension that will happen in
480 * the low bits.
481 */
482 val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
483
484 insn = (insn & ~0xffff) | val;
485 *l->addr = insn;
486
487 next = l->next;
488 kfree(l);
489 l = next;
490 }
491
492 mips_hi16_list = NULL;
493 }
494
495 /*
496 * Ok, we're done with the HI16 relocs. Now deal with the LO16.
497 */
498 val = v + vallo;
499 insnlo = (insnlo & ~0xffff) | (val & 0xffff);
500 *location = insnlo;
501
502 return 0;
503
504 out_danger:
505         printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
506
507 return -ENOEXEC;
508 }
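/*
 * Worked example of the HI16/LO16 pairing (values are illustrative):
 * take a lui/addiu pair whose encoded addends are hi16 = 0x0000 and
 * lo16 = 0xfffc (an addend of -4), relocated against v = 0x00420000.
 * The LO16 patch is (v - 4) & 0xffff = 0xfffc.  Because bit 15 of that
 * low half is set, the addiu will sign-extend it to -4 at run time, so
 * the HI16 patch gets the carry: ((v - 4) >> 16) + 1 = 0x0042.  lui then
 * loads 0x00420000 and the addiu subtracts 4, giving 0x0041fffc.
 */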
509
510 static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
511 Elf32_Addr v) = {
512 [R_MIPS_NONE] = apply_r_mips_none,
513 [R_MIPS_32] = apply_r_mips_32,
514 [R_MIPS_26] = apply_r_mips_26,
515 [R_MIPS_HI16] = apply_r_mips_hi16,
516 [R_MIPS_LO16] = apply_r_mips_lo16,
517 [R_MIPS_GPREL16] = apply_r_mips_gprel16,
518 [R_MIPS_PC16] = apply_r_mips_pc16
519 };
520
521
522 int apply_relocations(Elf32_Shdr *sechdrs,
523 const char *strtab,
524 unsigned int symindex,
525 unsigned int relsec,
526 struct module *me)
527 {
528 Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
529 Elf32_Sym *sym;
530 uint32_t *location;
531 unsigned int i;
532 Elf32_Addr v;
533 int res;
534
535 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
536 Elf32_Word r_info = rel[i].r_info;
537
538 /* This is where to make the change */
539 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
540 + rel[i].r_offset;
541 /* This is the symbol it is referring to */
542 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
543 + ELF32_R_SYM(r_info);
544
545 if (!sym->st_value) {
546 printk(KERN_DEBUG "%s: undefined weak symbol %s\n",
547 me->name, strtab + sym->st_name);
548                         /* just print the warning, don't barf */
549 }
550
551 v = sym->st_value;
552
553 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
554 if( res ) {
555 printk(KERN_DEBUG
556 "relocation error 0x%x sym refer <%s> value 0x%x "
557 "type 0x%x r_info 0x%x\n",
558 (unsigned int)location, strtab + sym->st_name, v,
559 r_info, ELF32_R_TYPE(r_info));
560 }
561
562 if (res)
563 return res;
564 }
565
566 return 0;
567 }
568
569 void save_gp_address(unsigned int secbase, unsigned int rel)
570 {
571 gp_addr = secbase + rel;
572 gp_offs = gp_addr - (secbase & 0xffff0000);
573 }
574 /* end module-elf32.c */
575
576
577
578 /* Change all symbols so that st_value encodes the pointer directly. */
579 static int simplify_symbols(Elf_Shdr * sechdrs,
580 unsigned int symindex,
581 const char *strtab,
582 const char *secstrings,
583 unsigned int nsecs, struct module *mod)
584 {
585 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
586 unsigned long secbase, bssbase = 0;
587 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
588 int ret = 0, size;
589
590 /* find the .bss section for COMMON symbols */
591 for (i = 0; i < nsecs; i++) {
592 if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0)
593 bssbase = sechdrs[i].sh_addr;
594 }
595
596 for (i = 1; i < n; i++) {
597 switch (sym[i].st_shndx) {
598 case SHN_COMMON:
599 /* Allocate space for the symbol in the .bss section. st_value is currently size.
600 We want it to have the address of the symbol. */
601
602 size = sym[i].st_value;
603 sym[i].st_value = bssbase;
604
605 bssbase += size;
606 break;
607
608 case SHN_ABS:
609 /* Don't need to do anything */
610 break;
611
612 case SHN_UNDEF:
613 /* ret = -ENOENT; */
614 break;
615
616 case SHN_MIPS_SCOMMON:
617
618 printk(KERN_DEBUG
619 "simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
620 strtab + sym[i].st_name, sym[i].st_shndx);
621
622 // .sbss section
623 break;
624
625 default:
626 secbase = sechdrs[sym[i].st_shndx].sh_addr;
627
628 if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) {
629 save_gp_address(secbase, sym[i].st_value);
630 }
631
632 sym[i].st_value += secbase;
633 break;
634 }
635
636 }
637
638 return ret;
639 }
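/*
 * Example of the SHN_COMMON handling above (addresses are illustrative):
 * if .bss was laid out at 0x80400000 and a COMMON symbol arrives with
 * st_value (currently its size) of 0x40, the symbol is assigned address
 * 0x80400000 and bssbase moves on to 0x80400040 for the next COMMON symbol.
 */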
640
641 #ifdef DEBUG_ELFLOADER
642 static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
643 const char *strtab, struct module *mod)
644 {
645 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
646 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
647
648 printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n);
649 for (i = 1; i < n; i++) {
650 printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i,
651 strtab + sym[i].st_name, sym[i].st_value);
652 }
653 }
654 #endif
655
656 static void dump_tc(struct tc *t)
657 {
658 printk(KERN_WARNING "VPE: TC index %d TCStatus 0x%lx halt 0x%lx\n",
659 t->index, read_tc_c0_tcstatus(), read_tc_c0_tchalt());
660 printk(KERN_WARNING "VPE: tcrestart 0x%lx\n", read_tc_c0_tcrestart());
661 }
662
663 static void dump_tclist(void)
664 {
665 struct tc *t;
666
667 list_for_each_entry(t, &vpecontrol.tc_list, list) {
668 dump_tc(t);
669 }
670 }
671
672 /* We are prepared, so configure and start the VPE... */
673 int vpe_run(struct vpe * v)
674 {
675 unsigned long val;
676 struct tc *t;
677
678 /* check we are the Master VPE */
679 val = read_c0_vpeconf0();
680 if (!(val & VPECONF0_MVP)) {
681 printk(KERN_WARNING
682 "VPE: only Master VPE's are allowed to configure MT\n");
683 return -1;
684 }
685
686 /* disable MT (using dvpe) */
687 dvpe();
688
689 /* Put MVPE's into 'configuration state' */
690 set_c0_mvpcontrol(MVPCONTROL_VPC);
691
692 if (!list_empty(&v->tc)) {
693 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
694 printk(KERN_WARNING "VPE: TC %d is already in use.\n",
695 t->index);
696 return -ENOEXEC;
697 }
698 } else {
699 printk(KERN_WARNING "VPE: No TC's associated with VPE %d\n",
700 v->minor);
701 return -ENOEXEC;
702 }
703
704 settc(t->index);
705
706 val = read_vpe_c0_vpeconf0();
707
708 /* should check it is halted, and not activated */
709 if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
710 printk(KERN_WARNING "VPE: TC %d is already doing something!\n",
711 t->index);
712
713 dump_tclist();
714 return -ENOEXEC;
715 }
716
717 /* Write the address we want it to start running from in the TCPC register. */
718 write_tc_c0_tcrestart((unsigned long)v->__start);
719
720 /* write the sivc_info address to tccontext */
721 write_tc_c0_tccontext((unsigned long)0);
722
723 /* Set up the XTC bit in vpeconf0 to point at our tc */
724 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (t->index << VPECONF0_XTC_SHIFT));
725
726 /* mark the TC as activated, not interrupt exempt and not dynamically allocatable */
727 val = read_tc_c0_tcstatus();
728 val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
729 write_tc_c0_tcstatus(val);
730
731 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
732
733 /* set up VPE1 */
734 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); // no multiple TC's
735 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); // enable this VPE
736
737 /*
738 * The sde-kit passes 'memsize' to __start in $a3, so set something
739 * here...
740 * Or set $a3 (register 7) to zero and define DFLT_STACK_SIZE and
741 * DFLT_HEAP_SIZE when you compile your program
742 */
743
744 mttgpr(7, 0);
745
746 /* set config to be the same as vpe0, particularly kseg0 coherency alg */
747 write_vpe_c0_config(read_c0_config());
748
749 /* clear out any left overs from a previous program */
750 write_vpe_c0_cause(0);
751
752 /* take system out of configuration state */
753 clear_c0_mvpcontrol(MVPCONTROL_VPC);
754
755 /* clear interrupts enabled IE, ERL, EXL, and KSU from c0 status */
756 write_vpe_c0_status(read_vpe_c0_status() & ~(ST0_ERL | ST0_KSU | ST0_IE | ST0_EXL));
757
758 /* set it running */
759 evpe(EVPE_ENABLE);
760
761 return 0;
762 }
763
764 static unsigned long find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
765 unsigned int symindex, const char *strtab,
766 struct module *mod)
767 {
768 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
769 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
770
771 for (i = 1; i < n; i++) {
772 if (strcmp(strtab + sym[i].st_name, "__start") == 0) {
773 v->__start = sym[i].st_value;
774 }
775
776 if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) {
777 v->shared_ptr = (void *)sym[i].st_value;
778 }
779 }
780
781 return 0;
782 }
783
784 /*
785  * Allocates a VPE with some program code space (the load address), copies
786  * the contents of the program (p)buffer, performing relocations etc.,
787  * and frees it when finished.
788 */
789 int vpe_elfload(struct vpe * v)
790 {
791 Elf_Ehdr *hdr;
792 Elf_Shdr *sechdrs;
793 long err = 0;
794 char *secstrings, *strtab = NULL;
795 unsigned int len, i, symindex = 0, strindex = 0;
796
797 struct module mod; // so we can re-use the relocations code
798
799 memset(&mod, 0, sizeof(struct module));
800 strcpy(mod.name, "VPE dummy prog module");
801
802 hdr = (Elf_Ehdr *) v->pbuffer;
803 len = v->plen;
804
805 /* Sanity checks against insmoding binaries or wrong arch,
806 weird elf version */
807 if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
808 || hdr->e_type != ET_REL || !elf_check_arch(hdr)
809 || hdr->e_shentsize != sizeof(*sechdrs)) {
810 printk(KERN_WARNING
811 "VPE program, wrong arch or weird elf version\n");
812
813 return -ENOEXEC;
814 }
815
816 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
817 printk(KERN_ERR "VPE program length %u truncated\n", len);
818 return -ENOEXEC;
819 }
820
821 /* Convenience variables */
822 sechdrs = (void *)hdr + hdr->e_shoff;
823 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
824 sechdrs[0].sh_addr = 0;
825
826 /* And these should exist, but gcc whinges if we don't init them */
827 symindex = strindex = 0;
828
829 for (i = 1; i < hdr->e_shnum; i++) {
830
831 if (sechdrs[i].sh_type != SHT_NOBITS
832 && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
833 printk(KERN_ERR "VPE program length %u truncated\n",
834 len);
835 return -ENOEXEC;
836 }
837
838 /* Mark all sections sh_addr with their address in the
839 temporary image. */
840 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
841
842 /* Internal symbols and strings. */
843 if (sechdrs[i].sh_type == SHT_SYMTAB) {
844 symindex = i;
845 strindex = sechdrs[i].sh_link;
846 strtab = (char *)hdr + sechdrs[strindex].sh_offset;
847 }
848 }
849
850 layout_sections(&mod, hdr, sechdrs, secstrings);
851
852 v->load_addr = alloc_progmem(mod.core_size);
853 memset(v->load_addr, 0, mod.core_size);
854
855 printk("VPE elf_loader: loading to %p\n", v->load_addr);
856
857 for (i = 0; i < hdr->e_shnum; i++) {
858 void *dest;
859
860 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
861 continue;
862
863 dest = v->load_addr + sechdrs[i].sh_entsize;
864
865 if (sechdrs[i].sh_type != SHT_NOBITS)
866 memcpy(dest, (void *)sechdrs[i].sh_addr,
867 sechdrs[i].sh_size);
868 /* Update sh_addr to point to copy in image. */
869 sechdrs[i].sh_addr = (unsigned long)dest;
870 }
871
872 /* Fix up syms, so that st_value is a pointer to location. */
873 err =
874 simplify_symbols(sechdrs, symindex, strtab, secstrings,
875 hdr->e_shnum, &mod);
876 if (err < 0) {
877 printk(KERN_WARNING "VPE: unable to simplify symbols\n");
878 goto cleanup;
879 }
880
881 /* Now do relocations. */
882 for (i = 1; i < hdr->e_shnum; i++) {
883 const char *strtab = (char *)sechdrs[strindex].sh_addr;
884 unsigned int info = sechdrs[i].sh_info;
885
886 /* Not a valid relocation section? */
887 if (info >= hdr->e_shnum)
888 continue;
889
890 /* Don't bother with non-allocated sections */
891 if (!(sechdrs[info].sh_flags & SHF_ALLOC))
892 continue;
893
894 if (sechdrs[i].sh_type == SHT_REL)
895 err =
896 apply_relocations(sechdrs, strtab, symindex, i, &mod);
897 else if (sechdrs[i].sh_type == SHT_RELA)
898 err = apply_relocate_add(sechdrs, strtab, symindex, i,
899 &mod);
900 if (err < 0) {
901 printk(KERN_WARNING
902 "vpe_elfload: error in relocations err %ld\n",
903 err);
904 goto cleanup;
905 }
906 }
907
908 /* make sure it's physically written out */
909 flush_icache_range((unsigned long)v->load_addr,
910 (unsigned long)v->load_addr + v->len);
911
912 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
913
914 printk(KERN_WARNING
915 "VPE: program doesn't contain __start or vpe_shared symbols\n");
916 err = -ENOEXEC;
917 }
918
919 printk(" elf loaded\n");
920
921 cleanup:
922 return err;
923 }
924
925 static void dump_vpe(struct vpe * v)
926 {
927 struct tc *t;
928
929 printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol());
930 printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0());
931
932 list_for_each_entry(t, &vpecontrol.tc_list, list) {
933 dump_tc(t);
934 }
935 }
936
937 /* check the VPE is unused and get ready to load a program */
938 static int vpe_open(struct inode *inode, struct file *filp)
939 {
940 int minor;
941 struct vpe *v;
942
943         /* assume only 1 device for the moment. */
944 if ((minor = MINOR(inode->i_rdev)) != 1) {
945 printk(KERN_WARNING "VPE: only vpe1 is supported\n");
946 return -ENODEV;
947 }
948
949 if ((v = get_vpe(minor)) == NULL) {
950 printk(KERN_WARNING "VPE: unable to get vpe\n");
951 return -ENODEV;
952 }
953
954 if (v->state != VPE_STATE_UNUSED) {
955 unsigned long tmp;
956 struct tc *t;
957
958 printk(KERN_WARNING "VPE: device %d already in use\n", minor);
959
960 dvpe();
961 dump_vpe(v);
962
963 printk(KERN_WARNING "VPE: re-initialising %d\n", minor);
964
965 release_progmem(v->load_addr);
966
967 t = get_tc(minor);
968 settc(minor);
969 tmp = read_tc_c0_tcstatus();
970
971 /* mark not allocated and not dynamically allocatable */
972 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
973 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
974 write_tc_c0_tcstatus(tmp);
975
976 write_tc_c0_tchalt(TCHALT_H);
977
978 }
979
980 // allocate it so when we get write ops we know it's expected.
981 v->state = VPE_STATE_INUSE;
982
983         /* this of course trashes what was there before... */
984 v->pbuffer = vmalloc(P_SIZE);
985 v->plen = P_SIZE;
986 v->load_addr = NULL;
987 v->len = 0;
988
989 return 0;
990 }
991
992 static int vpe_release(struct inode *inode, struct file *filp)
993 {
994 int minor, ret = 0;
995 struct vpe *v;
996 Elf_Ehdr *hdr;
997
998 minor = MINOR(inode->i_rdev);
999 if ((v = get_vpe(minor)) == NULL)
1000 return -ENODEV;
1001
1002 // simple case of fire and forget, so tell the VPE to run...
1003
1004 hdr = (Elf_Ehdr *) v->pbuffer;
1005 if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) {
1006 if (vpe_elfload(v) >= 0)
1007 vpe_run(v);
1008 else {
1009 printk(KERN_WARNING "VPE: ELF load failed.\n");
1010 ret = -ENOEXEC;
1011 }
1012 } else {
1013 printk(KERN_WARNING "VPE: only elf files are supported\n");
1014 ret = -ENOEXEC;
1015 }
1016
1017 // cleanup any temp buffers
1018 if (v->pbuffer)
1019 vfree(v->pbuffer);
1020 v->plen = 0;
1021 return ret;
1022 }
1023
1024 static ssize_t vpe_write(struct file *file, const char __user * buffer,
1025 size_t count, loff_t * ppos)
1026 {
1027 int minor;
1028 size_t ret = count;
1029 struct vpe *v;
1030
1031 minor = MINOR(file->f_dentry->d_inode->i_rdev);
1032 if ((v = get_vpe(minor)) == NULL)
1033 return -ENODEV;
1034
1035 if (v->pbuffer == NULL) {
1036 printk(KERN_ERR "vpe_write: no pbuffer\n");
1037 return -ENOMEM;
1038 }
1039
1040 if ((count + v->len) > v->plen) {
1041 printk(KERN_WARNING
1042 "VPE Loader: elf size too big. Perhaps strip uneeded symbols\n");
1043 return -ENOMEM;
1044 }
1045
1046 count -= copy_from_user(v->pbuffer + v->len, buffer, count);
1047 if (!count) {
1048 printk("vpe_write: copy_to_user failed\n");
1049 return -EFAULT;
1050 }
1051
1052 v->len += count;
1053 return ret;
1054 }
1055
1056 static struct file_operations vpe_fops = {
1057 .owner = THIS_MODULE,
1058 .open = vpe_open,
1059 .release = vpe_release,
1060 .write = vpe_write
1061 };
1062
1063 /* module wrapper entry points */
1064 /* give me a vpe */
1065 vpe_handle vpe_alloc(void)
1066 {
1067 int i;
1068 struct vpe *v;
1069
1070 /* find a vpe */
1071 for (i = 1; i < MAX_VPES; i++) {
1072 if ((v = get_vpe(i)) != NULL) {
1073 v->state = VPE_STATE_INUSE;
1074 return v;
1075 }
1076 }
1077 return NULL;
1078 }
1079
1080 EXPORT_SYMBOL(vpe_alloc);
1081
1082 /* start running from here */
1083 int vpe_start(vpe_handle vpe, unsigned long start)
1084 {
1085 struct vpe *v = vpe;
1086
1087 v->__start = start;
1088 return vpe_run(v);
1089 }
1090
1091 EXPORT_SYMBOL(vpe_start);
1092
1093 /* halt it for now */
1094 int vpe_stop(vpe_handle vpe)
1095 {
1096 struct vpe *v = vpe;
1097 struct tc *t;
1098 unsigned int evpe_flags;
1099
1100 evpe_flags = dvpe();
1101
1102 if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) {
1103
1104 settc(t->index);
1105 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1106 }
1107
1108 evpe(evpe_flags);
1109
1110 return 0;
1111 }
1112
1113 EXPORT_SYMBOL(vpe_stop);
1114
1115 /* I'm done with it, thank you */
1116 int vpe_free(vpe_handle vpe)
1117 {
1118 struct vpe *v = vpe;
1119 struct tc *t;
1120 unsigned int evpe_flags;
1121
1122 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
1123 return -ENOEXEC;
1124 }
1125
1126 evpe_flags = dvpe();
1127
1128 /* Put MVPE's into 'configuration state' */
1129 set_c0_mvpcontrol(MVPCONTROL_VPC);
1130
1131 settc(t->index);
1132 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1133
1134         /* mark the TC unallocated and halted */
1135 write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
1136 write_tc_c0_tchalt(TCHALT_H);
1137
1138 v->state = VPE_STATE_UNUSED;
1139
1140 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1141 evpe(evpe_flags);
1142
1143 return 0;
1144 }
1145
1146 EXPORT_SYMBOL(vpe_free);
1147
1148 void *vpe_get_shared(int index)
1149 {
1150 struct vpe *v;
1151
1152 if ((v = get_vpe(index)) == NULL) {
1153 printk(KERN_WARNING "vpe: invalid vpe index %d\n", index);
1154 return NULL;
1155 }
1156
1157 return v->shared_ptr;
1158 }
1159
1160 EXPORT_SYMBOL(vpe_get_shared);
1161
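/*
 * The exported entry points above give other kernel code a programmatic
 * way to drive a VPE.  A minimal sketch (assuming the caller has already
 * placed a fully relocated program image in memory; "my_entry" is a
 * placeholder for its entry address, not something this file provides):
 *
 *	vpe_handle h = vpe_alloc();
 *
 *	if (h != NULL) {
 *		if (vpe_start(h, my_entry) == 0) {
 *			void *shared = vpe_get_shared(1);
 *
 *			... talk to the running program via shared ...
 *			vpe_stop(h);
 *		}
 *		vpe_free(h);
 *	}
 */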
1162 static int __init vpe_module_init(void)
1163 {
1164 struct vpe *v = NULL;
1165 struct tc *t;
1166 unsigned long val;
1167 int i;
1168
1169 if (!cpu_has_mipsmt) {
1170 printk("VPE loader: not a MIPS MT capable processor\n");
1171 return -ENODEV;
1172 }
1173
1174         if ((major = register_chrdev(0, module_name, &vpe_fops)) < 0) {
1175 printk("VPE loader: unable to register character device\n");
1176 return major;
1177 }
1178
1179 dmt();
1180 dvpe();
1181
1182 /* Put MVPE's into 'configuration state' */
1183 set_c0_mvpcontrol(MVPCONTROL_VPC);
1184
1185 /* dump_mtregs(); */
1186
1187 INIT_LIST_HEAD(&vpecontrol.vpe_list);
1188 INIT_LIST_HEAD(&vpecontrol.tc_list);
1189
1190 val = read_c0_mvpconf0();
1191 for (i = 0; i < ((val & MVPCONF0_PTC) + 1); i++) {
1192 t = alloc_tc(i);
1193
1194 /* VPE's */
1195 if (i < ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1) {
1196 settc(i);
1197
1198 if ((v = alloc_vpe(i)) == NULL) {
1199 printk(KERN_WARNING "VPE: unable to allocate VPE\n");
1200 return -ENODEV;
1201 }
1202
1203 list_add(&t->tc, &v->tc); /* add the tc to the list of this vpe's tc's. */
1204
1205 /* deactivate all but vpe0 */
1206 if (i != 0) {
1207 unsigned long tmp = read_vpe_c0_vpeconf0();
1208
1209 tmp &= ~VPECONF0_VPA;
1210
1211 /* master VPE */
1212 tmp |= VPECONF0_MVP;
1213 write_vpe_c0_vpeconf0(tmp);
1214 }
1215
1216 /* disable multi-threading with TC's */
1217 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
1218
1219 if (i != 0) {
1220 write_vpe_c0_status((read_c0_status() &
1221 ~(ST0_IM | ST0_IE | ST0_KSU))
1222 | ST0_CU0);
1223
1224 /* set config to be the same as vpe0, particularly kseg0 coherency alg */
1225 write_vpe_c0_config(read_c0_config());
1226 }
1227
1228 }
1229
1230 /* TC's */
1231 t->pvpe = v; /* set the parent vpe */
1232
1233                 /* tc 0 will of course be running.... */
1234                 if (i == 0)
1235                         t->state = TC_STATE_RUNNING;
1236
1237                 if (i != 0) {
1238                         unsigned long tmp;
1239
1240 settc(i);
1241
1242 /* bind a TC to each VPE, May as well put all excess TC's
1243 on the last VPE */
1244 if (i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1))
1245 write_tc_c0_tcbind(read_tc_c0_tcbind() |
1246 ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
1247 else
1248 write_tc_c0_tcbind(read_tc_c0_tcbind() | i);
1249
1250 tmp = read_tc_c0_tcstatus();
1251
1252 /* mark not allocated and not dynamically allocatable */
1253 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1254 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
1255 write_tc_c0_tcstatus(tmp);
1256
1257 write_tc_c0_tchalt(TCHALT_H);
1258 }
1259 }
1260
1261 /* release config state */
1262 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1263
1264 return 0;
1265 }
1266
1267 static void __exit vpe_module_exit(void)
1268 {
1269 struct vpe *v, *n;
1270
1271 list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
1272 if (v->state != VPE_STATE_UNUSED) {
1273 release_vpe(v);
1274 }
1275 }
1276
1277 unregister_chrdev(major, module_name);
1278 }
1279
1280 module_init(vpe_module_init);
1281 module_exit(vpe_module_exit);
1282 MODULE_DESCRIPTION("MIPS VPE Loader");
1283 MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc");
1284 MODULE_LICENSE("GPL");