/* mdesc.c: Sun4V machine description handling.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/log2.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/smp.h>

/* Unlike the OBP device tree, the machine description is a full-on
 * DAG.  An arbitrary number of ARCs are possible from one
 * node to other nodes and thus we can't use the OBP device_node
 * data structure to represent these nodes inside of the kernel.
 *
 * Actually, it isn't even a DAG, because there are back pointers
 * which create cycles in the graph.
 *
 * mdesc_hdr and mdesc_elem describe the layout of the data structure
 * we get from the Hypervisor.
 */
struct mdesc_hdr {
        u32     version; /* Transport version */
        u32     node_sz; /* node block size */
        u32     name_sz; /* name block size */
        u32     data_sz; /* data block size */
};

struct mdesc_elem {
        u8      tag;
#define MD_LIST_END     0x00
#define MD_NODE         0x4e
#define MD_NODE_END     0x45
#define MD_NOOP         0x20
#define MD_PROP_ARC     0x61
#define MD_PROP_VAL     0x76
#define MD_PROP_STR     0x73
#define MD_PROP_DATA    0x64
        u8      name_len;
        u16     resv;
        u32     name_offset;
        union {
                struct {
                        u32     data_len;
                        u32     data_offset;
                } data;
                u64     val;
        } d;
};
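
/* Note on the layout as consumed below: the mdesc_hdr is followed by
 * three consecutive blocks -- the node block (an array of 16-byte
 * mdesc_elem entries), the name block, and the data block.  A node is
 * identified by its element index within the node block.  For an
 * MD_NODE element d.val is the index of the next node, for an
 * MD_PROP_ARC element d.val is the index of the node the arc points
 * to, and name_offset/data_offset index into the name and data
 * blocks.  See node_block(), name_block(), data_block() and
 * build_all_nodes() below.
 */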

static struct mdesc_hdr *main_mdesc;
static struct mdesc_node *allnodes;

static struct mdesc_node *allnodes_tail;
static unsigned int unique_id;

static struct mdesc_node **mdesc_hash;
static unsigned int mdesc_hash_size;

static inline unsigned int node_hashfn(u64 node)
{
        return ((unsigned int) (node ^ (node >> 8) ^ (node >> 16)))
                & (mdesc_hash_size - 1);
}

static inline void hash_node(struct mdesc_node *mp)
{
        struct mdesc_node **head = &mdesc_hash[node_hashfn(mp->node)];

        mp->hash_next = *head;
        *head = mp;

        if (allnodes_tail) {
                allnodes_tail->allnodes_next = mp;
                allnodes_tail = mp;
        } else {
                allnodes = allnodes_tail = mp;
        }
}

static struct mdesc_node *find_node(u64 node)
{
        struct mdesc_node *mp = mdesc_hash[node_hashfn(node)];

        while (mp) {
                if (mp->node == node)
                        return mp;

                mp = mp->hash_next;
        }
        return NULL;
}

struct property *md_find_property(const struct mdesc_node *mp,
                                  const char *name,
                                  int *lenp)
{
        struct property *pp;

        for (pp = mp->properties; pp != 0; pp = pp->next) {
                if (strcasecmp(pp->name, name) == 0) {
                        if (lenp)
                                *lenp = pp->length;
                        break;
                }
        }
        return pp;
}
EXPORT_SYMBOL(md_find_property);

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *md_get_property(const struct mdesc_node *mp, const char *name,
                            int *lenp)
{
        struct property *pp = md_find_property(mp, name, lenp);
        return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(md_get_property);

struct mdesc_node *md_find_node_by_name(struct mdesc_node *from,
                                        const char *name)
{
        struct mdesc_node *mp;

        mp = from ? from->allnodes_next : allnodes;
        for (; mp != NULL; mp = mp->allnodes_next) {
                if (strcmp(mp->name, name) == 0)
                        break;
        }
        return mp;
}
EXPORT_SYMBOL(md_find_node_by_name);
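
/* Illustrative sketch, not part of this file's build: a typical lookup
 * walks nodes by name and reads a value property, e.g.
 *
 *      struct mdesc_node *mp;
 *      const u64 *id;
 *
 *      md_for_each_node_by_name(mp, "cpu") {
 *              id = md_get_property(mp, "id", NULL);
 *              if (id)
 *                      printk("cpu id [%lu]\n", *id);
 *      }
 *
 * md_for_each_node_by_name() is the iterator built on top of
 * md_find_node_by_name() (see asm/mdesc.h) and is what the routines
 * further down in this file use.
 */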

static unsigned int mdesc_early_allocated;

static void * __init mdesc_early_alloc(unsigned long size)
{
        void *ret;

        ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
        if (ret == NULL) {
                prom_printf("MDESC: alloc of %lu bytes failed.\n", size);
                prom_halt();
        }

        memset(ret, 0, size);

        mdesc_early_allocated += size;

        return ret;
}

static unsigned int __init count_arcs(struct mdesc_elem *ep)
{
        unsigned int ret = 0;

        ep++;
        while (ep->tag != MD_NODE_END) {
                if (ep->tag == MD_PROP_ARC)
                        ret++;
                ep++;
        }
        return ret;
}

static void __init mdesc_node_alloc(u64 node, struct mdesc_elem *ep, const char *names)
{
        unsigned int num_arcs = count_arcs(ep);
        struct mdesc_node *mp;

        mp = mdesc_early_alloc(sizeof(*mp) +
                               (num_arcs * sizeof(struct mdesc_arc)));
        mp->name = names + ep->name_offset;
        mp->node = node;
        mp->unique_id = unique_id++;
        mp->num_arcs = num_arcs;

        hash_node(mp);
}

static inline struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
        return (struct mdesc_elem *) (mdesc + 1);
}

static inline void *name_block(struct mdesc_hdr *mdesc)
{
        return ((void *) node_block(mdesc)) + mdesc->node_sz;
}

static inline void *data_block(struct mdesc_hdr *mdesc)
{
        return ((void *) name_block(mdesc)) + mdesc->name_sz;
}

/* In order to avoid recursion (the graph can be very deep) we use a
 * two pass algorithm.  First we allocate all the nodes and hash them.
 * Then we iterate over each node, filling in the arcs and properties.
 */
static void __init build_all_nodes(struct mdesc_hdr *mdesc)
{
        struct mdesc_elem *start, *ep;
        struct mdesc_node *mp;
        const char *names;
        void *data;
        u64 last_node;

        start = ep = node_block(mdesc);
        last_node = mdesc->node_sz / 16;

        names = name_block(mdesc);

        while (1) {
                u64 node = ep - start;

                if (ep->tag == MD_LIST_END)
                        break;

                if (ep->tag != MD_NODE) {
                        prom_printf("MDESC: Inconsistent element list.\n");
                        prom_halt();
                }

                mdesc_node_alloc(node, ep, names);

                if (ep->d.val >= last_node) {
                        printk("MDESC: Warning, early break out of node scan.\n");
                        printk("MDESC: Next node [%lu] last_node [%lu].\n",
                               node, last_node);
                        break;
                }

                ep = start + ep->d.val;
        }

        data = data_block(mdesc);
        for (mp = allnodes; mp; mp = mp->allnodes_next) {
                struct mdesc_elem *ep = start + mp->node;
                struct property **link = &mp->properties;
                unsigned int this_arc = 0;

                ep++;
                while (ep->tag != MD_NODE_END) {
                        switch (ep->tag) {
                        case MD_PROP_ARC: {
                                struct mdesc_node *target;

                                if (this_arc >= mp->num_arcs) {
                                        prom_printf("MDESC: ARC overrun [%u:%u]\n",
                                                    this_arc, mp->num_arcs);
                                        prom_halt();
                                }
                                target = find_node(ep->d.val);
                                if (!target) {
                                        printk("MDESC: Warning, arc points to "
                                               "missing node, ignoring.\n");
                                        break;
                                }
                                mp->arcs[this_arc].name =
                                        (names + ep->name_offset);
                                mp->arcs[this_arc].arc = target;
                                this_arc++;
                                break;
                        }

                        case MD_PROP_VAL:
                        case MD_PROP_STR:
                        case MD_PROP_DATA: {
                                struct property *p = mdesc_early_alloc(sizeof(*p));

                                p->unique_id = unique_id++;
                                p->name = (char *) names + ep->name_offset;
                                if (ep->tag == MD_PROP_VAL) {
                                        p->value = &ep->d.val;
                                        p->length = 8;
                                } else {
                                        p->value = data + ep->d.data.data_offset;
                                        p->length = ep->d.data.data_len;
                                }
                                *link = p;
                                link = &p->next;
                                break;
                        }

                        case MD_NOOP:
                                break;

                        default:
                                printk("MDESC: Warning, ignoring unknown tag type %02x\n",
                                       ep->tag);
                        }
                        ep++;
                }
        }
}

static unsigned int __init count_nodes(struct mdesc_hdr *mdesc)
{
        struct mdesc_elem *ep = node_block(mdesc);
        struct mdesc_elem *end;
        unsigned int cnt = 0;

        end = ((void *)ep) + mdesc->node_sz;
        while (ep < end) {
                if (ep->tag == MD_NODE)
                        cnt++;
                ep++;
        }
        return cnt;
}

static void __init report_platform_properties(void)
{
        struct mdesc_node *pn = md_find_node_by_name(NULL, "platform");
        const char *s;
        const u64 *v;

        if (!pn) {
                prom_printf("No platform node in machine-description.\n");
                prom_halt();
        }

        s = md_get_property(pn, "banner-name", NULL);
        printk("PLATFORM: banner-name [%s]\n", s);
        s = md_get_property(pn, "name", NULL);
        printk("PLATFORM: name [%s]\n", s);

        v = md_get_property(pn, "hostid", NULL);
        if (v)
                printk("PLATFORM: hostid [%08lx]\n", *v);
        v = md_get_property(pn, "serial#", NULL);
        if (v)
                printk("PLATFORM: serial# [%08lx]\n", *v);
        v = md_get_property(pn, "stick-frequency", NULL);
        printk("PLATFORM: stick-frequency [%08lx]\n", *v);
        v = md_get_property(pn, "mac-address", NULL);
        if (v)
                printk("PLATFORM: mac-address [%lx]\n", *v);
        v = md_get_property(pn, "watchdog-resolution", NULL);
        if (v)
                printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v);
        v = md_get_property(pn, "watchdog-max-timeout", NULL);
        if (v)
                printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v);
        v = md_get_property(pn, "max-cpus", NULL);
        if (v)
                printk("PLATFORM: max-cpus [%lu]\n", *v);
}

static int inline find_in_proplist(const char *list, const char *match, int len)
{
        while (len > 0) {
                int l;

                if (!strcmp(list, match))
                        return 1;
                l = strlen(list) + 1;
                list += l;
                len -= l;
        }
        return 0;
}
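
/* String list properties such as a cache node's "type" are a sequence
 * of NUL terminated strings packed back to back; find_in_proplist()
 * scans that packed list.  For example, a combined L1 cache could
 * carry type data that looks like "instn\0data\0" with len == 11.
 */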

static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp)
{
        const u64 *level = md_get_property(mp, "level", NULL);
        const u64 *size = md_get_property(mp, "size", NULL);
        const u64 *line_size = md_get_property(mp, "line-size", NULL);
        const char *type;
        int type_len;

        type = md_get_property(mp, "type", &type_len);

        switch (*level) {
        case 1:
                if (find_in_proplist(type, "instn", type_len)) {
                        c->icache_size = *size;
                        c->icache_line_size = *line_size;
                } else if (find_in_proplist(type, "data", type_len)) {
                        c->dcache_size = *size;
                        c->dcache_line_size = *line_size;
                }
                break;

        case 2:
                c->ecache_size = *size;
                c->ecache_line_size = *line_size;
                break;

        default:
                break;
        }

        if (*level == 1) {
                unsigned int i;

                for (i = 0; i < mp->num_arcs; i++) {
                        struct mdesc_node *t = mp->arcs[i].arc;

                        if (strcmp(mp->arcs[i].name, "fwd"))
                                continue;

                        if (!strcmp(t->name, "cache"))
                                fill_in_one_cache(c, t);
                }
        }
}

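/* Core membership is recovered from the cache hierarchy: every cpu
 * node reachable via "back" arcs (directly, or one node removed) from
 * a level-1 instruction cache shares that cache and therefore the
 * core.  set_core_ids() numbers those caches starting at 1 and
 * mark_core_ids() stamps the id into cpu_data().core_id.
 */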
static void __init mark_core_ids(struct mdesc_node *mp, int core_id)
{
        unsigned int i;

        for (i = 0; i < mp->num_arcs; i++) {
                struct mdesc_node *t = mp->arcs[i].arc;
                const u64 *id;

                if (strcmp(mp->arcs[i].name, "back"))
                        continue;

                if (!strcmp(t->name, "cpu")) {
                        id = md_get_property(t, "id", NULL);
                        if (*id < NR_CPUS)
                                cpu_data(*id).core_id = core_id;
                } else {
                        unsigned int j;

                        for (j = 0; j < t->num_arcs; j++) {
                                struct mdesc_node *n = t->arcs[j].arc;

                                if (strcmp(t->arcs[j].name, "back"))
                                        continue;

                                if (strcmp(n->name, "cpu"))
                                        continue;

                                id = md_get_property(n, "id", NULL);
                                if (*id < NR_CPUS)
                                        cpu_data(*id).core_id = core_id;
                        }
                }
        }
}

static void __init set_core_ids(void)
{
        struct mdesc_node *mp;
        int idx;

        idx = 1;
        md_for_each_node_by_name(mp, "cache") {
                const u64 *level = md_get_property(mp, "level", NULL);
                const char *type;
                int len;

                if (*level != 1)
                        continue;

                type = md_get_property(mp, "type", &len);
                if (!find_in_proplist(type, "instn", len))
                        continue;

                mark_core_ids(mp, idx);

                idx++;
        }
}

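/* proc_id is assigned the same way, but from integer execution unit
 * nodes: all cpu nodes with a "back" arc to the same "exec-unit" (or
 * "exec_unit") node whose type list contains "int" or "integer" get
 * the same proc_id, numbered from 0.
 */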
static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
{
        int i;

        for (i = 0; i < mp->num_arcs; i++) {
                struct mdesc_node *t = mp->arcs[i].arc;
                const u64 *id;

                if (strcmp(mp->arcs[i].name, "back"))
                        continue;

                if (strcmp(t->name, "cpu"))
                        continue;

                id = md_get_property(t, "id", NULL);
                if (*id < NR_CPUS)
                        cpu_data(*id).proc_id = proc_id;
        }
}

static void __init __set_proc_ids(const char *exec_unit_name)
{
        struct mdesc_node *mp;
        int idx;

        idx = 0;
        md_for_each_node_by_name(mp, exec_unit_name) {
                const char *type;
                int len;

                type = md_get_property(mp, "type", &len);
                if (!find_in_proplist(type, "int", len) &&
                    !find_in_proplist(type, "integer", len))
                        continue;

                mark_proc_ids(mp, idx);

                idx++;
        }
}

static void __init set_proc_ids(void)
{
        __set_proc_ids("exec_unit");
        __set_proc_ids("exec-unit");
}

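/* The MD gives the size of each per-cpu mondo/error queue as log2 of
 * the number of entries, each entry being 64 bytes.  The mask computed
 * here is therefore the queue size in bytes minus one, and a default
 * is used when the property is absent or out of range (val must be
 * between 1 and 63).
 */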
static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
{
        u64 val;

        if (!p)
                goto use_default;
        val = *p;

        if (!val || val >= 64)
                goto use_default;

        *mask = ((1U << val) * 64U) - 1U;
        return;

use_default:
        *mask = ((1U << def) * 64U) - 1U;
}

static void __init get_mondo_data(struct mdesc_node *mp, struct trap_per_cpu *tb)
{
        const u64 *val;

        val = md_get_property(mp, "q-cpu-mondo-#bits", NULL);
        get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);

        val = md_get_property(mp, "q-dev-mondo-#bits", NULL);
        get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);

        val = md_get_property(mp, "q-resumable-#bits", NULL);
        get_one_mondo_bits(val, &tb->resum_qmask, 6);

        val = md_get_property(mp, "q-nonresumable-#bits", NULL);
        get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
}

static void __init mdesc_fill_in_cpu_data(void)
{
        struct mdesc_node *mp;

        ncpus_probed = 0;
        md_for_each_node_by_name(mp, "cpu") {
                const u64 *id = md_get_property(mp, "id", NULL);
                const u64 *cfreq = md_get_property(mp, "clock-frequency", NULL);
                struct trap_per_cpu *tb;
                cpuinfo_sparc *c;
                unsigned int i;
                int cpuid;

                ncpus_probed++;

                cpuid = *id;

#ifdef CONFIG_SMP
                if (cpuid >= NR_CPUS)
                        continue;
#else
                /* On uniprocessor we only want the values for the
                 * real physical cpu the kernel booted onto, however
                 * cpu_data() only has one entry at index 0.
                 */
                if (cpuid != real_hard_smp_processor_id())
                        continue;
                cpuid = 0;
#endif

                c = &cpu_data(cpuid);
                c->clock_tick = *cfreq;

                tb = &trap_block[cpuid];
                get_mondo_data(mp, tb);

                for (i = 0; i < mp->num_arcs; i++) {
                        struct mdesc_node *t = mp->arcs[i].arc;
                        unsigned int j;

                        if (strcmp(mp->arcs[i].name, "fwd"))
                                continue;

                        if (!strcmp(t->name, "cache")) {
                                fill_in_one_cache(c, t);
                                continue;
                        }

                        for (j = 0; j < t->num_arcs; j++) {
                                struct mdesc_node *n;

                                n = t->arcs[j].arc;
                                if (strcmp(t->arcs[j].name, "fwd"))
                                        continue;

                                if (!strcmp(n->name, "cache"))
                                        fill_in_one_cache(c, n);
                        }
                }

#ifdef CONFIG_SMP
                cpu_set(cpuid, cpu_present_map);
                cpu_set(cpuid, phys_cpu_present_map);
#endif

                c->core_id = 0;
                c->proc_id = -1;
        }

#ifdef CONFIG_SMP
        sparc64_multi_core = 1;
#endif

        set_core_ids();
        set_proc_ids();

        smp_fill_in_sib_core_maps();
}

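/* Top-level entry point, run once during early boot: ask the
 * hypervisor for the size of the machine description, allocate a
 * buffer for it from bootmem, fetch it with a second sun4v_mach_desc()
 * call, size the node hash to the next power of two, build the
 * in-kernel graph, then report platform properties and fill in the
 * per-cpu data.
 */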
void __init sun4v_mdesc_init(void)
{
        unsigned long len, real_len, status;

        (void) sun4v_mach_desc(0UL, 0UL, &len);

        printk("MDESC: Size is %lu bytes.\n", len);

        main_mdesc = mdesc_early_alloc(len);

        status = sun4v_mach_desc(__pa(main_mdesc), len, &real_len);
        if (status != HV_EOK || real_len > len) {
                prom_printf("sun4v_mach_desc fails, err(%lu), "
                            "len(%lu), real_len(%lu)\n",
                            status, len, real_len);
                prom_halt();
        }

        len = count_nodes(main_mdesc);
        printk("MDESC: %lu nodes.\n", len);

        len = roundup_pow_of_two(len);

        mdesc_hash = mdesc_early_alloc(len * sizeof(struct mdesc_node *));
        mdesc_hash_size = len;

        printk("MDESC: Hash size %lu entries.\n", len);

        build_all_nodes(main_mdesc);

        printk("MDESC: Built graph with %u bytes of memory.\n",
               mdesc_early_allocated);

        report_platform_properties();
        mdesc_fill_in_cpu_data();
}