/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
/*
 * A logical CPU mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif
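/*
 * Track which CPUs share a core (siblings) and which share a physical
 * package, for a CPU that is being brought up. cpu_sibling_map is also
 * used below when deriving cpu_foreign_map.
 */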
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu(i, &cpu_sibling_setup_map) {
                        if (cpus_are_siblings(cpu, i)) {
                                cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                                cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                        }
                }
        } else
                cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

static inline void set_cpu_core_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_core_setup_map);

        for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
        int i, k, core_present;
        cpumask_t temp_foreign_map;

        /* Re-calculate the mask */
        cpumask_clear(&temp_foreign_map);
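        /*
         * Pick exactly one online CPU from each core: a CPU is added to
         * the temporary mask only if none of its siblings has been
         * selected already.
         */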
        for_each_online_cpu(i) {
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
                        if (cpus_are_siblings(i, k))
                                core_present = 1;
                if (!core_present)
                        cpumask_set_cpu(i, &temp_foreign_map);
        }

        for_each_online_cpu(i)
                cpumask_andnot(&cpu_foreign_map[i],
                               &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
        mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned long flags;
        unsigned int core;
        int cpu;

        local_irq_save(flags);

        switch (action) {
        case SMP_CALL_FUNCTION:
                __ipi_send_mask(call_desc, mask);
                break;

        case SMP_RESCHEDULE_YOURSELF:
                __ipi_send_mask(sched_desc, mask);
                break;

        default:
                BUG();
        }
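        /*
         * If the Cluster Power Controller is present, cores which have not
         * yet joined the coherent domain (e.g. because they are powered
         * down) cannot see the IPI, so keep issuing a CPC power-up command
         * to each such core until it shows up in cpu_coherent_mask.
         */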
        if (mips_cpc_present()) {
                for_each_cpu(cpu, mask) {
                        if (cpus_are_siblings(cpu, smp_processor_id()))
                                continue;

                        core = cpu_core(&cpu_data[cpu]);

                        while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
                                mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                                mips_cpc_lock_other(core);
                                write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
                                mips_cpc_unlock_other();
                                mips_cm_unlock_other();
                        }
                }
        }

        local_irq_restore(flags);
}


static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();

        return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_interrupt();

        return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
        .handler = ipi_resched_interrupt,
        .flags = IRQF_PERCPU,
        .name = "IPI resched"
};

static struct irqaction irq_call = {
        .handler = ipi_call_interrupt,
        .flags = IRQF_PERCPU,
        .name = "IPI call"
};

static void smp_ipi_init_one(unsigned int virq,
                             struct irqaction *action)
{
        int ret;

        irq_set_handler(virq, handle_percpu_irq);
        ret = setup_irq(virq, action);
        BUG_ON(ret);
}
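/*
 * Base virqs reserved for the call-function and reschedule IPIs. With a
 * per-CPU IPI domain these are the first entries of a per-CPU range
 * (hence the "+ cpu" arithmetic below); otherwise a single virq is
 * shared by all CPUs.
 */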
static unsigned int call_virq, sched_virq;

int mips_smp_ipi_allocate(const struct cpumask *mask)
{
        int virq;
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms have half DT setup. So if we found irq node but
         * didn't find an ipidomain, try to search for one that is not in the
         * DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        /*
         * There are systems which use IPI IRQ domains, but only have one
         * registered when some runtime condition is met. For example a Malta
         * kernel may include support for GIC & CPU interrupt controller IPI
         * IRQ domains, but if run on a system with no GIC & no MT ASE then
         * neither will be supported or registered.
         *
         * We only have a problem if we're actually using multiple CPUs so fail
         * loudly if that is the case. Otherwise simply return, skipping IPI
         * setup, if we're running with only a single CPU.
         */
        if (!ipidomain) {
                BUG_ON(num_present_cpus() > 1);
                return 0;
        }

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!call_virq)
                call_virq = virq;

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!sched_virq)
                sched_virq = virq;

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        smp_ipi_init_one(call_virq + cpu, &irq_call);
                        smp_ipi_init_one(sched_virq + cpu, &irq_resched);
                }
        } else {
                smp_ipi_init_one(call_virq, &irq_call);
                smp_ipi_init_one(sched_virq, &irq_resched);
        }

        return 0;
}

int mips_smp_ipi_free(const struct cpumask *mask)
{
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms have half DT setup. So if we found irq node but
         * didn't find an ipidomain, try to search for one that is not in the
         * DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        BUG_ON(!ipidomain);

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        remove_irq(call_virq + cpu, &irq_call);
                        remove_irq(sched_virq + cpu, &irq_resched);
                }
        }
        irq_destroy_ipi(call_virq, mask);
        irq_destroy_ipi(sched_virq, mask);
        return 0;
}


static int __init mips_smp_ipi_init(void)
{
        if (num_possible_cpus() == 1)
                return 0;

        mips_smp_ipi_allocate(cpu_possible_mask);

        call_desc = irq_to_desc(call_virq);
        sched_desc = irq_to_desc(sched_virq);

        return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

        cpu_probe();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();
        cpu_report();
        maar_init();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */

        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);

        /* Notify boot CPU that we're starting & ready to sync counters */
        complete(&cpu_starting);

        synchronise_count_slave(cpu);

        /* The CPU is running and counters synchronised, now mark it online */
        set_cpu_online(cpu, true);

        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);

        calculate_cpu_foreign_map();

        /*
         * Notify boot CPU that we're up & online and it can safely return
         * from __cpu_up
         */
        complete(&cpu_running);

        /*
         * irq will be enabled in ->smp_finish(), enabling it too early
         * is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
        mp_ops->smp_finish();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
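/*
 * Invoked on every other CPU via IPI from smp_send_stop(): take the CPU
 * offline, recompute cpu_foreign_map and then spin with interrupts
 * disabled.
 */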
static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */

        set_cpu_online(smp_processor_id(), false);
        calculate_cpu_foreign_map();
        local_irq_disable();
        while (1);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
        calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
        cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
        if (mp_ops->prepare_boot_cpu)
                mp_ops->prepare_boot_cpu();
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int err;

        err = mp_ops->boot_secondary(cpu, tidle);
        if (err)
                return err;

        /* Wait for CPU to start and be ready to sync counters */
        if (!wait_for_completion_timeout(&cpu_starting,
                                         msecs_to_jiffies(1000))) {
                pr_crit("CPU%u: failed to start\n", cpu);
                return -EIO;
        }

        synchronise_count_master(cpu);

        /* Wait for CPU to finish startup & mark itself online before return */
        wait_for_completion(&cpu_running);
        return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        if (cpu_has_mmid) {
                htw_stop();
                ginvt_full();
                sync_ginv();
                instruction_hazard();
                htw_start();
                return;
        }

        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
        smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if (cpu_has_mmid) {
                /*
                 * No need to worry about other CPUs - the ginvt in
                 * drop_mmu_context() will be globalized.
                 */
        } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                set_cpu_context(cpu, mm, 0);
                }
        }
        drop_mmu_context(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr;
        u32 old_mmid;

        preempt_disable();
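        /*
         * With MMIDs the invalidate can be broadcast in hardware: switch
         * the MemoryMapID register to the target mm and issue a global
         * invalidate by VA over the range. Addresses are stepped in
         * even/odd page pairs since each TLB entry maps such a pair.
         */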
        if (cpu_has_mmid) {
                htw_stop();
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(cpu_asid(0, mm));
                mtc0_tlbw_hazard();
                addr = round_down(start, PAGE_SIZE * 2);
                end = round_up(end, PAGE_SIZE * 2);
                do {
                        ginvt_va_mmid(addr);
                        sync_ginv();
                        addr += PAGE_SIZE * 2;
                } while (addr < end);
                write_c0_memorymapid(old_mmid);
                instruction_hazard();
                htw_start();
        } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
                local_flush_tlb_range(vma, start, end);
        } else {
                unsigned int cpu;
                int exec = vma->vm_flags & VM_EXEC;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_range() will only fully flush icache if
                         * the VMA is executable, otherwise we must invalidate
                         * ASID without it appearing to has_valid_asid() as if
                         * mm has been completely unused by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                set_cpu_context(cpu, mm, !exec);
                }
                local_flush_tlb_range(vma, start, end);
        }
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        u32 old_mmid;

        preempt_disable();
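        /*
         * Same idea as in flush_tlb_range(): with MMIDs a single global
         * invalidate by VA covers every CPU, so no IPIs are needed.
         */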
        if (cpu_has_mmid) {
                htw_stop();
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
                mtc0_tlbw_hazard();
                ginvt_va_mmid(page);
                sync_ginv();
                write_c0_memorymapid(old_mmid);
                instruction_hazard();
                htw_start();
        } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
                   (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
                local_flush_tlb_page(vma, page);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_page() only does partial flushes, so
                         * invalidate ASID without it appearing to
                         * has_valid_asid() as if mm has been completely unused
                         * by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                set_cpu_context(cpu, vma->vm_mm, 1);
                }
                local_flush_tlb_page(vma, page);
        }
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
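/*
 * Send a tick broadcast IPI to every CPU in @mask. The per-CPU count
 * ensures a CSD is not queued again while a previous broadcast to that
 * CPU is still pending.
 */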
void tick_broadcast(const struct cpumask *mask)
{
        atomic_t *count;
        call_single_data_t *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                count = &per_cpu(tick_broadcast_count, cpu);
                csd = &per_cpu(tick_broadcast_csd, cpu);

                if (atomic_inc_return(count) == 1)
                        smp_call_function_single_async(cpu, csd);
        }
}

static void tick_broadcast_callee(void *info)
{
        int cpu = smp_processor_id();
        tick_receive_broadcast();
        atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
        call_single_data_t *csd;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                csd = &per_cpu(tick_broadcast_csd, cpu);
                csd->func = tick_broadcast_callee;
        }

        return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */