]>
Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
14e968ba VG |
2 | /* |
3 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) | |
14e968ba VG |
4 | */ |
5 | ||
6 | #ifndef __ASM_ARC_SMP_H | |
7 | #define __ASM_ARC_SMP_H | |
8 | ||
41195d23 VG |
9 | #ifdef CONFIG_SMP |
10 | ||
11 | #include <linux/types.h> | |
12 | #include <linux/init.h> | |
13 | #include <linux/threads.h> | |
14 | ||
15 | #define raw_smp_processor_id() (current_thread_info()->cpu) | |
16 | ||
17 | /* including cpumask.h leads to cyclic deps hence this Forward declaration */ | |
18 | struct cpumask; | |
19 | ||
20 | /* | |
21 | * APIs provided by arch SMP code to generic code | |
22 | */ | |
23 | extern void arch_send_call_function_single_ipi(int cpu); | |
24 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | |
25 | ||
26 | /* | |
27 | * APIs provided by arch SMP code to rest of arch code | |
28 | */ | |
29 | extern void __init smp_init_cpus(void); | |
8f5d221b | 30 | extern void first_lines_of_secondary(void); |
10b12718 | 31 | extern const char *arc_platform_smp_cpuinfo(void); |
41195d23 VG |
32 | |
33 | /* | |
34 | * API expected BY platform smp code (FROM arch smp code) | |
35 | * | |
36 | * smp_ipi_irq_setup: | |
34e71e4c | 37 | * Takes @cpu and @hwirq to which the arch-common ISR is hooked up |
41195d23 | 38 | */ |
34e71e4c | 39 | extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq); |
41195d23 VG |
40 | |
/*
 * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
 *
 * @info:		SoC SMP specific info for /proc/cpuinfo etc
 * @init_early_smp:	A SMP specific h/w block can init itself
 *			Could be common across platforms so not covered by
 *			mach_desc->init_early()
 * @init_per_cpu:	Called for each core so SMP h/w block driver can do
 *			any needed setup per cpu (e.g. IPI request)
 * @cpu_kick:		For Master to kickstart a cpu (optionally at a PC)
 * @ipi_send:		To send IPI to a @cpu
 * @ipi_clear:		To clear IPI received at @irq
 */
struct plat_smp_ops {
	const char	*info;
	void		(*init_early_smp)(void);
	void		(*init_per_cpu)(int cpu);
	void		(*cpu_kick)(int cpu, unsigned long pc);
	void		(*ipi_send)(int cpu);
	void		(*ipi_clear)(int irq);
};

/* TBD: stop exporting it for direct population by platform */
extern struct plat_smp_ops plat_smp_ops;
41195d23 | 65 | |
619f3018 VG |
66 | #else /* CONFIG_SMP */ |
67 | ||
/* UP stubs: nothing to enumerate and nothing to add to cpuinfo */
static inline void smp_init_cpus(void) {}

static inline const char *arc_platform_smp_cpuinfo(void)
{
	return "";
}
73 | ||
74 | #endif /* !CONFIG_SMP */ | |
41195d23 | 75 | |
14e968ba VG |
/*
 * ARC700 doesn't support atomic Read-Modify-Write ops.
 * Originally interrupts had to be disabled around code to guarantee atomicity.
 * The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops
 * based on retry-if-irq-in-atomic (with hardware assist).
 * However despite these, we provide the IRQ disabling variant
 *
 * (1) These insns were introduced only in the 4.10 release, so support is
 *     still needed for older releases.
 *
 * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
 *     guaranteed by the platform (not something which the core handles).
 *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
 *     disabling for atomicity.
 *
 *     However the exported spinlock API is not usable due to cyclic hdr deps
 *     (even after system.h disintegration upstream)
 *     asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
 *         -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
 *
 *     So the workaround is to use the lowest level arch spinlock API.
 *     The exported spinlock API is smart enough to be a NOP for !CONFIG_SMP,
 *     but the same is not true for the ARCH backend, hence the need for 2
 *     variants.
 */
100 | #ifndef CONFIG_ARC_HAS_LLSC | |
101 | ||
102 | #include <linux/irqflags.h> | |
41195d23 VG |
103 | #ifdef CONFIG_SMP |
104 | ||
105 | #include <asm/spinlock.h> | |
106 | ||
107 | extern arch_spinlock_t smp_atomic_ops_lock; | |
108 | extern arch_spinlock_t smp_bitops_lock; | |
109 | ||
110 | #define atomic_ops_lock(flags) do { \ | |
111 | local_irq_save(flags); \ | |
112 | arch_spin_lock(&smp_atomic_ops_lock); \ | |
113 | } while (0) | |
114 | ||
115 | #define atomic_ops_unlock(flags) do { \ | |
116 | arch_spin_unlock(&smp_atomic_ops_lock); \ | |
117 | local_irq_restore(flags); \ | |
118 | } while (0) | |
119 | ||
120 | #define bitops_lock(flags) do { \ | |
121 | local_irq_save(flags); \ | |
122 | arch_spin_lock(&smp_bitops_lock); \ | |
123 | } while (0) | |
124 | ||
125 | #define bitops_unlock(flags) do { \ | |
126 | arch_spin_unlock(&smp_bitops_lock); \ | |
127 | local_irq_restore(flags); \ | |
128 | } while (0) | |
129 | ||
130 | #else /* !CONFIG_SMP */ | |
14e968ba VG |
131 | |
132 | #define atomic_ops_lock(flags) local_irq_save(flags) | |
133 | #define atomic_ops_unlock(flags) local_irq_restore(flags) | |
134 | ||
135 | #define bitops_lock(flags) local_irq_save(flags) | |
136 | #define bitops_unlock(flags) local_irq_restore(flags) | |
137 | ||
41195d23 VG |
138 | #endif /* !CONFIG_SMP */ |
139 | ||
14e968ba VG |
140 | #endif /* !CONFIG_ARC_HAS_LLSC */ |
141 | ||
142 | #endif |