]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/arm/mach-hisi/platmcpm.c
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[mirror_ubuntu-artful-kernel.git] / arch / arm / mach-hisi / platmcpm.c
CommitLineData
9cdc9991
HZ
1/*
2 * Copyright (c) 2013-2014 Linaro Ltd.
3 * Copyright (c) 2013-2014 Hisilicon Limited.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 */
905cdf9d
NP
9#include <linux/init.h>
10#include <linux/smp.h>
9cdc9991
HZ
11#include <linux/delay.h>
12#include <linux/io.h>
13#include <linux/memblock.h>
14#include <linux/of_address.h>
15
16#include <asm/cputype.h>
17#include <asm/cp15.h>
905cdf9d
NP
18#include <asm/cacheflush.h>
19#include <asm/smp.h>
20#include <asm/smp_plat.h>
9cdc9991
HZ
21
22#include "core.h"
23
/*
 * Bit definitions in SC_CPU_RESET_REQ[x]/SC_CPU_RESET_DREQ[x]
 * 1 -- unreset; 0 -- reset
 *
 * All macro arguments are fully parenthesized so that expression
 * arguments (e.g. CORE_RESET_BIT(cpu | 1)) expand correctly; the
 * unparenthesized forms mis-expand because '<<' binds tighter than '|'.
 */
#define CORE_RESET_BIT(x)		(1 << (x))
#define NEON_RESET_BIT(x)		(1 << ((x) + 4))
#define CORE_DEBUG_RESET_BIT(x)		(1 << ((x) + 9))
#define CLUSTER_L2_RESET_BIT		(1 << 8)
#define CLUSTER_DEBUG_RESET_BIT		(1 << 13)

/*
 * Bit definitions in SC_CPU_RESET_STATUS[x]
 * 1 -- reset status; 0 -- unreset status
 */
#define CORE_RESET_STATUS(x)		(1 << (x))
#define NEON_RESET_STATUS(x)		(1 << ((x) + 4))
#define CORE_DEBUG_RESET_STATUS(x)	(1 << ((x) + 9))
#define CLUSTER_L2_RESET_STATUS		(1 << 8)
#define CLUSTER_DEBUG_RESET_STATUS	(1 << 13)
#define CORE_WFI_STATUS(x)		(1 << ((x) + 16))
#define CORE_WFE_STATUS(x)		(1 << ((x) + 20))
#define CORE_DEBUG_ACK(x)		(1 << ((x) + 24))

/* System-controller register offsets; one 8-byte stride per cluster. */
#define SC_CPU_RESET_REQ(x)		(0x520 + ((x) << 3))	/* reset */
#define SC_CPU_RESET_DREQ(x)		(0x524 + ((x) << 3))	/* unreset */
#define SC_CPU_RESET_STATUS(x)		(0x1520 + ((x) << 3))

/* Fabric register offsets. */
#define FAB_SF_MODE			0x0c
#define FAB_SF_INVLD			0x10

/* bits definition in FB_SF_INVLD */
#define FB_SF_INVLD_START		(1 << 8)

#define HIP04_MAX_CLUSTERS		4
#define HIP04_MAX_CPUS_PER_CLUSTER	4

/* Polling cadence and overall timeout used by hip04_cpu_kill(). */
#define POLL_MSEC	10
#define TIMEOUT_MSEC	1000
61
/* Mapped system-controller and fabric register bases (set in hip04_smp_init). */
static void __iomem *sysctrl, *fabric;
/*
 * Per-CPU use count, indexed by [cluster][core]. 0 means the core is down;
 * hip04_boot_secondary() increments it and hip04_cpu_die() decrements it,
 * both under boot_lock.
 */
static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
/* Serializes all accesses to hip04_cpu_table and the reset registers. */
static DEFINE_SPINLOCK(boot_lock);
/* Physical address of the fabric block; pushed to RAM via sync_cache_w(). */
static u32 fabric_phys_addr;
/*
 * Contents of the DT "boot-method" property:
 * [0]: bootwrapper physical address
 * [1]: bootwrapper size
 * [2]: relocation address
 * [3]: relocation size
 */
static u32 hip04_boot_method[4];
73
74static bool hip04_cluster_is_down(unsigned int cluster)
75{
76 int i;
77
78 for (i = 0; i < HIP04_MAX_CPUS_PER_CLUSTER; i++)
79 if (hip04_cpu_table[cluster][i])
80 return false;
81 return true;
82}
83
84static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on)
85{
86 unsigned long data;
87
88 if (!fabric)
89 BUG();
90 data = readl_relaxed(fabric + FAB_SF_MODE);
91 if (on)
92 data |= 1 << cluster;
93 else
94 data &= ~(1 << cluster);
95 writel_relaxed(data, fabric + FAB_SF_MODE);
96 do {
97 cpu_relax();
98 } while (data != readl_relaxed(fabric + FAB_SF_MODE));
99}
100
/*
 * Release a secondary core from reset and kick it with a wakeup IPI.
 *
 * @l_cpu: logical CPU number to bring up
 * @idle:  idle task for the new CPU (unused here; required by the
 *         smp_boot_secondary signature)
 *
 * Returns 0 on success, -ENODEV if the system controller was never
 * mapped, -EINVAL for an out-of-range cluster/core.
 */
static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
{
	unsigned int mpidr, cpu, cluster;
	unsigned long data;
	void __iomem *sys_dreq, *sys_status;

	/* Translate the logical CPU into (cluster, core) affinity levels. */
	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (!sysctrl)
		return -ENODEV;
	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
		return -EINVAL;

	spin_lock_irq(&boot_lock);

	/* Already up (or a pending power-up): just bump the use count. */
	if (hip04_cpu_table[cluster][cpu])
		goto out;

	sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster);
	sys_status = sysctrl + SC_CPU_RESET_STATUS(cluster);
	if (hip04_cluster_is_down(cluster)) {
		/*
		 * First core in this cluster: deassert the cluster debug
		 * reset, wait for it to take effect, then enable snooping.
		 */
		data = CLUSTER_DEBUG_RESET_BIT;
		writel_relaxed(data, sys_dreq);
		do {
			cpu_relax();
			data = readl_relaxed(sys_status);
		} while (data & CLUSTER_DEBUG_RESET_STATUS);
		hip04_set_snoop_filter(cluster, 1);
	}

	/* Deassert core, NEON and core-debug resets for this core. */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sys_dreq);
	/* Poll until the status register no longer matches what we wrote. */
	do {
		cpu_relax();
	} while (data == readl_relaxed(sys_status));

	/*
	 * We may fail to power up core again without this delay.
	 * It's not mentioned in document. It's found by test.
	 */
	udelay(20);

	arch_send_wakeup_ipi_mask(cpumask_of(l_cpu));

out:
	/* Count this power-up request; paired with hip04_cpu_die(). */
	hip04_cpu_table[cluster][cpu]++;
	spin_unlock_irq(&boot_lock);

	return 0;
}
154
4c9e0f76 155#ifdef CONFIG_HOTPLUG_CPU
/*
 * Power-down path, executed on the dying CPU itself.
 *
 * Drops this core's use count; if a concurrent power-up request raced
 * ahead (count back to 1) the CPU simply returns and keeps running.
 * Otherwise the core flushes its caches out of coherency and parks in
 * WFI forever; hip04_cpu_kill() (on another CPU) finishes the job by
 * asserting the hardware reset.
 */
static void hip04_cpu_die(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	spin_lock(&boot_lock);
	hip04_cpu_table[cluster][cpu]--;
	if (hip04_cpu_table[cluster][cpu] == 1) {
		/* A power_up request went ahead of us. */
		spin_unlock(&boot_lock);
		return;
	} else if (hip04_cpu_table[cluster][cpu] > 1) {
		pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
		BUG();
	}

	/* Decide under the lock whether we are the cluster's last core. */
	last_man = hip04_cluster_is_down(cluster);
	spin_unlock(&boot_lock);
	if (last_man) {
		/* Since it's Cortex A15, disable L2 prefetching. */
		/*
		 * NOTE(review): 0x400 presumably sets the L2 prefetch-disable
		 * bit in the c15,c0,3 register -- confirm against the A15 TRM.
		 */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3 \n\t"
		"isb	\n\t"
		"dsb	"
		: : "r" (0x400) );
		/* Last core: flush all cache levels before losing coherency. */
		v7_exit_coherency_flush(all);
	} else {
		/* Other cores still up: flush to the point of unification only. */
		v7_exit_coherency_flush(louis);
	}

	/* Park here until hip04_cpu_kill() asserts our reset. */
	for (;;)
		wfi();
}
193
/*
 * Finish powering down @l_cpu from another CPU.
 *
 * Waits (up to TIMEOUT_MSEC, polling every POLL_MSEC) for the dying
 * core to reach WFI, then asserts its resets and, if it was the last
 * core in the cluster, turns the cluster's snoop filter off.
 *
 * Returns 1 when the core was successfully put into reset, 0 on
 * timeout or if a power-up request raced in (non-zero use count).
 */
static int hip04_cpu_kill(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	unsigned int data, tries, count;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	BUG_ON(cluster >= HIP04_MAX_CLUSTERS ||
	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);

	count = TIMEOUT_MSEC / POLL_MSEC;
	spin_lock_irq(&boot_lock);
	for (tries = 0; tries < count; tries++) {
		/* A concurrent power-up beat us: abort the kill. */
		if (hip04_cpu_table[cluster][cpu])
			goto err;
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		if (data & CORE_WFI_STATUS(cpu))
			break;
		/* Drop the lock across the sleep so boot/die paths can run. */
		spin_unlock_irq(&boot_lock);
		/* Wait for clean L2 when the whole cluster is down. */
		msleep(POLL_MSEC);
		spin_lock_irq(&boot_lock);
	}
	if (tries >= count)
		goto err;
	/* Assert core, NEON and core-debug resets for the dead core. */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sysctrl + SC_CPU_RESET_REQ(cluster));
	/* Poll until the reset status confirms the core is held in reset. */
	for (tries = 0; tries < count; tries++) {
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		if (data & CORE_RESET_STATUS(cpu))
			break;
	}
	if (tries >= count)
		goto err;
	/* Last core gone: stop snooping into this cluster. */
	if (hip04_cluster_is_down(cluster))
		hip04_set_snoop_filter(cluster, 0);
	spin_unlock_irq(&boot_lock);
	return 1;
err:
	spin_unlock_irq(&boot_lock);
	return 0;
}
4c9e0f76 240#endif
9cdc9991 241
905cdf9d
NP
/* SMP callbacks registered via smp_set_ops() in hip04_smp_init(). */
static struct smp_operations __initdata hip04_smp_ops = {
	.smp_boot_secondary = hip04_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	/* Hotplug teardown pair: die runs on the victim, kill on a survivor. */
	.cpu_die = hip04_cpu_die,
	.cpu_kill = hip04_cpu_kill,
#endif
};
249
250static bool __init hip04_cpu_table_init(void)
251{
252 unsigned int mpidr, cpu, cluster;
253
254 mpidr = read_cpuid_mpidr();
255 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
256 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
257
258 if (cluster >= HIP04_MAX_CLUSTERS ||
259 cpu >= HIP04_MAX_CPUS_PER_CLUSTER) {
260 pr_err("%s: boot CPU is out of bound!\n", __func__);
261 return false;
262 }
263 hip04_set_snoop_filter(cluster, 1);
264 hip04_cpu_table[cluster][cpu] = 1;
265 return true;
266}
267
/*
 * Early init: locate the bootwrapper, system controller and fabric in
 * the device tree, map the needed register spaces, seed the CPU table,
 * fill in the secondary-entry relocation words, and register the SMP
 * operations.
 *
 * On success returns 0 (ret is 0 from the last successful call on this
 * path); on failure unwinds mappings/reservations via the goto chain
 * and returns a negative errno.
 */
static int __init hip04_smp_init(void)
{
	struct device_node *np, *np_sctl, *np_fab;
	struct resource fab_res;
	void __iomem *relocation;
	int ret = -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-bootwrapper");
	if (!np)
		goto err;
	/* boot-method: {bootwrapper addr, bootwrapper size, reloc addr, reloc size} */
	ret = of_property_read_u32_array(np, "boot-method",
					 &hip04_boot_method[0], 4);
	if (ret)
		goto err;
	np_sctl = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
	if (!np_sctl)
		goto err;
	np_fab = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-fabric");
	if (!np_fab)
		goto err;

	/* Keep the kernel out of the bootwrapper's memory. */
	ret = memblock_reserve(hip04_boot_method[0], hip04_boot_method[1]);
	if (ret)
		goto err;

	relocation = ioremap(hip04_boot_method[2], hip04_boot_method[3]);
	if (!relocation) {
		pr_err("failed to map relocation space\n");
		ret = -ENOMEM;
		goto err_reloc;
	}
	sysctrl = of_iomap(np_sctl, 0);
	if (!sysctrl) {
		pr_err("failed to get sysctrl base\n");
		ret = -ENOMEM;
		goto err_sysctrl;
	}
	ret = of_address_to_resource(np_fab, 0, &fab_res);
	if (ret) {
		pr_err("failed to get fabric base phys\n");
		goto err_fabric;
	}
	fabric_phys_addr = fab_res.start;
	/* Push the address to RAM so not-yet-coherent cores can read it. */
	sync_cache_w(&fabric_phys_addr);
	fabric = of_iomap(np_fab, 0);
	if (!fabric) {
		pr_err("failed to get fabric base\n");
		ret = -ENOMEM;
		goto err_fabric;
	}

	if (!hip04_cpu_table_init()) {
		ret = -EINVAL;
		goto err_table;
	}

	/*
	 * Fill the instruction address that is used after secondary core
	 * out of reset.
	 */
	writel_relaxed(hip04_boot_method[0], relocation);
	writel_relaxed(0xa5a5a5a5, relocation + 4); /* magic number */
	writel_relaxed(virt_to_phys(secondary_startup), relocation + 8);
	writel_relaxed(0, relocation + 12);
	/* Relocation words are only needed during boot handoff. */
	iounmap(relocation);

	smp_set_ops(&hip04_smp_ops);
	return ret;
err_table:
	iounmap(fabric);
err_fabric:
	iounmap(sysctrl);
err_sysctrl:
	iounmap(relocation);
err_reloc:
	memblock_free(hip04_boot_method[0], hip04_boot_method[1]);
err:
	return ret;
}
905cdf9d 347early_initcall(hip04_smp_init);