Commit | Line | Data |
---|---|---|
1a59d1b8 | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
1da177e4 LT |
2 | /* |
3 | * pSeries_lpar.c | |
4 | * Copyright (C) 2001 Todd Inglett, IBM Corporation | |
5 | * | |
6 | * pSeries LPAR support. | |
1da177e4 LT |
7 | */ |
8 | ||
f7ebf352 ME |
9 | /* Enables debugging of low-level hash table routines - careful! */ |
10 | #undef DEBUG | |
65471d76 | 11 | #define pr_fmt(fmt) "lpar: " fmt |
1da177e4 | 12 | |
1da177e4 LT |
13 | #include <linux/kernel.h> |
14 | #include <linux/dma-mapping.h> | |
463ce0e1 | 15 | #include <linux/console.h> |
66b15db6 | 16 | #include <linux/export.h> |
58995a9a | 17 | #include <linux/jump_label.h> |
dbcf929c DG |
18 | #include <linux/delay.h> |
19 | #include <linux/stop_machine.h> | |
d62c8dee NR |
20 | #include <linux/spinlock.h> |
21 | #include <linux/cpuhotplug.h> | |
22 | #include <linux/workqueue.h> | |
23 | #include <linux/proc_fs.h> | |
1da177e4 LT |
24 | #include <asm/processor.h> |
25 | #include <asm/mmu.h> | |
26 | #include <asm/page.h> | |
ca5999fd | 27 | #include <linux/pgtable.h> |
1da177e4 | 28 | #include <asm/machdep.h> |
1da177e4 | 29 | #include <asm/mmu_context.h> |
1da177e4 | 30 | #include <asm/iommu.h> |
1da177e4 LT |
31 | #include <asm/tlb.h> |
32 | #include <asm/prom.h> | |
1da177e4 | 33 | #include <asm/cputable.h> |
dcad47fc | 34 | #include <asm/udbg.h> |
2249ca9d | 35 | #include <asm/smp.h> |
c8cd093a | 36 | #include <asm/trace.h> |
f5339277 | 37 | #include <asm/firmware.h> |
212bebb4 | 38 | #include <asm/plpar_wrappers.h> |
c1caae3d | 39 | #include <asm/kexec.h> |
408cddd9 | 40 | #include <asm/fadump.h> |
42f5b4ca | 41 | #include <asm/asm-prototypes.h> |
c6c26fb5 | 42 | #include <asm/debugfs.h> |
a1218720 | 43 | |
21cf9133 | 44 | #include "pseries.h" |
1da177e4 | 45 | |
1a527286 AK |
46 | /* Flag bits for H_BULK_REMOVE */ |
47 | #define HBR_REQUEST 0x4000000000000000UL | |
48 | #define HBR_RESPONSE 0x8000000000000000UL | |
49 | #define HBR_END 0xc000000000000000UL | |
50 | #define HBR_AVPN 0x0200000000000000UL | |
51 | #define HBR_ANDCOND 0x0100000000000000UL | |
52 | ||
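/*
 * Editor's sketch, not part of the original file: how these flag bits are
 * combined into H_BULK_REMOVE parameter words (mirroring
 * pSeries_lpar_flush_hash_range() below). Each translation entry takes two
 * words, and HBR_END terminates a partially filled 8-word buffer:
 *
 *	param[pix]   = HBR_REQUEST | HBR_AVPN | slot;
 *	param[pix+1] = hpte_encode_avpn(vpn, psize, ssize);
 *	...
 *	param[pix]   = HBR_END;
 */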
1da177e4 | 53 | |
b9377ffc | 54 | /* in hvCall.S */ |
1da177e4 | 55 | EXPORT_SYMBOL(plpar_hcall); |
b9377ffc | 56 | EXPORT_SYMBOL(plpar_hcall9); |
1da177e4 | 57 | EXPORT_SYMBOL(plpar_hcall_norets); |
b9377ffc | 58 | |
1211ee61 LD |
59 | /* |
60 | * H_BLOCK_REMOVE supported block size for this page size in a segment whose | |
61 | * base page size is that page size. | |
62 | * | |
63 | * The first index is the segment base page size, the second one is the actual | |
64 | * page size. | |
65 | */ | |
66 | static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init; | |
67 | ||
59545ebe LD |
68 | /* |
69 | * Due to the involved complexity, and because the current hypervisor only | |
70 | * returns this value or 0, we limit H_BLOCK_REMOVE support to a block | |
71 | * size of 8. | |
72 | */ | |
73 | #define HBLKRM_SUPPORTED_BLOCK_SIZE 8 | |
74 | ||
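/*
 * Editor's illustration (page-size constants assumed, for clarity only):
 * for a segment with a 64K base page size mapping 64K actual pages, block
 * invalidate support is a simple table lookup:
 *
 *	hblkrm_size[MMU_PAGE_64K][MMU_PAGE_64K] == HBLKRM_SUPPORTED_BLOCK_SIZE
 *
 * which is exactly the test made by is_supported_hlbkrm() further down.
 */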
d62c8dee NR |
75 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
76 | static u8 dtl_mask = DTL_LOG_PREEMPT; | |
77 | #else | |
78 | static u8 dtl_mask; | |
79 | #endif | |
80 | ||
18a593c8 | 81 | void alloc_dtl_buffers(unsigned long *time_limit) |
1c85a2a1 NR |
82 | { |
83 | int cpu; | |
84 | struct paca_struct *pp; | |
85 | struct dtl_entry *dtl; | |
86 | ||
87 | for_each_possible_cpu(cpu) { | |
88 | pp = paca_ptrs[cpu]; | |
d62c8dee NR |
89 | if (pp->dispatch_log) |
90 | continue; | |
1c85a2a1 NR |
91 | dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); |
92 | if (!dtl) { | |
93 | pr_warn("Failed to allocate dispatch trace log for cpu %d\n", | |
94 | cpu); | |
d62c8dee | 95 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
1c85a2a1 | 96 | pr_warn("Stolen time statistics will be unreliable\n"); |
d62c8dee | 97 | #endif |
1c85a2a1 NR |
98 | break; |
99 | } | |
100 | ||
101 | pp->dtl_ridx = 0; | |
102 | pp->dispatch_log = dtl; | |
103 | pp->dispatch_log_end = dtl + N_DISPATCH_LOG; | |
104 | pp->dtl_curr = dtl; | |
18a593c8 NR |
105 | |
106 | if (time_limit && time_after(jiffies, *time_limit)) { | |
107 | cond_resched(); | |
108 | *time_limit = jiffies + HZ; | |
109 | } | |
1c85a2a1 NR |
110 | } |
111 | } | |
112 | ||
113 | void register_dtl_buffer(int cpu) | |
114 | { | |
115 | long ret; | |
116 | struct paca_struct *pp; | |
117 | struct dtl_entry *dtl; | |
118 | int hwcpu = get_hard_smp_processor_id(cpu); | |
119 | ||
120 | pp = paca_ptrs[cpu]; | |
121 | dtl = pp->dispatch_log; | |
d62c8dee | 122 | if (dtl && dtl_mask) { |
1c85a2a1 NR |
123 | pp->dtl_ridx = 0; |
124 | pp->dtl_curr = dtl; | |
125 | lppaca_of(cpu).dtl_idx = 0; | |
126 | ||
127 | /* hypervisor reads buffer length from this field */ | |
128 | dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES); | |
129 | ret = register_dtl(hwcpu, __pa(dtl)); | |
130 | if (ret) | |
131 | pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n", | |
132 | cpu, hwcpu, ret); | |
133 | ||
d62c8dee | 134 | lppaca_of(cpu).dtl_enable_mask = dtl_mask; |
1c85a2a1 NR |
135 | } |
136 | } | |
137 | ||
06220d78 | 138 | #ifdef CONFIG_PPC_SPLPAR |
d62c8dee NR |
139 | struct dtl_worker { |
140 | struct delayed_work work; | |
141 | int cpu; | |
142 | }; | |
143 | ||
144 | struct vcpu_dispatch_data { | |
145 | int last_disp_cpu; | |
146 | ||
147 | int total_disp; | |
148 | ||
149 | int same_cpu_disp; | |
150 | int same_chip_disp; | |
151 | int diff_chip_disp; | |
152 | int far_chip_disp; | |
153 | ||
154 | int numa_home_disp; | |
155 | int numa_remote_disp; | |
156 | int numa_far_disp; | |
157 | }; | |
158 | ||
159 | /* | |
160 | * This represents the number of cpus in the hypervisor. Since there is no | |
161 | * architected way to discover the number of processors in the host, we | |
162 | * provision for dealing with NR_CPUS. This is currently 2048 by default, and | |
163 | * is sufficient for our purposes. This will need to be tweaked if | |
164 | * CONFIG_NR_CPUS is changed. | |
165 | */ | |
166 | #define NR_CPUS_H NR_CPUS | |
167 | ||
06220d78 | 168 | DEFINE_RWLOCK(dtl_access_lock); |
d62c8dee NR |
169 | static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data); |
170 | static DEFINE_PER_CPU(u64, dtl_entry_ridx); | |
171 | static DEFINE_PER_CPU(struct dtl_worker, dtl_workers); | |
172 | static enum cpuhp_state dtl_worker_state; | |
173 | static DEFINE_MUTEX(dtl_enable_mutex); | |
174 | static int vcpudispatch_stats_on __read_mostly; | |
175 | static int vcpudispatch_stats_freq = 50; | |
176 | static __be32 *vcpu_associativity, *pcpu_associativity; | |
177 | ||
178 | ||
18a593c8 | 179 | static void free_dtl_buffers(unsigned long *time_limit) |
d62c8dee NR |
180 | { |
181 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
182 | int cpu; | |
183 | struct paca_struct *pp; | |
184 | ||
185 | for_each_possible_cpu(cpu) { | |
186 | pp = paca_ptrs[cpu]; | |
187 | if (!pp->dispatch_log) | |
188 | continue; | |
189 | kmem_cache_free(dtl_cache, pp->dispatch_log); | |
190 | pp->dtl_ridx = 0; | |
191 | pp->dispatch_log = NULL; | |
192 | pp->dispatch_log_end = NULL; | |
193 | pp->dtl_curr = NULL; | |
18a593c8 NR |
194 | |
195 | if (time_limit && time_after(jiffies, *time_limit)) { | |
196 | cond_resched(); | |
197 | *time_limit = jiffies + HZ; | |
198 | } | |
d62c8dee NR |
199 | } |
200 | #endif | |
201 | } | |
202 | ||
203 | static int init_cpu_associativity(void) | |
204 | { | |
205 | vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core, | |
206 | VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL); | |
207 | pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core, | |
208 | VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL); | |
209 | ||
210 | if (!vcpu_associativity || !pcpu_associativity) { | |
211 | pr_err("error allocating memory for associativity information\n"); | |
212 | return -ENOMEM; | |
213 | } | |
214 | ||
215 | return 0; | |
216 | } | |
217 | ||
218 | static void destroy_cpu_associativity(void) | |
219 | { | |
220 | kfree(vcpu_associativity); | |
221 | kfree(pcpu_associativity); | |
222 | vcpu_associativity = pcpu_associativity = NULL; | |
223 | } | |
224 | ||
225 | static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag) | |
226 | { | |
227 | __be32 *assoc; | |
228 | int rc = 0; | |
229 | ||
230 | assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE]; | |
231 | if (!assoc[0]) { | |
232 | rc = hcall_vphn(cpu, flag, &assoc[0]); | |
233 | if (rc) | |
234 | return NULL; | |
235 | } | |
236 | ||
237 | return assoc; | |
238 | } | |
239 | ||
240 | static __be32 *get_pcpu_associativity(int cpu) | |
241 | { | |
242 | return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU); | |
243 | } | |
244 | ||
245 | static __be32 *get_vcpu_associativity(int cpu) | |
246 | { | |
247 | return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU); | |
248 | } | |
249 | ||
250 | static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu) | |
251 | { | |
252 | __be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc; | |
253 | ||
254 | if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H) | |
255 | return -EINVAL; | |
256 | ||
257 | last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu); | |
258 | cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu); | |
259 | ||
260 | if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc) | |
261 | return -EIO; | |
262 | ||
263 | return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc); | |
264 | } | |
265 | ||
266 | static int cpu_home_node_dispatch_distance(int disp_cpu) | |
267 | { | |
268 | __be32 *disp_cpu_assoc, *vcpu_assoc; | |
269 | int vcpu_id = smp_processor_id(); | |
270 | ||
271 | if (disp_cpu >= NR_CPUS_H) { | |
272 | pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n", | |
273 | disp_cpu, NR_CPUS_H); | |
274 | return -EINVAL; | |
275 | } | |
276 | ||
277 | disp_cpu_assoc = get_pcpu_associativity(disp_cpu); | |
278 | vcpu_assoc = get_vcpu_associativity(vcpu_id); | |
279 | ||
280 | if (!disp_cpu_assoc || !vcpu_assoc) | |
281 | return -EIO; | |
282 | ||
283 | return cpu_distance(disp_cpu_assoc, vcpu_assoc); | |
284 | } | |
285 | ||
286 | static void update_vcpu_disp_stat(int disp_cpu) | |
287 | { | |
288 | struct vcpu_dispatch_data *disp; | |
289 | int distance; | |
290 | ||
291 | disp = this_cpu_ptr(&vcpu_disp_data); | |
292 | if (disp->last_disp_cpu == -1) { | |
293 | disp->last_disp_cpu = disp_cpu; | |
294 | return; | |
295 | } | |
296 | ||
297 | disp->total_disp++; | |
298 | ||
299 | if (disp->last_disp_cpu == disp_cpu || | |
300 | (cpu_first_thread_sibling(disp->last_disp_cpu) == | |
301 | cpu_first_thread_sibling(disp_cpu))) | |
302 | disp->same_cpu_disp++; | |
303 | else { | |
304 | distance = cpu_relative_dispatch_distance(disp->last_disp_cpu, | |
305 | disp_cpu); | |
306 | if (distance < 0) | |
307 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n", | |
308 | smp_processor_id()); | |
309 | else { | |
310 | switch (distance) { | |
311 | case 0: | |
312 | disp->same_chip_disp++; | |
313 | break; | |
314 | case 1: | |
315 | disp->diff_chip_disp++; | |
316 | break; | |
317 | case 2: | |
318 | disp->far_chip_disp++; | |
319 | break; | |
320 | default: | |
321 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n", | |
322 | smp_processor_id(), | |
323 | disp->last_disp_cpu, | |
324 | disp_cpu, | |
325 | distance); | |
326 | } | |
327 | } | |
328 | } | |
329 | ||
330 | distance = cpu_home_node_dispatch_distance(disp_cpu); | |
331 | if (distance < 0) | |
332 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n", | |
333 | smp_processor_id()); | |
334 | else { | |
335 | switch (distance) { | |
336 | case 0: | |
337 | disp->numa_home_disp++; | |
338 | break; | |
339 | case 1: | |
340 | disp->numa_remote_disp++; | |
341 | break; | |
342 | case 2: | |
343 | disp->numa_far_disp++; | |
344 | break; | |
345 | default: | |
346 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n", | |
347 | smp_processor_id(), | |
348 | disp_cpu, | |
349 | distance); | |
350 | } | |
351 | } | |
352 | ||
353 | disp->last_disp_cpu = disp_cpu; | |
354 | } | |
355 | ||
356 | static void process_dtl_buffer(struct work_struct *work) | |
357 | { | |
358 | struct dtl_entry dtle; | |
359 | u64 i = __this_cpu_read(dtl_entry_ridx); | |
360 | struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); | |
361 | struct dtl_entry *dtl_end = local_paca->dispatch_log_end; | |
362 | struct lppaca *vpa = local_paca->lppaca_ptr; | |
363 | struct dtl_worker *d = container_of(work, struct dtl_worker, work.work); | |
364 | ||
365 | if (!local_paca->dispatch_log) | |
366 | return; | |
367 | ||
368 | /* if we have been migrated away, we cancel ourselves */ |
369 | if (d->cpu != smp_processor_id()) { | |
370 | pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n", | |
371 | smp_processor_id()); | |
372 | return; | |
373 | } | |
374 | ||
375 | if (i == be64_to_cpu(vpa->dtl_idx)) | |
376 | goto out; | |
377 | ||
378 | while (i < be64_to_cpu(vpa->dtl_idx)) { | |
379 | dtle = *dtl; | |
380 | barrier(); | |
381 | if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { | |
382 | /* buffer has overflowed */ | |
383 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n", | |
384 | d->cpu, | |
385 | be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i); | |
386 | i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; | |
387 | dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); | |
388 | continue; | |
389 | } | |
390 | update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id)); | |
391 | ++i; | |
392 | ++dtl; | |
393 | if (dtl == dtl_end) | |
394 | dtl = local_paca->dispatch_log; | |
395 | } | |
396 | ||
397 | __this_cpu_write(dtl_entry_ridx, i); | |
398 | ||
399 | out: | |
400 | schedule_delayed_work_on(d->cpu, to_delayed_work(work), | |
401 | HZ / vcpudispatch_stats_freq); | |
402 | } | |
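/*
 * Editor's worked example for the overflow handling above (numbers
 * invented for illustration): if N_DISPATCH_LOG were 4096, with read
 * index i = 100 and the hypervisor's vpa->dtl_idx = 5000, then
 * i + N_DISPATCH_LOG = 4196 < 5000, so 5000 - 4096 - 100 = 804 entries
 * were overwritten before being read, and i is resynced to
 * 5000 - 4096 = 904, the oldest entry still present in the ring.
 */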
403 | ||
404 | static int dtl_worker_online(unsigned int cpu) | |
405 | { | |
406 | struct dtl_worker *d = &per_cpu(dtl_workers, cpu); | |
407 | ||
408 | memset(d, 0, sizeof(*d)); | |
409 | INIT_DELAYED_WORK(&d->work, process_dtl_buffer); | |
410 | d->cpu = cpu; | |
411 | ||
412 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
413 | per_cpu(dtl_entry_ridx, cpu) = 0; | |
414 | register_dtl_buffer(cpu); | |
415 | #else | |
416 | per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx); | |
417 | #endif | |
418 | ||
419 | schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq); | |
420 | return 0; | |
421 | } | |
422 | ||
423 | static int dtl_worker_offline(unsigned int cpu) | |
424 | { | |
425 | struct dtl_worker *d = &per_cpu(dtl_workers, cpu); | |
426 | ||
427 | cancel_delayed_work_sync(&d->work); | |
428 | ||
429 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
430 | unregister_dtl(get_hard_smp_processor_id(cpu)); | |
431 | #endif | |
432 | ||
433 | return 0; | |
434 | } | |
435 | ||
436 | static void set_global_dtl_mask(u8 mask) | |
437 | { | |
438 | int cpu; | |
439 | ||
440 | dtl_mask = mask; | |
441 | for_each_present_cpu(cpu) | |
442 | lppaca_of(cpu).dtl_enable_mask = dtl_mask; | |
443 | } | |
444 | ||
445 | static void reset_global_dtl_mask(void) | |
446 | { | |
447 | int cpu; | |
448 | ||
449 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
450 | dtl_mask = DTL_LOG_PREEMPT; | |
451 | #else | |
452 | dtl_mask = 0; | |
453 | #endif | |
454 | for_each_present_cpu(cpu) | |
455 | lppaca_of(cpu).dtl_enable_mask = dtl_mask; | |
456 | } | |
457 | ||
18a593c8 | 458 | static int dtl_worker_enable(unsigned long *time_limit) |
d62c8dee NR |
459 | { |
460 | int rc = 0, state; | |
461 | ||
462 | if (!write_trylock(&dtl_access_lock)) { | |
463 | rc = -EBUSY; | |
464 | goto out; | |
465 | } | |
466 | ||
467 | set_global_dtl_mask(DTL_LOG_ALL); | |
468 | ||
469 | /* Set up DTL buffers and register them */ |
18a593c8 | 470 | alloc_dtl_buffers(time_limit); |
d62c8dee NR |
471 | |
472 | state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online", | |
473 | dtl_worker_online, dtl_worker_offline); | |
474 | if (state < 0) { | |
475 | pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n"); | |
18a593c8 | 476 | free_dtl_buffers(time_limit); |
d62c8dee NR |
477 | reset_global_dtl_mask(); |
478 | write_unlock(&dtl_access_lock); | |
479 | rc = -EINVAL; | |
480 | goto out; | |
481 | } | |
482 | dtl_worker_state = state; | |
483 | ||
484 | out: | |
485 | return rc; | |
486 | } | |
487 | ||
18a593c8 | 488 | static void dtl_worker_disable(unsigned long *time_limit) |
d62c8dee NR |
489 | { |
490 | cpuhp_remove_state(dtl_worker_state); | |
18a593c8 | 491 | free_dtl_buffers(time_limit); |
d62c8dee NR |
492 | reset_global_dtl_mask(); |
493 | write_unlock(&dtl_access_lock); | |
494 | } | |
495 | ||
496 | static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, | |
497 | size_t count, loff_t *ppos) | |
498 | { | |
18a593c8 | 499 | unsigned long time_limit = jiffies + HZ; |
d62c8dee NR |
500 | struct vcpu_dispatch_data *disp; |
501 | int rc, cmd, cpu; | |
502 | char buf[16]; | |
503 | ||
504 | if (count > 15) | |
505 | return -EINVAL; | |
506 | ||
507 | if (copy_from_user(buf, p, count)) | |
508 | return -EFAULT; | |
509 | ||
510 | buf[count] = 0; | |
511 | rc = kstrtoint(buf, 0, &cmd); | |
512 | if (rc || cmd < 0 || cmd > 1) { | |
513 | pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n"); | |
514 | return rc ? rc : -EINVAL; | |
515 | } | |
516 | ||
517 | mutex_lock(&dtl_enable_mutex); | |
518 | ||
519 | if ((cmd == 0 && !vcpudispatch_stats_on) || | |
520 | (cmd == 1 && vcpudispatch_stats_on)) | |
521 | goto out; | |
522 | ||
523 | if (cmd) { | |
524 | rc = init_cpu_associativity(); | |
525 | if (rc) | |
526 | goto out; | |
527 | ||
528 | for_each_possible_cpu(cpu) { | |
529 | disp = per_cpu_ptr(&vcpu_disp_data, cpu); | |
530 | memset(disp, 0, sizeof(*disp)); | |
531 | disp->last_disp_cpu = -1; | |
532 | } | |
533 | ||
18a593c8 | 534 | rc = dtl_worker_enable(&time_limit); |
d62c8dee NR |
535 | if (rc) { |
536 | destroy_cpu_associativity(); | |
537 | goto out; | |
538 | } | |
539 | } else { | |
18a593c8 | 540 | dtl_worker_disable(&time_limit); |
d62c8dee NR |
541 | destroy_cpu_associativity(); |
542 | } | |
543 | ||
544 | vcpudispatch_stats_on = cmd; | |
545 | ||
546 | out: | |
547 | mutex_unlock(&dtl_enable_mutex); | |
548 | if (rc) | |
549 | return rc; | |
550 | return count; | |
551 | } | |
552 | ||
553 | static int vcpudispatch_stats_display(struct seq_file *p, void *v) | |
554 | { | |
555 | int cpu; | |
556 | struct vcpu_dispatch_data *disp; | |
557 | ||
558 | if (!vcpudispatch_stats_on) { | |
559 | seq_puts(p, "off\n"); | |
560 | return 0; | |
561 | } | |
562 | ||
563 | for_each_online_cpu(cpu) { | |
564 | disp = per_cpu_ptr(&vcpu_disp_data, cpu); | |
565 | seq_printf(p, "cpu%d", cpu); | |
566 | seq_put_decimal_ull(p, " ", disp->total_disp); | |
567 | seq_put_decimal_ull(p, " ", disp->same_cpu_disp); | |
568 | seq_put_decimal_ull(p, " ", disp->same_chip_disp); | |
569 | seq_put_decimal_ull(p, " ", disp->diff_chip_disp); | |
570 | seq_put_decimal_ull(p, " ", disp->far_chip_disp); | |
571 | seq_put_decimal_ull(p, " ", disp->numa_home_disp); | |
572 | seq_put_decimal_ull(p, " ", disp->numa_remote_disp); | |
573 | seq_put_decimal_ull(p, " ", disp->numa_far_disp); | |
574 | seq_puts(p, "\n"); | |
575 | } | |
576 | ||
577 | return 0; | |
578 | } | |
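/*
 * Editor's illustration (counter values made up): each line emitted by
 * vcpudispatch_stats_display() above has the form
 *
 *	cpu0 4300 4010 280 10 0 4290 10 0
 *
 * i.e. total dispatches, then the same-cpu / same-chip / diff-chip /
 * far-chip breakdown, then the NUMA home / remote / far breakdown.
 */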
579 | ||
580 | static int vcpudispatch_stats_open(struct inode *inode, struct file *file) | |
581 | { | |
582 | return single_open(file, vcpudispatch_stats_display, NULL); | |
583 | } | |
584 | ||
97a32539 AD |
585 | static const struct proc_ops vcpudispatch_stats_proc_ops = { |
586 | .proc_open = vcpudispatch_stats_open, | |
587 | .proc_read = seq_read, | |
588 | .proc_write = vcpudispatch_stats_write, | |
589 | .proc_lseek = seq_lseek, | |
590 | .proc_release = single_release, | |
d62c8dee NR |
591 | }; |
592 | ||
593 | static ssize_t vcpudispatch_stats_freq_write(struct file *file, | |
594 | const char __user *p, size_t count, loff_t *ppos) | |
595 | { | |
596 | int rc, freq; | |
597 | char buf[16]; | |
598 | ||
599 | if (count > 15) | |
600 | return -EINVAL; | |
601 | ||
602 | if (copy_from_user(buf, p, count)) | |
603 | return -EFAULT; | |
604 | ||
605 | buf[count] = 0; | |
606 | rc = kstrtoint(buf, 0, &freq); | |
607 | if (rc || freq < 1 || freq > HZ) { | |
608 | pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n", | |
609 | HZ); | |
610 | return rc ? rc : -EINVAL; | |
611 | } | |
612 | ||
613 | vcpudispatch_stats_freq = freq; | |
614 | ||
615 | return count; | |
616 | } | |
617 | ||
618 | static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v) | |
619 | { | |
620 | seq_printf(p, "%d\n", vcpudispatch_stats_freq); | |
621 | return 0; | |
622 | } | |
623 | ||
624 | static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file) | |
625 | { | |
626 | return single_open(file, vcpudispatch_stats_freq_display, NULL); | |
627 | } | |
628 | ||
97a32539 AD |
629 | static const struct proc_ops vcpudispatch_stats_freq_proc_ops = { |
630 | .proc_open = vcpudispatch_stats_freq_open, | |
631 | .proc_read = seq_read, | |
632 | .proc_write = vcpudispatch_stats_freq_write, | |
633 | .proc_lseek = seq_lseek, | |
634 | .proc_release = single_release, | |
d62c8dee NR |
635 | }; |
636 | ||
637 | static int __init vcpudispatch_stats_procfs_init(void) | |
638 | { | |
adde8715 NP |
639 | /* |
640 | * Avoid smp_processor_id while preemptible. All CPUs should have | |
641 | * the same value for lppaca_shared_proc. | |
642 | */ | |
643 | preempt_disable(); | |
644 | if (!lppaca_shared_proc(get_lppaca())) { | |
645 | preempt_enable(); | |
d62c8dee | 646 | return 0; |
adde8715 NP |
647 | } |
648 | preempt_enable(); | |
d62c8dee NR |
649 | |
650 | if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL, | |
651 | &vcpudispatch_stats_proc_ops)) | |
652 | pr_err("vcpudispatch_stats: error creating procfs file\n"); | |
653 | else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL, | |
654 | &vcpudispatch_stats_freq_proc_ops)) | |
655 | pr_err("vcpudispatch_stats_freq: error creating procfs file\n"); | |
656 | ||
657 | return 0; | |
658 | } | |
659 | ||
660 | machine_device_initcall(pseries, vcpudispatch_stats_procfs_init); | |
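/*
 * Editor's usage sketch (paths follow the proc_create() calls above):
 *
 *	echo 1 > /proc/powerpc/vcpudispatch_stats        # enable counting
 *	cat /proc/powerpc/vcpudispatch_stats             # one line per cpu
 *	echo 25 > /proc/powerpc/vcpudispatch_stats_freq  # 25 samples/sec (1..HZ)
 *	echo 0 > /proc/powerpc/vcpudispatch_stats        # disable
 */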
06220d78 NR |
661 | #endif /* CONFIG_PPC_SPLPAR */ |
662 | ||
1da177e4 LT |
663 | void vpa_init(int cpu) |
664 | { | |
665 | int hwcpu = get_hard_smp_processor_id(cpu); | |
2f6093c8 | 666 | unsigned long addr; |
1da177e4 | 667 | long ret; |
233ccd0d | 668 | |
b89bdfb8 ME |
669 | /* |
670 | * The spec says it "may be problematic" if CPU x registers the VPA of | |
671 | * CPU y. We should never do that, but wail if we ever do. | |
672 | */ | |
673 | WARN_ON(cpu != smp_processor_id()); | |
674 | ||
233ccd0d | 675 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
8154c5d2 | 676 | lppaca_of(cpu).vmxregs_in_use = 1; |
233ccd0d | 677 | |
6e0b8bc9 ME |
678 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
679 | lppaca_of(cpu).ebb_regs_in_use = 1; | |
680 | ||
8154c5d2 | 681 | addr = __pa(&lppaca_of(cpu)); |
2f6093c8 | 682 | ret = register_vpa(hwcpu, addr); |
1da177e4 | 683 | |
2f6093c8 | 684 | if (ret) { |
711ef84e AB |
685 | pr_err("WARNING: VPA registration for cpu %d (hw %d) of area " |
686 | "%lx failed with %ld\n", cpu, hwcpu, addr, ret); | |
2f6093c8 MN |
687 | return; |
688 | } | |
d8c476ee | 689 | |
4e003747 | 690 | #ifdef CONFIG_PPC_BOOK3S_64 |
2f6093c8 MN |
691 | /* |
692 | * PAPR says this feature is SLB-Buffer but firmware never | |
693 | * reports that. All SPLPARs support the SLB shadow buffer. |
694 | */ | |
d8c476ee | 695 | if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) { |
d2e60075 | 696 | addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr); |
2f6093c8 MN |
697 | ret = register_slb_shadow(hwcpu, addr); |
698 | if (ret) | |
711ef84e AB |
699 | pr_err("WARNING: SLB shadow buffer registration for " |
700 | "cpu %d (hw %d) of area %lx failed with %ld\n", | |
701 | cpu, hwcpu, addr, ret); | |
2f6093c8 | 702 | } |
4e003747 | 703 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
cf9efce0 PM |
704 | |
705 | /* | |
706 | * Register dispatch trace log, if one has been allocated. | |
707 | */ | |
1c85a2a1 | 708 | register_dtl_buffer(cpu); |
1da177e4 LT |
709 | } |
710 | ||
4e003747 | 711 | #ifdef CONFIG_PPC_BOOK3S_64 |
d8c476ee | 712 | |
035223fb | 713 | static long pSeries_lpar_hpte_insert(unsigned long hpte_group, |
5524a27d AK |
714 | unsigned long vpn, unsigned long pa, |
715 | unsigned long rflags, unsigned long vflags, | |
b1022fbd | 716 | int psize, int apsize, int ssize) |
1da177e4 | 717 | { |
1da177e4 LT |
718 | unsigned long lpar_rc; |
719 | unsigned long flags; | |
720 | unsigned long slot; | |
96e28449 | 721 | unsigned long hpte_v, hpte_r; |
1da177e4 | 722 | |
3c726f8d | 723 | if (!(vflags & HPTE_V_BOLTED)) |
5524a27d AK |
724 | pr_devel("hpte_insert(group=%lx, vpn=%016lx, " |
725 | "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n", | |
726 | hpte_group, vpn, pa, rflags, vflags, psize); | |
3c726f8d | 727 | |
b1022fbd | 728 | hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID; |
6b243fcf | 729 | hpte_r = hpte_encode_r(pa, psize, apsize) | rflags; |
3c726f8d BH |
730 | |
731 | if (!(vflags & HPTE_V_BOLTED)) | |
551a232c | 732 | pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); |
3c726f8d | 733 | |
1da177e4 LT |
734 | /* Now fill in the actual HPTE */ |
735 | /* Set CEC cookie to 0 */ | |
736 | /* Zero page = 0 */ | |
737 | /* I-cache Invalidate = 0 */ | |
738 | /* I-cache synchronize = 0 */ | |
739 | /* Exact = 0 */ | |
740 | flags = 0; | |
741 | ||
9ee820fa BK |
742 | if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) |
743 | flags |= H_COALESCE_CAND; | |
1da177e4 | 744 | |
b9377ffc | 745 | lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); |
706c8c93 | 746 | if (unlikely(lpar_rc == H_PTEG_FULL)) { |
ca42d8d2 | 747 | pr_devel("Hash table group is full\n"); |
1da177e4 | 748 | return -1; |
3c726f8d | 749 | } |
1da177e4 LT |
750 | |
751 | /* | |
752 | * Since we try to ioremap PHBs we don't own, the pte insert |
753 | * will fail. However we must catch the failure in hash_page | |
754 | * or we will loop forever, so return -2 in this case. | |
755 | */ | |
706c8c93 | 756 | if (unlikely(lpar_rc != H_SUCCESS)) { |
ca42d8d2 | 757 | pr_err("Failed hash pte insert with error %ld\n", lpar_rc); |
1da177e4 | 758 | return -2; |
3c726f8d BH |
759 | } |
760 | if (!(vflags & HPTE_V_BOLTED)) | |
551a232c | 761 | pr_devel(" -> slot: %lu\n", slot & 7); |
1da177e4 LT |
762 | |
763 | /* Because of iSeries, we have to pass down the secondary | |
764 | * bucket bit here as well | |
765 | */ | |
96e28449 | 766 | return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3); |
1da177e4 LT |
767 | } |
768 | ||
769 | static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock); | |
770 | ||
771 | static long pSeries_lpar_hpte_remove(unsigned long hpte_group) | |
772 | { | |
773 | unsigned long slot_offset; | |
774 | unsigned long lpar_rc; | |
775 | int i; | |
776 | unsigned long dummy1, dummy2; | |
777 | ||
778 | /* pick a random slot to start at */ | |
779 | slot_offset = mftb() & 0x7; | |
780 | ||
781 | for (i = 0; i < HPTES_PER_GROUP; i++) { | |
782 | ||
783 | /* don't remove a bolted entry */ | |
784 | lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset, | |
82ce028a | 785 | HPTE_V_BOLTED, &dummy1, &dummy2); |
706c8c93 | 786 | if (lpar_rc == H_SUCCESS) |
1da177e4 | 787 | return i; |
9fb26401 MW |
788 | |
789 | /* | |
790 | * The test for adjunct partition is performed before the | |
791 | * ANDCOND test. H_RESOURCE may be returned, so we need to | |
792 | * check for that as well. | |
793 | */ | |
794 | BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); | |
1da177e4 LT |
795 | |
796 | slot_offset++; | |
797 | slot_offset &= 0x7; | |
798 | } | |
799 | ||
800 | return -1; | |
801 | } | |
802 | ||
5246adec | 803 | static void manual_hpte_clear_all(void) |
1da177e4 LT |
804 | { |
805 | unsigned long size_bytes = 1UL << ppc64_pft_size; | |
806 | unsigned long hpte_count = size_bytes >> 4; | |
d504bed6 MN |
807 | struct { |
808 | unsigned long pteh; | |
809 | unsigned long ptel; | |
810 | } ptes[4]; | |
b7abc5c5 | 811 | long lpar_rc; |
bed9a315 | 812 | unsigned long i, j; |
d504bed6 MN |
813 | |
814 | /* Read in batches of 4; |
815 | * invalidate only valid entries not in the VRMA. |
816 | * hpte_count will be a multiple of 4. |
817 | */ | |
818 | for (i = 0; i < hpte_count; i += 4) { | |
819 | lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes); | |
ca42d8d2 AK |
820 | if (lpar_rc != H_SUCCESS) { |
821 | pr_info("Failed to read hash page table at %ld err %ld\n", | |
822 | i, lpar_rc); | |
d504bed6 | 823 | continue; |
ca42d8d2 | 824 | } |
d504bed6 MN |
825 | for (j = 0; j < 4; j++){ |
826 | if ((ptes[j].pteh & HPTE_V_VRMA_MASK) == | |
827 | HPTE_V_VRMA_MASK) | |
828 | continue; | |
829 | if (ptes[j].pteh & HPTE_V_VALID) | |
830 | plpar_pte_remove_raw(0, i + j, 0, | |
831 | &(ptes[j].pteh), &(ptes[j].ptel)); | |
b7abc5c5 SS |
832 | } |
833 | } | |
5246adec AB |
834 | } |
835 | ||
836 | static int hcall_hpte_clear_all(void) | |
837 | { | |
838 | int rc; | |
839 | ||
840 | do { | |
841 | rc = plpar_hcall_norets(H_CLEAR_HPT); | |
842 | } while (rc == H_CONTINUE); | |
843 | ||
844 | return rc; | |
845 | } | |
846 | ||
847 | static void pseries_hpte_clear_all(void) | |
848 | { | |
849 | int rc; | |
850 | ||
851 | rc = hcall_hpte_clear_all(); | |
852 | if (rc != H_SUCCESS) | |
853 | manual_hpte_clear_all(); | |
e844b1ee AB |
854 | |
855 | #ifdef __LITTLE_ENDIAN__ | |
408cddd9 HB |
856 | /* |
857 | * Reset exceptions to big endian. | |
858 | * | |
859 | * FIXME this is a hack for kexec, we need to reset the exception | |
860 | * endian before starting the new kernel and this is a convenient place | |
861 | * to do it. | |
862 | * | |
863 | * This is also called on boot when a fadump happens. In that case we | |
864 | * must not change the exception endian mode. | |
865 | */ | |
d3cbff1b BH |
866 | if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) |
867 | pseries_big_endian_exceptions(); | |
e844b1ee | 868 | #endif |
1da177e4 LT |
869 | } |
870 | ||
871 | /* | |
872 | * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and | |
873 | * the low 3 bits of flags happen to line up. So no transform is needed. | |
874 | * We can probably optimize here and assume the high bits of newpp are | |
875 | * already zero. For now I am paranoid. | |
876 | */ | |
3c726f8d BH |
877 | static long pSeries_lpar_hpte_updatepp(unsigned long slot, |
878 | unsigned long newpp, | |
5524a27d | 879 | unsigned long vpn, |
db3d8534 | 880 | int psize, int apsize, |
aefa5688 | 881 | int ssize, unsigned long inv_flags) |
1da177e4 LT |
882 | { |
883 | unsigned long lpar_rc; | |
e71ff982 | 884 | unsigned long flags; |
3c726f8d | 885 | unsigned long want_v; |
1da177e4 | 886 | |
5524a27d | 887 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
1da177e4 | 888 | |
e71ff982 BS |
889 | flags = (newpp & 7) | H_AVPN; |
890 | if (mmu_has_feature(MMU_FTR_KERNEL_RO)) | |
891 | /* Move pp0 into bit 8 (IBM 55) */ | |
892 | flags |= (newpp & HPTE_R_PP0) >> 55; | |
893 | ||
a8c0bf3c AK |
894 | pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", |
895 | want_v, slot, flags, psize); | |
896 | ||
1189be65 | 897 | lpar_rc = plpar_pte_protect(flags, slot, want_v); |
3c726f8d | 898 | |
706c8c93 | 899 | if (lpar_rc == H_NOT_FOUND) { |
551a232c | 900 | pr_devel("not found !\n"); |
1da177e4 | 901 | return -1; |
3c726f8d BH |
902 | } |
903 | ||
551a232c | 904 | pr_devel("ok\n"); |
1da177e4 | 905 | |
706c8c93 | 906 | BUG_ON(lpar_rc != H_SUCCESS); |
1da177e4 LT |
907 | |
908 | return 0; | |
909 | } | |
910 | ||
4ad90c86 | 911 | static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group) |
1da177e4 | 912 | { |
4ad90c86 AK |
913 | long lpar_rc; |
914 | unsigned long i, j; | |
915 | struct { | |
916 | unsigned long pteh; | |
917 | unsigned long ptel; | |
918 | } ptes[4]; | |
1da177e4 | 919 | |
4ad90c86 | 920 | for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) { |
1da177e4 | 921 | |
4ad90c86 | 922 | lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes); |
ca42d8d2 AK |
923 | if (lpar_rc != H_SUCCESS) { |
924 | pr_info("Failed to read hash page table at %ld err %ld\n", | |
925 | hpte_group, lpar_rc); | |
4ad90c86 | 926 | continue; |
ca42d8d2 | 927 | } |
1da177e4 | 928 | |
4ad90c86 AK |
929 | for (j = 0; j < 4; j++) { |
930 | if (HPTE_V_COMPARE(ptes[j].pteh, want_v) && | |
931 | (ptes[j].pteh & HPTE_V_VALID)) | |
932 | return i + j; | |
933 | } | |
934 | } | |
1da177e4 | 935 | |
4ad90c86 | 936 | return -1; |
1da177e4 LT |
937 | } |
938 | ||
5524a27d | 939 | static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize) |
1da177e4 | 940 | { |
1da177e4 | 941 | long slot; |
4ad90c86 AK |
942 | unsigned long hash; |
943 | unsigned long want_v; | |
944 | unsigned long hpte_group; | |
1da177e4 | 945 | |
5524a27d AK |
946 | hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); |
947 | want_v = hpte_encode_avpn(vpn, psize, ssize); | |
1189be65 | 948 | |
d78d5dac AK |
949 | /* |
950 | * We try to keep bolted entries always in the primary hash, |
951 | * but in some cases we can find them in the secondary too. |
952 | */ | |
4ad90c86 AK |
953 | hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; |
954 | slot = __pSeries_lpar_hpte_find(want_v, hpte_group); | |
d78d5dac AK |
955 | if (slot < 0) { |
956 | /* Try in secondary */ | |
957 | hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP; | |
958 | slot = __pSeries_lpar_hpte_find(want_v, hpte_group); | |
959 | if (slot < 0) | |
960 | return -1; | |
961 | } | |
4ad90c86 AK |
962 | return hpte_group + slot; |
963 | } | |
1da177e4 LT |
964 | |
965 | static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, | |
3c726f8d | 966 | unsigned long ea, |
1189be65 | 967 | int psize, int ssize) |
1da177e4 | 968 | { |
5524a27d AK |
969 | unsigned long vpn; |
970 | unsigned long lpar_rc, slot, vsid, flags; | |
1da177e4 | 971 | |
1189be65 | 972 | vsid = get_kernel_vsid(ea, ssize); |
5524a27d | 973 | vpn = hpt_vpn(ea, vsid, ssize); |
1da177e4 | 974 | |
5524a27d | 975 | slot = pSeries_lpar_hpte_find(vpn, psize, ssize); |
1da177e4 LT |
976 | BUG_ON(slot == -1); |
977 | ||
978 | flags = newpp & 7; | |
e71ff982 BS |
979 | if (mmu_has_feature(MMU_FTR_KERNEL_RO)) |
980 | /* Move pp0 into bit 8 (IBM 55) */ | |
981 | flags |= (newpp & HPTE_R_PP0) >> 55; | |
982 | ||
1da177e4 LT |
983 | lpar_rc = plpar_pte_protect(flags, slot, 0); |
984 | ||
706c8c93 | 985 | BUG_ON(lpar_rc != H_SUCCESS); |
1da177e4 LT |
986 | } |
987 | ||
5524a27d | 988 | static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, |
db3d8534 AK |
989 | int psize, int apsize, |
990 | int ssize, int local) | |
1da177e4 | 991 | { |
3c726f8d | 992 | unsigned long want_v; |
1da177e4 LT |
993 | unsigned long lpar_rc; |
994 | unsigned long dummy1, dummy2; | |
995 | ||
5524a27d AK |
996 | pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n", |
997 | slot, vpn, psize, local); | |
1da177e4 | 998 | |
5524a27d | 999 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
1189be65 | 1000 | lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); |
706c8c93 | 1001 | if (lpar_rc == H_NOT_FOUND) |
1da177e4 LT |
1002 | return; |
1003 | ||
706c8c93 | 1004 | BUG_ON(lpar_rc != H_SUCCESS); |
1da177e4 LT |
1005 | } |
1006 | ||
ba2dd8a2 LD |
1007 | |
1008 | /* | |
1009 | * As defined in PAPR section 14.5.4.1.8. |
1010 | * The control mask doesn't include the returned reference and change bits |
1011 | * from the processed PTE. |
1012 | */ | |
1013 | #define HBLKR_AVPN 0x0100000000000000UL | |
1014 | #define HBLKR_CTRL_MASK 0xf800000000000000UL | |
1015 | #define HBLKR_CTRL_SUCCESS 0x8000000000000000UL | |
1016 | #define HBLKR_CTRL_ERRNOTFOUND 0x8800000000000000UL | |
1017 | #define HBLKR_CTRL_ERRBUSY 0xa000000000000000UL | |
1018 | ||
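/*
 * Editor's sketch of decoding one returned status word (this mirrors the
 * check performed in call_block_remove() below):
 *
 *	unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK;
 *	if (ctrl == HBLKR_CTRL_ERRBUSY)
 *		... the corresponding entry must be retried ...
 *
 * The bits excluded by HBLKR_CTRL_MASK carry the reference and change
 * bits of the processed PTE.
 */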
59545ebe LD |
1019 | /* |
1020 | * Return true if this block size is supported for the specified segment |
1021 | * base page size and actual page size. |
1022 | * |
1023 | * Currently, only a block size of 8 is supported. |
1024 | */ | |
1025 | static inline bool is_supported_hlbkrm(int bpsize, int psize) | |
1026 | { | |
1027 | return (hblkrm_size[bpsize][psize] == HBLKRM_SUPPORTED_BLOCK_SIZE); | |
1028 | } | |
1029 | ||
ba2dd8a2 LD |
1030 | /** |
1031 | * H_BLOCK_REMOVE caller. | |
1032 | * @idx should point to the latest @param entry set with a PTEX. | |
1033 | * If a PTE cannot be processed because another CPU has already locked that |
1034 | * group, those entries are put back in @param starting at index 1. |
1035 | * If entries have to be retried and @retry_busy is set to true, these entries |
1036 | * are retried until success. If @retry_busy is set to false, the return |
1037 | * value is the number of entries yet to be processed. |
1038 | */ | |
1039 | static unsigned long call_block_remove(unsigned long idx, unsigned long *param, | |
1040 | bool retry_busy) | |
1041 | { | |
1042 | unsigned long i, rc, new_idx; | |
1043 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | |
1044 | ||
1045 | if (idx < 2) { | |
1046 | pr_warn("Unexpected empty call to H_BLOCK_REMOVE"); | |
1047 | return 0; | |
1048 | } | |
1049 | again: | |
1050 | new_idx = 0; | |
1051 | if (idx > PLPAR_HCALL9_BUFSIZE) { | |
1052 | pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx); | |
1053 | idx = PLPAR_HCALL9_BUFSIZE; | |
1054 | } else if (idx < PLPAR_HCALL9_BUFSIZE) | |
1055 | param[idx] = HBR_END; | |
1056 | ||
1057 | rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf, | |
1058 | param[0], /* AVA */ | |
1059 | param[1], param[2], param[3], param[4], /* TS0-7 */ | |
1060 | param[5], param[6], param[7], param[8]); | |
1061 | if (rc == H_SUCCESS) | |
1062 | return 0; | |
1063 | ||
1064 | BUG_ON(rc != H_PARTIAL); | |
1065 | ||
1066 | /* Check that the unprocessed entries were 'not found' or 'busy' */ | |
1067 | for (i = 0; i < idx-1; i++) { | |
1068 | unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK; | |
1069 | ||
1070 | if (ctrl == HBLKR_CTRL_ERRBUSY) { | |
1071 | param[++new_idx] = param[i+1]; | |
1072 | continue; | |
1073 | } | |
1074 | ||
1075 | BUG_ON(ctrl != HBLKR_CTRL_SUCCESS | |
1076 | && ctrl != HBLKR_CTRL_ERRNOTFOUND); | |
1077 | } | |
1078 | ||
1079 | /* | |
1080 | * If entries were found busy, retry them if requested, |
1081 | * or if all the entries have to be retried. |
1082 | */ | |
1083 | if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE-1))) { | |
1084 | idx = new_idx + 1; | |
1085 | goto again; | |
1086 | } | |
1087 | ||
1088 | return new_idx; | |
1089 | } | |
1090 | ||
e34aa03c | 1091 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1a527286 AK |
1092 | /* |
1093 | * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need | |
1094 | * to make sure that we avoid bouncing the hypervisor tlbie lock. | |
1095 | */ | |
1096 | #define PPC64_HUGE_HPTE_BATCH 12 | |
1097 | ||
ba2dd8a2 LD |
1098 | static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn, |
1099 | int count, int psize, int ssize) | |
1a527286 | 1100 | { |
05af40e8 | 1101 | unsigned long param[PLPAR_HCALL9_BUFSIZE]; |
ba2dd8a2 LD |
1102 | unsigned long shift, current_vpgb, vpgb; |
1103 | int i, pix = 0; | |
1a527286 | 1104 | |
ba2dd8a2 LD |
1105 | shift = mmu_psize_defs[psize].shift; |
1106 | ||
1107 | for (i = 0; i < count; i++) { | |
1108 | /* | |
1109 | * Shift 3 more bits to the right to get an |
1110 | * 8-page aligned virtual address. |
1111 | */ | |
1112 | vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3)); | |
1113 | if (!pix || vpgb != current_vpgb) { | |
1114 | /* | |
1115 | * Need to start a new 8-page block; flush |
1116 | * the current one if needed. | |
1117 | */ | |
1118 | if (pix) | |
1119 | (void)call_block_remove(pix, param, true); | |
1120 | current_vpgb = vpgb; | |
1121 | param[0] = hpte_encode_avpn(vpn[i], psize, ssize); | |
1122 | pix = 1; | |
1123 | } | |
1124 | ||
1125 | param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i]; | |
1126 | if (pix == PLPAR_HCALL9_BUFSIZE) { | |
1127 | pix = call_block_remove(pix, param, false); | |
1128 | /* | |
1129 | * pix = 0 means that all the entries were |
1130 | * removed; we can start a new block. |
1131 | * Otherwise, there are entries to retry, |
1132 | * and pix points to the latest one, so |
1133 | * we should increment it and try to continue | |
1134 | * the same block. | |
1135 | */ | |
1136 | if (pix) | |
1137 | pix++; | |
1138 | } | |
1139 | } | |
1140 | if (pix) | |
1141 | (void)call_block_remove(pix, param, true); | |
1142 | } | |
1143 | ||
1144 | static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn, | |
1145 | int count, int psize, int ssize) | |
1146 | { | |
1147 | unsigned long param[PLPAR_HCALL9_BUFSIZE]; | |
1148 | int i = 0, pix = 0, rc; | |
1a527286 AK |
1149 | |
1150 | for (i = 0; i < count; i++) { | |
1151 | ||
1152 | if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { | |
1153 | pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0, | |
1154 | ssize, 0); | |
1155 | } else { | |
1156 | param[pix] = HBR_REQUEST | HBR_AVPN | slot[i]; | |
1157 | param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize); | |
1158 | pix += 2; | |
1159 | if (pix == 8) { | |
1160 | rc = plpar_hcall9(H_BULK_REMOVE, param, | |
1161 | param[0], param[1], param[2], | |
1162 | param[3], param[4], param[5], | |
1163 | param[6], param[7]); | |
1164 | BUG_ON(rc != H_SUCCESS); | |
1165 | pix = 0; | |
1166 | } | |
1167 | } | |
1168 | } | |
1169 | if (pix) { | |
1170 | param[pix] = HBR_END; | |
1171 | rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], | |
1172 | param[2], param[3], param[4], param[5], | |
1173 | param[6], param[7]); | |
1174 | BUG_ON(rc != H_SUCCESS); | |
1175 | } | |
ba2dd8a2 LD |
1176 | } |
1177 | ||
1178 | static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot, | |
1179 | unsigned long *vpn, | |
1180 | int count, int psize, | |
1181 | int ssize) | |
1182 | { | |
1183 | unsigned long flags = 0; | |
1184 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); | |
1185 | ||
1186 | if (lock_tlbie) | |
1187 | spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); | |
1188 | ||
59545ebe LD |
1189 | /* Assuming THP size is 16M */ |
1190 | if (is_supported_hlbkrm(psize, MMU_PAGE_16M)) | |
ba2dd8a2 LD |
1191 | hugepage_block_invalidate(slot, vpn, count, psize, ssize); |
1192 | else | |
1193 | hugepage_bulk_invalidate(slot, vpn, count, psize, ssize); | |
1a527286 AK |
1194 | |
1195 | if (lock_tlbie) | |
1196 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); | |
1197 | } | |
1198 | ||
fa1f8ae8 AK |
1199 | static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, |
1200 | unsigned long addr, | |
1201 | unsigned char *hpte_slot_array, | |
d557b098 | 1202 | int psize, int ssize, int local) |
1a527286 | 1203 | { |
fa1f8ae8 | 1204 | int i, index = 0; |
1a527286 AK |
1205 | unsigned long s_addr = addr; |
1206 | unsigned int max_hpte_count, valid; | |
1207 | unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH]; | |
1208 | unsigned long slot_array[PPC64_HUGE_HPTE_BATCH]; | |
fa1f8ae8 | 1209 | unsigned long shift, hidx, vpn = 0, hash, slot; |
1a527286 AK |
1210 | |
1211 | shift = mmu_psize_defs[psize].shift; | |
1212 | max_hpte_count = 1U << (PMD_SHIFT - shift); | |
1213 | ||
1214 | for (i = 0; i < max_hpte_count; i++) { | |
1215 | valid = hpte_valid(hpte_slot_array, i); | |
1216 | if (!valid) | |
1217 | continue; | |
1218 | hidx = hpte_hash_index(hpte_slot_array, i); | |
1219 | ||
1220 | /* get the vpn */ | |
1221 | addr = s_addr + (i * (1ul << shift)); | |
1a527286 AK |
1222 | vpn = hpt_vpn(addr, vsid, ssize); |
1223 | hash = hpt_hash(vpn, shift, ssize); | |
1224 | if (hidx & _PTEIDX_SECONDARY) | |
1225 | hash = ~hash; | |
1226 | ||
1227 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
1228 | slot += hidx & _PTEIDX_GROUP_IX; | |
1229 | ||
1230 | slot_array[index] = slot; | |
1231 | vpn_array[index] = vpn; | |
1232 | if (index == PPC64_HUGE_HPTE_BATCH - 1) { | |
1233 | /* | |
1234 | * Now do a bulk invalidate |
1235 | */ | |
1236 | __pSeries_lpar_hugepage_invalidate(slot_array, | |
1237 | vpn_array, | |
1238 | PPC64_HUGE_HPTE_BATCH, | |
1239 | psize, ssize); | |
1240 | index = 0; | |
1241 | } else | |
1242 | index++; | |
1243 | } | |
1244 | if (index) | |
1245 | __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array, | |
1246 | index, psize, ssize); | |
1247 | } | |
e34aa03c AK |
1248 | #else |
1249 | static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, | |
1250 | unsigned long addr, | |
1251 | unsigned char *hpte_slot_array, | |
1252 | int psize, int ssize, int local) | |
1253 | { | |
1254 | WARN(1, "%s called without THP support\n", __func__); | |
1255 | } | |
1256 | #endif | |
1a527286 | 1257 | |
27828f98 DG |
1258 | static int pSeries_lpar_hpte_removebolted(unsigned long ea, |
1259 | int psize, int ssize) | |
f8c8803b | 1260 | { |
5524a27d AK |
1261 | unsigned long vpn; |
1262 | unsigned long slot, vsid; | |
f8c8803b BP |
1263 | |
1264 | vsid = get_kernel_vsid(ea, ssize); | |
5524a27d | 1265 | vpn = hpt_vpn(ea, vsid, ssize); |
f8c8803b | 1266 | |
5524a27d | 1267 | slot = pSeries_lpar_hpte_find(vpn, psize, ssize); |
27828f98 DG |
1268 | if (slot == -1) |
1269 | return -ENOENT; | |
1270 | ||
db3d8534 AK |
1271 | /* |
1272 | * lpar doesn't use the passed actual page size | |
1273 | */ | |
1274 | pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0); | |
27828f98 | 1275 | return 0; |
f8c8803b BP |
1276 | } |
1277 | ||
0effa488 LD |
1278 | |
1279 | static inline unsigned long compute_slot(real_pte_t pte, | |
1280 | unsigned long vpn, | |
1281 | unsigned long index, | |
1282 | unsigned long shift, | |
1283 | int ssize) | |
1284 | { | |
1285 | unsigned long slot, hash, hidx; | |
1286 | ||
1287 | hash = hpt_hash(vpn, shift, ssize); | |
1288 | hidx = __rpte_to_hidx(pte, index); | |
1289 | if (hidx & _PTEIDX_SECONDARY) | |
1290 | hash = ~hash; | |
1291 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
1292 | slot += hidx & _PTEIDX_GROUP_IX; | |
1293 | return slot; | |
1294 | } | |
1295 | ||
ba2dd8a2 LD |
1296 | /** |
1297 | * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed are |
1298 | * "all within the same naturally aligned 8 page virtual address block". | |
1299 | */ | |
1300 | static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch, | |
1301 | unsigned long *param) | |
1302 | { | |
1303 | unsigned long vpn; | |
1304 | unsigned long i, pix = 0; | |
1305 | unsigned long index, shift, slot, current_vpgb, vpgb; | |
1306 | real_pte_t pte; | |
1307 | int psize, ssize; | |
1308 | ||
1309 | psize = batch->psize; | |
1310 | ssize = batch->ssize; | |
1311 | ||
1312 | for (i = 0; i < number; i++) { | |
1313 | vpn = batch->vpn[i]; | |
1314 | pte = batch->pte[i]; | |
1315 | pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { | |
1316 | /* | |
1317 | * Shift 3 more bits to the right to get an |
1318 | * 8-page aligned virtual address. |
1319 | */ | |
1320 | vpgb = (vpn >> (shift - VPN_SHIFT + 3)); | |
1321 | if (!pix || vpgb != current_vpgb) { | |
1322 | /* | |
1323 | * Need to start a new 8-page block; flush |
1324 | * the current one if needed. | |
1325 | */ | |
1326 | if (pix) | |
1327 | (void)call_block_remove(pix, param, | |
1328 | true); | |
1329 | current_vpgb = vpgb; | |
1330 | param[0] = hpte_encode_avpn(vpn, psize, | |
1331 | ssize); | |
1332 | pix = 1; | |
1333 | } | |
1334 | ||
1335 | slot = compute_slot(pte, vpn, index, shift, ssize); | |
1336 | param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot; | |
1337 | ||
1338 | if (pix == PLPAR_HCALL9_BUFSIZE) { | |
1339 | pix = call_block_remove(pix, param, false); | |
1340 | /* | |
1341 | * pix = 0 means that all the entries were |
1342 | * removed; we can start a new block. |
1343 | * Otherwise, there are entries to retry, |
1344 | * and pix points to the latest one, so |
1345 | * we should increment it and try to continue | |
1346 | * the same block. | |
1347 | */ | |
1348 | if (pix) | |
1349 | pix++; | |
1350 | } | |
1351 | } pte_iterate_hashed_end(); | |
1352 | } | |
1353 | ||
1354 | if (pix) | |
1355 | (void)call_block_remove(pix, param, true); | |
1356 | } | |
1357 | ||
1211ee61 LD |
1358 | /* |
1359 | * TLB Block Invalidate Characteristics | |
1360 | * | |
1361 | * These characteristics define the size of the block the hcall H_BLOCK_REMOVE | |
1362 | * is able to process for each pair (segment base page size, actual page size). |
1363 | * |
1364 | * The ibm,get-system-parameter property returns a buffer with the |
1365 | * following layout: | |
1366 | * | |
1367 | * [ 2 bytes size of the RTAS buffer (excluding these 2 bytes) ] | |
1368 | * ----------------- | |
1369 | * TLB Block Invalidate Specifiers: | |
1370 | * [ 1 byte LOG base 2 of the TLB invalidate block size being specified ] | |
1371 | * [ 1 byte Number of page sizes (N) that are supported for the specified | |
1372 | * TLB invalidate block size ] | |
1373 | * [ 1 byte Encoded segment base page size and actual page size | |
1374 | * MSB=0 means 4k segment base page size and actual page size | |
1375 | * MSB=1 the penc value in mmu_psize_def ] | |
1376 | * ... | |
1377 | * ----------------- | |
1378 | * Next TLB Block Invalidate Specifiers... | |
1379 | * ----------------- | |
1380 | * [ 0 ] | |
1381 | */ | |
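/*
 * Editor's example of a returned buffer (byte values invented for
 * illustration): 00 04 | 03 01 00 | 00 decodes as: length = 4 (excluding
 * the two length bytes themselves), one specifier with block shift 3
 * (i.e. a block size of 8) supporting one page size whose encoding byte
 * is 0x00 (MSB=0, so 4K segment base and actual page size), followed by
 * the terminating zero.
 */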
1382 | static inline void set_hblkrm_bloc_size(int bpsize, int psize, | |
1383 | unsigned int block_size) | |
1384 | { | |
1385 | if (block_size > hblkrm_size[bpsize][psize]) | |
1386 | hblkrm_size[bpsize][psize] = block_size; | |
1387 | } | |
1388 | ||
1389 | /* | |
1390 | * Decode the Encoded segment base page size and actual page size. | |
1391 | * PAPR specifies: | |
1392 | * - bit 7 is the L bit | |
1393 | * - bits 0-5 are the penc value | |
1394 | * If the L bit is 0, this means 4K segment base page size and actual page size | |
1395 | * otherwise the penc value should be read. | |
1396 | */ | |
1397 | #define HBLKRM_L_MASK 0x80 | |
1398 | #define HBLKRM_PENC_MASK 0x3f | |
1399 | static inline void __init check_lp_set_hblkrm(unsigned int lp, | |
1400 | unsigned int block_size) | |
1401 | { | |
1402 | unsigned int bpsize, psize; | |
1403 | ||
1404 | /* First, check the L bit, if not set, this means 4K */ | |
1405 | if ((lp & HBLKRM_L_MASK) == 0) { | |
1406 | set_hblkrm_bloc_size(MMU_PAGE_4K, MMU_PAGE_4K, block_size); | |
1407 | return; | |
1408 | } | |
1409 | ||
1410 | lp &= HBLKRM_PENC_MASK; | |
1411 | for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) { | |
1412 | struct mmu_psize_def *def = &mmu_psize_defs[bpsize]; | |
1413 | ||
1414 | for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { | |
1415 | if (def->penc[psize] == lp) { | |
1416 | set_hblkrm_bloc_size(bpsize, psize, block_size); | |
1417 | return; | |
1418 | } | |
1419 | } | |
1420 | } | |
1421 | } | |
1422 | ||
1423 | #define SPLPAR_TLB_BIC_TOKEN 50 | |
1424 | ||
1425 | /* | |
1426 | * The size of the TLB Block Invalidate Characteristics is variable. But at the | |
1427 | * maximum it will be the number of possible page sizes *2 + 10 bytes. | |
1428 | * Currently MMU_PAGE_COUNT is 16, which means 42 bytes. Use a cache line size | |
1429 | * (128 bytes) for the buffer to get plenty of space. | |
1430 | */ | |
1431 | #define SPLPAR_TLB_BIC_MAXLENGTH 128 | |
1432 | ||
1433 | void __init pseries_lpar_read_hblkrm_characteristics(void) | |
1434 | { | |
1435 | unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH]; | |
1436 | int call_status, len, idx, bpsize; | |
1437 | ||
4ab8a485 LD |
1438 | if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) |
1439 | return; | |
1440 | ||
1211ee61 LD |
1441 | spin_lock(&rtas_data_buf_lock); |
1442 | memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE); | |
1443 | call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, | |
1444 | NULL, | |
1445 | SPLPAR_TLB_BIC_TOKEN, | |
1446 | __pa(rtas_data_buf), | |
1447 | RTAS_DATA_BUF_SIZE); | |
1448 | memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH); | |
1449 | local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0'; | |
1450 | spin_unlock(&rtas_data_buf_lock); | |
1451 | ||
1452 | if (call_status != 0) { | |
1453 | pr_warn("%s %s Error calling get-system-parameter (0x%x)\n", | |
1454 | __FILE__, __func__, call_status); | |
1455 | return; | |
1456 | } | |
1457 | ||
1458 | /* | |
1459 | * The first two (2) bytes of the data in the buffer are the length of | |
1460 | * the returned data, not counting these first two (2) bytes. | |
1461 | */ | |
1462 | len = be16_to_cpu(*((u16 *)local_buffer)) + 2; | |
1463 | if (len > SPLPAR_TLB_BIC_MAXLENGTH) { | |
1464 | pr_warn("%s: returned buffer too large (%d)\n", __func__, len); |
1465 | return; | |
1466 | } | |
1467 | ||
1468 | idx = 2; | |
1469 | while (idx < len) { | |
1470 | u8 block_shift = local_buffer[idx++]; | |
1471 | u32 block_size; | |
1472 | unsigned int npsize; | |
1473 | ||
1474 | if (!block_shift) | |
1475 | break; | |
1476 | ||
1477 | block_size = 1 << block_shift; | |
1478 | ||
1479 | for (npsize = local_buffer[idx++]; | |
1480 | npsize > 0 && idx < len; npsize--) | |
1481 | check_lp_set_hblkrm((unsigned int) local_buffer[idx++], | |
1482 | block_size); | |
1483 | } | |
1484 | ||
1485 | for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) | |
1486 | for (idx = 0; idx < MMU_PAGE_COUNT; idx++) | |
1487 | if (hblkrm_size[bpsize][idx]) | |
1488 | pr_info("H_BLOCK_REMOVE supports base psize:%d psize:%d block size:%d", | |
1489 | bpsize, idx, hblkrm_size[bpsize][idx]); | |
1490 | } | |
1491 | ||
1da177e4 LT |
1492 | /* |
1493 | * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie | |
1494 | * lock. | |
1495 | */ | |
035223fb | 1496 | static void pSeries_lpar_flush_hash_range(unsigned long number, int local) |
1da177e4 | 1497 | { |
5524a27d | 1498 | unsigned long vpn; |
f03e64f2 | 1499 | unsigned long i, pix, rc; |
12e86f92 | 1500 | unsigned long flags = 0; |
69111bac | 1501 | struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); |
44ae3ab3 | 1502 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
05af40e8 | 1503 | unsigned long param[PLPAR_HCALL9_BUFSIZE]; |
0effa488 | 1504 | unsigned long index, shift, slot; |
f03e64f2 | 1505 | real_pte_t pte; |
1189be65 | 1506 | int psize, ssize; |
1da177e4 LT |
1507 | |
1508 | if (lock_tlbie) | |
1509 | spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); | |
1510 | ||
59545ebe | 1511 | if (is_supported_hlbkrm(batch->psize, batch->psize)) { |
ba2dd8a2 LD |
1512 | do_block_remove(number, batch, param); |
1513 | goto out; | |
1514 | } | |
1515 | ||
f03e64f2 | 1516 | psize = batch->psize; |
1189be65 | 1517 | ssize = batch->ssize; |
f03e64f2 PM |
1518 | pix = 0; |
1519 | for (i = 0; i < number; i++) { | |
5524a27d | 1520 | vpn = batch->vpn[i]; |
f03e64f2 | 1521 | pte = batch->pte[i]; |
5524a27d | 1522 | pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { |
0effa488 | 1523 | slot = compute_slot(pte, vpn, index, shift, ssize); |
12e86f92 | 1524 | if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { |
db3d8534 AK |
1525 | /* |
1526 | * lpar doesn't use the passed actual page size | |
1527 | */ | |
5524a27d | 1528 | pSeries_lpar_hpte_invalidate(slot, vpn, psize, |
db3d8534 | 1529 | 0, ssize, local); |
12e86f92 PM |
1530 | } else { |
1531 | param[pix] = HBR_REQUEST | HBR_AVPN | slot; | |
5524a27d | 1532 | param[pix+1] = hpte_encode_avpn(vpn, psize, |
1189be65 | 1533 | ssize); |
12e86f92 PM |
1534 | pix += 2; |
1535 | if (pix == 8) { | |
1536 | rc = plpar_hcall9(H_BULK_REMOVE, param, | |
f03e64f2 PM |
1537 | param[0], param[1], param[2], |
1538 | param[3], param[4], param[5], | |
1539 | param[6], param[7]); | |
12e86f92 PM |
1540 | BUG_ON(rc != H_SUCCESS); |
1541 | pix = 0; | |
1542 | } | |
f03e64f2 PM |
1543 | } |
1544 | } pte_iterate_hashed_end(); | |
1545 | } | |
1546 | if (pix) { | |
1547 | param[pix] = HBR_END; | |
1548 | rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], | |
1549 | param[2], param[3], param[4], param[5], | |
1550 | param[6], param[7]); | |
1551 | BUG_ON(rc != H_SUCCESS); | |
1552 | } | |
1da177e4 | 1553 | |
ba2dd8a2 | 1554 | out: |
1da177e4 LT |
1555 | if (lock_tlbie) |
1556 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); | |
1557 | } | |
1558 | ||
4e89a2d8 WS |
1559 | static int __init disable_bulk_remove(char *str) |
1560 | { | |
1561 | if (strcmp(str, "off") == 0 && | |
1562 | firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { | |
65471d76 AK |
1563 | pr_info("Disabling BULK_REMOVE firmware feature"); |
1564 | powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE; | |
4e89a2d8 WS |
1565 | } |
1566 | return 1; | |
1567 | } | |
1568 | ||
1569 | __setup("bulk_remove=", disable_bulk_remove); | |
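/*
 * Editor's note: booting with "bulk_remove=off" on the kernel command
 * line clears FW_FEATURE_BULK_REMOVE, forcing the one-PTE-at-a-time
 * invalidate path in pSeries_lpar_flush_hash_range() above.
 */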
1570 | ||
dbcf929c DG |
1571 | #define HPT_RESIZE_TIMEOUT 10000 /* ms */ |
1572 | ||
1573 | struct hpt_resize_state { | |
1574 | unsigned long shift; | |
1575 | int commit_rc; | |
1576 | }; | |
1577 | ||
1578 | static int pseries_lpar_resize_hpt_commit(void *data) | |
1579 | { | |
1580 | struct hpt_resize_state *state = data; | |
1581 | ||
1582 | state->commit_rc = plpar_resize_hpt_commit(0, state->shift); | |
1583 | if (state->commit_rc != H_SUCCESS) | |
1584 | return -EIO; | |
1585 | ||
1586 | /* Hypervisor has transitioned the HTAB, update our globals */ | |
1587 | ppc64_pft_size = state->shift; | |
1588 | htab_size_bytes = 1UL << ppc64_pft_size; | |
1589 | htab_hash_mask = (htab_size_bytes >> 7) - 1; | |
1590 | ||
1591 | return 0; | |
1592 | } | |
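/*
 * Editor's worked example: a PTEG is 8 HPTEs * 16 bytes = 128 bytes,
 * hence the ">> 7" above. For shift == 28 (a 256 MB HPT):
 *
 *   htab_size_bytes = 1UL << 28;             -> 0x10000000
 *   htab_hash_mask  = (1UL << (28 - 7)) - 1; -> 0x1fffff (2^21 PTEGs)
 */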
1593 | ||
c784be43 GS |
1594 | /* |
1595 | * Must be called in process context. The caller must hold the | |
1596 | * cpus_lock. | |
1597 | */ | |
dbcf929c DG |
1598 | static int pseries_lpar_resize_hpt(unsigned long shift) |
1599 | { | |
1600 | struct hpt_resize_state state = { | |
1601 | .shift = shift, | |
1602 | .commit_rc = H_FUNCTION, | |
1603 | }; | |
1604 | unsigned int delay, total_delay = 0; | |
1605 | int rc; | |
1606 | ktime_t t0, t1, t2; | |
1607 | ||
1608 | might_sleep(); | |
1609 | ||
1610 | if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE)) | |
1611 | return -ENODEV; | |
1612 | ||
65471d76 | 1613 | pr_info("Attempting to resize HPT to shift %lu\n", shift); |
dbcf929c DG |
1614 | |
1615 | t0 = ktime_get(); | |
1616 | ||
1617 | rc = plpar_resize_hpt_prepare(0, shift); | |
1618 | while (H_IS_LONG_BUSY(rc)) { | |
1619 | delay = get_longbusy_msecs(rc); | |
1620 | total_delay += delay; | |
1621 | if (total_delay > HPT_RESIZE_TIMEOUT) { | |
1622 | /* prepare with shift==0 cancels an in-progress resize */ | |
1623 | rc = plpar_resize_hpt_prepare(0, 0); | |
1624 | if (rc != H_SUCCESS) | |
65471d76 | 1625 | pr_warn("Unexpected error %d cancelling timed out HPT resize\n", |
dbcf929c DG |
1626 | rc); |
1627 | return -ETIMEDOUT; | |
1628 | } | |
1629 | msleep(delay); | |
1630 | rc = plpar_resize_hpt_prepare(0, shift); | |
1631 | } | |
1632 | ||
1633 | switch (rc) { | |
1634 | case H_SUCCESS: | |
1635 | /* Continue on */ | |
1636 | break; | |
1637 | ||
1638 | case H_PARAMETER: | |
f172acbf | 1639 | pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n"); |
dbcf929c DG |
1640 | return -EINVAL; |
1641 | case H_RESOURCE: | |
f172acbf | 1642 | pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n"); |
dbcf929c DG |
1643 | return -EPERM; |
1644 | default: | |
65471d76 | 1645 | pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc); |
dbcf929c DG |
1646 | return -EIO; |
1647 | } | |
1648 | ||
1649 | t1 = ktime_get(); | |
1650 | ||
c784be43 GS |
1651 | rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit, |
1652 | &state, NULL); | |
dbcf929c DG |
1653 | |
1654 | t2 = ktime_get(); | |
1655 | ||
1656 | if (rc != 0) { | |
1657 | switch (state.commit_rc) { | |
1658 | case H_PTEG_FULL: | |
dbcf929c DG |
1659 | return -ENOSPC; |
1660 | ||
1661 | default: | |
65471d76 AK |
1662 | pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n", |
1663 | state.commit_rc); | |
dbcf929c DG |
1664 | return -EIO; |
1665 | } | |
1666 | } | |
1667 | ||
65471d76 AK |
1668 | pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n", |
1669 | shift, (long long) ktime_ms_delta(t1, t0), | |
1670 | (long long) ktime_ms_delta(t2, t1)); | |
dbcf929c DG |
1671 | |
1672 | return 0; | |
1673 | } | |
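/* Editor's sketch of a hypothetical caller (the real entry point is
 * mmu_hash_ops.resize_hpt, installed in hpte_init_pseries() below): */
#if 0	/* illustrative only */
static int shrink_hpt_one_step(void)
{
	/*
	 * Ask for half the current HPT size; -ENOSPC (from H_PTEG_FULL)
	 * means the live HPTEs would not fit in the smaller table.
	 */
	return mmu_hash_ops.resize_hpt(ppc64_pft_size - 1);
}
#endif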
1674 | ||
cc3d2940 PM |
1675 | static int pseries_lpar_register_process_table(unsigned long base, |
1676 | unsigned long page_size, unsigned long table_size) | |
1677 | { | |
1678 | long rc; | |
dbfcf3cb | 1679 | unsigned long flags = 0; |
cc3d2940 | 1680 | |
dbfcf3cb PM |
1681 | if (table_size) |
1682 | flags |= PROC_TABLE_NEW; | |
cc3d2940 PM |
1683 | if (radix_enabled()) |
1684 | flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE; | |
dbfcf3cb PM |
1685 | else |
1686 | flags |= PROC_TABLE_HPT_SLB; | |
cc3d2940 PM |
1687 | for (;;) { |
1688 | rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base, | |
1689 | page_size, table_size); | |
1690 | if (!H_IS_LONG_BUSY(rc)) | |
1691 | break; | |
1692 | mdelay(get_longbusy_msecs(rc)); | |
1693 | } | |
1694 | if (rc != H_SUCCESS) { | |
1695 | pr_err("Failed to register process table (rc=%ld)\n", rc); | |
1696 | BUG(); | |
1697 | } | |
1698 | return rc; | |
1699 | } | |
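/*
 * Editor's summary of the flag combinations sent above:
 *
 *   radix guest:            PROC_TABLE_NEW | PROC_TABLE_RADIX | PROC_TABLE_GTSE
 *   hash guest, new table:  PROC_TABLE_NEW | PROC_TABLE_HPT_SLB
 *   hash guest, (0, 0, 0):  PROC_TABLE_HPT_SLB alone, as called from
 *                           hpte_init_pseries() below.
 */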
1700 | ||
6364e84e | 1701 | void __init hpte_init_pseries(void) |
1da177e4 | 1702 | { |
7025776e BH |
1703 | mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate; |
1704 | mmu_hash_ops.hpte_updatepp = pSeries_lpar_hpte_updatepp; | |
1705 | mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp; | |
1706 | mmu_hash_ops.hpte_insert = pSeries_lpar_hpte_insert; | |
1707 | mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove; | |
1708 | mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted; | |
1709 | mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; | |
5246adec | 1710 | mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; |
7025776e | 1711 | mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; |
8971e1c7 ME |
1712 | |
1713 | if (firmware_has_feature(FW_FEATURE_HPT_RESIZE)) | |
1714 | mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; | |
ed6546bd NP |
1715 | |
1716 | /* | |
1717 | * On POWER9, we need to do an H_REGISTER_PROC_TBL hcall | |
1718 | * to inform the hypervisor that we wish to use the HPT. | |
1719 | */ | |
1720 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | |
1721 | pseries_lpar_register_process_table(0, 0, 0); | |
1da177e4 | 1722 | } |
14f966e7 | 1723 | |
cc3d2940 PM |
1724 | void radix_init_pseries(void) |
1725 | { | |
1726 | pr_info("Using radix MMU under hypervisor\n"); | |
ed6546bd NP |
1727 | |
1728 | pseries_lpar_register_process_table(__pa(process_tb), | |
1729 | 0, PRTB_SIZE_SHIFT - 12); | |
cc3d2940 PM |
1730 | } |
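/*
 * Editor's note: the table_size argument appears to be encoded as
 * log2(bytes) - 12, hence PRTB_SIZE_SHIFT - 12 for a process table of
 * 1UL << PRTB_SIZE_SHIFT bytes.
 */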
1731 | ||
14f966e7 RJ |
1732 | #ifdef CONFIG_PPC_SMLPAR |
1733 | #define CMO_FREE_HINT_DEFAULT 1 | |
1734 | static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT; | |
1735 | ||
1736 | static int __init cmo_free_hint(char *str) | |
1737 | { | |
1738 | char *parm; | |
1739 | parm = strstrip(str); | |
1740 | ||
1741 | if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) { | |
65471d76 | 1742 | pr_info("%s: CMO free page hinting is not active.\n", __func__); |
14f966e7 RJ |
1743 | cmo_free_hint_flag = 0; |
1744 | return 1; | |
1745 | } | |
1746 | ||
1747 | cmo_free_hint_flag = 1; | |
65471d76 | 1748 | pr_info("%s: CMO free page hinting is active.\n", __func__); |
14f966e7 RJ |
1749 | |
1750 | if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0) | |
1751 | return 1; | |
1752 | ||
1753 | return 0; | |
1754 | } | |
1755 | ||
1756 | __setup("cmo_free_hint=", cmo_free_hint); | |
1757 | ||
1758 | static void pSeries_set_page_state(struct page *page, int order, | |
1759 | unsigned long state) | |
1760 | { | |
1761 | int i, j; | |
1762 | unsigned long cmo_page_sz, addr; | |
1763 | ||
1764 | cmo_page_sz = cmo_get_page_size(); | |
1765 | addr = __pa((unsigned long)page_address(page)); | |
1766 | ||
1767 | for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) { | |
1768 | for (j = 0; j < PAGE_SIZE; j += cmo_page_sz) | |
1769 | plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0); | |
1770 | } | |
1771 | } | |
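/*
 * Editor's worked example: with a 64K kernel PAGE_SIZE and a 4K
 * hypervisor page size from cmo_get_page_size(), an order-0 free
 * issues 1 * (64K / 4K) = 16 H_PAGE_INIT hcalls, one per CMO page.
 */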
1772 | ||
1773 | void arch_free_page(struct page *page, int order) | |
1774 | { | |
d8c476ee AK |
1775 | if (radix_enabled()) |
1776 | return; | |
14f966e7 RJ |
1777 | if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO)) |
1778 | return; | |
1779 | ||
1780 | pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED); | |
1781 | } | |
1782 | EXPORT_SYMBOL(arch_free_page); | |
1783 | ||
d8c476ee | 1784 | #endif /* CONFIG_PPC_SMLPAR */ |
4e003747 | 1785 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
c8cd093a AB |
1786 | |
1787 | #ifdef CONFIG_TRACEPOINTS | |
e9666d10 | 1788 | #ifdef CONFIG_JUMP_LABEL |
cc1adb5f AB |
1789 | struct static_key hcall_tracepoint_key = STATIC_KEY_INIT; |
1790 | ||
8cf868af | 1791 | int hcall_tracepoint_regfunc(void) |
cc1adb5f AB |
1792 | { |
1793 | static_key_slow_inc(&hcall_tracepoint_key); | |
8cf868af | 1794 | return 0; |
cc1adb5f AB |
1795 | } |
1796 | ||
1797 | void hcall_tracepoint_unregfunc(void) | |
1798 | { | |
1799 | static_key_slow_dec(&hcall_tracepoint_key); | |
1800 | } | |
1801 | #else | |
c8cd093a AB |
1802 | /* |
1803 | * We optimise our hcall path by placing hcall_tracepoint_refcount | |
1804 | * directly in the TOC so we can check if the hcall tracepoints are | |
1805 | * enabled via a single load. | |
1806 | */ | |
1807 | ||
1808 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | |
1809 | extern long hcall_tracepoint_refcount; | |
1810 | ||
8cf868af | 1811 | int hcall_tracepoint_regfunc(void) |
c8cd093a AB |
1812 | { |
1813 | hcall_tracepoint_refcount++; | |
8cf868af | 1814 | return 0; |
c8cd093a AB |
1815 | } |
1816 | ||
1817 | void hcall_tracepoint_unregfunc(void) | |
1818 | { | |
1819 | hcall_tracepoint_refcount--; | |
1820 | } | |
cc1adb5f AB |
1821 | #endif |
1822 | ||
1823 | /* | |
1824 | * Since the tracing code might execute hcalls we need to guard against | |
1825 | * recursion. One example of this is spinlocks calling H_YIELD on | |
1826 | * shared processor partitions. | |
1827 | */ | |
1828 | static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); | |
1829 | ||
c8cd093a | 1830 | |
6f26353c | 1831 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) |
c8cd093a | 1832 | { |
57cdfdf8 AB |
1833 | unsigned long flags; |
1834 | unsigned int *depth; | |
1835 | ||
a5ccfee0 AB |
1836 | /* |
1837 | * We cannot call tracepoints inside RCU idle regions which | |
1838 | * means we must not trace H_CEDE. | |
1839 | */ | |
1840 | if (opcode == H_CEDE) | |
1841 | return; | |
1842 | ||
57cdfdf8 AB |
1843 | local_irq_save(flags); |
1844 | ||
69111bac | 1845 | depth = this_cpu_ptr(&hcall_trace_depth); |
57cdfdf8 AB |
1846 | |
1847 | if (*depth) | |
1848 | goto out; | |
1849 | ||
1850 | (*depth)++; | |
e4f387d8 | 1851 | preempt_disable(); |
6f26353c | 1852 | trace_hcall_entry(opcode, args); |
57cdfdf8 AB |
1853 | (*depth)--; |
1854 | ||
1855 | out: | |
1856 | local_irq_restore(flags); | |
c8cd093a AB |
1857 | } |
1858 | ||
8f2133cc | 1859 | void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf) |
c8cd093a | 1860 | { |
57cdfdf8 AB |
1861 | unsigned long flags; |
1862 | unsigned int *depth; | |
1863 | ||
a5ccfee0 AB |
1864 | if (opcode == H_CEDE) |
1865 | return; | |
1866 | ||
57cdfdf8 AB |
1867 | local_irq_save(flags); |
1868 | ||
69111bac | 1869 | depth = this_cpu_ptr(&hcall_trace_depth); |
57cdfdf8 AB |
1870 | |
1871 | if (*depth) | |
1872 | goto out; | |
1873 | ||
1874 | (*depth)++; | |
6f26353c | 1875 | trace_hcall_exit(opcode, retval, retbuf); |
e4f387d8 | 1876 | preempt_enable(); |
57cdfdf8 AB |
1877 | (*depth)--; |
1878 | ||
1879 | out: | |
1880 | local_irq_restore(flags); | |
c8cd093a AB |
1881 | } |
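/*
 * Editor's note: preempt_disable() in __trace_hcall_entry() pairs with
 * the preempt_enable() above, keeping the task on one CPU between the
 * two halves so they observe the same per-cpu hcall_trace_depth.
 */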
1882 | #endif | |
9ee820fa BK |
1883 | |
1884 | /** | |
1885 | * h_get_mpp | |
1886 | * The H_GET_MPP hcall returns info in 7 parameters. | |
1887 | */ | |
1888 | int h_get_mpp(struct hvcall_mpp_data *mpp_data) | |
1889 | { | |
1890 | int rc; | |
1891 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | |
1892 | ||
1893 | rc = plpar_hcall9(H_GET_MPP, retbuf); | |
1894 | ||
1895 | mpp_data->entitled_mem = retbuf[0]; | |
1896 | mpp_data->mapped_mem = retbuf[1]; | |
1897 | ||
1898 | mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff; | |
1899 | mpp_data->pool_num = retbuf[2] & 0xffff; | |
1900 | ||
1901 | mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; | |
1902 | mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; | |
b0d436c7 | 1903 | mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL; |
9ee820fa BK |
1904 | |
1905 | mpp_data->pool_size = retbuf[4]; | |
1906 | mpp_data->loan_request = retbuf[5]; | |
1907 | mpp_data->backing_mem = retbuf[6]; | |
1908 | ||
1909 | return rc; | |
1910 | } | |
1911 | EXPORT_SYMBOL(h_get_mpp); | |
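/* Editor's sketch of a hypothetical caller: */
#if 0	/* illustrative only */
static void report_mpp(void)
{
	struct hvcall_mpp_data mpp;

	if (h_get_mpp(&mpp) == H_SUCCESS)
		pr_info("entitled_mem=%lu mapped_mem=%lu\n",
			mpp.entitled_mem, mpp.mapped_mem);
}
#endif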
1912 | ||
1913 | int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data) | |
1914 | { | |
1915 | int rc; | |
1916 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 }; | |
1917 | ||
1918 | rc = plpar_hcall9(H_GET_MPP_X, retbuf); | |
1919 | ||
1920 | mpp_x_data->coalesced_bytes = retbuf[0]; | |
1921 | mpp_x_data->pool_coalesced_bytes = retbuf[1]; | |
1922 | mpp_x_data->pool_purr_cycles = retbuf[2]; | |
1923 | mpp_x_data->pool_spurr_cycles = retbuf[3]; | |
1924 | ||
1925 | return rc; | |
1926 | } | |
82228e36 AK |
1927 | |
1928 | static unsigned long vsid_unscramble(unsigned long vsid, int ssize) | |
1929 | { | |
1930 | unsigned long protovsid; | |
1931 | unsigned long va_bits = VA_BITS; | |
1932 | unsigned long modinv, vsid_modulus; | |
1933 | unsigned long max_mod_inv, tmp_modinv; | |
1934 | ||
1935 | if (!mmu_has_feature(MMU_FTR_68_BIT_VA)) | |
1936 | va_bits = 65; | |
1937 | ||
1938 | if (ssize == MMU_SEGSIZE_256M) { | |
1939 | modinv = VSID_MULINV_256M; | |
1940 | vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1); | |
1941 | } else { | |
1942 | modinv = VSID_MULINV_1T; | |
1943 | vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1); | |
1944 | } | |
1945 | ||
1946 | /* | |
1947 | * vsid outside our range. | |
1948 | */ | |
1949 | if (vsid >= vsid_modulus) | |
1950 | return 0; | |
1951 | ||
1952 | /* | |
1953 | * If modinv is the modular multiplicative inverse of (x % vsid_modulus) | |
1954 | * and vsid = (protovsid * x) % vsid_modulus, then: | |
1955 | * protovsid = (vsid * modinv) % vsid_modulus | |
1956 | */ | |
1957 | ||
1958 | /* Check if (vsid * modinv) overflows 63 bits */ | |
1959 | max_mod_inv = 0x7fffffffffffffffull / vsid; | |
1960 | if (modinv < max_mod_inv) | |
1961 | return (vsid * modinv) % vsid_modulus; | |
1962 | ||
1963 | tmp_modinv = modinv / max_mod_inv; | |
1964 | modinv %= max_mod_inv; | |
1965 | ||
1966 | protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus; | |
1967 | protovsid = (protovsid + vsid * modinv) % vsid_modulus; | |
1968 | ||
1969 | return protovsid; | |
1970 | } | |
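/*
 * Editor's note on the overflow-avoiding split above: with
 * m = max_mod_inv = (2^63 - 1) / vsid, write
 *
 *   modinv = tmp_modinv * m + (modinv % m)
 *
 * so that, with M = vsid_modulus,
 *
 *   (vsid * modinv) % M = ((((vsid * m) % M) * tmp_modinv) % M
 *                          + vsid * (modinv % m)) % M
 *
 * where each partial product stays within 63 bits for the magnitudes
 * involved here.
 */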
1971 | ||
1972 | static int __init reserve_vrma_context_id(void) | |
1973 | { | |
1974 | unsigned long protovsid; | |
1975 | ||
1976 | /* | |
1977 | * Reserve context ids which map to reserved virtual addresses. For now | |
1978 | * we only reserve the context id which maps to the VRMA VSID. We ignore | |
1979 | * the addresses in "ibm,adjunct-virtual-addresses" because we don't | |
1980 | * enable adjunct support via the "ibm,client-architecture-support" | |
1981 | * interface. | |
1982 | */ | |
1983 | protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T); | |
1984 | hash__reserve_context_id(protovsid >> ESID_BITS_1T); | |
1985 | return 0; | |
1986 | } | |
1987 | machine_device_initcall(pseries, reserve_vrma_context_id); | |
c6c26fb5 AP |
1988 | |
1989 | #ifdef CONFIG_DEBUG_FS | |
1990 | /* debugfs file interface for vpa data */ | |
1991 | static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len, | |
1992 | loff_t *pos) | |
1993 | { | |
1994 | int cpu = (long)filp->private_data; | |
1995 | struct lppaca *lppaca = &lppaca_of(cpu); | |
1996 | ||
1997 | return simple_read_from_buffer(buf, len, pos, lppaca, | |
1998 | sizeof(struct lppaca)); | |
1999 | } | |
2000 | ||
2001 | static const struct file_operations vpa_fops = { | |
2002 | .open = simple_open, | |
2003 | .read = vpa_file_read, | |
2004 | .llseek = default_llseek, | |
2005 | }; | |
2006 | ||
2007 | static int __init vpa_debugfs_init(void) | |
2008 | { | |
2009 | char name[16]; | |
2010 | long i; | |
11dd34f3 | 2011 | struct dentry *vpa_dir; |
c6c26fb5 AP |
2012 | |
2013 | if (!firmware_has_feature(FW_FEATURE_SPLPAR)) | |
2014 | return 0; | |
2015 | ||
2016 | vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root); | |
c6c26fb5 AP |
2017 | |
2018 | /* set up the per-cpu vpa file */ | |
2019 | for_each_possible_cpu(i) { | |
c6c26fb5 | 2020 | sprintf(name, "cpu-%ld", i); |
ff229319 | 2021 | debugfs_create_file(name, 0400, vpa_dir, (void *)i, &vpa_fops); |
c6c26fb5 AP |
2022 | } |
2023 | ||
2024 | return 0; | |
2025 | } | |
2026 | machine_arch_initcall(pseries, vpa_debugfs_init); | |
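/*
 * Editor's note: on SPLPAR systems the per-cpu VPA is then readable,
 * root only (mode 0400), at /sys/kernel/debug/powerpc/vpa/cpu-N,
 * assuming debugfs is mounted at /sys/kernel/debug.
 */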
2027 | #endif /* CONFIG_DEBUG_FS */ |