/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL


/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

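/*
 * Register this CPU's lppaca (Virtual Processor Area), SLB shadow
 * buffer and dispatch trace log with the hypervisor.
 */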
void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that. All SPLPAR systems support the SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca[cpu].slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

#ifdef CONFIG_PPC_STD_MMU_64

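/*
 * Insert a hashed page table entry via the H_ENTER hcall, letting the
 * hypervisor pick a free slot in the group. Returns the slot (with the
 * secondary-bucket bit in bit 3), -1 if the group is full, or -2 on
 * other hypervisor errors.
 */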
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* Zero page = 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	/* Exact = 0 */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

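/*
 * Evict one non-bolted entry from a full HPTE group so a new entry can
 * be inserted. Starts at a pseudo-random slot (low timebase bits) and
 * removes the first entry the hypervisor agrees is not bolted.
 */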
static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

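/*
 * Fallback for hypervisors without H_CLEAR_HPT: walk the whole hash
 * table four entries at a time and remove every valid entry that is
 * not part of the VRMA (the virtualized real mode area).
 */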
static void manual_hpte_clear_all(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4 and invalidate only the valid entries
	 * that are not in the VRMA. hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

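/*
 * H_CLEAR_HPT zeroes the entire hash page table in one hcall; the
 * hypervisor may return H_CONTINUE to ask to be called again to
 * finish the job.
 */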
static int hcall_hpte_clear_all(void)
{
	int rc;

	do {
		rc = plpar_hcall_norets(H_CLEAR_HPT);
	} while (rc == H_CONTINUE);

	return rc;
}

static void pseries_hpte_clear_all(void)
{
	int rc;

	rc = hcall_hpte_clear_all();
	if (rc != H_SUCCESS)
		manual_hpte_clear_all();

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
		pseries_big_endian_exceptions();
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up. So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero. For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

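/*
 * Scan one HPTE group (eight slots, read four at a time with H_READ)
 * for a valid entry matching want_v. Returns the slot offset within
 * the group, or -1 if no match is found.
 */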
static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;
	return hpte_group + slot;
}

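/*
 * Update the protection bits of a bolted kernel mapping. The entry is
 * expected to exist; both a missing entry and an hcall failure are
 * fatal (BUG).
 */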
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

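/*
 * Invalidate one entry via H_REMOVE, using the AVPN flag so the
 * hypervisor only removes it if the abbreviated VPN still matches.
 * An already-missing entry (H_NOT_FOUND) is tolerated.
 */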
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

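/*
 * Remove up to "count" HPTEs for a huge page. With H_BULK_REMOVE each
 * hcall takes four (slot, AVPN) pairs in the nine-argument buffer;
 * without it we fall back to one H_REMOVE per entry.
 */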
static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
					param[0], param[1], param[2],
					param[3], param[4], param[5],
					param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
	return 0;
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

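/* "bulk_remove=off" on the command line falls back to one H_REMOVE per entry */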
static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

#define HPT_RESIZE_TIMEOUT	10000 /* ms */

struct hpt_resize_state {
	unsigned long shift;
	int commit_rc;
};

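/*
 * Runs under stop_machine() with all other CPUs spinning: commit the
 * new hash table size with the hypervisor, then update the globals the
 * hash MMU code derives from it.
 */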
static int pseries_lpar_resize_hpt_commit(void *data)
{
	struct hpt_resize_state *state = data;

	state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
	if (state->commit_rc != H_SUCCESS)
		return -EIO;

	/* Hypervisor has transitioned the HTAB, update our globals */
	ppc64_pft_size = state->shift;
	htab_size_bytes = 1UL << ppc64_pft_size;
	htab_hash_mask = (htab_size_bytes >> 7) - 1;

	return 0;
}

/* Must be called in user context */
static int pseries_lpar_resize_hpt(unsigned long shift)
{
	struct hpt_resize_state state = {
		.shift = shift,
		.commit_rc = H_FUNCTION,
	};
	unsigned int delay, total_delay = 0;
	int rc;
	ktime_t t0, t1, t2;

	might_sleep();

	if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		return -ENODEV;

	printk(KERN_INFO "lpar: Attempting to resize HPT to shift %lu\n",
	       shift);

	t0 = ktime_get();

	rc = plpar_resize_hpt_prepare(0, shift);
	while (H_IS_LONG_BUSY(rc)) {
		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > HPT_RESIZE_TIMEOUT) {
			/* prepare with shift==0 cancels an in-progress resize */
			rc = plpar_resize_hpt_prepare(0, 0);
			if (rc != H_SUCCESS)
				printk(KERN_WARNING
				       "lpar: Unexpected error %d cancelling timed out HPT resize\n",
				       rc);
			return -ETIMEDOUT;
		}
		msleep(delay);
		rc = plpar_resize_hpt_prepare(0, shift);
	}

	switch (rc) {
	case H_SUCCESS:
		/* Continue on */
		break;

	case H_PARAMETER:
		return -EINVAL;
	case H_RESOURCE:
		return -EPERM;
	default:
		printk(KERN_WARNING
		       "lpar: Unexpected error %d from H_RESIZE_HPT_PREPARE\n",
		       rc);
		return -EIO;
	}

	t1 = ktime_get();

	rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);

	t2 = ktime_get();

	if (rc != 0) {
		switch (state.commit_rc) {
		case H_PTEG_FULL:
			printk(KERN_WARNING
			       "lpar: Hash collision while resizing HPT\n");
			return -ENOSPC;

		default:
			printk(KERN_WARNING
			       "lpar: Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
			       state.commit_rc);
			return -EIO;
		}
	}

	printk(KERN_INFO
	       "lpar: HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
	       shift, (long long) ktime_ms_delta(t1, t0),
	       (long long) ktime_ms_delta(t2, t1));

	return 0;
}

/* Actually only used for radix, so far */
static int pseries_lpar_register_process_table(unsigned long base,
			unsigned long page_size, unsigned long table_size)
{
	long rc;
	unsigned long flags = PROC_TABLE_NEW;

	if (radix_enabled())
		flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
	for (;;) {
		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
					page_size, table_size);
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc != H_SUCCESS) {
		pr_err("Failed to register process table (rc=%ld)\n", rc);
		BUG();
	}
	return rc;
}

void __init hpte_init_pseries(void)
{
	mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp = pSeries_lpar_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert = pSeries_lpar_hpte_insert;
	mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove;
	mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
	mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
	mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
}

void radix_init_pseries(void)
{
	pr_info("Using radix MMU under hypervisor\n");
	register_process_table = pseries_lpar_register_process_table;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

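/*
 * Mark each firmware (CMO) page backing the given Linux pages with the
 * requested state via H_PAGE_INIT; the CMO page size may be smaller
 * than PAGE_SIZE, hence the inner loop.
 */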
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

int hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

int hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this is spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);


void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp() - Gather memory partition data
 *
 * The H_GET_MPP hcall returns its info in 7 parms.
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

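/* Extended counters: coalesced memory and pool (S)PURR cycles via H_GET_MPP_X */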
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}