/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>

#include "pseries.h"

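/*
 * The HBR_* bits below build H_BULK_REMOVE "translation specifiers": each
 * specifier is a pair of doublewords, the first carrying
 * HBR_REQUEST | HBR_AVPN | slot and the second the encoded AVPN. A single
 * plpar_hcall9() takes up to four such pairs, and a partially filled batch
 * is terminated by writing HBR_END into the next parameter word (see the
 * flush and invalidate paths below).
 */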
/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL

/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that. All SPLPAR support SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca[cpu].slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

#ifdef CONFIG_PPC_STD_MMU_64

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* Zero page = 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	/* Exact = 0 */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static void manual_hpte_clear_all(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;	/* each HPTE is 16 bytes */
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4 and invalidate only valid entries that are
	 * not in the VRMA. hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

static int hcall_hpte_clear_all(void)
{
	int rc;

	do {
		rc = plpar_hcall_norets(H_CLEAR_HPT);
	} while (rc == H_CONTINUE);

	return rc;
}

static void pseries_hpte_clear_all(void)
{
	int rc;

	rc = hcall_hpte_clear_all();
	if (rc != H_SUCCESS)
		manual_hpte_clear_all();

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
		pseries_big_endian_exceptions();
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up. So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero. For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;
	return hpte_group + slot;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3: a batch of 12
 * HPTEs is at most three H_BULK_REMOVE calls of four entries each. We
 * also need to make sure that we avoid bouncing the hypervisor tlbie
 * lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
					param[0], param[1], param[2],
					param[3], param[4], param[5],
					param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
	return 0;
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

/* Actually only used for radix, so far */
static int pseries_lpar_register_process_table(unsigned long base,
			unsigned long page_size, unsigned long table_size)
{
	long rc;
	unsigned long flags = PROC_TABLE_NEW;

	if (radix_enabled())
		flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
	for (;;) {
		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
					page_size, table_size);
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc != H_SUCCESS) {
		pr_err("Failed to register process table (rc=%ld)\n", rc);
		BUG();
	}
	return rc;
}

void __init hpte_init_pseries(void)
{
	mmu_hash_ops.hpte_invalidate	 = pSeries_lpar_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	 = pSeries_lpar_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert	 = pSeries_lpar_hpte_insert;
	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
	mmu_hash_ops.hpte_removebolted	 = pSeries_lpar_hpte_removebolted;
	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
	mmu_hash_ops.hpte_clear_all	 = pseries_hpte_clear_all;
	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

void radix_init_pseries(void)
{
	pr_info("Using radix MMU under hypervisor\n");
	register_process_table = pseries_lpar_register_process_table;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

int hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

int hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this is spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}
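
/*
 * Usage sketch (illustrative only, not part of this file): consumers such
 * as the lparcfg code read the memory performance parameters roughly like
 * this:
 *
 *	struct hvcall_mpp_data mpp_data;
 *
 *	if (h_get_mpp(&mpp_data) == H_SUCCESS)
 *		pr_info("entitled_mem = %lu\n", mpp_data.entitled_mem);
 */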