#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
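
/*
 * A minimal usage sketch (illustrative only): get_debugreg() assigns
 * through its first argument, so reading and then rewriting DR7 via the
 * paravirt hooks looks like
 *
 *	unsigned long dr7;
 *	get_debugreg(dr7, 7);
 *	set_debugreg(dr7, 7);
 */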

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long __read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
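
/*
 * A minimal usage sketch (illustrative): the *_safe variants report an
 * error instead of faulting when an MSR is not implemented, e.g.
 *
 *	u64 val;
 *	if (rdmsrl_safe(MSR_IA32_MISC_ENABLE, &val))
 *		pr_warn("MSR_IA32_MISC_ENABLE not readable\n");
 */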

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    const struct flush_tlb_info *info)
{
	PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

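/*
 * Note on the sizeof(pteval_t) > sizeof(long) checks below: on 32-bit
 * PAE builds the page-table entry types are 64 bits wide, so a value is
 * passed to the pvop as two 32-bit halves; on 64-bit builds it fits in
 * a single register and is passed as one argument.
 */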
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	if (sizeof(pudval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pud_at(mm, addr, pudp, pud);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp,
			    native_pud_val(pud));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	if (sizeof(p4dval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_p4d, p4dp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_p4d, p4dp,
			    val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, pv_mmu_ops.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, pv_mmu_ops.p4d_val, p4d.p4d);
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

#endif  /* CONFIG_PGTABLE_LEVELS == 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}
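
/*
 * A typical batching pattern for the lazy MMU hooks (illustrative
 * sketch only):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 *
 * which lets a hypervisor queue the individual updates and apply them
 * in one batch when the lazy region is left.
 */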

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}

#endif /* SMP && PARAVIRT_SPINLOCKS */
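
/*
 * Descriptive sketch of how the lock hooks above are used by the
 * paravirt qspinlock code: a contended waiter eventually calls
 * pv_wait(&state, expected) to halt its vCPU while the state byte still
 * holds the expected value, the lock holder calls pv_kick(cpu) on
 * release to wake it, and pv_vcpu_is_preempted() lets spin loops give
 * up early when the owner's vCPU is not currently running.
 */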

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS					\
	"push %rcx;"						\
	"push %rdx;"						\
	"push %rsi;"						\
	"push %rdi;"						\
	"push %r8;"						\
	"push %r9;"						\
	"push %r10;"						\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS				\
	"pop %r11;"						\
	"pop %r10;"						\
	"pop %r9;"						\
	"pop %r8;"						\
	"pop %rdi;"						\
	"pop %rsi;"						\
	"pop %rdx;"						\
	"pop %rcx;"
/* Saving and restoring every register would be too costly, so we save
 * only the argument register and declare the remaining caller-saved
 * registers as clobbered. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
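
/*
 * Usage sketch (illustrative; the function name below is hypothetical):
 *
 *	static unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * The thunk macro emits __raw_callee_save_my_save_fl, which preserves
 * all caller-save registers around the call, and PV_CALLEE_SAVE() wraps
 * it in a struct paravirt_callee_save for the ops table.
 */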

static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short clobbers;				\
	.popsection

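/*
 * Descriptive note: each _PVSITE use records a patch site in the
 * .parainstructions section -- the address of the patchable code (771b),
 * the op type, its length in bytes (772b-771b) and the clobber mask --
 * so the boot-time patching code can later replace the indirect call
 * with a more efficient sequence where possible.
 */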

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx
#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something similarly
 * special.  Either way, we don't need to save any registers for it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#ifndef __ASSEMBLY__
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __ASSEMBLY__ */
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */