// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

/*
 * uv_info contains both host and guest information, but it is currently
 * only expected to be used by the KVM module and by PV guest modules.
 *
 * The kernel itself will write these values once in uv_query_info()
 * and then make some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}
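
/*
 * Note on the calling convention used throughout this file: every UVC is
 * described by a control block starting with a struct uv_cb_header that
 * holds the command code, the total length, and the rc/rrc result codes
 * filled in by the Ultravisor. uv_call() (see asm/uv.h) retries the call
 * while the condition code indicates busy or partial completion, whereas
 * __uv_call() performs exactly one attempt.
 */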

void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}
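
/*
 * Note that the base storage donated to the Ultravisor above is allocated
 * with 1 MB alignment below 2 GB (the SZ_1M and SZ_2G arguments to
 * memblock_alloc_try_nid()) and, on success, stays reserved for the
 * lifetime of the system; there is no teardown path.
 */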

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
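
/*
 * uv_pin_shared() is used by arch_make_page_accessible() below: once a
 * shared page has been pinned, the host can rely on it staying accessible,
 * since any guest attempt to unshare it will be intercepted.
 */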

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting it.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_destroy_owned_page(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_destroy_page(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_convert_from_secure(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}
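
/*
 * Both "owned" helpers above follow the same pattern: take an extra page
 * reference so the page cannot disappear during the UVC, then clear
 * PG_arch_1 on success, since the page is known not to be secure anymore
 * (see the comment in arch_make_page_accessible() below for how that bit
 * is interpreted).
 */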

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know, for example, that
 * a secure page cannot be a huge page.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
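
/*
 * Worked example (illustrative): a file-backed page that is mapped by
 * exactly one process and has private buffers attached yields
 * page_mapcount() == 1, plus 1 for the mapping and 1 for the buffers, so
 * expected_page_refs() returns 3. page_ref_freeze() in make_page_secure()
 * below then only succeeds if no further references exist.
 */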

static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding
	 * a lot of locks, so we can't easily sleep and reschedule. We try
	 * only once, and if the UVC returned busy or partial completion,
	 * we return -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/*
	 * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}
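
/*
 * Design note (as far as can be inferred from the code above): freezing
 * the page refcount for the duration of the UVC ensures that nobody can
 * take a new reference (e.g. via get_user_pages()) while the Ultravisor
 * imports the page. PG_arch_1 is set before the call, so the bit may
 * over-indicate, but never under-indicate, a secure page.
 */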

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * An export is also not needed when there is only one protected VM, because
 * the page cannot belong to the wrong VM in that case (there is no "other
 * VM" it could belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it is brought in for the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode. If
	 * userspace plays dirty tricks with mapping huge pages later on, this
	 * will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	if (!ptep)
		goto out;
	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
		page = pte_page(*ptep);
		rc = -EAGAIN;
		if (trylock_page(page)) {
			if (should_export_before_import(uvcb, gmap->mm))
				uv_convert_from_secure(page_to_phys(page));
			rc = make_page_secure(page, uvcb);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptelock);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
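
/*
 * Illustrative only (hypothetical caller, not part of this file): since
 * gmap_make_secure() returns -EAGAIN when the UVC was busy or the page
 * was under writeback, callers are expected to simply retry, e.g.:
 *
 *	do {
 *		rc = gmap_convert_to_secure(gmap, gaddr);
 *	} while (rc == -EAGAIN);
 */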

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long uaddr;
	struct page *page;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	/* we take an extra reference here */
	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	rc = uv_destroy_owned_page(page_to_phys(page));
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_owned_from_secure(page_to_phys(page));
	put_page(page);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having two
 * parallel calls to make_page_accessible is fine, as the UV calls will
 * become a no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepages cannot be protected, so there is nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. as an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
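
/*
 * arch_make_page_accessible() is the s390 implementation of the common
 * code hook of the same name; generic code (e.g. the gup and writeback
 * paths) invokes it before the host touches page content, so that a
 * secure page is exported or pinned shared first.
 */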

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}
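
/* Note: unlike its siblings, this attribute keeps the full function-name
 * prefix and therefore appears as "uv_query_dump_cpu_len" in sysfs.
 */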
static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};
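
/*
 * The query attributes above are exposed under /sys/firmware/uv/query/
 * ("uv" is created under firmware_kobj in uv_info_init() below, with the
 * "query" kset underneath it). For example, on a machine with the
 * Ultravisor facility:
 *
 *	$ cat /sys/firmware/uv/query/max_cpus
 *	$ cat /sys/firmware/uv/query/facilities
 */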

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	val = prot_virt_guest;
#endif
	return sysfs_emit(buf, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#if IS_ENABLED(CONFIG_KVM)
	val = prot_virt_host;
#endif

	return sysfs_emit(buf, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

static int __init uv_info_init(void)
{
	int rc = -ENOMEM;
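
	/* Facility 158 indicates that the Ultravisor-call facility is
	 * installed; without it there is nothing to expose.
	 */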
	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset) {
		rc = -ENOMEM;
		goto out_ind_files;
	}

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif