/*
 * Hypervisor supplied "24x7" performance counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "hv-24x7: " fmt

#include <linux/perf_event.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/firmware.h>
#include <asm/hvcall.h>

#include <linux/byteorder/generic.h>

#include "hv-24x7-catalog.h"
#include "hv-common.h"

static const char *event_domain_suffix(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)		\
	case HV_PERF_DOMAIN_##n:	\
		return "__" #n;
#include "hv-24x7-domains.h"
#undef DOMAIN
	default:
		WARN(1, "unknown domain %d\n", domain);
		return "__UNKNOWN_DOMAIN_SUFFIX";
	}
}

static bool domain_is_valid(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)		\
	case HV_PERF_DOMAIN_##n:	\
		/* fall through */
#include "hv-24x7-domains.h"
#undef DOMAIN
		return true;
	default:
		return false;
	}
}

static bool is_physical_domain(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)		\
	case HV_PERF_DOMAIN_##n:	\
		return c;
#include "hv-24x7-domains.h"
#undef DOMAIN
	default:
		return false;
	}
}

static bool catalog_entry_domain_is_valid(unsigned domain)
{
	return is_physical_domain(domain);
}

/*
 * TODO: Merging events:
 * - Think of the hcall as an interface to a 4d array of counters:
 *   - x = domains
 *   - y = indexes in the domain (core, chip, vcpu, node, etc)
 *   - z = offset into the counter space
 *   - w = lpars (guest vms, "logical partitions")
 * - A single request is: x,y,y_last,z,z_last,w,w_last
 *   - this means we can retrieve a rectangle of counters in y,z for a single x.
 *
 * - Things to consider (ignoring w):
 *   - input  cost_per_request = 16
 *   - output cost_per_result(ys,zs) = 8 + 8 * ys + ys * zs
 *   - limited number of requests per hcall (must fit into 4K bytes)
 *     - 4k = 16 [buffer header] + 16 [request size] * request_count
 *     - 255 requests per hcall
 *   - sometimes it will be more efficient to read extra data and discard
 */

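/*
 * Illustrative arithmetic for the cost model above (not from the spec,
 * just the two formulas applied): one merged request covering ys = 4
 * indexes and zs = 2 offsets costs 16 bytes in and 8 + 8*4 + 4*2 = 48
 * bytes out, while two separate zs = 1 requests cost 2*16 = 32 bytes in
 * and 2 * (8 + 8*4 + 4*1) = 88 bytes out; merging wins on both sides.
 */
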
/*
 * Example usage:
 *  perf stat -e 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/'
 */

/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(core, config, 16, 31);
EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);

EVENT_DEFINE_RANGE(reserved1, config, 4, 15);
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);

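/*
 * Summary of the event encoding defined above (derived from the
 * EVENT_DEFINE_RANGE* invocations, not from the PAPR documents):
 *   config[0:3]    - domain
 *   config[16:31]  - core or vcpu index (which one depends on the domain)
 *   config[32:63]  - offset into the counter data
 *   config1[0:15]  - lpar
 * Every reserved* range must be zero; h_24x7_event_init() rejects events
 * that set them.
 */
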
static struct attribute *format_attrs[] = {
	&format_attr_domain.attr,
	&format_attr_offset.attr,
	&format_attr_core.attr,
	&format_attr_vcpu.attr,
	&format_attr_lpar.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

static struct attribute_group event_group = {
	.name = "events",
	/* .attrs is set in init */
};

static struct attribute_group event_desc_group = {
	.name = "event_descs",
	/* .attrs is set in init */
};

static struct attribute_group event_long_desc_group = {
	.name = "event_long_descs",
	/* .attrs is set in init */
};

static struct kmem_cache *hv_page_cache;

static char *event_name(struct hv_24x7_event_data *ev, int *len)
{
	*len = be16_to_cpu(ev->event_name_len) - 2;
	return (char *)ev->remainder;
}

static char *event_desc(struct hv_24x7_event_data *ev, int *len)
{
	unsigned nl = be16_to_cpu(ev->event_name_len);
	__be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);

	*len = be16_to_cpu(*desc_len) - 2;
	return (char *)ev->remainder + nl;
}

static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
{
	unsigned nl = be16_to_cpu(ev->event_name_len);
	__be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
	unsigned desc_len = be16_to_cpu(*desc_len_);
	__be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);

	*len = be16_to_cpu(*long_desc_len) - 2;
	return (char *)ev->remainder + nl + desc_len;
}

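/*
 * Layout of the variable-length tail implied by the three accessors above
 * (inferred from the pointer arithmetic; each 16-bit length counts its own
 * 2 bytes plus the payload that follows it):
 *   remainder + 0           : name          (event_name_len - 2 bytes)
 *   remainder + nl - 2      : desc_len      (2 bytes, big endian)
 *   remainder + nl          : desc          (desc_len - 2 bytes)
 *   remainder + nl + dl - 2 : long_desc_len (2 bytes, big endian)
 *   remainder + nl + dl     : long desc     (long_desc_len - 2 bytes)
 * where nl = be16_to_cpu(event_name_len) and dl = be16_to_cpu(desc_len).
 */
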
static bool event_fixed_portion_is_within(struct hv_24x7_event_data *ev,
					  void *end)
{
	void *start = ev;

	return (start + offsetof(struct hv_24x7_event_data, remainder)) < end;
}

/*
 * Things we don't check:
 *  - padding for desc, name, and long/detailed desc is required to be '\0'
 *    bytes.
 *
 * Return NULL if we pass end.
 * Otherwise return the address of the byte just following the event.
 */
static void *event_end(struct hv_24x7_event_data *ev, void *end)
{
	void *start = ev;
	__be16 *dl_, *ldl_;
	unsigned dl, ldl;
	unsigned nl = be16_to_cpu(ev->event_name_len);

	if (nl < 2) {
		pr_debug("%s: name length too short: %d", __func__, nl);
		return NULL;
	}

	if (start + nl > end) {
		pr_debug("%s: start=%p + nl=%u > end=%p",
				__func__, start, nl, end);
		return NULL;
	}

	dl_ = (__be16 *)(ev->remainder + nl - 2);
	if (!IS_ALIGNED((uintptr_t)dl_, 2))
		pr_warn("desc len not aligned %p", dl_);
	dl = be16_to_cpu(*dl_);
	if (dl < 2) {
		pr_debug("%s: desc len too short: %d", __func__, dl);
		return NULL;
	}

	if (start + nl + dl > end) {
		pr_debug("%s: (start=%p + nl=%u + dl=%u)=%p > end=%p",
				__func__, start, nl, dl, start + nl + dl, end);
		return NULL;
	}

	ldl_ = (__be16 *)(ev->remainder + nl + dl - 2);
	if (!IS_ALIGNED((uintptr_t)ldl_, 2))
		pr_warn("long desc len not aligned %p", ldl_);
	ldl = be16_to_cpu(*ldl_);
	if (ldl < 2) {
		pr_debug("%s: long desc len too short (ldl=%u)",
				__func__, ldl);
		return NULL;
	}

	if (start + nl + dl + ldl > end) {
		pr_debug("%s: start=%p + nl=%u + dl=%u + ldl=%u > end=%p",
				__func__, start, nl, dl, ldl, end);
		return NULL;
	}

	return start + nl + dl + ldl;
}

static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
					      unsigned long version,
					      unsigned long index)
{
	pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
			phys_4096, version, index);

	WARN_ON(!IS_ALIGNED(phys_4096, 4096));

	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
			phys_4096, version, index);
}

static unsigned long h_get_24x7_catalog_page(char page[],
					     u64 version, u32 index)
{
	return h_get_24x7_catalog_page_(virt_to_phys(page),
					version, index);
}

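/*
 * How the callers below use this wrapper (a summary of this file, not of
 * the PAPR interface): page 0 is always fetched with version == 0 and
 * carries the header (version, length in 4K pages, event entry count and
 * offsets); every later page must be requested with the version number
 * read from page 0, which lets the hypervisor fail the read if the
 * catalog has changed in the meantime.
 */
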
static unsigned core_domains[] = {
	HV_PERF_DOMAIN_PHYS_CORE,
	HV_PERF_DOMAIN_VCPU_HOME_CORE,
	HV_PERF_DOMAIN_VCPU_HOME_CHIP,
	HV_PERF_DOMAIN_VCPU_HOME_NODE,
	HV_PERF_DOMAIN_VCPU_REMOTE_NODE,
};

/* chip event data always yields a single event, core yields multiple */
#define MAX_EVENTS_PER_EVENT_DATA ARRAY_SIZE(core_domains)

static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
{
	const char *sindex;
	const char *lpar;

	if (is_physical_domain(domain)) {
		lpar = "0x0";
		sindex = "core";
	} else {
		lpar = "?";
		sindex = "vcpu";
	}

	return kasprintf(GFP_KERNEL,
			"domain=0x%x,offset=0x%x,%s=?,lpar=%s",
			domain,
			be16_to_cpu(event->event_counter_offs) +
				be16_to_cpu(event->event_group_record_offs),
			sindex,
			lpar);
}

/* Avoid trusting fw to NUL terminate strings */
static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp)
{
	return kasprintf(gfp, "%.*s", max_len, maybe_str);
}

static ssize_t device_show_string(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *d;

	d = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "%s\n", (char *)d->var);
}

static struct attribute *device_str_attr_create_(char *name, char *str)
{
	struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);

	if (!attr)
		return NULL;

	sysfs_attr_init(&attr->attr.attr);

	attr->var = str;
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = device_show_string;

	return &attr->attr.attr;
}

static struct attribute *device_str_attr_create(char *name, int name_max,
						int name_nonce,
						char *str, size_t str_max)
{
	char *n;
	char *s = memdup_to_str(str, str_max, GFP_KERNEL);
	struct attribute *a;

	if (!s)
		return NULL;

	if (!name_nonce)
		n = kasprintf(GFP_KERNEL, "%.*s", name_max, name);
	else
		n = kasprintf(GFP_KERNEL, "%.*s__%d", name_max, name,
				name_nonce);
	if (!n)
		goto out_s;

	a = device_str_attr_create_(n, s);
	if (!a)
		goto out_n;

	return a;

out_n:
	kfree(n);
out_s:
	kfree(s);
	return NULL;
}

static void device_str_attr_destroy(struct attribute *attr)
{
	struct dev_ext_attribute *d;

	d = container_of(attr, struct dev_ext_attribute, attr.attr);
	kfree(d->var);
	kfree(d->attr.attr.name);
	kfree(d);
}

static struct attribute *event_to_attr(unsigned ix,
				       struct hv_24x7_event_data *event,
				       unsigned domain,
				       int nonce)
{
	int event_name_len;
	char *ev_name, *a_ev_name, *val;
	const char *ev_suffix;
	struct attribute *attr;

	if (!domain_is_valid(domain)) {
		pr_warn("catalog event %u has invalid domain %u\n",
				ix, domain);
		return NULL;
	}

	val = event_fmt(event, domain);
	if (!val)
		return NULL;

	ev_suffix = event_domain_suffix(domain);
	ev_name = event_name(event, &event_name_len);
	if (!nonce)
		a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s",
				(int)event_name_len, ev_name, ev_suffix);
	else
		a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d",
				(int)event_name_len, ev_name, ev_suffix, nonce);
	if (!a_ev_name)
		goto out_val;

	attr = device_str_attr_create_(a_ev_name, val);
	if (!attr)
		goto out_name;

	return attr;

out_name:
	kfree(a_ev_name);
out_val:
	kfree(val);
	return NULL;
}

static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event,
					    int nonce)
{
	int nl, dl;
	char *name = event_name(event, &nl);
	char *desc = event_desc(event, &dl);

	/* If there isn't a description, don't create the sysfs file */
	if (!dl)
		return NULL;

	return device_str_attr_create(name, nl, nonce, desc, dl);
}

static struct attribute *
event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
{
	int nl, dl;
	char *name = event_name(event, &nl);
	char *desc = event_long_desc(event, &dl);

	/* If there isn't a description, don't create the sysfs file */
	if (!dl)
		return NULL;

	return device_str_attr_create(name, nl, nonce, desc, dl);
}

static ssize_t event_data_to_attrs(unsigned ix, struct attribute **attrs,
		struct hv_24x7_event_data *event, int nonce)
{
	unsigned i;

	switch (event->domain) {
	case HV_PERF_DOMAIN_PHYS_CHIP:
		*attrs = event_to_attr(ix, event, event->domain, nonce);
		return 1;
	case HV_PERF_DOMAIN_PHYS_CORE:
		for (i = 0; i < ARRAY_SIZE(core_domains); i++) {
			attrs[i] = event_to_attr(ix, event, core_domains[i],
					nonce);
			if (!attrs[i]) {
				pr_warn("catalog event %u: individual attr %u "
					"creation failure\n", ix, i);
				for (; i; i--)
					device_str_attr_destroy(attrs[i - 1]);
				return -1;
			}
		}
		return i;
	default:
		pr_warn("catalog event %u: domain %u is not allowed in the "
				"catalog\n", ix, event->domain);
		return -1;
	}
}

static size_t event_to_attr_ct(struct hv_24x7_event_data *event)
{
	switch (event->domain) {
	case HV_PERF_DOMAIN_PHYS_CHIP:
		return 1;
	case HV_PERF_DOMAIN_PHYS_CORE:
		return ARRAY_SIZE(core_domains);
	default:
		return 0;
	}
}

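/*
 * The largest value event_to_attr_ct() can return is
 * ARRAY_SIZE(core_domains), which is why MAX_EVENTS_PER_EVENT_DATA above
 * is a safe per-catalog-entry bound when create_events_from_catalog()
 * sizes its attribute arrays.
 */
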
static unsigned long vmalloc_to_phys(void *v)
{
	struct page *p = vmalloc_to_page(v);

	BUG_ON(!p);
	return page_to_phys(p) + offset_in_page(v);
}

static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
{
	if (s1 < s2)
		return 1;
	if (s1 > s2)
		return -1;

	return memcmp(d1, d2, s1);
}

static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
		       size_t s2, unsigned d2)
{
	int r = memord(v1, s1, v2, s2);

	if (r)
		return r;
	if (d1 > d2)
		return 1;
	if (d2 > d1)
		return -1;
	return 0;
}

struct event_uniq {
	struct rb_node node;
	const char *name;
	int nl;
	unsigned ct;
	unsigned domain;
};

static int event_uniq_add(struct rb_root *root, const char *name, int nl,
			  unsigned domain)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct event_uniq *data;

	/* Figure out where to put new node */
	while (*new) {
		struct event_uniq *it;
		int result;

		it = container_of(*new, struct event_uniq, node);
		result = ev_uniq_ord(name, nl, domain, it->name, it->nl,
					it->domain);

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else {
			it->ct++;
			pr_info("found a duplicate event %.*s, ct=%u\n", nl,
					name, it->ct);
			return it->ct;
		}
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	*data = (struct event_uniq) {
		.name = name,
		.nl = nl,
		.ct = 0,
		.domain = domain,
	};

	/* Add new node and rebalance tree. */
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	/* data->ct == 0 for a first occurrence */
	return 0;
}

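/*
 * The return value above acts as a "nonce" for sysfs naming: the first
 * occurrence of a (name, domain) pair returns 0 and keeps the plain name,
 * while each duplicate returns the bumped count so callers can append a
 * "__1", "__2", ... suffix (see device_str_attr_create() and
 * event_to_attr()).
 */
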
static void event_uniq_destroy(struct rb_root *root)
{
	/*
	 * the strings we point to are in the giant block of memory filled by
	 * the catalog, and are freed separately.
	 */
	struct event_uniq *pos, *n;

	rbtree_postorder_for_each_entry_safe(pos, n, root, node)
		kfree(pos);
}

/*
 * Ensure the event structure's sizes are self-consistent and don't cause
 * us to read outside of the event.
 *
 * On success, return the event length in bytes.
 * Otherwise, return -1 (and print as appropriate).
 */
static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
					  size_t event_idx,
					  size_t event_data_bytes,
					  size_t event_entry_count,
					  size_t offset, void *end)
{
	ssize_t ev_len;
	void *ev_end, *calc_ev_end;

	if (offset >= event_data_bytes)
		return -1;

	if (event_idx >= event_entry_count) {
		pr_devel("catalog event data has %zu bytes of padding after last event\n",
				event_data_bytes - offset);
		return -1;
	}

	if (!event_fixed_portion_is_within(event, end)) {
		pr_warn("event %zu fixed portion is not within range\n",
				event_idx);
		return -1;
	}

	ev_len = be16_to_cpu(event->length);

	if (ev_len % 16)
		pr_info("event %zu has length %zu not divisible by 16: event=%pK\n",
				event_idx, ev_len, event);

	ev_end = (__u8 *)event + ev_len;
	if (ev_end > end) {
		pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n",
				event_idx, ev_len, ev_end, end,
				offset);
		return -1;
	}

	calc_ev_end = event_end(event, end);
	if (!calc_ev_end) {
		pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n",
				event_idx, event_data_bytes, event, end,
				offset);
		return -1;
	}

	if (calc_ev_end > ev_end) {
		pr_warn("event %zu exceeds its own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
				event_idx, event, ev_end, offset, calc_ev_end);
		return -1;
	}

	return ev_len;
}

#define MAX_4K (SIZE_MAX / 4096)

static void create_events_from_catalog(struct attribute ***events_,
		struct attribute ***event_descs_,
		struct attribute ***event_long_descs_)
{
	unsigned long hret;
	size_t catalog_len, catalog_page_len, event_entry_count,
	       event_data_len, event_data_offs,
	       event_data_bytes, junk_events, event_idx, event_attr_ct, i,
	       attr_max, event_idx_last, desc_ct, long_desc_ct;
	ssize_t ct, ev_len;
	uint32_t catalog_version_num;
	struct attribute **events, **event_descs, **event_long_descs;
	struct hv_24x7_catalog_page_0 *page_0 =
		kmem_cache_alloc(hv_page_cache, GFP_KERNEL);
	void *page = page_0;
	void *event_data, *end;
	struct hv_24x7_event_data *event;
	struct rb_root ev_uniq = RB_ROOT;

	if (!page)
		goto e_out;

	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret)
		goto e_free;

	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);

	if (MAX_4K < catalog_page_len) {
		pr_err("invalid page count: %zu\n", catalog_page_len);
		goto e_free;
	}

	catalog_len = catalog_page_len * 4096;

	event_entry_count = be16_to_cpu(page_0->event_entry_count);
	event_data_offs   = be16_to_cpu(page_0->event_data_offs);
	event_data_len    = be16_to_cpu(page_0->event_data_len);

	pr_devel("cv %zu cl %zu eec %zu edo %zu edl %zu\n",
			(size_t)catalog_version_num, catalog_len,
			event_entry_count, event_data_offs, event_data_len);

	if ((MAX_4K < event_data_len)
			|| (MAX_4K < event_data_offs)
			|| (MAX_4K - event_data_offs < event_data_len)) {
		pr_err("invalid event data offs %zu and/or len %zu\n",
				event_data_offs, event_data_len);
		goto e_free;
	}

	if ((event_data_offs + event_data_len) > catalog_page_len) {
		pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n",
				event_data_offs,
				event_data_offs + event_data_len,
				catalog_page_len);
		goto e_free;
	}

	if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) {
		pr_err("event_entry_count %zu is invalid\n",
				event_entry_count);
		goto e_free;
	}

	event_data_bytes = event_data_len * 4096;

	/*
	 * event data can span several pages, events can cross between these
	 * pages. Use vmalloc to make this easier.
	 */
	event_data = vmalloc(event_data_bytes);
	if (!event_data) {
		pr_err("could not allocate event data\n");
		goto e_free;
	}

	end = event_data + event_data_bytes;

	/*
	 * using vmalloc_to_phys() like this only works if PAGE_SIZE is
	 * divisible by 4096
	 */
	BUILD_BUG_ON(PAGE_SIZE % 4096);

	for (i = 0; i < event_data_len; i++) {
		hret = h_get_24x7_catalog_page_(
				vmalloc_to_phys(event_data + i * 4096),
				catalog_version_num,
				i + event_data_offs);
		if (hret) {
			pr_err("failed to get event data in page %zu\n",
					i + event_data_offs);
			goto e_event_data;
		}
	}

	/*
	 * scan the catalog to determine the number of attributes we need, and
	 * verify it at the same time.
	 */
	for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0;
	     ;
	     event_idx++, event = (void *)event + ev_len) {
		size_t offset = (void *)event - (void *)event_data;
		char *name;
		int nl;

		ev_len = catalog_event_len_validate(event, event_idx,
						    event_data_bytes,
						    event_entry_count,
						    offset, end);
		if (ev_len < 0)
			break;

		name = event_name(event, &nl);

		if (event->event_group_record_len == 0) {
			pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n",
					event_idx, nl, name);
			junk_events++;
			continue;
		}

		if (!catalog_entry_domain_is_valid(event->domain)) {
			pr_info("event %zu (%.*s) has invalid domain %d\n",
					event_idx, nl, name, event->domain);
			junk_events++;
			continue;
		}

		attr_max += event_to_attr_ct(event);
	}

	event_idx_last = event_idx;
	if (event_idx_last != event_entry_count)
		pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n",
				event_idx_last, event_entry_count, junk_events);

	events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
	if (!events)
		goto e_event_data;

	event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
				GFP_KERNEL);
	if (!event_descs)
		goto e_event_attrs;

	event_long_descs = kmalloc_array(event_idx + 1,
			sizeof(*event_long_descs), GFP_KERNEL);
	if (!event_long_descs)
		goto e_event_descs;

	/* Iterate over the catalog filling in the attribute vector */
	for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
				event = event_data, event_idx = 0;
			event_idx < event_idx_last;
			event_idx++,
			ev_len = be16_to_cpu(event->length),
			event = (void *)event + ev_len) {
		char *name;
		int nl;
		int nonce;

		/*
		 * these are the only "bad" events that are intermixed and that
		 * we can ignore without issue. make sure to skip them here
		 */
		if (event->event_group_record_len == 0)
			continue;
		if (!catalog_entry_domain_is_valid(event->domain))
			continue;

		name  = event_name(event, &nl);
		nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
		ct    = event_data_to_attrs(event_idx, events + event_attr_ct,
					    event, nonce);
		if (ct <= 0) {
			pr_warn("event %zu (%.*s) creation failure, skipping\n",
					event_idx, nl, name);
			junk_events++;
		} else {
			event_attr_ct += ct;
			event_descs[desc_ct] = event_to_desc_attr(event, nonce);
			if (event_descs[desc_ct])
				desc_ct++;
			event_long_descs[long_desc_ct] =
					event_to_long_desc_attr(event, nonce);
			if (event_long_descs[long_desc_ct])
				long_desc_ct++;
		}
	}

	pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n",
			event_idx, event_attr_ct, junk_events, desc_ct);

	events[event_attr_ct] = NULL;
	event_descs[desc_ct] = NULL;
	event_long_descs[long_desc_ct] = NULL;

	event_uniq_destroy(&ev_uniq);
	vfree(event_data);
	kmem_cache_free(hv_page_cache, page);

	*events_ = events;
	*event_descs_ = event_descs;
	*event_long_descs_ = event_long_descs;
	return;

e_event_descs:
	kfree(event_descs);
e_event_attrs:
	kfree(events);
e_event_data:
	vfree(event_data);
e_free:
	kmem_cache_free(hv_page_cache, page);
e_out:
	*events_ = NULL;
	*event_descs_ = NULL;
	*event_long_descs_ = NULL;
}

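/*
 * The net effect, illustrated with a made-up event name (actual names come
 * from the catalog, suffixes from hv-24x7-domains.h):
 *   /sys/bus/event_source/devices/hv_24x7/events/PM_CYC__PHYS_CORE
 *   /sys/bus/event_source/devices/hv_24x7/event_descs/PM_CYC
 *   /sys/bus/event_source/devices/hv_24x7/event_long_descs/PM_CYC
 */
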
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t offset, size_t count)
{
	unsigned long hret;
	ssize_t ret = 0;
	size_t catalog_len = 0, catalog_page_len = 0;
	loff_t page_offset = 0;
	loff_t offset_in_page;
	size_t copy_len;
	uint64_t catalog_version_num = 0;
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
	struct hv_24x7_catalog_page_0 *page_0 = page;

	if (!page)
		return -ENOMEM;

	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret) {
		ret = -EIO;
		goto e_free;
	}

	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);
	catalog_len = catalog_page_len * 4096;

	page_offset = offset / 4096;
	offset_in_page = offset % 4096;

	if (page_offset >= catalog_page_len)
		goto e_free;

	if (page_offset != 0) {
		hret = h_get_24x7_catalog_page(page, catalog_version_num,
					       page_offset);
		if (hret) {
			ret = -EIO;
			goto e_free;
		}
	}

	copy_len = 4096 - offset_in_page;
	if (copy_len > count)
		copy_len = count;

	memcpy(buf, page+offset_in_page, copy_len);
	ret = copy_len;

e_free:
	if (hret)
		pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
		       " rc=%ld\n",
		       catalog_version_num, page_offset, hret);
	kmem_cache_free(hv_page_cache, page);

	pr_devel("catalog_read: offset=%lld(%lld) count=%zu "
			"catalog_len=%zu(%zu) => %zd\n", offset, page_offset,
			count, catalog_len, catalog_page_len, ret);
	return ret;
}

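/*
 * Worked example of the paging math above: a read at offset = 5000 maps to
 * page_offset = 1 and offset_in_page = 904, so at most 4096 - 904 = 3192
 * bytes are returned; readers are expected to call again for the rest, one
 * catalog page per call.
 */
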
#define PAGE_0_ATTR(_name, _fmt, _expr)					\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *dev_attr,		\
			    char *buf)					\
{									\
	unsigned long hret;						\
	ssize_t ret = 0;						\
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);		\
	struct hv_24x7_catalog_page_0 *page_0 = page;			\
	if (!page)							\
		return -ENOMEM;						\
	hret = h_get_24x7_catalog_page(page, 0, 0);			\
	if (hret) {							\
		ret = -EIO;						\
		goto e_free;						\
	}								\
	ret = sprintf(buf, _fmt, _expr);				\
e_free:									\
	kmem_cache_free(hv_page_cache, page);				\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)

PAGE_0_ATTR(catalog_version, "%lld\n",
		(unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
		(unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);

static struct bin_attribute *if_bin_attrs[] = {
	&bin_attr_catalog,
	NULL,
};

static struct attribute *if_attrs[] = {
	&dev_attr_catalog_len.attr,
	&dev_attr_catalog_version.attr,
	NULL,
};

static struct attribute_group if_group = {
	.name = "interface",
	.attrs = if_attrs,
	.bin_attrs = if_bin_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&event_group,
	&event_desc_group,
	&event_long_desc_group,
	&if_group,
	NULL,
};

DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096);
DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096);

static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
					 u16 lpar, u64 *res,
					 bool success_expected)
{
	unsigned long ret;

	/*
	 * request_buffer and result_buffer are not required to be 4k aligned,
	 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
	 * the simplest way to ensure that.
	 */
	struct reqb {
		struct hv_24x7_request_buffer buf;
		struct hv_24x7_request req;
	} __packed *request_buffer;

	struct {
		struct hv_24x7_data_result_buffer buf;
		struct hv_24x7_result res;
		struct hv_24x7_result_element elem;
		__be64 result;
	} __packed *result_buffer;

	BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
	BUILD_BUG_ON(sizeof(*result_buffer) > 4096);

	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
	result_buffer = (void *)get_cpu_var(hv_24x7_resb);

	memset(request_buffer, 0, 4096);
	memset(result_buffer, 0, 4096);

	*request_buffer = (struct reqb) {
		.buf = {
			.interface_version = HV_24X7_IF_VERSION_CURRENT,
			.num_requests = 1,
		},
		.req = {
			.performance_domain = domain,
			.data_size = cpu_to_be16(8),
			.data_offset = cpu_to_be32(offset),
			.starting_lpar_ix = cpu_to_be16(lpar),
			.max_num_lpars = cpu_to_be16(1),
			.starting_ix = cpu_to_be16(ix),
			.max_ix = cpu_to_be16(1),
		}
	};

	ret = plpar_hcall_norets(H_GET_24X7_DATA,
			virt_to_phys(request_buffer), sizeof(*request_buffer),
			virt_to_phys(result_buffer),  sizeof(*result_buffer));

	if (ret) {
		if (success_expected)
			pr_err_ratelimited("hcall failed: %d %#x %#x %d => "
				"0x%lx (%ld) detail=0x%x failing ix=%x\n",
				domain, offset, ix, lpar, ret, ret,
				result_buffer->buf.detailed_rc,
				result_buffer->buf.failing_request_ix);
		goto out;
	}

	*res = be64_to_cpu(result_buffer->result);

out:
	/* pair the get_cpu_var()s above, re-enabling preemption */
	put_cpu_var(hv_24x7_resb);
	put_cpu_var(hv_24x7_reqb);
	return ret;
}

static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
					bool success_expected)
{
	u16 idx;
	unsigned domain = event_get_domain(event);

	if (is_physical_domain(domain))
		idx = event_get_core(event);
	else
		idx = event_get_vcpu(event);

	return single_24x7_request(event_get_domain(event),
				event_get_offset(event),
				idx,
				event_get_lpar(event),
				res,
				success_expected);
}

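/*
 * Mapping from the event encoding to the request fields, as wired up
 * above: domain -> performance_domain, offset -> data_offset, the
 * core/vcpu index -> starting_ix, and lpar -> starting_lpar_ix, with
 * max_num_lpars and max_ix both fixed at 1 (one 8-byte counter per hcall).
 */
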
static int h_24x7_event_init(struct perf_event *event)
{
	struct hv_perf_caps caps;
	unsigned domain;
	unsigned long hret;
	u64 ct;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Unused areas must be 0 */
	if (event_get_reserved1(event) ||
	    event_get_reserved2(event) ||
	    event_get_reserved3(event)) {
		pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
				event->attr.config,
				event_get_reserved1(event),
				event->attr.config1,
				event_get_reserved2(event),
				event->attr.config2,
				event_get_reserved3(event));
		return -EINVAL;
	}

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest)
		return -EINVAL;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	/* offset must be 8 byte aligned */
	if (event_get_offset(event) % 8) {
		pr_devel("bad alignment\n");
		return -EINVAL;
	}

	/* Domains above 6 are invalid */
	domain = event_get_domain(event);
	if (domain > 6) {
		pr_devel("invalid domain %d\n", domain);
		return -EINVAL;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_devel("could not get capabilities: rc=%ld\n", hret);
		return -EIO;
	}

	/* Physical domains & other lpars require extra capabilities */
	if (!caps.collect_privileged && (is_physical_domain(domain) ||
		(event_get_lpar(event) != event_get_lpar_max()))) {
		pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
				is_physical_domain(domain),
				event_get_lpar(event));
		return -EACCES;
	}

	/* see if the event complains */
	if (event_24x7_request(event, &ct, false)) {
		pr_devel("test hcall failed\n");
		return -EIO;
	}

	return 0;
}

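/*
 * Note that init deliberately issues a throwaway request with
 * success_expected == false: a bad event specification fails here, at
 * perf_event_open() time, instead of surfacing later as a silent zero
 * count in h_24x7_get_value().
 */
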
static u64 h_24x7_get_value(struct perf_event *event)
{
	unsigned long ret;
	u64 ct;

	ret = event_24x7_request(event, &ct, true);
	if (ret) {
		/* We checked this in event init, shouldn't fail here... */
		return 0;
	}

	return ct;
}

static void h_24x7_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = h_24x7_get_value(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

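/*
 * Standard perf accounting for a free-running counter: swap the freshly
 * read value into prev_count and accumulate the difference, rather than
 * trying to reset the hypervisor's counter.
 */
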
static void h_24x7_event_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)
		local64_set(&event->hw.prev_count, h_24x7_get_value(event));
}

static void h_24x7_event_stop(struct perf_event *event, int flags)
{
	h_24x7_event_update(event);
}

static int h_24x7_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		h_24x7_event_start(event, flags);

	return 0;
}

static struct pmu h_24x7_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_24x7",
	.attr_groups = attr_groups,
	.event_init  = h_24x7_event_init,
	.add         = h_24x7_event_add,
	.del         = h_24x7_event_stop,
	.start       = h_24x7_event_start,
	.stop        = h_24x7_event_stop,
	.read        = h_24x7_event_update,
};

static int hv_24x7_init(void)
{
	int r;
	unsigned long hret;
	struct hv_perf_caps caps;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_debug("not a virtualized system, not enabling\n");
		return -ENODEV;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
				hret);
		return -ENODEV;
	}

	hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
	if (!hv_page_cache)
		return -ENOMEM;

	/* sampling not supported */
	h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	create_events_from_catalog(&event_group.attrs,
				   &event_desc_group.attrs,
				   &event_long_desc_group.attrs);

	r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(hv_24x7_init);