/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/intel_rdt_sched.h>
#include "intel_rdt.h"
#define MAX_MBA_BW	100u
#define MBA_IS_LINEAR	0x4
/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);
/*
 * The cached intel_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */
int max_name_width, max_data_width;
/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;
static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
struct rdt_resource rdt_resources_all[] = {
	[RDT_RESOURCE_L3] =
	{
		.rid		= RDT_RESOURCE_L3,
		.domains	= domain_init(RDT_RESOURCE_L3),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3DATA] =
	{
		.rid		= RDT_RESOURCE_L3DATA,
		.domains	= domain_init(RDT_RESOURCE_L3DATA),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3CODE] =
	{
		.rid		= RDT_RESOURCE_L3CODE,
		.domains	= domain_init(RDT_RESOURCE_L3CODE),
		.msr_base	= IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2] =
	{
		.rid		= RDT_RESOURCE_L2,
		.domains	= domain_init(RDT_RESOURCE_L2),
		.msr_base	= IA32_L2_CBM_BASE,
		.msr_update	= cat_wrmsr,
		.parse_ctrlval	= parse_cbm,
		.format_str	= "%d=%0*x",
		.fflags		= RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_MBA] =
	{
		.rid		= RDT_RESOURCE_MBA,
		.domains	= domain_init(RDT_RESOURCE_MBA),
		.msr_base	= IA32_MBA_THRTL_BASE,
		.msr_update	= mba_wrmsr,
		.parse_ctrlval	= parse_bw,
		.format_str	= "%d=%*d",
		.fflags		= RFTYPE_RES_MB,
	},
};

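/*
 * Map a CLOSID to the index of its control MSR for resource @r. The
 * per-resource multiplier and offset allow resources whose masks are
 * interleaved in the MSR space (such as the CDP code/data views of L3)
 * to address the right register.
 */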
static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
{
	return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
}

/*
 * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
 *	Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *	Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *	Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on HSW server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline void cache_alloc_hsw_probe(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
	u32 l, h, max_cbm = BIT_MASK(20) - 1;

	if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
		return;
	rdmsr(IA32_L3_CBM_BASE, l, h);

	/* If all the bits were set in MSR, return success */
	if (l != max_cbm)
		return;

	r->num_closid = 4;
	r->default_ctrl = max_cbm;
	r->cache.cbm_len = 20;
	r->cache.shareable_bits = 0xc0000;
	r->cache.min_cbm_bits = 2;
	r->alloc_capable = true;
	r->alloc_enabled = true;

	rdt_alloc_capable = true;
}

/*
 * rdt_get_mb_table() - get a mapping of bandwidth (b/w) percentage values
 * exposed to the user interface and the h/w understandable delay values.
 *
 * The non-linear delay values have the granularity of power of two
 * and also the h/w does not guarantee a curve for configured delay
 * values vs. actual b/w enforced.
 * Hence we need a mapping that is pre-calibrated so the user can
 * express the memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are no Intel SKUs as of now to support non-linear delay.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}

static bool rdt_get_mem_config(struct rdt_resource *r)
{
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->membw.max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay;
		r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay;
	} else {
		if (!rdt_get_mb_table(r))
			return false;
	}

	r->alloc_capable = true;
	r->alloc_enabled = true;

	return true;
}

static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->cache.shareable_bits = ebx & r->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	r->alloc_enabled = true;
}

static void rdt_get_cdp_l3_config(int type)
{
	struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
	struct rdt_resource *r = &rdt_resources_all[type];

	r->num_closid = r_l3->num_closid / 2;
	r->cache.cbm_len = r_l3->cache.cbm_len;
	r->default_ctrl = r_l3->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	/*
	 * By default, CDP is disabled. CDP can be enabled by mount parameter
	 * "cdp" during resctrl file system mount time.
	 */
	r->alloc_enabled = false;
}

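/*
 * Return the id of the cache at @level that is shared by @cpu, or -1 if no
 * matching cache leaf is found.
 */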
static int get_cache_id(int cpu, int level)
{
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
	int i;

	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == level)
			return ci->info_list[i].id;
	}

	return -1;
}

/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non linear delay values.
 */
static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}

static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
}

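/* Write the cache bit masks for the CLOSID range requested in @m. */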
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
}

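/*
 * Return the domain of resource @r that contains @cpu, or NULL if the CPU
 * is not in any of r's domains.
 */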
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}

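/*
 * Update the control MSRs described by the msr_param on the domain that
 * contains the executing CPU; meant to run on a CPU of the target domain,
 * for example via an IPI.
 */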
void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	d = get_domain_from_cpu(cpu, r);
	if (d) {
		r->msr_update(d, m, r);
		return;
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}

/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
				   struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(-ENODEV);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}

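/*
 * Allocate the per-domain array of control values and program every CLOSID's
 * MSR with the resource default.
 */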
static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
	struct msr_param m;
	u32 *dc;
	int i;

	dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	d->ctrl_val = dc;

	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100
	 */
	for (i = 0; i < r->num_closid; i++, dc++)
		*dc = r->default_ctrl;

	m.low = 0;
	m.high = r->num_closid;
	r->msr_update(d, &m, r);

	return 0;
}

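/*
 * Allocate the per-domain monitoring state (RMID limbo bitmap, MBM counter
 * arrays) for the monitoring events that are enabled and arm the MBM
 * overflow handler.
 */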
static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
{
	size_t tsize;

	if (is_llc_occupancy_enabled()) {
		d->rmid_busy_llc = kcalloc(BITS_TO_LONGS(r->num_rmid),
					   sizeof(unsigned long),
					   GFP_KERNEL);
		if (!d->rmid_busy_llc)
			return -ENOMEM;
		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
	}
	if (is_mbm_total_enabled()) {
		tsize = sizeof(*d->mbm_total);
		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_total) {
			kfree(d->rmid_busy_llc);
			return -ENOMEM;
		}
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*d->mbm_local);
		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_local) {
			kfree(d->rmid_busy_llc);
			kfree(d->mbm_total);
			return -ENOMEM;
		}
	}

	if (is_mbm_enabled()) {
		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
	}

	return 0;
}

/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu in the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		return;
	}

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
	if (!d)
		return;

	d->id = id;
	cpumask_set_cpu(cpu, &d->cpu_mask);

	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
		kfree(d);
		return;
	}

	if (r->mon_capable && domain_setup_mon_state(r, d)) {
		kfree(d);
		return;
	}

	list_add_tail(&d->list, add_pos);

	/*
	 * If resctrl is mounted, add
	 * per domain monitor data directories.
	 */
	if (static_branch_unlikely(&rdt_mon_enable_key))
		mkdir_mondata_subdir_allrdtgrp(r, d);
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		/*
		 * If resctrl is mounted, remove all the
		 * per domain monitor data directories.
		 */
		if (static_branch_unlikely(&rdt_mon_enable_key))
			rmdir_mondata_subdir_allrdtgrp(r, d->id);
		kfree(d->rmid_busy_llc);
		list_del(&d->list);
		if (is_mbm_enabled())
			cancel_delayed_work(&d->mbm_over);
		if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
			/*
			 * When a package is going down, forcefully
			 * decrement rmid->ebusy. There is no way to know
			 * that the L3 was flushed and hence may lead to
			 * incorrect counts in rare scenarios, but leaving
			 * the RMID as busy creates RMID leaks if the
			 * package never comes back.
			 */
			__check_limbo(d, true);
			cancel_delayed_work(&d->cqm_limbo);
		}

		kfree(d);
		return;
	}

	if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0);
		}
		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
		    has_busy_rmid(r, d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0);
		}
	}
}

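/*
 * Reset the CPU's cached CLOSID/RMID state and the IA32_PQR_ASSOC MSR back
 * to the defaults (0).
 */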
static void clear_closid_rmid(int cpu)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->default_closid = 0;
	state->default_rmid = 0;
	state->cur_closid = 0;
	state->cur_rmid = 0;
	wrmsr(IA32_PQR_ASSOC, 0, 0);
}

static int intel_rdt_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* The cpu is set in default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

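/*
 * A CPU can be in at most one child monitor group of @r; find it and clear
 * the CPU from its mask.
 */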
static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
			break;
	}
}

static int intel_rdt_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

/*
 * Choose a width for the resource name and resource data based on the
 * resource that has the widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;
	int cl;

	for_each_alloc_capable_rdt_resource(r) {
		cl = strlen(r->name);
		if (cl > max_name_width)
			max_name_width = cl;

		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}

enum {
	RDT_FLAG_CMT,
	RDT_FLAG_MBM_TOTAL,
	RDT_FLAG_MBM_LOCAL,
	RDT_FLAG_L3_CAT,
	RDT_FLAG_L3_CDP,
	RDT_FLAG_L2_CAT,
	RDT_FLAG_MBA,
};

#define RDT_OPT(idx, n, f)	\
[idx] = {			\
	.name = n,		\
	.flag = f		\
}

struct rdt_options {
	char	*name;
	int	flag;
	bool	force_off, force_on;
};

static struct rdt_options rdt_options[] __initdata = {
	RDT_OPT(RDT_FLAG_CMT,	    "cmt",	X86_FEATURE_CQM_OCCUP_LLC),
	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
	RDT_OPT(RDT_FLAG_L3_CAT,    "l3cat",	X86_FEATURE_CAT_L3),
	RDT_OPT(RDT_FLAG_L3_CDP,    "l3cdp",	X86_FEATURE_CDP_L3),
	RDT_OPT(RDT_FLAG_L2_CAT,    "l2cat",	X86_FEATURE_CAT_L2),
	RDT_OPT(RDT_FLAG_MBA,	    "mba",	X86_FEATURE_MBA),
};

#define NUM_RDT_OPTIONS	ARRAY_SIZE(rdt_options)

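/*
 * Parse the "rdt=" boot option: a comma separated list of feature names from
 * rdt_options[]. A name prefixed with '!' forces that feature off, otherwise
 * it is forced on.
 */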
static int __init set_rdt_options(char *str)
{
	struct rdt_options *o;
	bool force_off;
	char *tok;

	if (*str == '=')
		str++;
	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
			if (strcmp(tok, o->name) == 0) {
				if (force_off)
					o->force_off = true;
				else
					o->force_on = true;
				break;
			}
		}
	}
	return 1;
}
__setup("rdt", set_rdt_options);

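/*
 * Like boot_cpu_has(), but honours any force-on/force-off request made via
 * the "rdt=" boot option.
 */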
static bool __init rdt_cpu_has(int flag)
{
	bool ret = boot_cpu_has(flag);
	struct rdt_options *o;

	if (!ret)
		return ret;

	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}

static __init bool get_rdt_alloc_resources(void)
{
	bool ret = false;

	if (rdt_alloc_capable)
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3)) {
			rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
			rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
		}
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are same format as 0x10.1 */
		rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
		ret = true;
	}

	if (rdt_cpu_has(X86_FEATURE_MBA)) {
		if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
			ret = true;
	}

	return ret;
}

static __init bool get_rdt_mon_resources(void)
{
	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

	if (!rdt_mon_features)
		return false;

	return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
}

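/*
 * Model specific quirks: Haswell servers lack CPUID enumeration for CAT and
 * need the MSR write probe; early Skylake server steppings get CMT, MBM and
 * L3 CAT forced off.
 */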
static __init void rdt_quirks(void)
{
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_HASWELL_X:
		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
			cache_alloc_hsw_probe();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		if (boot_cpu_data.x86_mask <= 4)
			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
	}
}

static __init bool get_rdt_resources(void)
{
	rdt_quirks();
	rdt_alloc_capable = get_rdt_alloc_resources();
	rdt_mon_capable = get_rdt_mon_resources();

	return (rdt_mon_capable || rdt_alloc_capable);
}

static int __init intel_rdt_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/rdt/cat:online:",
				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}

	for_each_alloc_capable_rdt_resource(r)
		pr_info("Intel RDT %s allocation detected\n", r->name);

	for_each_mon_capable_rdt_resource(r)
		pr_info("Intel RDT %s monitoring detected\n", r->name);

	return 0;
}

late_initcall(intel_rdt_late_init);