struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
+struct intel_uncore_topology;
struct intel_uncore_type {
const char *name;
* to identify which platform component each PMON block of that type is
* supposed to monitor.
*/
- u64 *topology;
+ struct intel_uncore_topology *topology;
/*
* Optional callbacks for managing mapping of Uncore units to PMONs
*/
unsigned *box_offsets;
};
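+/*
+ * Per-die mapping data for one PMON type: the raw bus-number
+ * configuration read for that die, plus the PCI segment (domain)
+ * of the die's root bus.
+ */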
+struct intel_uncore_topology {
+ u64 configuration;
+ int segment;
+};
+
struct pci2phy_map {
struct list_head list;
int segment;
struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
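+/* Returns the PCI segment (domain) of @die's root bus, or a negative error. */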
+int uncore_die_to_segment(int die);
ssize_t uncore_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
{
- return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE);
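+ /*
+ * Each stack's root bus number occupies BUS_NUM_STRIDE bits of the
+ * per-die configuration; pmu_idx selects this PMON's stack.
+ */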
+ return pmu->type->topology[die].configuration >>
+ (pmu->pmu_idx * BUS_NUM_STRIDE);
}
static umode_t
}
static ssize_t skx_iio_mapping_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- struct pci_bus *bus = pci_find_next_bus(NULL);
- struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev);
+ struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
long die = (long)ea->var;
- /*
- * Current implementation is for single segment configuration hence it's
- * safe to take the segment value from the first available root bus.
- */
- return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus),
- skx_iio_stack(uncore_pmu, die));
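+ /* Emit the stack's "segment:bus" PCI address, e.g. "0000:3f". */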
+ return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
+ skx_iio_stack(pmu, die));
}
static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
static int skx_iio_get_topology(struct intel_uncore_type *type)
{
- int i, ret;
- struct pci_bus *bus = NULL;
-
- /*
- * Verified single-segment environments only; disabled for multiple
- * segment topologies for now except VMD domains.
- * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
- */
- while ((bus = pci_find_next_bus(bus))
- && (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff))
- ;
- if (bus)
- return -EPERM;
+ int die, ret = -EPERM;
- type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL);
+ type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
+ GFP_KERNEL);
if (!type->topology)
return -ENOMEM;
- for (i = 0; i < uncore_max_dies(); i++) {
- ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]);
- if (ret) {
- kfree(type->topology);
- type->topology = NULL;
- return ret;
- }
+ for (die = 0; die < uncore_max_dies(); die++) {
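+ /* Read the die's packed root bus numbers from the CPU bus number MSR. */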
+ ret = skx_msr_cpu_bus_read(die_to_cpu(die),
+ &type->topology[die].configuration);
+ if (ret)
+ break;
+
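+ /* Map the die to its PCI segment; a negative value means the lookup failed. */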
+ ret = uncore_die_to_segment(die);
+ if (ret < 0)
+ break;
+
+ type->topology[die].segment = ret;
}
- return 0;
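+ /*
+ * On success, ret holds the last die's segment (>= 0) rather than 0,
+ * so callers must treat only negative values as failure.
+ */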
+ if (ret < 0) {
+ kfree(type->topology);
+ type->topology = NULL;
+ }
+
+ return ret;
}
static struct attribute_group skx_iio_mapping_group = {
struct dev_ext_attribute *eas = NULL;
ret = skx_iio_get_topology(type);
- if (ret)
+ if (ret < 0)
goto clear_attr_update;
ret = -ENOMEM;