diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 07532d83be0bca7d06aa5e99670ac920435e0ed9..8a2ae25b65d5986f7384afb0b26d585484d5d36a 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -43,50 +43,10 @@ struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
 }
 
 #ifdef CONFIG_OF
-static int cache_setup_of_node(unsigned int cpu)
-{
-       struct device_node *np;
-       struct cacheinfo *this_leaf;
-       struct device *cpu_dev = get_cpu_device(cpu);
-       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-       unsigned int index = 0;
-
-       /* skip if of_node is already populated */
-       if (this_cpu_ci->info_list->of_node)
-               return 0;
-
-       if (!cpu_dev) {
-               pr_err("No cpu device for CPU %d\n", cpu);
-               return -ENODEV;
-       }
-       np = cpu_dev->of_node;
-       if (!np) {
-               pr_err("Failed to find cpu%d device node\n", cpu);
-               return -ENOENT;
-       }
-
-       while (index < cache_leaves(cpu)) {
-               this_leaf = this_cpu_ci->info_list + index;
-               if (this_leaf->level != 1)
-                       np = of_find_next_cache_node(np);
-               else
-                       np = of_node_get(np);/* cpu node itself */
-               if (!np)
-                       break;
-               this_leaf->of_node = np;
-               index++;
-       }
-
-       if (index != cache_leaves(cpu)) /* not all OF nodes populated */
-               return -ENOENT;
-
-       return 0;
-}
-
 static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
 {
-       return sib_leaf->of_node == this_leaf->of_node;
+       return sib_leaf->fw_token == this_leaf->fw_token;
 }
 
 /* OF properties to query for a given cache type */
@@ -122,7 +82,7 @@ static inline int get_cacheinfo_idx(enum cache_type type)
        return type;
 }
 
-static void cache_size(struct cacheinfo *this_leaf)
+static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
 {
        const char *propname;
        const __be32 *cache_size;
@@ -131,13 +91,14 @@ static void cache_size(struct cacheinfo *this_leaf)
        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].size_prop;
 
-       cache_size = of_get_property(this_leaf->of_node, propname, NULL);
+       cache_size = of_get_property(np, propname, NULL);
        if (cache_size)
                this_leaf->size = of_read_number(cache_size, 1);
 }
 
 /* not cache_line_size() because that's a macro in include/linux/cache.h */
-static void cache_get_line_size(struct cacheinfo *this_leaf)
+static void cache_get_line_size(struct cacheinfo *this_leaf,
+                               struct device_node *np)
 {
        const __be32 *line_size;
        int i, lim, ct_idx;
@@ -149,7 +110,7 @@ static void cache_get_line_size(struct cacheinfo *this_leaf)
                const char *propname;
 
                propname = cache_type_info[ct_idx].line_size_props[i];
-               line_size = of_get_property(this_leaf->of_node, propname, NULL);
+               line_size = of_get_property(np, propname, NULL);
                if (line_size)
                        break;
        }
@@ -158,7 +119,7 @@ static void cache_get_line_size(struct cacheinfo *this_leaf)
                this_leaf->coherency_line_size = of_read_number(line_size, 1);
 }
 
-static void cache_nr_sets(struct cacheinfo *this_leaf)
+static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
 {
        const char *propname;
        const __be32 *nr_sets;
@@ -167,7 +128,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf)
        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].nr_sets_prop;
 
-       nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
+       nr_sets = of_get_property(np, propname, NULL);
        if (nr_sets)
                this_leaf->number_of_sets = of_read_number(nr_sets, 1);
 }
@@ -186,41 +147,77 @@ static void cache_associativity(struct cacheinfo *this_leaf)
                this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
 }
 
-static bool cache_node_is_unified(struct cacheinfo *this_leaf)
+static bool cache_node_is_unified(struct cacheinfo *this_leaf,
+                                 struct device_node *np)
 {
-       return of_property_read_bool(this_leaf->of_node, "cache-unified");
+       return of_property_read_bool(np, "cache-unified");
 }
 
-static void cache_of_override_properties(unsigned int cpu)
+static void cache_of_set_props(struct cacheinfo *this_leaf,
+                              struct device_node *np)
 {
-       int index;
+       /*
+        * init_cache_level must setup the cache level correctly
+        * overriding the architecturally specified levels, so
+        * if type is NONE at this stage, it should be unified
+        */
+       if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+           cache_node_is_unified(this_leaf, np))
+               this_leaf->type = CACHE_TYPE_UNIFIED;
+       cache_size(this_leaf, np);
+       cache_get_line_size(this_leaf, np);
+       cache_nr_sets(this_leaf, np);
+       cache_associativity(this_leaf);
+}
+
+static int cache_setup_of_node(unsigned int cpu)
+{
+       struct device_node *np;
        struct cacheinfo *this_leaf;
+       struct device *cpu_dev = get_cpu_device(cpu);
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+       unsigned int index = 0;
 
-       for (index = 0; index < cache_leaves(cpu); index++) {
+       /* skip if fw_token is already populated */
+       if (this_cpu_ci->info_list->fw_token) {
+               return 0;
+       }
+
+       if (!cpu_dev) {
+               pr_err("No cpu device for CPU %d\n", cpu);
+               return -ENODEV;
+       }
+       np = cpu_dev->of_node;
+       if (!np) {
+               pr_err("Failed to find cpu%d device node\n", cpu);
+               return -ENOENT;
+       }
+
+       while (index < cache_leaves(cpu)) {
                this_leaf = this_cpu_ci->info_list + index;
-               /*
-                * init_cache_level must setup the cache level correctly
-                * overriding the architecturally specified levels, so
-                * if type is NONE at this stage, it should be unified
-                */
-               if (this_leaf->type == CACHE_TYPE_NOCACHE &&
-                   cache_node_is_unified(this_leaf))
-                       this_leaf->type = CACHE_TYPE_UNIFIED;
-               cache_size(this_leaf);
-               cache_get_line_size(this_leaf);
-               cache_nr_sets(this_leaf);
-               cache_associativity(this_leaf);
+               if (this_leaf->level != 1)
+                       np = of_find_next_cache_node(np);
+               else
+                       np = of_node_get(np);/* cpu node itself */
+               if (!np)
+                       break;
+               cache_of_set_props(this_leaf, np);
+               this_leaf->fw_token = np;
+               index++;
        }
+
+       if (index != cache_leaves(cpu)) /* not all OF nodes populated */
+               return -ENOENT;
+
+       return 0;
 }
 #else
-static void cache_of_override_properties(unsigned int cpu) { }
 static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
 static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
 {
        /*
-        * For non-DT systems, assume unique level 1 cache, system-wide
+        * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
         * shared caches for all other levels. This will be used only if
         * arch specific code has not populated shared_cpu_map
         */
@@ -228,6 +225,11 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
 }
 #endif
 
+int __weak cache_setup_acpi(unsigned int cpu)
+{
+       return -ENOTSUPP;
+}
+
 static int cache_shared_cpu_map_setup(unsigned int cpu)
 {
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -241,8 +243,8 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
        if (of_have_populated_dt())
                ret = cache_setup_of_node(cpu);
        else if (!acpi_disabled)
-               /* No cache property/hierarchy support yet in ACPI */
-               ret = -ENOTSUPP;
+               ret = cache_setup_acpi(cpu);
+
        if (ret)
                return ret;
 
@@ -293,16 +295,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
                }
-               of_node_put(this_leaf->of_node);
+               if (of_have_populated_dt())
+                       of_node_put(this_leaf->fw_token);
        }
 }
 
-static void cache_override_properties(unsigned int cpu)
-{
-       if (of_have_populated_dt())
-               return cache_of_override_properties(cpu);
-}
-
 static void free_cache_attributes(unsigned int cpu)
 {
        if (!per_cpu_cacheinfo(cpu))
@@ -336,12 +333,17 @@ static int detect_cache_attributes(unsigned int cpu)
        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOMEM;
 
+       /*
+        * populate_cache_leaves() may completely set up the cache leaves and
+        * shared_cpu_map, or it may leave it partially set up.
+        */
        ret = populate_cache_leaves(cpu);
        if (ret)
                goto free_ci;
        /*
-        * For systems using DT for cache hierarchy, of_node and shared_cpu_map
-        * will be set up here only if they are not populated already
+        * For systems using DT for cache hierarchy, fw_token
+        * and shared_cpu_map will be set up here only if they are
+        * not populated already
         */
        ret = cache_shared_cpu_map_setup(cpu);
        if (ret) {
@@ -349,7 +351,6 @@ static int detect_cache_attributes(unsigned int cpu)
                goto free_ci;
        }
 
-       cache_override_properties(cpu);
        return 0;
 
 free_ci:
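
For context (illustrative, not part of the patch itself): after this change the
sharing check in cache_leaves_are_shared() reduces to pointer equality on an
opaque fw_token, so it no longer cares whether the token came from a DT node or
an ACPI PPTT entry. A minimal standalone sketch of that comparison, using
simplified stand-in types rather than the kernel's real struct cacheinfo:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the struct cacheinfo fields used here. */
struct fake_cacheinfo {
	unsigned int level;
	void *fw_token;		/* opaque token: DT node or ACPI PPTT entry */
};

/* Mirrors the reworked cache_leaves_are_shared(): pure pointer equality. */
static bool leaves_are_shared(const struct fake_cacheinfo *a,
			      const struct fake_cacheinfo *b)
{
	return a->fw_token == b->fw_token;
}

int main(void)
{
	int l2_token;	/* both CPUs' L2 leaves reference the same firmware node */
	struct fake_cacheinfo cpu0_l2 = { .level = 2, .fw_token = &l2_token };
	struct fake_cacheinfo cpu1_l2 = { .level = 2, .fw_token = &l2_token };

	printf("L2 shared: %s\n",
	       leaves_are_shared(&cpu0_l2, &cpu1_l2) ? "yes" : "no");
	return 0;
}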
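
The new __weak cache_setup_acpi() stub relies on weak-symbol linking: a strong
definition elsewhere (in mainline this ends up being provided by the ACPI PPTT
parser) replaces the fallback at link time, with no #ifdef in the caller. A
hypothetical userspace demo of the same pattern, with -EOPNOTSUPP standing in
for the kernel's -ENOTSUPP:

/* weak_default.c: fallback, analogous to the __weak stub added above */
#include <errno.h>

int __attribute__((weak)) cache_setup_acpi(unsigned int cpu)
{
	(void)cpu;
	return -EOPNOTSUPP;
}

/* pptt_override.c: a strong definition silently wins over the weak one */
#include <stdio.h>

int cache_setup_acpi(unsigned int cpu)
{
	printf("cache topology for CPU %u parsed from firmware tables\n", cpu);
	return 0;
}

/* main.c: the caller is oblivious to which definition got linked in */
int cache_setup_acpi(unsigned int cpu);

int main(void)
{
	return cache_setup_acpi(0) ? 1 : 0;
}

/*
 * Build either variant:
 *   cc weak_default.c main.c -o demo                    (weak fallback)
 *   cc weak_default.c pptt_override.c main.c -o demo    (strong override)
 */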