mm: fix memory_failure() handling of dax-namespace metadata
author	Dan Williams <dan.j.williams@intel.com>
Fri, 26 Feb 2021 01:17:08 +0000 (17:17 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Feb 2021 17:41:00 +0000 (09:41 -0800)
Given that 'struct dev_pagemap' spans both data pages and metadata pages,
be careful to consult the altmap, if present, to delineate metadata.  In
fact, the pfn_first() helper already identifies the first valid data pfn,
so export that helper for other code paths via pgmap_pfn_valid().
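
For illustration only, here is a minimal userspace model of the layout in
question: the altmap reserves the leading pfns of a range for 'struct
page' metadata, so the first valid data pfn sits an altmap-derived offset
past the range start.  The structure and field names below are simplified
stand-ins, not the kernel definitions; only the offset arithmetic mirrors
pfn_first()/pgmap_pfn_valid().

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (hypothetical fields) */
struct altmap_model {
        unsigned long reserve;  /* pfns reserved at the start of the range */
        unsigned long free;     /* pfns handed out for 'struct page' storage */
};

struct range_model {
        unsigned long start_pfn;
        unsigned long end_pfn;  /* inclusive, like PHYS_PFN(range->end) */
};

/* Mirrors vmem_altmap_offset(): number of metadata pfns at the range head */
static unsigned long altmap_offset(const struct altmap_model *altmap)
{
        return altmap ? altmap->reserve + altmap->free : 0;
}

/* The single-range case of the new pgmap_pfn_valid() check */
static bool data_pfn_valid(const struct range_model *range,
                           const struct altmap_model *altmap,
                           unsigned long pfn)
{
        if (pfn < range->start_pfn || pfn > range->end_pfn)
                return false;
        return pfn >= range->start_pfn + altmap_offset(altmap);
}

int main(void)
{
        /* Hypothetical namespace: first 0x400 pfns back 'struct page' metadata */
        struct range_model range = { .start_pfn = 0x100000, .end_pfn = 0x1fffff };
        struct altmap_model altmap = { .reserve = 0x80, .free = 0x380 };

        printf("pfn 0x100010 -> %d (metadata, not recoverable data)\n",
               data_pfn_valid(&range, &altmap, 0x100010));
        printf("pfn 0x100400 -> %d (first data pfn)\n",
               data_pfn_valid(&range, &altmap, 0x100400));
        return 0;
}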

Other usages of get_dev_pagemap() are not a concern because they operate
on known data pfns that have been looked up by get_user_pages().  I.e.,
metadata pfns are never user mapped.
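
Again purely as an illustrative userspace model (all names and values are
hypothetical): a pfn reported by the hardware poison path can land
anywhere in the range, including the metadata head, and so needs the
pgmap_pfn_valid()-style check above; a pfn pulled out of a user PTE by a
get_user_pages()-style walk is a data pfn by construction.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical layout: metadata occupies the first 0x400 pfns of the range */
#define RANGE_START_PFN 0x100000UL
#define RANGE_END_PFN   0x1fffffUL
#define FIRST_DATA_PFN  (RANGE_START_PFN + 0x400UL)

/* The check memory_failure() needs for a raw, hardware-reported pfn */
static bool raw_pfn_is_recoverable_data(unsigned long pfn)
{
        return pfn >= FIRST_DATA_PFN && pfn <= RANGE_END_PFN;
}

/*
 * A gup-style consumer never needs that check: its pfn comes out of a
 * user PTE, and the driver only ever maps data pfns into userspace.
 */
static unsigned long pfn_from_user_pte(void)
{
        return FIRST_DATA_PFN + 42;     /* a data pfn by construction */
}

int main(void)
{
        unsigned long poisoned_pfn = RANGE_START_PFN + 0x10; /* metadata */

        printf("raw pfn 0x%lx recoverable? %d\n", poisoned_pfn,
               raw_pfn_is_recoverable_data(poisoned_pfn));   /* 0 -> -ENXIO */
        printf("gup pfn 0x%lx recoverable? %d\n", pfn_from_user_pte(),
               raw_pfn_is_recoverable_data(pfn_from_user_pte())); /* 1 */
        return 0;
}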

Link: https://lkml.kernel.org/r/161058501758.1840162.4239831989762604527.stgit@dwillia2-desk3.amr.corp.intel.com
Fixes: 6100e34b2526 ("mm, memory_failure: Teach memory_failure() about dev_pagemap pages")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: David Hildenbrand <david@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memremap.h
mm/memory-failure.c
mm/memremap.c

index 79c49e7f5c304fa5b98e039e9e077e2c85b0d061..f5b464daeeca50dada35e40fdd8641a1f8e8a815 100644 (file)
@@ -137,6 +137,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
 struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap);
+bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
 
 unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
 void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
@@ -165,6 +166,11 @@ static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
        return NULL;
 }
 
+static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
+{
+       return false;
+}
+
 static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
 {
        return 0;
index 55c671904aacf8a5c220031cdfbf8d8aadd0df45..24210c9bd8434816ce4451e6041e4e7dab24b165 100644 (file)
@@ -1312,6 +1312,12 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
                 */
                put_page(page);
 
+       /* device metadata space is not recoverable */
+       if (!pgmap_pfn_valid(pgmap, pfn)) {
+               rc = -ENXIO;
+               goto out;
+       }
+
        /*
         * Prevent the inode from being freed while we are interrogating
         * the address_space, typically this would be handled by
index 16b2fb482da11245092e732c858264ada92d2364..2455bac895066d4458ae4a4c261b955dad531817 100644 (file)
@@ -80,6 +80,21 @@ static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
        return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
 }
 
+bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
+{
+       int i;
+
+       for (i = 0; i < pgmap->nr_range; i++) {
+               struct range *range = &pgmap->ranges[i];
+
+               if (pfn >= PHYS_PFN(range->start) &&
+                   pfn <= PHYS_PFN(range->end))
+                       return pfn >= pfn_first(pgmap, i);
+       }
+
+       return false;
+}
+
 static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
 {
        const struct range *range = &pgmap->ranges[range_id];