Merge remote-tracking branch 'asoc/fix/ux500' into asoc-linus
diff --git a/mm/memory.c b/mm/memory.c
index 13cbc420fead06d96f82b437a67d03b6ff9d5fca..6dc1882fbd725c61badb4fd072418c7330001ce1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -715,11 +715,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
         * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
         */
        if (vma->vm_ops)
-               print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
-                               (unsigned long)vma->vm_ops->fault);
+               printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
+                      vma->vm_ops->fault);
        if (vma->vm_file && vma->vm_file->f_op)
-               print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
-                               (unsigned long)vma->vm_file->f_op->mmap);
+               printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
+                      vma->vm_file->f_op->mmap);
        dump_stack();
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
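For context: print_symbol() took a raw unsigned long address and looked the name up itself, while the %pS/%pSR printk extensions let printk resolve the symbol directly from the pointer (the trailing R additionally applies __builtin_extract_return_addr() translation). A minimal sketch of the two styles side by side:

	/* old style: cast the pointer and let print_symbol() resolve it */
	print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
			(unsigned long)vma->vm_ops->fault);

	/* new style: printk resolves the symbol itself from the pointer */
	printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n", vma->vm_ops->fault);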
@@ -2393,6 +2393,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
+/**
+ * vm_iomap_memory - remap memory to userspace
+ * @vma: user vma to map to
+ * @start: start of area
+ * @len: size of area
+ *
+ * This is a simplified io_remap_pfn_range() for common driver use. The
+ * driver just needs to give us the physical memory range to be mapped,
+ * we'll figure out the rest from the vma information.
+ *
+ * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
+ * the write-combining behaviour (or similar) that they need.
+ */
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
+{
+       unsigned long vm_len, pfn, pages;
+
+       /* Check that the physical memory area passed in looks valid */
+       if (start + len < start)
+               return -EINVAL;
+       /*
+        * You *really* shouldn't map things that aren't page-aligned,
+        * but we've historically allowed it because IO memory might
+        * just have smaller alignment.
+        */
+       len += start & ~PAGE_MASK;
+       pfn = start >> PAGE_SHIFT;
+       pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
+       if (pfn + pages < pfn)
+               return -EINVAL;
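+       /*
+        * For example, with 4 KiB pages (illustrative numbers only):
+        * start = 0x12345080 and len = 0x2000 give len = 0x2080 once the
+        * in-page offset is added, pfn = 0x12345, and
+        * pages = (0x2080 + 0xfff) >> 12 = 3, i.e. the three page frames
+        * the unaligned range actually touches.
+        */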
+
+       /* We start the mapping 'vm_pgoff' pages into the area */
+       if (vma->vm_pgoff > pages)
+               return -EINVAL;
+       pfn += vma->vm_pgoff;
+       pages -= vma->vm_pgoff;
+
+       /* Can we fit all of the mapping? */
+       vm_len = vma->vm_end - vma->vm_start;
+       if (vm_len >> PAGE_SHIFT > pages)
+               return -EINVAL;
+
+       /* Ok, let it rip */
+       return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_iomap_memory);
+
 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                     unsigned long addr, unsigned long end,
                                     pte_fn_t fn, void *data)
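For context, a hedged sketch of how a driver would call the new helper from its mmap file operation; drv_base and drv_len are hypothetical stand-ins for the device's physical window and are not part of the patch:

	/* sketch: expose one physical/MMIO window to userspace via mmap() */
	static int drv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		return vm_iomap_memory(vma, drv_base, drv_len);
	}

The helper derives the pfn and length from the vma itself (including the vm_pgoff and overflow checks above), so the driver no longer has to open-code them around io_remap_pfn_range().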
@@ -3197,6 +3244,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        page = alloc_zeroed_user_highpage_movable(vma, address);
        if (!page)
                goto oom;
+       /*
+        * The memory barrier inside __SetPageUptodate makes sure that
+        * preceding stores to the page contents become visible before
+        * the set_pte_at() write.
+        */
        __SetPageUptodate(page);
 
        if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
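The ordering the new comment documents is easier to see laid out in sequence; a conceptual sketch of the surrounding do_anonymous_page() steps, not the literal code:

	/* 1: the page contents are written (zeroed during allocation) */
	page = alloc_zeroed_user_highpage_movable(vma, address);
	/* 2: __SetPageUptodate() issues smp_wmb() before setting PG_uptodate */
	__SetPageUptodate(page);
	/* 3: only now is the pte installed, publishing the page; without the
	 *    barrier another CPU could observe the pte before the zeroed
	 *    contents */
	set_pte_at(mm, address, page_table, entry);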