diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 673942094328a710b059b2b50e149ce7eb3d5f11..c7aaddff83aeb2f127b33bd873f4379bfe7d9e5e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -498,7 +498,11 @@ nocache:
        }
 
 found:
-       if (addr + size > vend)
+       /*
+        * Also check the calculated address against vstart, because the
+        * address can wrap around to 0 as a result of a big align request.
+        */
+       if (addr + size > vend || addr < vstart)
                goto overflow;
 
        va->va_start = addr;
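
The new "addr < vstart" test guards against unsigned wrap-around: rounding a candidate address up to a very large alignment can overflow past the top of the address space and come back as a tiny value (even 0), which "addr + size > vend" alone would happily accept. A stand-alone C sketch of that failure mode, using a hypothetical vmalloc window and a home-grown ALIGN_UP macro rather than the kernel's helpers:

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
        unsigned long vstart = 0xffffc90000000000UL;    /* assumed vmalloc start */
        unsigned long vend   = 0xffffe90000000000UL;    /* assumed vmalloc end   */
        unsigned long size   = 0x2000UL;
        unsigned long addr   = 0xffffffffff000000UL;    /* candidate near the top */
        unsigned long align  = 1UL << 32;               /* big align request */

        addr = ALIGN_UP(addr, align);                   /* wraps around to 0 */

        printf("aligned addr       = %#lx\n", addr);
        printf("addr + size > vend : %d\n", addr + size > vend);  /* 0: old check passes */
        printf("addr < vstart      : %d\n", addr < vstart);       /* 1: new check catches it */
        return 0;
}
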
@@ -677,23 +681,33 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
        struct llist_node *valist;
        struct vmap_area *va;
        struct vmap_area *n_va;
-       bool do_free = false;
+       int resched_threshold;
 
        lockdep_assert_held(&vmap_purge_lock);
 
        valist = llist_del_all(&vmap_purge_list);
+       if (unlikely(valist == NULL))
+               return false;
+
+       /*
+        * First make sure the mappings are removed from all page-tables
+        * before they are freed.
+        */
+       vmalloc_sync_all();
+
+       /*
+        * TODO: calculate the flush range without looping.
+        * The list can contain up to lazy_max_pages() elements.
+        */
        llist_for_each_entry(va, valist, purge_list) {
                if (va->va_start < start)
                        start = va->va_start;
                if (va->va_end > end)
                        end = va->va_end;
-               do_free = true;
        }
 
-       if (!do_free)
-               return false;
-
        flush_tlb_kernel_range(start, end);
+       resched_threshold = (int) lazy_max_pages() << 1;
 
        spin_lock(&vmap_area_lock);
        llist_for_each_entry_safe(va, n_va, valist, purge_list) {
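
For context on this hunk: llist_del_all() hands the purger the entire pending list in one shot (hence the early "nothing to do" return when it comes back empty), and the loop then folds every area into a single [start, end) window so one TLB flush covers them all. A rough user-space analogue of that detach-then-walk pattern, built on C11 atomics instead of the kernel's llist API (all names below are invented):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct area {
        struct area *next;
        unsigned long start, end;
};

static _Atomic(struct area *) purge_list;

/* producers push lazily-freed areas with a CAS loop */
static void push(struct area *a)
{
        struct area *old = atomic_load(&purge_list);
        do {
                a->next = old;
        } while (!atomic_compare_exchange_weak(&purge_list, &old, a));
}

/* the purger detaches the whole list in one exchange and from then
 * on owns it outright, so no lock is needed to walk it */
static struct area *detach_all(void)
{
        return atomic_exchange(&purge_list, NULL);
}

int main(void)
{
        for (int i = 1; i <= 4; i++) {
                struct area *a = malloc(sizeof(*a));
                a->start = 0x1000UL * i;
                a->end   = a->start + 0x1000UL;
                push(a);
        }

        struct area *list = detach_all();
        if (!list)                      /* nothing pending: bail out early */
                return 0;

        /* one pass to build a single flush window covering every area */
        unsigned long start = ~0UL, end = 0;
        for (struct area *a = list, *next; a; a = next) {
                next = a->next;
                if (a->start < start)
                        start = a->start;
                if (a->end > end)
                        end = a->end;
                free(a);
        }
        printf("flush [%#lx, %#lx)\n", start, end);
        return 0;
}
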
@@ -701,7 +715,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 
                __free_vmap_area(va);
                atomic_sub(nr, &vmap_lazy_nr);
-               cond_resched_lock(&vmap_area_lock);
+
+               if (atomic_read(&vmap_lazy_nr) < resched_threshold)
+                       cond_resched_lock(&vmap_area_lock);
        }
        spin_unlock(&vmap_area_lock);
        return true;
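
The threshold means cond_resched_lock() is only attempted once the count of lazily-freed pages has dropped below twice lazy_max_pages(); while the backlog is larger, the loop keeps freeing without yielding so that allocators waiting on vmap space see progress. A toy illustration of that throttled-yield idea (LAZY_MAX and the backlog size are made up, and sched_yield() merely stands in for cond_resched_lock()):

#include <sched.h>
#include <stdio.h>

#define LAZY_MAX        64              /* stand-in for lazy_max_pages() */

int main(void)
{
        int backlog = 1000;                     /* lazily-freed units to drain */
        int resched_threshold = LAZY_MAX << 1;
        int yields = 0;

        while (backlog > 0) {
                backlog--;                      /* "free one area" */

                /* only offer the CPU once the backlog is nearly drained;
                 * while it is huge, keep going so waiters see progress */
                if (backlog < resched_threshold) {
                        sched_yield();
                        yields++;
                }
        }
        printf("yielded %d times while draining 1000 items\n", yields);
        return 0;
}
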
@@ -1519,7 +1535,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
                        addr))
                return;
 
-       area = remove_vm_area(addr);
+       area = find_vmap_area((unsigned long)addr)->vm;
        if (unlikely(!area)) {
                WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
@@ -1529,6 +1545,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
        debug_check_no_locks_freed(addr, get_vm_area_size(area));
        debug_check_no_obj_freed(addr, get_vm_area_size(area));
 
+       remove_vm_area(addr);
        if (deallocate_pages) {
                int i;
 
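
The two __vunmap() hunks above reorder the teardown: the area is looked up via find_vmap_area() and the debug checks run while it is still present in the vmap tree, and only afterwards is it unlinked with remove_vm_area(). The general shape (look up, check while still registered, then remove) can be sketched with a toy registry standing in for the vmap tree; everything below is invented for illustration:

#include <stdio.h>

#define SLOTS 8

static void *registry[SLOTS];           /* toy stand-in for the vmap tree */

static int find_slot(void *p)
{
        for (int i = 0; i < SLOTS; i++)
                if (registry[i] == p)
                        return i;
        return -1;
}

/* a checker that only trusts objects it can still find in the registry */
static void debug_check(void *p)
{
        printf("check %p: %s\n", p, find_slot(p) >= 0 ? "tracked" : "UNTRACKED");
}

int main(void)
{
        char obj[32];

        registry[0] = obj;

        int slot = find_slot(obj);      /* find_vmap_area()-like lookup       */
        debug_check(obj);               /* runs while obj is still registered */
        registry[slot] = NULL;          /* remove_vm_area()-like unlink, last */

        /* unlinking before debug_check() would make it report UNTRACKED */
        return 0;
}
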
@@ -1943,11 +1960,15 @@ void *vmalloc_exec(unsigned long size)
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
-#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
+#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
-#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
+#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
 #else
-#define GFP_VMALLOC32 GFP_KERNEL
+/*
+ * 64-bit systems should always have either a DMA or a DMA32 zone. For the
+ * rest, GFP_DMA32 should do the right thing and use the normal zone.
+ */
+#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
 #endif
 
 /**
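
Wrapping the GFP_VMALLOC32 definitions in parentheses matters because the macro is spliced into larger expressions: without them, an operator that binds tighter than '|' (such as '&') applies only to the last term of the expansion. A small demo with made-up flag values (the real GFP bit layout differs):

#include <stdio.h>

#define GFP_DMA32       0x04u           /* made-up values, just for the demo */
#define GFP_KERNEL      0xc0u
#define SOME_FLAG       0x04u           /* deliberately overlaps GFP_DMA32 */

#define GFP_NOPARENS    GFP_DMA32 | GFP_KERNEL
#define GFP_PARENS      (GFP_DMA32 | GFP_KERNEL)

int main(void)
{
        /* '&' binds tighter than '|': without parentheses, only GFP_KERNEL
         * gets masked and the DMA32 bit survives the "clear" */
        printf("no parens: %#x\n", GFP_NOPARENS & ~SOME_FLAG);  /* 0xc4 */
        printf("parens   : %#x\n", GFP_PARENS & ~SOME_FLAG);    /* 0xc0 */
        return 0;
}
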
@@ -2257,7 +2278,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
        if (!(area->flags & VM_USERMAP))
                return -EINVAL;
 
-       if (kaddr + size > area->addr + area->size)
+       if (kaddr + size > area->addr + get_vm_area_size(area))
                return -EINVAL;
 
        do {
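
get_vm_area_size() is used for the bound because area->size normally includes the trailing guard page vmalloc leaves after the usable range; bounding the user mapping by area->addr + area->size would let a request spill onto that guard page. A user-space sketch of the two checks, with a toy structure and a helper that mimics (but is not) the kernel's get_vm_area_size():

#include <stdio.h>

#define PAGE_SIZE 4096UL

struct demo_area {
        unsigned long addr;
        unsigned long size;     /* usable pages plus one trailing guard page */
};

/* usable size excludes the guard page */
static unsigned long usable_size(const struct demo_area *a)
{
        return a->size - PAGE_SIZE;
}

int main(void)
{
        /* 4 usable pages + 1 guard page */
        struct demo_area area = { .addr = 0x100000UL, .size = 5 * PAGE_SIZE };
        unsigned long kaddr = area.addr;
        unsigned long size  = 5 * PAGE_SIZE;    /* spills onto the guard page */

        printf("old bound: %s\n",
               kaddr + size > area.addr + area.size ? "reject" : "accept");
        printf("new bound: %s\n",
               kaddr + size > area.addr + usable_size(&area) ? "reject" : "accept");
        return 0;
}
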
@@ -2305,6 +2326,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 /*
  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
  * have one.
+ *
+ * The purpose of this function is to make sure the vmalloc area
+ * mappings are identical in all page-tables in the system.
  */
 void __weak vmalloc_sync_all(void)
 {
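
The stub relies on the weak-symbol mechanism: generic code ships a do-nothing default, and an architecture that really needs to propagate vmalloc mappings provides a strong definition that wins at link time. A two-file user-space sketch of the same pattern (function and file names are invented):

/* generic.c */
#include <stdio.h>

void __attribute__((weak)) sync_all(void)
{
        puts("weak stub: nothing to sync");
}

int main(void)
{
        sync_all();
        return 0;
}

/* arch.c -- optional; linking it in silently overrides the weak stub */
#include <stdio.h>

void sync_all(void)
{
        puts("strong override: syncing all page tables");
}

/* cc generic.c          ->  "weak stub: nothing to sync"
 * cc generic.c arch.c   ->  "strong override: syncing all page tables" */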