__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
+#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+ return prot;
+}
+#endif
+
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
- return __pgprot(pgprot_val(protection_map[vm_flags &
+ pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
pgprot_val(arch_vm_get_page_prot(vm_flags)));
+
+ return arch_filter_pgprot(ret);
}
EXPORT_SYMBOL(vm_get_page_prot);
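
arch_filter_pgprot() stays a no-op unless the architecture selects
CONFIG_ARCH_HAS_FILTER_PGPROT and supplies its own implementation. As a
minimal sketch of what such an override might look like, assuming a
hypothetical per-arch mask of protection bits to strip
(ARCH_FILTERED_PROT_BITS is illustrative, not an existing kernel symbol):

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	/* Drop protection bits this architecture must not expose;
	 * ARCH_FILTERED_PROT_BITS is a hypothetical mask. */
	return __pgprot(pgprot_val(prot) & ~ARCH_FILTERED_PROT_BITS);
}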
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file)
- fput(vma->vm_file);
+ vma_fput(vma);
mpol_put(vma_policy(vma));
kmem_cache_free(vm_area_cachep, vma);
return next;
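
vma_fput() and vma_get_file() are the aufs helpers that this patch
substitutes for fput(vma->vm_file) and get_file(vma->vm_file): the aufs
mmap patch adds a vm_prfile member to struct vm_area_struct and keeps its
refcount in step with vm_file. A sketch of the pair, assuming that
patch's mm/prfile.c:

void vma_get_file(struct vm_area_struct *vma)
{
	/* Take a reference on the mapped file and, when present,
	 * on the pseudo file shown in /proc/<pid>/maps. */
	if (vma->vm_file)
		get_file(vma->vm_file);
	if (vma->vm_prfile)
		get_file(vma->vm_prfile);
}

void vma_fput(struct vm_area_struct *vma)
{
	/* Drop both references taken by vma_get_file(). */
	if (vma->vm_file)
		fput(vma->vm_file);
	if (vma->vm_prfile)
		fput(vma->vm_prfile);
}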
if (remove_next) {
if (file) {
uprobe_munmap(next, next->vm_start, next->vm_end);
- fput(file);
+ vma_fput(vma);
}
if (next->anon_vma)
anon_vma_merge(vma, next);
return 0;
}
+static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
+{
+ if (S_ISREG(inode->i_mode))
+ return MAX_LFS_FILESIZE;
+
+ if (S_ISBLK(inode->i_mode))
+ return MAX_LFS_FILESIZE;
+
+ /* Special "we do even unsigned file positions" case */
+ if (file->f_mode & FMODE_UNSIGNED_OFFSET)
+ return 0;
+
+ /* Yes, random drivers might want more. But I'm tired of buggy drivers */
+ return ULONG_MAX;
+}
+
+static inline bool file_mmap_ok(struct file *file, struct inode *inode,
+ unsigned long pgoff, unsigned long len)
+{
+ u64 maxsize = file_mmap_size_max(file, inode);
+
+ if (maxsize && len > maxsize)
+ return false;
+ maxsize -= len;
+ if (pgoff > maxsize >> PAGE_SHIFT)
+ return false;
+ return true;
+}
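
The check works entirely in page units so it cannot overflow itself: len
is capped against maxsize first, and pgoff is compared against the
remaining room right-shifted by PAGE_SHIFT, rather than computing
pgoff << PAGE_SHIFT in bytes. A small illustration of the wraparound this
forestalls in drivers that do the byte conversion themselves:

/* Illustrative only: a page offset this large wraps to 0 once it is
 * converted back to a byte offset on a 64-bit kernel. */
static bool pgoff_shift_wraps(void)
{
	unsigned long pgoff = 1UL << (BITS_PER_LONG - PAGE_SHIFT);
	unsigned long byte_off = pgoff << PAGE_SHIFT;	/* wraps to 0 */

	return byte_off < pgoff;	/* true: the shift overflowed */
}

For a regular file, file_mmap_ok() rejects such a pgoff up front: maxsize
is MAX_LFS_FILESIZE, so the pgoff > (maxsize - len) >> PAGE_SHIFT test
fires and the driver never sees the mapping.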
+
/*
 * The caller must hold down_write(&current->mm->mmap_sem).
*/
struct inode *inode = file_inode(file);
unsigned long flags_mask;
+ if (!file_mmap_ok(file, inode, pgoff, len))
+ return -EOVERFLOW;
+
flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
switch (flags & MAP_TYPE) {
return addr;
unmap_and_free_vma:
+ vma_fput(vma);
vma->vm_file = NULL;
- fput(file);
/* Undo any partial mapping done by a device driver. */
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
goto out_free_mpol;
if (new->vm_file)
- get_file(new->vm_file);
+ vma_get_file(new);
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
if (new->vm_ops && new->vm_ops->close)
new->vm_ops->close(new);
if (new->vm_file)
- fput(new->vm_file);
+ vma_fput(new);
unlink_anon_vmas(new);
out_free_mpol:
mpol_put(vma_policy(new));
struct vm_area_struct *vma;
unsigned long populate = 0;
unsigned long ret = -EINVAL;
- struct file *file;
+ struct file *file, *prfile;
pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.txt.\n",
current->comm, current->pid);
}
}
- file = get_file(vma->vm_file);
+ vma_get_file(vma);
+ file = vma->vm_file;
+ prfile = vma->vm_prfile;
ret = do_mmap_pgoff(vma->vm_file, start, size,
prot, flags, pgoff, &populate, NULL);
+ if (!IS_ERR_VALUE(ret) && file && prfile) {
+ struct vm_area_struct *new_vma;
+
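+ /*
+ * Hand the pseudo file over to whichever vma now covers the
+ * remapped range; a vma other than the original one needs its
+ * own reference on prfile.
+ */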
+ new_vma = find_vma(mm, ret);
+ if (!new_vma->vm_prfile)
+ new_vma->vm_prfile = prfile;
+ if (new_vma != vma)
+ get_file(prfile);
+ }
+ /*
+ * two fput()s instead of vma_fput(vma), because by the time
+ * do_mmap_pgoff() returns, the vma may have been merged away or
+ * unmapped and can no longer be dereferenced safely.
+ */
fput(file);
+ if (prfile)
+ fput(prfile);
out:
up_write(&mm->mmap_sem);
if (populate)
/* mm's last user has gone, and its about to be pulled down */
mmu_notifier_release(mm);
+ if (unlikely(mm_is_oom_victim(mm))) {
+ /*
+ * Manually reap the mm to free as much memory as possible.
+ * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
+ * this mm from further consideration. Taking mm->mmap_sem for
+ * write after setting MMF_OOM_SKIP will guarantee that the oom
+ * reaper will not run on this mm again after mmap_sem is
+ * dropped.
+ *
+ * Nothing can be holding mm->mmap_sem here and the above call
+ * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
+ * __oom_reap_task_mm() will not block.
+ *
+ * This needs to be done before calling munlock_vma_pages_all(),
+ * which clears VM_LOCKED, otherwise the oom reaper cannot
+ * reliably test it.
+ */
+ mutex_lock(&oom_lock);
+ __oom_reap_task_mm(mm);
+ mutex_unlock(&oom_lock);
+
+ set_bit(MMF_OOM_SKIP, &mm->flags);
+ down_write(&mm->mmap_sem);
+ up_write(&mm->mmap_sem);
+ }
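+
This ordering pairs with the reaper side: once MMF_OOM_SKIP is set and
mmap_sem has been cycled for write, any reaper that subsequently takes
mmap_sem for read must observe the bit and back off. A simplified sketch
of that side, not the exact function in oom_kill.c:

/* Simplified sketch of the oom reaper's half of the handshake. */
static bool try_reap(struct mm_struct *mm)
{
	if (!down_read_trylock(&mm->mmap_sem))
		return false;		/* exit_mmap() holds it; retry */

	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		up_read(&mm->mmap_sem);
		return true;		/* exit path already reaped it */
	}

	/* VM_LOCKED has not been cleared yet, so locked vmas can
	 * still be recognized and skipped here. */
	__oom_reap_task_mm(mm);
	up_read(&mm->mmap_sem);
	return true;
}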
+
if (mm->locked_vm) {
vma = mm->mmap;
while (vma) {
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use -1 here to ensure all VMAs in the mm are unmapped */
unmap_vmas(&tlb, vma, 0, -1);
-
- if (unlikely(mm_is_oom_victim(mm))) {
- /*
- * Wait for oom_reap_task() to stop working on this
- * mm. Because MMF_OOM_SKIP is already set before
- * calling down_read(), oom_reap_task() will not run
- * on this "mm" post up_write().
- *
- * mm_is_oom_victim() cannot be set from under us
- * either because victim->mm is already set to NULL
- * under task_lock before calling mmput and oom_mm is
- * set not NULL by the OOM killer only if victim->mm
- * is found not NULL while holding the task_lock.
- */
- set_bit(MMF_OOM_SKIP, &mm->flags);
- down_write(&mm->mmap_sem);
- up_write(&mm->mmap_sem);
- }
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb, 0, -1);
if (anon_vma_clone(new_vma, vma))
goto out_free_mempol;
if (new_vma->vm_file)
- get_file(new_vma->vm_file);
+ vma_get_file(new_vma);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
vma_link(mm, new_vma, prev, rb_link, rb_parent);