/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
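/*
 * (Inferred from the code below, not a normative statement:) a private
 * page lets __xip_unmap() use rmap to find and tear down every sparse
 * mapping of it, without touching a page that unrelated users may have
 * mapped for their own purposes.
 */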
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO(xip_sparse_seq);
static struct page *__xip_sparse_page;
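/*
 * Locking sketch (a descriptive summary of how these are used below):
 * updates to sparse state take xip_sparse_mutex inside a xip_sparse_seq
 * write section; __xip_unmap() first walks lock-free under a seqcount
 * read section and falls back to the mutex only when it detects a race.
 */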
/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
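/*
 * For reference, the aop this file relies on has roughly the shape below
 * in kernels of this vintage (an illustrative sketch, see struct
 * address_space_operations):
 *
 *	int (*get_xip_mem)(struct address_space *mapping, pgoff_t pgoff,
 *			   int create, void **kmem, unsigned long *pfn);
 *
 * It returns 0 with *kmem (kernel mapping) and *pfn filled in on success,
 * and -ENODATA for an unallocated (sparse) block when create == 0.
 */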
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;
	BUG_ON(!mapping->a_ops->get_xip_mem);
	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;
		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;
		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}
		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;
		/*
		 * Ok, we have the mem, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		if (!zero)
			left = __copy_to_user(buf + copied, xip_mem + offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}
		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);
out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
				   buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
/*
 * __xip_unmap is invoked from xip_unmap and xip_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
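/*
 * The retry scheme below, summarized: sample xip_sparse_seq, walk the
 * rmap without xip_sparse_mutex, and if a fault handler raced with us
 * (the seqcount changed), take the mutex and walk again so that no
 * stale sparse pte can survive the update.
 */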
static void __xip_unmap(struct address_space * mapping, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct page *page;
	unsigned count;
	int locked = 0;

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		pte_t *pte, pteval;
		spinlock_t *ptl;
		struct mm_struct *mm = vma->vm_mm;
		unsigned long address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, MM_FILEPAGES);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			/* must invalidate_page _before_ freeing the page */
			mmu_notifier_invalidate_page(mm, address);
			page_cache_release(page);
		}
	}
	i_mmap_unlock_read(mapping);

	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}
/*
 * xip_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
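/*
 * Three outcomes, roughly (a summary of the logic below): an existing
 * block is mapped by pfn via vm_insert_mixed(); a shared writable fault
 * on a hole allocates a block first and unmaps stale sparse mappings;
 * a private/read fault on a hole maps the shared zero page instead.
 */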
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;
	/* XXX: are VM_FAULT_ codes OK? */
again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;
	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;
	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;
		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);
found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		/*
		 * err == -EBUSY is fine, we've raced against another thread
		 * that faulted-in the same page
		 */
		if (err != -EBUSY)
			BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}
static const struct vm_operations_struct xip_file_vm_ops = {
	.fault		= xip_file_fault,
	.page_mkwrite	= filemap_page_mkwrite,
};
int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space * mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode	*inode = mapping->host;
	long		status = 0;
	size_t		bytes;
	ssize_t		written = 0;
	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;
		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;
		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* we allocate a new page and unmap it */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;
		/* uncached copy keeps write data from polluting the cache */
		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;
	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;
	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	ret = file_update_time(filp);
	if (ret)
		goto out_backing;
	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but uses get_xip_mem
 * to get the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);
	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);

	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);