drivers/staging/lustre/lustre/llite/llite_mmap.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
		       int *type);

static struct vm_operations_struct ll_file_vm_ops;

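/*
 * Fill an LDLM extent policy for the byte range that backs
 * [addr, addr + count) inside @vma: the extent start is the page-aligned
 * file offset backing @addr, and the end is rounded up to the last byte
 * of the page covering @addr + @count - 1.
 */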
void policy_from_vma(ldlm_policy_data_t *policy,
		     struct vm_area_struct *vma, unsigned long addr,
		     size_t count)
{
	policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
				 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
	policy->l_extent.end = (policy->l_extent.start + count - 1) |
			       ~CFS_PAGE_MASK;
}

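/*
 * Return the first VMA overlapping the range [addr, addr + count) that is
 * a shared mapping of a Lustre file (vm_ops == &ll_file_vm_ops), or NULL
 * if none exists.  The caller must already hold mmap_sem, as the LASSERT
 * below checks.
 */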
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
			       size_t count)
{
	struct vm_area_struct *vma, *ret = NULL;
	ENTRY;

	/* mmap_sem must have been held by caller. */
	LASSERT(!down_write_trylock(&mm->mmap_sem));

	for (vma = find_vma(mm, addr);
	     vma != NULL && vma->vm_start < (addr + count);
	     vma = vma->vm_next) {
		if (vma->vm_ops == &ll_file_vm_ops &&
		    vma->vm_flags & VM_SHARED) {
			ret = vma;
			break;
		}
	}
	RETURN(ret);
}

/**
 * API-independent part of page fault initialization.
 * \param vma - virtual memory area addressed by the page fault
 * \param env_ret - corresponding lu_env for processing
 * \param nest - nesting level
 * \param index - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return allocated and initialized env for the fault operation
 * \retval EINVAL if the env cannot be allocated
 * \return other error codes from cl_io_init
 */
struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
			       struct lu_env **env_ret,
			       struct cl_env_nest *nest,
			       pgoff_t index, unsigned long *ra_flags)
{
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_dentry->d_inode;
	struct cl_io *io;
	struct cl_fault_io *fio;
	struct lu_env *env;
	ENTRY;

	*env_ret = NULL;
	if (ll_file_nolock(file))
		RETURN(ERR_PTR(-EOPNOTSUPP));

	/*
	 * A page fault can occur when Lustre I/O is already active for the
	 * current thread, e.g. when doing read/write against a user-space
	 * buffer that is itself mapped from a Lustre file.  To avoid
	 * stomping on the existing context, force the allocation of a
	 * nested one.
	 */
	env = cl_env_nested_get(nest);
	if (IS_ERR(env))
		RETURN(ERR_PTR(-EINVAL));

	*env_ret = env;

	io = ccc_env_thread_io(env);
	io->ci_obj = ll_i2info(inode)->lli_clob;
	LASSERT(io->ci_obj != NULL);

	fio = &io->u.ci_fault;
	fio->ft_index = index;
	fio->ft_executable = vma->vm_flags & VM_EXEC;

	/*
	 * Disable VM_SEQ_READ and use VM_RAND_READ to make sure that the
	 * kernel will not read other pages not covered by ldlm in
	 * filemap_nopage.  We do our readahead in ll_readpage.
	 */
	if (ra_flags != NULL)
		*ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
	vma->vm_flags &= ~VM_SEQ_READ;
	vma->vm_flags |= VM_RAND_READ;

	CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
	       fio->ft_index, fio->ft_executable);

	if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
		struct ccc_io *cio = ccc_env_io(env);
		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

		LASSERT(cio->cui_cl.cis_io == io);

		/* The mmap lock must be MANDATORY because the pages it
		 * covers have to stay cached. */
		io->ci_lockreq = CILR_MANDATORY;

		cio->cui_fd = fd;
	}

	RETURN(io);
}

/* Code shared by the page_mkwrite() implementations for RHEL5 and RHEL6. */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
			    bool *retry)
{
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio;
	struct cl_env_nest nest;
	int result;
	sigset_t set;
	struct inode *inode;
	struct ll_inode_info *lli;
	ENTRY;

	LASSERT(vmpage != NULL);

	io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
	if (IS_ERR(io))
		GOTO(out, result = PTR_ERR(io));

	result = io->ci_result;
	if (result < 0)
		GOTO(out, result);

	io->u.ci_fault.ft_mkwrite = 1;
	io->u.ci_fault.ft_writable = 1;

	vio = vvp_env_io(env);
	vio->u.fault.ft_vma = vma;
	vio->u.fault.ft_vmpage = vmpage;

	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

	/* Grab lli_trunc_sem to exclude a concurrent truncate; otherwise
	 * we could add dirty pages into the osc cache while the truncate
	 * is in progress. */
	inode = ccc_object_inode(io->ci_obj);
	lli = ll_i2info(inode);
	down_read(&lli->lli_trunc_sem);

	result = cl_io_loop(env, io);

	up_read(&lli->lli_trunc_sem);

	cfs_restore_sigs(set);

	if (result == 0) {
		lock_page(vmpage);
		if (vmpage->mapping == NULL) {
			unlock_page(vmpage);

			/* The page was truncated and the lock was cancelled;
			 * return ENODATA so that VM_FAULT_NOPAGE will be
			 * returned to handle_mm_fault(). */
			result = -ENODATA;
		} else if (!PageDirty(vmpage)) {
			/* Race: the page was cleaned by ptlrpcd after it was
			 * unlocked, so it has to be added into the dirty
			 * cache again; otherwise this soon-to-be-dirty page
			 * won't consume any grants, and even worse, it will
			 * break the RPC checksum if it is being transferred.
			 */
			unlock_page(vmpage);

			CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
			       vmpage, vmpage->index);

			*retry = true;
			result = -EAGAIN;
		}

		if (result == 0) {
			spin_lock(&lli->lli_lock);
			lli->lli_flags |= LLIF_DATA_MODIFIED;
			spin_unlock(&lli->lli_lock);
		}
	}
	EXIT;

out:
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);

	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);

	LASSERT(ergo(result == 0, PageLocked(vmpage)));
	return result;
}

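/*
 * Translate a cl_io error code into the VM_FAULT_* code expected by the
 * VM: 0 means the page is returned locked, -EFAULT maps to
 * VM_FAULT_NOPAGE, -ENOMEM to VM_FAULT_OOM, and anything else to
 * VM_FAULT_SIGBUS.
 */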
static inline int to_fault_error(int result)
{
	switch (result) {
	case 0:
		result = VM_FAULT_LOCKED;
		break;
	case -EFAULT:
		result = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}
	return result;
}

/**
 * Lustre implementation of the vm_operations_struct::fault() method,
 * called by the VM to serve a page fault (both in kernel and user space).
 *
 * \param vma - virtual memory area to which the page fault applies
 * \param vmf - structure describing the type and address of the fault
 *
 * \return an allocated and filled _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval VM_FAULT_OOM if there is no memory to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio = NULL;
	struct page *vmpage;
	unsigned long ra_flags;
	struct cl_env_nest nest;
	int result;
	int fault_ret = 0;
	ENTRY;

	io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
	if (IS_ERR(io))
		RETURN(to_fault_error(PTR_ERR(io)));

	result = io->ci_result;
	if (result == 0) {
		vio = vvp_env_io(env);
		vio->u.fault.ft_vma = vma;
		vio->u.fault.ft_vmpage = NULL;
		vio->u.fault.fault.ft_vmf = vmf;

		result = cl_io_loop(env, io);

		fault_ret = vio->u.fault.fault.ft_flags;
		vmpage = vio->u.fault.ft_vmpage;
		if (result != 0 && vmpage != NULL) {
			page_cache_release(vmpage);
			vmf->page = NULL;
		}
	}
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);

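	/* Put back the readahead bits that ll_fault_io_init() saved and
	 * replaced with VM_RAND_READ while the fault was being served. */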
	vma->vm_flags |= ra_flags;
	if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
		fault_ret |= to_fault_error(result);

	CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
	RETURN(fault_ret);
}

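/*
 * Top-level fault() handler: block all signals except SIGKILL and SIGTERM,
 * run ll_fault0(), and on success re-check under the page lock that the
 * page has not been truncated, restarting the fault if it has.
 */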
static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int count = 0;
	bool printed = false;
	int result;
	sigset_t set;

	/* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
	 * so that the process can be killed by the admin but does not
	 * segfault on other signals. */
	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

restart:
	result = ll_fault0(vma, vmf);
	LASSERT(!(result & VM_FAULT_LOCKED));
	if (result == 0) {
		struct page *vmpage = vmf->page;

		/* check if this page has been truncated */
		lock_page(vmpage);
		if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
			unlock_page(vmpage);
			page_cache_release(vmpage);
			vmf->page = NULL;

			if (!printed && ++count > 16) {
				CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
				      current->comm);
				printed = true;
			}

			goto restart;
		}

		result |= VM_FAULT_LOCKED;
	}
	cfs_restore_sigs(set);
	return result;
}

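/*
 * The vm_operations_struct::page_mkwrite() handler: make a mapped page
 * writable via ll_page_mkwrite0(), retrying while it loses the race with
 * writeback, and translate the result into a VM_FAULT_* code.
 */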
static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int count = 0;
	bool printed = false;
	bool retry;
	int result;

	do {
		retry = false;
		result = ll_page_mkwrite0(vma, vmf->page, &retry);

		if (!printed && ++count > 16) {
			CWARN("app(%s): the page %lu of file %lu is under heavy contention.\n",
			      current->comm, vmf->pgoff,
			      vma->vm_file->f_dentry->d_inode->i_ino);
			printed = true;
		}
	} while (retry);

	switch (result) {
	case 0:
		LASSERT(PageLocked(vmf->page));
		result = VM_FAULT_LOCKED;
		break;
	case -ENODATA:
	case -EFAULT:
		result = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	case -EAGAIN:
		result = VM_FAULT_RETRY;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}

	return result;
}

/**
 * To avoid cancelling the locks that cover an mmapped region under lock
 * cache pressure, we track the number of mapped VMAs in
 * ccc_object::cob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct ccc_object *vob = cl_inode2ccc(inode);

	ENTRY;
	LASSERT(vma->vm_file);
	LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
	atomic_inc(&vob->cob_mmap_cnt);
	EXIT;
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct ccc_object *vob = cl_inode2ccc(inode);

	ENTRY;
	LASSERT(vma->vm_file);
	atomic_dec(&vob->cob_mmap_cnt);
	LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
	EXIT;
}

/* Return the user-space address that maps to a file offset via a VMA. */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
	return vma->vm_start +
	       (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
}

/* XXX put nice comment here.  Talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte. */
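/*
 * Unmap the page-table entries covering the byte range [first, last] of
 * @mapping so that subsequent accesses fault again.  Returns 0 if the
 * mapping had any user mappings, -ENOENT otherwise.
 */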
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
	int rc = -ENOENT;
	ENTRY;

	LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
	if (mapping_mapped(mapping)) {
		rc = 0;
		unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
				    last - first + 1, 0);
	}

	RETURN(rc);
}

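/* VM operations installed by ll_file_mmap() on Lustre file mappings. */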
static struct vm_operations_struct ll_file_vm_ops = {
	.fault		= ll_fault,
	.page_mkwrite	= ll_page_mkwrite,
	.open		= ll_vm_open,
	.close		= ll_vm_close,
};

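/*
 * The file_operations::mmap() method: set up the mapping with
 * generic_file_mmap(), install ll_file_vm_ops, take the initial
 * ll_vm_open() reference, and refresh the inode's size and mtime
 * with a glimpse.
 */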
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_dentry->d_inode;
	int rc;
	ENTRY;

	if (ll_file_nolock(file))
		RETURN(-EOPNOTSUPP);

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
	rc = generic_file_mmap(file, vma);
	if (rc == 0) {
		vma->vm_ops = &ll_file_vm_ops;
		vma->vm_ops->open(vma);
		/* update the inode's size and mtime */
		rc = ll_glimpse_size(inode);
	}

	RETURN(rc);
}