/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "vvp_internal.h"

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice);

/**
 * Return true if \a io is a normal io, false for splice_{read,write}.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
	struct vvp_io *vio = vvp_env_io(env);

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	return vio->cui_io_subtype == IO_NORMAL;
}

/**
 * For swapping layout. The file's layout may have changed.
 * To avoid populating pages to a wrong stripe, we have to verify the
 * correctness of layout. It works because swapping layout processes
 * have to acquire group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
			       struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_io *cio = ccc_env_io(env);
	bool rc = true;

	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		/* no lock is needed to check lli_layout_gen here, as we hold
		 * the extent lock and the GROUP lock has to be held to swap
		 * the layout */
		if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
			io->ci_need_restart = 1;
			/* this returns a short read/write to the application */
			io->ci_continue = 0;
			rc = false;
		}
	case CIT_FAULT:
		/* fault is okay because we've already had a page. */
	default:
		break;
	}

	return rc;
}

/*****************************************************************************
 *
 * io operations.
 *
 */

static int vvp_io_fault_iter_init(const struct lu_env *env,
				  const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct inode *inode = ccc_object_inode(ios->cis_obj);

	LASSERT(inode ==
		file_inode(cl2ccc_io(env, ios)->cui_fd->fd_file));
	vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
	return 0;
}

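/*
 * Finalize the io slice: if the file was released, request a restore via
 * ll_layout_restore(), then re-check the layout version so callers can
 * restart the io when the layout changed underneath it.
 */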
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct ccc_io *cio = cl2ccc_io(env, ios);

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	if (io->ci_restore_needed == 1) {
		int rc;

		/* file was detected released, we need to restore it
		 * before finishing the io
		 */
		rc = ll_layout_restore(ccc_object_inode(obj));
		/* if restore registration failed, no restart,
		 * we will return -ENODATA */
		/* The layout will change after restore, so we need to
		 * block on the layout lock held by the MDT.
		 * As the MDT will not send the new layout in the lvb
		 * (see LU-3124), we have to explicitly fetch it; all this
		 * will be done by ll_layout_refresh().
		 */
		if (rc == 0) {
			io->ci_restore_needed = 0;
			io->ci_need_restart = 1;
			io->ci_verify_layout = 1;
		} else {
			io->ci_restore_needed = 1;
			io->ci_need_restart = 0;
			io->ci_verify_layout = 0;
			io->ci_result = rc;
		}
	}

	if (!io->ci_ignore_layout && io->ci_verify_layout) {
		__u32 gen = 0;

		/* check layout version */
		ll_layout_refresh(ccc_object_inode(obj), &gen);
		io->ci_need_restart = cio->cui_layout_gen != gen;
		if (io->ci_need_restart) {
			CDEBUG(D_VFSTRACE,
			       DFID" layout changed from %d to %d.\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       cio->cui_layout_gen, gen);
			/* today a successful restore is the only possible
			 * case */
			/* restore was done, clear restoring state */
			ll_i2info(ccc_object_inode(obj))->lli_flags &=
				~LLIF_FILE_RESTORING;
		}
	}
}

static void vvp_io_fault_fini(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct cl_page *page = io->u.ci_fault.ft_page;

	CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));

	if (page != NULL) {
		lu_ref_del(&page->cp_reference, "fault", io);
		cl_page_put(env, page);
		io->u.ci_fault.ft_page = NULL;
	}
	vvp_io_fini(env, ios);
}

static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
{
	/*
	 * we only want to hold PW locks if the mmap() can generate
	 * writes back to the file and that only happens in shared
	 * writable vmas
	 */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
		return CLM_WRITE;
	return CLM_READ;
}

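/*
 * Walk the io's user buffers and, for every mmapped file region overlapping
 * a buffer, queue an additional cl lock (mode chosen by vvp_mode_from_vma())
 * so that page faults taken while copying the data are already covered.
 */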
static int vvp_mmap_locks(const struct lu_env *env,
			  struct ccc_io *vio, struct cl_io *io)
{
	struct ccc_thread_info *cti = ccc_env_info(env);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct cl_lock_descr *descr = &cti->cti_descr;
	ldlm_policy_data_t policy;
	unsigned long addr;
	ssize_t count;
	int result;
	struct iov_iter i;
	struct iovec iov;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	if (!cl_is_normalio(env, io))
		return 0;

	if (vio->cui_iter == NULL) /* NFS or loopback device write */
		return 0;

	/* No MM (e.g. NFS)? Then no vmas either. */
	if (mm == NULL)
		return 0;

	iov_for_each(iov, i, *(vio->cui_iter)) {
		addr = (unsigned long)iov.iov_base;
		count = iov.iov_len;
		if (count == 0)
			continue;

		count += addr & (~CFS_PAGE_MASK);
		addr &= CFS_PAGE_MASK;

		down_read(&mm->mmap_sem);
		while ((vma = our_vma(mm, addr, count)) != NULL) {
			struct inode *inode = file_inode(vma->vm_file);
			int flags = CEF_MUST;

			if (ll_file_nolock(vma->vm_file)) {
				/*
				 * For the nolock case, a lockless lock will
				 * be generated.
				 */
				flags = CEF_NEVER;
			}

			/*
			 * XXX: Required lock mode can be weakened: CIT_WRITE
			 * io only ever reads user level buffer, and CIT_READ
			 * only writes on it.
			 */
			policy_from_vma(&policy, vma, addr, count);
			descr->cld_mode = vvp_mode_from_vma(vma);
			descr->cld_obj = ll_i2info(inode)->lli_clob;
			descr->cld_start = cl_index(descr->cld_obj,
						    policy.l_extent.start);
			descr->cld_end = cl_index(descr->cld_obj,
						  policy.l_extent.end);
			descr->cld_enq_flags = flags;
			result = cl_io_lock_alloc_add(env, io, descr);

			CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
			       descr->cld_mode, descr->cld_start,
			       descr->cld_end);

			if (result < 0) {
				up_read(&mm->mmap_sem);
				return result;
			}

			if (vma->vm_end - addr >= count)
				break;

			count -= vma->vm_end - addr;
			addr = vma->vm_end;
		}
		up_read(&mm->mmap_sem);
	}
	return 0;
}

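/*
 * Common lock step for read and write: update the remaining iov, add locks
 * for mmapped buffers, then enqueue a single extent lock over [start, end].
 */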
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
			  enum cl_lock_mode mode, loff_t start, loff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	int result;
	int ast_flags = 0;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	ccc_io_update_iov(env, cio, io);

	if (io->u.ci_rw.crw_nonblock)
		ast_flags |= CEF_NONBLOCK;
	result = vvp_mmap_locks(env, cio, io);
	if (result == 0)
		result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
	return result;
}

static int vvp_io_read_lock(const struct lu_env *env,
			    const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
	int result;

	result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
				rd->crw_pos + rd->crw_count - 1);

	return result;
}

static int vvp_io_fault_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct vvp_io *vio = cl2vvp_io(env, ios);
	/*
	 * XXX LDLM_FL_CBPENDING
	 */
	return ccc_io_one_lock_index
		(env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
		 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
}

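/*
 * For O_APPEND the final write offset is only known once the size has been
 * sampled under lock, so the whole file [0, OBD_OBJECT_EOF] is locked;
 * otherwise only the written extent is.
 */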
static int vvp_io_write_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	loff_t start;
	loff_t end;

	if (io->u.ci_wr.wr_append) {
		start = 0;
		end = OBD_OBJECT_EOF;
	} else {
		start = io->u.ci_wr.wr.crw_pos;
		end = start + io->u.ci_wr.wr.crw_count - 1;
	}
	return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}

static int vvp_io_setattr_iter_init(const struct lu_env *env,
				    const struct cl_io_slice *ios)
{
	return 0;
}

/**
 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
 *
 * Handles "lockless io" mode when extent locking is done by server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_io *io = ios->cis_io;
	__u64 new_size;
	__u32 enqflags = 0;

	if (cl_io_is_trunc(io)) {
		new_size = io->u.ci_setattr.sa_attr.lvb_size;
		if (new_size == 0)
			enqflags = CEF_DISCARD_DATA;
	} else {
		if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime) ||
		    (io->u.ci_setattr.sa_attr.lvb_atime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime))
			return 0;
		new_size = 0;
	}
	cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
	return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
			       new_size, OBD_OBJECT_EOF);
}

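/*
 * Apply the new size to the in-memory inode under ll_inode_size_lock();
 * returns 0 or the error from inode_newsize_ok().
 */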
static int vvp_do_vmtruncate(struct inode *inode, size_t size)
{
	int result;
	/*
	 * Only ll_inode_size_lock is taken at this level.
	 */
	ll_inode_size_lock(inode);
	result = inode_newsize_ok(inode, size);
	if (result < 0) {
		ll_inode_size_unlock(inode);
		return result;
	}
	truncate_setsize(inode, size);
	ll_inode_size_unlock(inode);
	return result;
}

static int vvp_io_setattr_trunc(const struct lu_env *env,
				const struct cl_io_slice *ios,
				struct inode *inode, loff_t size)
{
	inode_dio_wait(inode);
	return 0;
}

static int vvp_io_setattr_time(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct cl_attr *attr = ccc_env_thread_attr(env);
	int result;
	unsigned valid = CAT_CTIME;

	cl_object_attr_lock(obj);
	attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
	if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
		attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
		valid |= CAT_ATIME;
	}
	if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
		attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
		valid |= CAT_MTIME;
	}
	result = cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	return result;
}

static int vvp_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);
	int result = 0;

	mutex_lock(&inode->i_mutex);
	if (cl_io_is_trunc(io))
		result = vvp_io_setattr_trunc(env, ios, inode,
					io->u.ci_setattr.sa_attr.lvb_size);
	if (result == 0)
		result = vvp_io_setattr_time(env, ios);
	return result;
}

static void vvp_io_setattr_end(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);

	if (cl_io_is_trunc(io))
		/* Truncate in-memory pages - they must be clean pages
		 * because osc has already notified to destroy osc_extents. */
		vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);

	mutex_unlock(&inode->i_mutex);
}

static void vvp_io_setattr_fini(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	vvp_io_fini(env, ios);
}

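/*
 * Start a read: check the request against the current file size
 * (ccc_prep_size()), disable the kernel's generic read-ahead in favour of
 * Lustre's own window, then hand off to generic_file_read_iter() or
 * generic_file_splice_read() depending on the io subtype.
 */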
static int vvp_io_read_start(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct ccc_io *cio = cl2ccc_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct inode *inode = ccc_object_inode(obj);
	struct ll_ra_read *bead = &vio->cui_bead;
	struct file *file = cio->cui_fd->fd_file;

	int result;
	loff_t pos = io->u.ci_rd.rd.crw_pos;
	long cnt = io->u.ci_rd.rd.crw_count;
	long tot = cio->cui_tot_count;
	int exceed = 0;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

	if (!can_populate_pages(env, io, inode))
		return 0;

	result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
	if (result != 0)
		return result;
	else if (exceed != 0)
		goto out;

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
			 "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
			 inode->i_ino, cnt, pos, i_size_read(inode));

	/* turn off the kernel's read-ahead */
	cio->cui_fd->fd_file->f_ra.ra_pages = 0;

	/* initialize read-ahead window once per syscall */
	if (!vio->cui_ra_window_set) {
		vio->cui_ra_window_set = 1;
		bead->lrr_start = cl_index(obj, pos);
		/*
		 * XXX: explicit PAGE_CACHE_SIZE
		 */
		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
		ll_ra_read_in(file, bead);
	}

	/* BUG: 5972 */
	file_accessed(file);
	switch (vio->cui_io_subtype) {
	case IO_NORMAL:
		LASSERT(cio->cui_iocb->ki_pos == pos);
		result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
		break;
	case IO_SPLICE:
		result = generic_file_splice_read(file, &pos,
						  vio->u.splice.cui_pipe, cnt,
						  vio->u.splice.cui_flags);
		/* LU-1109: do splice read stripe by stripe, otherwise it may
		 * make nfsd stuck if this read occupies all internal pipe
		 * buffers. */
		io->ci_continue = 0;
		break;
	default:
		CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
		LBUG();
	}

out:
	if (result >= 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, READ);
		result = 0;
	}
	return result;
}

static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct ccc_io *cio = cl2ccc_io(env, ios);

	if (vio->cui_ra_window_set)
		ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

	vvp_io_fini(env, ios);
}

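/*
 * Start a write: for appends the current size is sampled and used as the
 * write position, then the iter is handed to generic_file_write_iter() and
 * the bytes written are accounted in io->ci_nob.
 */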
static int vvp_io_write_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct ccc_io *cio = cl2ccc_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct inode *inode = ccc_object_inode(obj);
	ssize_t result = 0;
	loff_t pos = io->u.ci_wr.wr.crw_pos;
	size_t cnt = io->u.ci_wr.wr.crw_count;

	if (!can_populate_pages(env, io, inode))
		return 0;

	if (cl_io_is_append(io)) {
		/*
		 * PARALLEL IO This has to be changed for parallel IO doing
		 * out-of-order writes.
		 */
		pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
		cio->cui_iocb->ki_pos = pos;
	} else {
		LASSERT(cio->cui_iocb->ki_pos == pos);
	}

	CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

	if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */
		result = 0;
	else
		result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);

	if (result > 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, WRITE);
		result = 0;
	}
	return result;
}

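/*
 * Let the kernel service the fault through filemap_fault() and translate
 * the VM_FAULT_* result: on success stash the locked page in ft_vmpage,
 * map SIGBUS/SIGSEGV to -EFAULT, OOM to -ENOMEM and RETRY to -EAGAIN.
 */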
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
	struct vm_fault *vmf = cfio->fault.ft_vmf;

	cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
	cfio->fault.ft_flags_valid = 1;

	if (vmf->page) {
		CDEBUG(D_PAGE,
		       "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
		       vmf->page, vmf->page->mapping, vmf->page->index,
		       (long)vmf->page->flags, page_count(vmf->page),
		       page_private(vmf->page), vmf->virtual_address);
		if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
			lock_page(vmf->page);
			cfio->fault.ft_flags |= VM_FAULT_LOCKED;
		}

		cfio->ft_vmpage = vmf->page;
		return 0;
	}

	if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
		return -EFAULT;
	}

	if (cfio->fault.ft_flags & VM_FAULT_OOM) {
		CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
		return -ENOMEM;
	}

	if (cfio->fault.ft_flags & VM_FAULT_RETRY)
		return -EAGAIN;

	CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
	return -EINVAL;
}

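/*
 * Handle a fault or mkwrite on a Lustre mapping: obtain the locked vmpage,
 * detect races with truncate, attach a cl_page, and for mkwrite add the
 * page to the client cache before it is allowed to become dirty.
 */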
static int vvp_io_fault_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct inode *inode = ccc_object_inode(obj);
	struct cl_fault_io *fio = &io->u.ci_fault;
	struct vvp_fault_io *cfio = &vio->u.fault;
	loff_t offset;
	int result = 0;
	struct page *vmpage = NULL;
	struct cl_page *page;
	loff_t size;
	pgoff_t last; /* last page in a file data region */

	if (fio->ft_executable &&
	    LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
		CWARN("binary "DFID
		      " changed while waiting for the page fault lock\n",
		      PFID(lu_object_fid(&obj->co_lu)));

	/* offset of the last byte on the page */
	offset = cl_offset(obj, fio->ft_index + 1) - 1;
	LASSERT(cl_index(obj, offset) == fio->ft_index);
	result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
	if (result != 0)
		return result;

	/* must return locked page */
	if (fio->ft_mkwrite) {
		LASSERT(cfio->ft_vmpage != NULL);
		lock_page(cfio->ft_vmpage);
	} else {
		result = vvp_io_kernel_fault(cfio);
		if (result != 0)
			return result;
	}

	vmpage = cfio->ft_vmpage;
	LASSERT(PageLocked(vmpage));

	if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
		ll_invalidate_page(vmpage);

	size = i_size_read(inode);
	/* Though we have already held a cl_lock upon this page, it can
	 * still be truncated locally. */
	if (unlikely((vmpage->mapping != inode->i_mapping) ||
		     (page_offset(vmpage) > size))) {
		CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

		/* return +1 to stop cl_io_loop() and ll_fault() will catch
		 * and retry. */
		result = +1;
		goto out;
	}

	if (fio->ft_mkwrite) {
		pgoff_t last_index;
		/*
		 * Capture the size while holding the lli_trunc_sem from above
		 * we want to make sure that we complete the mkwrite action
		 * while holding this lock. We need to make sure that we are
		 * not past the end of the file.
		 */
		last_index = cl_index(obj, size - 1);
		if (last_index < fio->ft_index) {
			CDEBUG(D_PAGE,
			       "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
			       vmpage->mapping, fio->ft_index, last_index);
			/*
			 * We need to return if we are
			 * past the end of the file. This will propagate
			 * up the call stack to ll_page_mkwrite where
			 * we will return VM_FAULT_NOPAGE. Any non-negative
			 * value returned here will be silently
			 * converted to 0. If the vmpage->mapping is null
			 * the error code would be converted back to ENODATA
			 * in ll_page_mkwrite0. Thus we return -ENODATA
			 * to handle both cases
			 */
			result = -ENODATA;
			goto out;
		}
	}

	page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
	if (IS_ERR(page)) {
		result = PTR_ERR(page);
		goto out;
	}

	/* if page is going to be written, we should add this page into cache
	 * earlier. */
	if (fio->ft_mkwrite) {
		wait_on_page_writeback(vmpage);
		if (set_page_dirty(vmpage)) {
			struct ccc_page *cp;

			/* vvp_page_assume() calls wait_on_page_writeback(). */
			cl_page_assume(env, io, page);

			cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
			vvp_write_pending(cl2ccc(obj), cp);

			/* Do not set Dirty bit here so that in case IO is
			 * started before the page is really made dirty, we
			 * still have chance to detect it. */
			result = cl_page_cache_add(env, io, page, CRT_WRITE);
			LASSERT(cl_page_is_owned(page, io));

			vmpage = NULL;
			if (result < 0) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				cl_page_disown(env, io, page);

				cl_page_put(env, page);

				/* we're in big trouble, what can we do now? */
				if (result == -EDQUOT)
					result = -ENOSPC;
				goto out;
			} else
				cl_page_disown(env, io, page);
		}
	}

	last = cl_index(obj, size - 1);
	/*
	 * The ft_index is only used in the case of
	 * a mkwrite action. We need to check
	 * our assertions are correct, since
	 * we should have caught this above
	 */
	LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
	if (fio->ft_index == last)
		/*
		 * Last page is mapped partially.
		 */
		fio->ft_nob = size - cl_offset(obj, fio->ft_index);
	else
		fio->ft_nob = cl_page_size(obj);

	lu_ref_add(&page->cp_reference, "fault", io);
	fio->ft_page = page;

out:
	/* return unlocked vmpage to avoid deadlocking */
	if (vmpage != NULL)
		unlock_page(vmpage);
	cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
	return result;
}

static int vvp_io_fsync_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	/* we should mark TOWRITE bit to each dirty page in radix tree to
	 * verify pages have been written, but this is difficult because of
	 * race. */
	return 0;
}

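/*
 * Queue a single page for read: update the read-ahead state, sanity-check
 * that the page is covered by a lock, then add it together with any
 * read-ahead pages to the io's 2-queue for submission.
 */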
static int vvp_io_read_page(const struct lu_env *env,
			    const struct cl_io_slice *ios,
			    const struct cl_page_slice *slice)
{
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = slice->cpl_obj;
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *page = slice->cpl_page;
	struct inode *inode = ccc_object_inode(obj);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
	struct ll_readahead_state *ras = &fd->fd_ras;
	struct page *vmpage = cp->cpg_page;
	struct cl_2queue *queue = &io->ci_queue;
	int rc;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
	LASSERT(slice->cpl_obj == obj);

	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ras_update(sbi, inode, ras, page->cp_index,
			   cp->cpg_defer_uptodate);

	/* Sanity check whether the page is protected by a lock. */
	rc = cl_page_is_under_lock(env, io, page);
	if (rc != -EBUSY) {
		CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
			       rc == -ENODATA ? "without a lock" :
			       "match failed", rc);
		if (rc != -ENODATA)
			return rc;
	}

	if (cp->cpg_defer_uptodate) {
		cp->cpg_ra_used = 1;
		cl_page_export(env, page, 1);
	}
	/*
	 * Add page into the queue even when it is marked uptodate above.
	 * This will unlock it automatically as part of cl_page_list_disown().
	 */
	cl_2queue_add(queue, page);
	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ll_readahead(env, io, ras,
			     vmpage->mapping, &queue->c2_qin, fd->fd_flags);

	return 0;
}

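/*
 * Submit one page for synchronous io of type \a crt via cl_io_submit_sync();
 * for reads the queued page is disowned afterwards, while for writes it is
 * left locked even on error.
 */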
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
			    struct cl_page *page, struct ccc_page *cp,
			    enum cl_req_type crt)
{
	struct cl_2queue *queue;
	int result;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	queue = &io->ci_queue;
	cl_2queue_init_page(queue, page);

	result = cl_io_submit_sync(env, io, crt, queue, 0);
	LASSERT(cl_page_is_owned(page, io));

	if (crt == CRT_READ)
		/*
		 * in CRT_WRITE case page is left locked even in case of
		 * error.
		 */
		cl_page_list_disown(env, io, &queue->c2_qin);
	cl_2queue_fini(env, queue);

	return result;
}

/**
 * Prepare partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
				  struct cl_object *obj, struct cl_page *pg,
				  struct ccc_page *cp,
				  unsigned from, unsigned to)
{
	struct cl_attr *attr = ccc_env_thread_attr(env);
	loff_t offset = cl_offset(obj, pg->cp_index);
	int result;

	cl_object_attr_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);
	if (result == 0) {
		/*
		 * If we are writing to a new page, no need to read old data.
		 * The extent locking will have updated the KMS, and for our
		 * purposes here we can treat it like i_size.
		 */
		if (attr->cat_kms <= offset) {
			char *kaddr = kmap_atomic(cp->cpg_page);

			memset(kaddr, 0, cl_page_size(obj));
			kunmap_atomic(kaddr);
		} else if (cp->cpg_defer_uptodate)
			cp->cpg_ra_used = 1;
		else
			result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
		/*
		 * In older implementations, obdo_refresh_inode is called here
		 * to update the inode because the write might modify the
		 * object info at OST. However, this has been proven useless,
		 * since LVB functions will be called when user space program
		 * tries to retrieve inode attribute. Also, see bug 15909 for
		 * details. -jay
		 */
		if (result == 0)
			cl_page_export(env, pg, 1);
	}
	return result;
}

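/*
 * ->cio_prepare_write(): make sure the affected part of the page is up to
 * date before the copy from user space, zeroing or reading it in as needed;
 * a full-page overwrite is left not-uptodate until commit_write.
 */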
static int vvp_io_prepare_write(const struct lu_env *env,
				const struct cl_io_slice *ios,
				const struct cl_page_slice *slice,
				unsigned from, unsigned to)
{
	struct cl_object *obj = slice->cpl_obj;
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *pg = slice->cpl_page;
	struct page *vmpage = cp->cpg_page;

	int result;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

	result = 0;

	CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
	if (!PageUptodate(vmpage)) {
		/*
		 * We're completely overwriting an existing page, so _don't_
		 * set it up to date until commit_write
		 */
		if (from == 0 && to == PAGE_CACHE_SIZE) {
			CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
			POISON_PAGE(page, 0x11);
		} else
			result = vvp_io_prepare_partial(env, ios->cis_io, obj,
							pg, cp, from, to);
	} else
		CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
	return result;
}

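/*
 * ->cio_commit_write(): add the newly written page to the client cache so
 * write-back starts early (see the comment in the body), fall back to a
 * synchronous write when the grant is exhausted, and update i_size.
 */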
static int vvp_io_commit_write(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       const struct cl_page_slice *slice,
			       unsigned from, unsigned to)
{
	struct cl_object *obj = slice->cpl_obj;
	struct cl_io *io = ios->cis_io;
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *pg = slice->cpl_page;
	struct inode *inode = ccc_object_inode(obj);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_inode_info *lli = ll_i2info(inode);
	struct page *vmpage = cp->cpg_page;

	int result;
	int tallyop;
	loff_t size;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == inode);

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
	CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

	/*
	 * queue a write for some time in the future the first time we
	 * dirty the page.
	 *
	 * This is different from what other file systems do: they usually
	 * just mark page (and some of its buffers) dirty and rely on
	 * balance_dirty_pages() to start a write-back. Lustre wants write-back
	 * to be started earlier for the following reasons:
	 *
	 * (1) with a large number of clients we need to limit the amount
	 * of cached data on the clients a lot;
	 *
	 * (2) large compute jobs generally want compute-only then io-only
	 * and the IO should complete as quickly as possible;
	 *
	 * (3) IO is batched up to the RPC size and is async until the
	 * client max cache is hit
	 * (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
	 *
	 */
	if (!PageDirty(vmpage)) {
		tallyop = LPROC_LL_DIRTY_MISSES;
		result = cl_page_cache_add(env, io, pg, CRT_WRITE);
		if (result == 0) {
			/* page was added into cache successfully. */
			set_page_dirty(vmpage);
			vvp_write_pending(cl2ccc(obj), cp);
		} else if (result == -EDQUOT) {
			pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
			bool need_clip = true;

			/*
			 * Client ran out of disk space grant. Possible
			 * strategies are:
			 *
			 * (a) do a sync write, renewing grant;
			 *
			 * (b) stop writing on this stripe, switch to the
			 * next one.
			 *
			 * (b) is a part of "parallel io" design that is the
			 * ultimate goal. (a) is what "old" client did, and
			 * what the new code continues to do for the time
			 * being.
			 */
			if (last_index > pg->cp_index) {
				to = PAGE_CACHE_SIZE;
				need_clip = false;
			} else if (last_index == pg->cp_index) {
				int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
				if (to < size_to)
					to = size_to;
			}
			if (need_clip)
				cl_page_clip(env, pg, 0, to);
			result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
			if (result)
				CERROR("Write page %lu of inode %p failed %d\n",
				       pg->cp_index, inode, result);
		}
	} else {
		tallyop = LPROC_LL_DIRTY_HITS;
		result = 0;
	}
	ll_stats_ops_tally(sbi, tallyop, 1);

	/* Inode should be marked DIRTY even if no new page was marked DIRTY
	 * because page could have been not flushed between 2 modifications.
	 * It is important the file is marked DIRTY as soon as the I/O is done.
	 * Indeed, when cache is flushed, file could be already closed and it
	 * is too late to warn the MDT.
	 * It is acceptable that file is marked DIRTY even if I/O is dropped
	 * for some reasons before being flushed to OST.
	 */
	if (result == 0) {
		spin_lock(&lli->lli_lock);
		lli->lli_flags |= LLIF_DATA_MODIFIED;
		spin_unlock(&lli->lli_lock);
	}

	size = cl_offset(obj, pg->cp_index) + to;

	ll_inode_size_lock(inode);
	if (result == 0) {
		if (size > i_size_read(inode)) {
			cl_isize_write_nolock(inode, size);
			CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       (unsigned long)size);
		}
		cl_page_export(env, pg, 1);
	} else {
		if (size > i_size_read(inode))
			cl_page_discard(env, io, pg);
	}
	ll_inode_size_unlock(inode);
	return result;
}

static const struct cl_io_operations vvp_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_fini      = vvp_io_read_fini,
			.cio_lock      = vvp_io_read_lock,
			.cio_start     = vvp_io_read_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_WRITE] = {
			.cio_fini      = vvp_io_fini,
			.cio_lock      = vvp_io_write_lock,
			.cio_start     = vvp_io_write_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_SETATTR] = {
			.cio_fini       = vvp_io_setattr_fini,
			.cio_iter_init  = vvp_io_setattr_iter_init,
			.cio_lock       = vvp_io_setattr_lock,
			.cio_start      = vvp_io_setattr_start,
			.cio_end        = vvp_io_setattr_end
		},
		[CIT_FAULT] = {
			.cio_fini      = vvp_io_fault_fini,
			.cio_iter_init = vvp_io_fault_iter_init,
			.cio_lock      = vvp_io_fault_lock,
			.cio_start     = vvp_io_fault_start,
			.cio_end       = ccc_io_end
		},
		[CIT_FSYNC] = {
			.cio_start  = vvp_io_fsync_start,
			.cio_fini   = vvp_io_fini
		},
		[CIT_MISC] = {
			.cio_fini   = vvp_io_fini
		}
	},
	.cio_read_page     = vvp_io_read_page,
	.cio_prepare_write = vvp_io_prepare_write,
	.cio_commit_write  = vvp_io_commit_write
};

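/*
 * Initialize the VVP io slice: attach vvp_io_ops, record the total byte
 * count and jobid for read/write io, and refresh the layout lock/version
 * unless the io ignores layout changes.
 */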
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io)
{
	struct vvp_io *vio = vvp_env_io(env);
	struct ccc_io *cio = ccc_env_io(env);
	struct inode *inode = ccc_object_inode(obj);
	int result;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	CL_IO_SLICE_CLEAN(cio, cui_cl);
	cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
	vio->cui_ra_window_set = 0;
	result = 0;
	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
		size_t count;
		struct ll_inode_info *lli = ll_i2info(inode);

		count = io->u.ci_rw.crw_count;
		/* "If nbyte is 0, read() will return 0 and have no other
		 * results." -- Single Unix Spec */
		if (count == 0)
			result = 1;
		else
			cio->cui_tot_count = count;

		/* for read/write, we store the jobid in the inode, and
		 * it'll be fetched by osc when building RPC.
		 *
		 * it's not accurate if the file is shared by different
		 * jobs.
		 */
		lustre_get_jobid(lli->lli_jobid);
	} else if (io->ci_type == CIT_SETATTR) {
		if (!cl_io_is_trunc(io))
			io->ci_lockreq = CILR_MANDATORY;
	}

	/* ignore layout change for generic CIT_MISC but not for glimpse.
	 * io context for glimpse must set ci_verify_layout to true,
	 * see cl_glimpse_size0() for details. */
	if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
		io->ci_ignore_layout = 1;

	/* Enqueue layout lock and get layout version. We need to do this
	 * even for operations requiring to open file, such as read and write,
	 * because it might not grant layout lock in IT_OPEN. */
	if (result == 0 && !io->ci_ignore_layout) {
		result = ll_layout_refresh(inode, &cio->cui_layout_gen);
		if (result == -ENOENT)
			/* If the inode on MDS has been removed, but the objects
			 * on OSTs haven't been destroyed (async unlink), layout
			 * fetch will return -ENOENT, we'd ignore this error
			 * and continue with dirty flush. LU-3230. */
			result = 0;
		if (result < 0)
			CERROR("%s: refresh file layout " DFID " error %d.\n",
			       ll_get_fsname(inode->i_sb, NULL, 0),
			       PFID(lu_object_fid(&obj->co_lu)), result);
	}

	return result;
}

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	/* Calling just for assertion */
	cl2ccc_io(env, slice);
	return vvp_env_io(env);
}