/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "vvp_internal.h"
/*****************************************************************************
 *
 * Page operations.
 *
 */
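/*
 * A ccc_page keeps a reference on its backing VM page for its whole
 * lifetime: the reference is taken in vvp_page_init() and dropped here,
 * in the common fini path, when the cl_page slice is torn down.
 */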
static void vvp_page_fini_common(struct ccc_page *cp)
{
        struct page *vmpage = cp->cpg_page;

        LASSERT(vmpage);
        put_page(vmpage);       /* drop the reference taken in vvp_page_init() */
}
static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        struct page *vmpage = cp->cpg_page;

        /*
         * vmpage->private was already cleared when the page was moved into
         * the CPS_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(cp);
}
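/*
 * Page ownership at the VVP layer maps directly onto the VM page lock:
 * cpo_own takes PG_locked (with trylock semantics in the nonblock case)
 * and waits for any writeback in flight, so an owned page is quiescent.
 */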
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct ccc_page *vpg = cl2ccc_page(slice);
        struct page *vmpage = vpg->cpg_page;

        LASSERT(vmpage);
        if (nonblock) {
                if (!trylock_page(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);

        return 0;
}
static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}
static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(PageLocked(vmpage));
}
static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(PageLocked(vmpage));

        unlock_page(cl2vm_page(slice));
}
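/*
 * cpo_discard drops a page without writing it back. A page that was
 * brought in by readahead but never consumed is counted against the
 * readahead statistics before being truncated from its mapping.
 */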
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);
        struct address_space *mapping;
        struct ccc_page *cpg = cl2ccc_page(slice);

        LASSERT(PageLocked(vmpage));

        mapping = vmpage->mapping;

        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
                ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);

        /*
         * truncate_complete_page() calls
         * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
         */
        truncate_complete_page(mapping, vmpage);
}
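/*
 * cpo_unmap tears down any user-space mappings of the file range backed
 * by this page before the page itself is released.
 */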
static int vvp_page_unmap(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);
        __u64 offset;

        LASSERT(PageLocked(vmpage));

        offset = vmpage->index << PAGE_SHIFT;

        /*
         * XXX is it safe to call this with the page lock held?
         */
        ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);

        return 0;
}
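/*
 * cpo_delete severs the vmpage->private link to the cl_page that was set
 * up in vvp_page_init(); the reverse reference is dropped later, in
 * vvp_page_fini().
 */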
static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct page *vmpage = cl2vm_page(slice);
        struct inode *inode = vmpage->mapping->host;
        struct cl_object *obj = slice->cpl_obj;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
        LASSERT(inode == ccc_object_inode(obj));

        vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * The reference from vmpage to cl_page is removed, but the reference
         * back is still here. It is removed later in vvp_page_fini().
         */
}
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}
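/*
 * By convention, cpo_is_vmlocked reports the state of the VM page lock as
 * -EBUSY (locked) or -ENODATA (unlocked) rather than as a boolean.
 */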
static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}
static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        /* Skip the page already marked as PG_uptodate. */
        return PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0;
}
static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));

        /*
         * The ll_writepage path is not a sync write, so the page writeback
         * flag needs to be set here.
         */
        if (!pg->cp_sync_io)
                set_page_writeback(vmpage);

        vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));

        return 0;
}
/**
 * Handles page transfer errors at VM level.
 *
 * This takes inode as a separate argument, because the inode on which the
 * error is to be set can be different from the \a vmpage inode in the case
 * of direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
        struct ccc_object *obj = cl_inode2ccc(inode);

        if (ioret == 0) {
                ClearPageError(vmpage);
                obj->cob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);

                if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
                    obj->cob_discard_page_warned == 0) {
                        obj->cob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(vmpage, ioret);
                }
        }
}
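/*
 * Read completion: on success the page is exported to the VM as uptodate
 * unless readahead deferred that decision; async reads also unlock the
 * page here, while sync IO unlocks it on its own path.
 */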
static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        struct page *vmpage = cp->cpg_page;
        struct cl_page *page = cl_page_top(slice->cpl_page);
        struct inode *inode = ccc_object_inode(page->cp_obj);

        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (cp->cpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!cp->cpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else {
                cp->cpg_defer_uptodate = 0;
        }

        if (!page->cp_sync_io)
                unlock_page(vmpage);
}
static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *pg = slice->cpl_page;
        struct page *vmpage = cp->cpg_page;

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        /*
         * TODO: It actually makes sense to add the page back into the oap
         * pending list, so that it does not need to be taken off the SoM
         * write pending list when we merely hit a recoverable error such
         * as -ENOMEM.
         * To implement this, we just need to return a non-zero value from
         * the ->cpo_completion() method. The underlying transfer should be
         * notified and then re-add the page into the pending transfer
         * queue. -jay
         */

        cp->cpg_write_queued = 0;
        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

        if (pg->cp_sync_io) {
                LASSERT(PageLocked(vmpage));
                LASSERT(!PageWriteback(vmpage));
        } else {
                LASSERT(PageWriteback(vmpage));
                /*
                 * Only mark the page in error for async writes, because
                 * applications won't wait for the IO to finish.
                 */
                vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);

                end_page_writeback(vmpage);
        }
}
/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out as
 * a part of transfer. This function try-locks the page. If try-lock fails,
 * the page is owned by some concurrent IO and should be skipped (this is bad,
 * but hopefully a rare situation, as it usually results in the transfer being
 * shorter than possible).
 *
 * \retval 0       success, page can be placed into transfer
 *
 * \retval -EAGAIN page is either used by concurrent IO or has been
 *                 truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(pg->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the radix tree. */
                set_page_writeback(vmpage);
                vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /*
                 * Is it possible for osc_flush_async_page() to already
                 * make it ready?
                 */
                result = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n",
                              pg->cp_state);
                LBUG();
        }
        unlock_page(vmpage);

        return result;
}
static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct ccc_page *vp = cl2ccc_page(slice);
        struct page *vmpage = vp->cpg_page;

        (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
                   vp->cpg_write_queued, vmpage);
        if (vmpage) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }
        (*printer)(env, cookie, "\n");

        return 0;
}
static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_unmap         = vvp_page_unmap,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
                        .cpo_make_ready = ccc_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
                        .cpo_make_ready = vvp_page_make_ready,
                },
        },
};
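/*
 * Transient pages (CPT_TRANSIENT, used for direct IO) are never inserted
 * into the page cache and exist only while the inode lock is held, which
 * is what vvp_transient_page_verify() asserts.
 */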
static void vvp_transient_page_verify(const struct cl_page *page)
{
        struct inode *inode = ccc_object_inode(page->cp_obj);

        LASSERT(!inode_trylock(inode));
}
static int vvp_transient_page_own(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused, int nonblock)
{
        vvp_transient_page_verify(slice->cpl_page);

        return 0;
}
static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * For transient pages, remove it from the radix tree.
         */
        cl_page_delete(env, page);
}
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode *inode = ccc_object_inode(slice->cpl_obj);
        int locked;

        locked = !inode_trylock(inode);
        if (!locked)
                inode_unlock(inode);

        return locked ? -EBUSY : -ENODATA;
}
static void
vvp_transient_page_completion(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct ccc_object *clobj = cl2ccc(clp->cp_obj);

        vvp_page_fini_common(cp);
        LASSERT(!inode_trylock(clobj->cob_inode));
        clobj->cob_transient_pages--;
}
static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = ccc_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = ccc_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                },
        },
};
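/*
 * vvp_page_init() attaches the VVP slice to a freshly created cl_page:
 * cacheable pages are linked to their vmpage through vmpage->private,
 * while transient pages only bump the per-object transient page count
 * under the inode lock.
 */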
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *page, struct page *vmpage)
{
        struct ccc_page *cpg = cl_object_page_slice(obj, page);

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        cpg->cpg_page = vmpage;
        get_page(vmpage);

        INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
        if (page->cp_type == CPT_CACHEABLE) {
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
                cl_page_slice_add(page, &cpg->cpg_cl, obj, &vvp_page_ops);
        } else {
                struct ccc_object *clobj = cl2ccc(obj);

                LASSERT(!inode_trylock(clobj->cob_inode));
                cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                  &vvp_transient_page_ops);
                clobj->cob_transient_pages++;
        }

        return 0;
}