/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Page.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include "../../include/linux/libcfs/libcfs.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include <linux/list.h>

#include "../include/cl_object.h"
#include "cl_internal.h"

static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);

# define PASSERT(env, page, expr)					   \
	do {								   \
		if (unlikely(!(expr))) {				   \
			CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
			LASSERT(0);					   \
		}							   \
	} while (0)

# define PINVRNT(env, page, exp) \
	((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))

/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain initial reference to previously
 * unreferenced cached object. It can be called only if concurrent page
 * reclamation is somehow prevented, e.g., by keeping a lock on a VM page,
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
	LASSERT(atomic_read(&page->cp_ref) > 0);
	atomic_inc(&page->cp_ref);
}

/**
 * Returns a slice within a page, corresponding to the given layer in the
 * device stack.
 *
 * \see cl_lock_at()
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *page,
		   const struct lu_device_type *dtype)
{
	const struct cl_page_slice *slice;

	list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
		if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
			return slice;
	}
	return NULL;
}

static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
	struct cl_object *obj = page->cp_obj;

	PASSERT(env, page, list_empty(&page->cp_batch));
	PASSERT(env, page, !page->cp_owner);
	PASSERT(env, page, page->cp_state == CPS_FREEING);

	while (!list_empty(&page->cp_layers)) {
		struct cl_page_slice *slice;

		slice = list_entry(page->cp_layers.next,
				   struct cl_page_slice, cpl_linkage);
		list_del_init(page->cp_layers.next);
		if (unlikely(slice->cpl_ops->cpo_fini))
			slice->cpl_ops->cpo_fini(env, slice);
	}
	lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
	cl_object_put(env, obj);
	lu_ref_fini(&page->cp_reference);
	kfree(page);
}

/**
 * Helper function updating page state. This is the only place in the code
 * where cl_page::cp_state field is mutated.
 */
static inline void cl_page_state_set_trust(struct cl_page *page,
					   enum cl_page_state state)
{
	/* bypass const. */
	*(enum cl_page_state *)&page->cp_state = state;
}

struct cl_page *cl_page_alloc(const struct lu_env *env,
			      struct cl_object *o, pgoff_t ind,
			      struct page *vmpage,
			      enum cl_page_type type)
{
	struct cl_page *page;
	struct lu_object_header *head;

	page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS);
	if (page) {
		int result = 0;

		atomic_set(&page->cp_ref, 1);
		page->cp_obj = o;
		cl_object_get(o);
		lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
				     page);
		page->cp_vmpage = vmpage;
		cl_page_state_set_trust(page, CPS_CACHED);
		page->cp_type = type;
		INIT_LIST_HEAD(&page->cp_layers);
		INIT_LIST_HEAD(&page->cp_batch);
		lu_ref_init(&page->cp_reference);
		head = o->co_lu.lo_header;
		list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
			if (o->co_ops->coo_page_init) {
				result = o->co_ops->coo_page_init(env, o, page,
								  ind);
				if (result != 0) {
					cl_page_delete0(env, page);
					cl_page_free(env, page);
					page = ERR_PTR(result);
					break;
				}
			}
		}
	} else {
		page = ERR_PTR(-ENOMEM);
	}
	return page;
}

/**
 * Returns a cl_page with index \a idx at the object \a o, and associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First, a
 * cache (implemented as a per-object radix tree) is consulted. If page is
 * found there, it is returned immediately. Otherwise new page is allocated
 * and returned. In any case, additional reference to page is acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
struct cl_page *cl_page_find(const struct lu_env *env,
			     struct cl_object *o,
			     pgoff_t idx, struct page *vmpage,
			     enum cl_page_type type)
{
	struct cl_page *page = NULL;
	struct cl_object_header *hdr;

	LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);

	hdr = cl_object_header(o);

	CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
	       idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
	/* fast path. */
	if (type == CPT_CACHEABLE) {
		/*
		 * vmpage lock is used to protect the child/parent
		 * relationship.
		 */
		KLASSERT(PageLocked(vmpage));
		/*
		 * cl_vmpage_page() can be called here without any locks as
		 *
		 *     - "vmpage" is locked (which prevents ->private from
		 *       concurrent updates), and
		 *
		 *     - "o" cannot be destroyed while current thread holds a
		 *       reference on it.
		 */
		page = cl_vmpage_page(vmpage, o);
		if (page)
			return page;
	}

	/* allocate and initialize cl_page */
	page = cl_page_alloc(env, o, idx, vmpage, type);
	return page;
}
EXPORT_SYMBOL(cl_page_find);

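/*
 * Example (illustrative sketch only, not part of the original file): looking
 * up or creating the cl_page for a locked VM page. "env" and "obj" stand for
 * caller-provided context; error handling follows the ERR_PTR() convention
 * used by cl_page_alloc() above.
 *
 *	struct cl_page *page;
 *
 *	lock_page(vmpage);
 *	page = cl_page_find(env, obj, vmpage->index, vmpage, CPT_CACHEABLE);
 *	if (IS_ERR(page)) {
 *		unlock_page(vmpage);
 *		return PTR_ERR(page);
 *	}
 *	... use page ...
 *	cl_page_put(env, page);
 */
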
static inline int cl_page_invariant(const struct cl_page *pg)
{
	return cl_page_in_use_noref(pg);
}

static void cl_page_state_set0(const struct lu_env *env,
			       struct cl_page *page, enum cl_page_state state)
{
	enum cl_page_state old;

	/*
	 * Matrix of allowed state transitions [old][new], for sanity
	 * checking. Omitted entries default to 0 (transition forbidden).
	 */
	static const int allowed_transitions[CPS_NR][CPS_NR] = {
		[CPS_CACHED] = {
			[CPS_OWNED]   = 1, /* io finds existing cached page */
			[CPS_PAGEOUT] = 1, /* write-out from the cache */
			[CPS_FREEING] = 1, /* eviction on the memory pressure */
		},
		[CPS_OWNED] = {
			[CPS_CACHED]  = 1, /* release to the cache */
			[CPS_PAGEIN]  = 1, /* start read immediately */
			[CPS_PAGEOUT] = 1, /* start write immediately */
			[CPS_FREEING] = 1, /* lock invalidation or truncate */
		},
		[CPS_PAGEIN] = {
			[CPS_CACHED]  = 1, /* io completion */
		},
		[CPS_PAGEOUT] = {
			[CPS_CACHED]  = 1, /* io completion */
		},
		[CPS_FREEING] = {
			/* no transitions out of CPS_FREEING */
		},
	};

	old = page->cp_state;
	PASSERT(env, page, allowed_transitions[old][state]);
	CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
	PASSERT(env, page, page->cp_state == old);
	PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner));
	cl_page_state_set_trust(page, state);
}

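/*
 * Reading the matrix above, a typical write-back cycle of a cached page is
 *
 *	CPS_CACHED  -> CPS_OWNED    (io finds existing cached page)
 *	CPS_OWNED   -> CPS_PAGEOUT  (start write immediately)
 *	CPS_PAGEOUT -> CPS_CACHED   (io completion)
 *
 * while lock invalidation or truncate takes CPS_OWNED -> CPS_FREEING, out of
 * which no further transition is allowed.
 */
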
static void cl_page_state_set(const struct lu_env *env,
			      struct cl_page *page, enum cl_page_state state)
{
	cl_page_state_set0(env, page, state);
}

/**
 * Acquires an additional reference to a page.
 *
 * This can be called only by caller already possessing a reference to \a
 * page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
	cl_page_get_trust(page);
}
EXPORT_SYMBOL(cl_page_get);

/**
 * Releases a reference to a page.
 *
 * When last reference is released, page is returned to the cache, unless it
 * is in cl_page_state::CPS_FREEING state, in which case it is immediately
 * destroyed.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
	CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
		       atomic_read(&page->cp_ref));

	if (atomic_dec_and_test(&page->cp_ref)) {
		LASSERT(page->cp_state == CPS_FREEING);

		LASSERT(atomic_read(&page->cp_ref) == 0);
		PASSERT(env, page, !page->cp_owner);
		PASSERT(env, page, list_empty(&page->cp_batch));
		/*
		 * Page is no longer reachable by other threads. Tear
		 * it down.
		 */
		cl_page_free(env, page);
	}
}
EXPORT_SYMBOL(cl_page_put);

/**
 * Returns a cl_page associated with a VM page, and given cl_object.
 */
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
	struct cl_page *page;

	KLASSERT(PageLocked(vmpage));

	/*
	 * NOTE: absence of races and liveness of data are guaranteed by page
	 *	 lock on a "vmpage". That works because object destruction has
	 *	 bottom-to-top pass.
	 */
	page = (struct cl_page *)vmpage->private;
	if (page) {
		cl_page_get_trust(page);
		LASSERT(page->cp_type == CPT_CACHEABLE);
	}
	return page;
}
EXPORT_SYMBOL(cl_vmpage_page);

const struct cl_page_slice *cl_page_at(const struct cl_page *page,
				       const struct lu_device_type *dtype)
{
	return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);

#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)

#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)			\
({									\
	const struct lu_env        *__env  = (_env);			\
	struct cl_page             *__page = (_page);			\
	const struct cl_page_slice *__scan;				\
	int                         __result;				\
	ptrdiff_t                   __op   = (_op);			\
	int                       (*__method)_proto;			\
									\
	__result = 0;							\
	list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {	\
		__method = *(void **)((char *)__scan->cpl_ops + __op);	\
		if (__method) {						\
			__result = (*__method)(__env, __scan, ## __VA_ARGS__); \
			if (__result != 0)				\
				break;					\
		}							\
	}								\
	if (__result > 0)						\
		__result = 0;						\
	__result;							\
})

#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)			\
do {									\
	const struct lu_env        *__env  = (_env);			\
	struct cl_page             *__page = (_page);			\
	const struct cl_page_slice *__scan;				\
	ptrdiff_t                   __op   = (_op);			\
	void                      (*__method)_proto;			\
									\
	list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {	\
		__method = *(void **)((char *)__scan->cpl_ops + __op);	\
		if (__method)						\
			(*__method)(__env, __scan, ## __VA_ARGS__);	\
	}								\
} while (0)

#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)		\
do {									\
	const struct lu_env        *__env  = (_env);			\
	struct cl_page             *__page = (_page);			\
	const struct cl_page_slice *__scan;				\
	ptrdiff_t                   __op   = (_op);			\
	void                      (*__method)_proto;			\
									\
	list_for_each_entry_reverse(__scan, &__page->cp_layers, cpl_linkage) { \
		__method = *(void **)((char *)__scan->cpl_ops + __op);	\
		if (__method)						\
			(*__method)(__env, __scan, ## __VA_ARGS__);	\
	}								\
} while (0)

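/*
 * How the pieces fit together (illustrative note, not in the original file):
 * CL_PAGE_OP() turns a method name into a byte offset within struct
 * cl_page_operations, and the CL_PAGE_INVOKE*() macros load the function
 * pointer found at that offset in each slice's cpl_ops. For instance,
 * cl_page_own0() below effectively does
 *
 *	CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
 *		       (const struct lu_env *,
 *			const struct cl_page_slice *, struct cl_io *, int),
 *		       io, nonblock);
 *
 * which calls every layer's cpo_own() top-to-bottom, stopping at the first
 * non-zero result.
 */
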
static int cl_page_invoke(const struct lu_env *env,
			  struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
	PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
	return CL_PAGE_INVOKE(env, page, op,
			      (const struct lu_env *,
			       const struct cl_page_slice *, struct cl_io *),
			      io);
}

static void cl_page_invoid(const struct lu_env *env,
			   struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
	PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
	CL_PAGE_INVOID(env, page, op,
		       (const struct lu_env *,
			const struct cl_page_slice *, struct cl_io *), io);
}

static void cl_page_owner_clear(struct cl_page *page)
{
	if (page->cp_owner) {
		LASSERT(page->cp_owner->ci_owned_nr > 0);
		page->cp_owner->ci_owned_nr--;
		page->cp_owner = NULL;
	}
}

static void cl_page_owner_set(struct cl_page *page)
{
	page->cp_owner->ci_owned_nr++;
}

void cl_page_disown0(const struct lu_env *env,
		     struct cl_io *io, struct cl_page *pg)
{
	enum cl_page_state state;

	state = pg->cp_state;
	PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
	PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
	cl_page_owner_clear(pg);

	if (state == CPS_OWNED)
		cl_page_state_set(env, pg, CPS_CACHED);
	/*
	 * Completion call-backs are executed in the bottom-up order, so that
	 * uppermost layer (llite), responsible for VFS/VM interaction runs
	 * last and can release locks safely.
	 */
	CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
			       (const struct lu_env *,
				const struct cl_page_slice *, struct cl_io *),
			       io);
}

/**
 * returns true, iff page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
	struct cl_io *top = cl_io_top((struct cl_io *)io);

	LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
	return pg->cp_state == CPS_OWNED && pg->cp_owner == top;
}
EXPORT_SYMBOL(cl_page_is_owned);

/**
 * Try to own a page by IO.
 *
 * Waits until page is in cl_page_state::CPS_CACHED state, and then switch it
 * into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post result == 0 iff cl_page_is_owned(pg, io)
 *
 * \retval 0   success
 *
 * \retval -ve failure, e.g., page was destroyed (and landed in
 *	       cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
 *	       or, page was owned by another thread, or in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_operations::cpo_own()
 * \see cl_page_own_try()
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
			struct cl_page *pg, int nonblock)
{
	int result;

	PINVRNT(env, pg, !cl_page_is_owned(pg, io));

	if (pg->cp_state == CPS_FREEING) {
		result = -ENOENT;
	} else {
		result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
					(const struct lu_env *,
					 const struct cl_page_slice *,
					 struct cl_io *, int),
					io, nonblock);
		if (result == 0) {
			PASSERT(env, pg, !pg->cp_owner);
			pg->cp_owner = cl_io_top(io);
			cl_page_owner_set(pg);
			if (pg->cp_state != CPS_FREEING) {
				cl_page_state_set(env, pg, CPS_OWNED);
			} else {
				cl_page_disown0(env, io, pg);
				result = -ENOENT;
			}
		}
	}
	PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
	return result;
}

/**
 * Own a page, might be blocked.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
	return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);

/**
 * Nonblock version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
		    struct cl_page *pg)
{
	return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);

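/*
 * Example (illustrative sketch; "env", "io" and "page" are assumed caller
 * context): owning a page around an operation:
 *
 *	if (cl_page_own(env, io, page) == 0) {
 *		... page is in CPS_OWNED; operate on it ...
 *		cl_page_disown(env, io, page);
 *	}
 *
 * cl_page_own_try() behaves the same, but returns a negative errno instead
 * of blocking when the page cannot be owned immediately.
 */
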
/**
 * Assume page ownership.
 *
 * Called when page is already locked by the hosting VM.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_assume()
 */
void cl_page_assume(const struct lu_env *env,
		    struct cl_io *io, struct cl_page *pg)
{
	PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));

	cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
	PASSERT(env, pg, !pg->cp_owner);
	pg->cp_owner = cl_io_top(io);
	cl_page_owner_set(pg);
	cl_page_state_set(env, pg, CPS_OWNED);
}
EXPORT_SYMBOL(cl_page_assume);

/**
 * Releases page ownership without unlocking the page.
 *
 * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
 * underlying VM page (as VM is supposed to do this itself).
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_assume()
 */
void cl_page_unassume(const struct lu_env *env,
		      struct cl_io *io, struct cl_page *pg)
{
	PINVRNT(env, pg, cl_page_is_owned(pg, io));
	PINVRNT(env, pg, cl_page_invariant(pg));

	cl_page_owner_clear(pg);
	cl_page_state_set(env, pg, CPS_CACHED);
	CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
			       (const struct lu_env *,
				const struct cl_page_slice *, struct cl_io *),
			       io);
}
EXPORT_SYMBOL(cl_page_unassume);

/**
 * Releases page ownership.
 *
 * Moves page into cl_page_state::CPS_CACHED.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_disown()
 */
void cl_page_disown(const struct lu_env *env,
		    struct cl_io *io, struct cl_page *pg)
{
	PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
		pg->cp_state == CPS_FREEING);

	cl_page_disown0(env, io, pg);
}
EXPORT_SYMBOL(cl_page_disown);

/**
 * Called when page is to be removed from the object, e.g., as a result of
 * truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
		     struct cl_io *io, struct cl_page *pg)
{
	PINVRNT(env, pg, cl_page_is_owned(pg, io));
	PINVRNT(env, pg, cl_page_invariant(pg));

	cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
}
EXPORT_SYMBOL(cl_page_discard);

/**
 * Version of cl_page_delete() that can be called for not fully constructed
 * pages, e.g., in an error handling cl_page_find()->cl_page_delete0()
 * path. Doesn't check page invariant.
 */
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
	PASSERT(env, pg, pg->cp_state != CPS_FREEING);

	/*
	 * Sever all ways to obtain new pointers to @pg.
	 */
	cl_page_owner_clear(pg);

	cl_page_state_set0(env, pg, CPS_FREEING);

	CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
			       (const struct lu_env *,
				const struct cl_page_slice *));
}

/**
 * Called when a decision is made to throw page out of memory.
 *
 * Notifies all layers about page destruction by calling
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves page into cl_page_state::CPS_FREEING state (this is the only place
 * where transition to this state happens).
 *
 * Eliminates all venues through which new references to the page can be
 * obtained:
 *
 *     - removes page from the radix trees,
 *
 *     - breaks linkage from VM page to cl_page.
 *
 * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
 * drain after some time, at which point page will be recycled.
 *
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
	PINVRNT(env, pg, cl_page_invariant(pg));
	cl_page_delete0(env, pg);
}
EXPORT_SYMBOL(cl_page_delete);

/**
 * Marks page up-to-date.
 *
 * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
 * layer responsible for VM interaction has to mark/clear page as up-to-date
 * by the \a uptodate argument.
 *
 * \see cl_page_operations::cpo_export()
 */
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
	PINVRNT(env, pg, cl_page_invariant(pg));
	CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
		       (const struct lu_env *,
			const struct cl_page_slice *, int), uptodate);
}
EXPORT_SYMBOL(cl_page_export);

/**
 * Returns true, iff \a pg is VM locked in a suitable sense by the calling
 * thread.
 */
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
	int result;
	const struct cl_page_slice *slice;

	slice = container_of(pg->cp_layers.next,
			     const struct cl_page_slice, cpl_linkage);
	PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
	/*
	 * Call ->cpo_is_vmlocked() directly instead of going through
	 * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
	 * cl_page_invariant().
	 */
	result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
	PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
	return result == -EBUSY;
}
EXPORT_SYMBOL(cl_page_is_vmlocked);

static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
	return crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN;
}

static void cl_page_io_start(const struct lu_env *env,
			     struct cl_page *pg, enum cl_req_type crt)
{
	/*
	 * Page is queued for IO, change its state.
	 */
	cl_page_owner_clear(pg);
	cl_page_state_set(env, pg, cl_req_type_state(crt));
}

/**
 * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
 * called top-to-bottom. Every layer either agrees to submit this page (by
 * returning 0), or requests to omit this page (by returning -EALREADY). Layer
 * handling interactions with the VM also has to inform VM that page is under
 * transfer now.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
		 struct cl_page *pg, enum cl_req_type crt)
{
	int result;

	PINVRNT(env, pg, cl_page_is_owned(pg, io));
	PINVRNT(env, pg, cl_page_invariant(pg));
	PINVRNT(env, pg, crt < CRT_NR);

	/*
	 * XXX this has to be called bottom-to-top, so that llite can set up
	 * PG_writeback without risking other layers deciding to skip this
	 * page.
	 */
	result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
	if (result == 0)
		cl_page_io_start(env, pg, crt);

	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
	return result;
}
EXPORT_SYMBOL(cl_page_prep);

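/*
 * In outline (illustrative note, not in the original file), the
 * immediate-transfer path driven by the functions above and below is:
 *
 *	cl_page_own() + cl_page_prep()	CPS_OWNED -> CPS_PAGEIN/CPS_PAGEOUT
 *	... transfer runs ...
 *	cl_page_completion()		CPS_PAGEIN/CPS_PAGEOUT -> CPS_CACHED
 */
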
/**
 * Notify layers about transfer completion.
 *
 * Invoked by transfer sub-system (which is a part of osc) to notify layers
 * that a transfer, of which this page is a part of, has completed.
 *
 * Completion call-backs are executed in the bottom-up order, so that
 * uppermost layer (llite), responsible for the VFS/VM interaction runs last
 * and can release locks safely.
 *
 * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 * \post pg->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
			struct cl_page *pg, enum cl_req_type crt, int ioret)
{
	struct cl_sync_io *anchor = pg->cp_sync_io;

	PASSERT(env, pg, crt < CRT_NR);
	PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));

	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);

	cl_page_state_set(env, pg, CPS_CACHED);
	CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
			       (const struct lu_env *,
				const struct cl_page_slice *, int), ioret);
	if (anchor) {
		LASSERT(pg->cp_sync_io == anchor);
		pg->cp_sync_io = NULL;
		cl_sync_io_note(env, anchor, ioret);
	}
}
EXPORT_SYMBOL(cl_page_completion);

/**
 * Notify layers that transfer formation engine decided to yank this page from
 * the cache and to make it a part of a transfer.
 *
 * \pre  pg->cp_state == CPS_CACHED
 * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 *
 * \see cl_page_operations::cpo_make_ready()
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
		       enum cl_req_type crt)
{
	int result;

	PINVRNT(env, pg, crt < CRT_NR);

	result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
				(const struct lu_env *,
				 const struct cl_page_slice *));
	if (result == 0) {
		PASSERT(env, pg, pg->cp_state == CPS_CACHED);
		cl_page_io_start(env, pg, crt);
	}
	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
	return result;
}
EXPORT_SYMBOL(cl_page_make_ready);

/**
 * Called when a page is being written back on the kernel's initiative.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_flush()
 */
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
		  struct cl_page *pg)
{
	int result;

	PINVRNT(env, pg, cl_page_is_owned(pg, io));
	PINVRNT(env, pg, cl_page_invariant(pg));

	result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));

	CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
	return result;
}
EXPORT_SYMBOL(cl_page_flush);

/**
 * Tells transfer engine that only part of a page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
		  int from, int to)
{
	PINVRNT(env, pg, cl_page_invariant(pg));

	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
	CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
		       (const struct lu_env *,
			const struct cl_page_slice *, int, int),
		       from, to);
}
EXPORT_SYMBOL(cl_page_clip);

/**
 * Prints human readable representation of \a pg header through \a printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
			  lu_printer_t printer, const struct cl_page *pg)
{
	(*printer)(env, cookie,
		   "page@%p[%d %p %d %d %p]\n",
		   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
		   pg->cp_state, pg->cp_type,
		   pg->cp_owner);
}
EXPORT_SYMBOL(cl_page_header_print);

/**
 * Prints human readable representation of \a pg through \a printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
		   lu_printer_t printer, const struct cl_page *pg)
{
	cl_page_header_print(env, cookie, printer, pg);
	CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
		       (const struct lu_env *env,
			const struct cl_page_slice *slice,
			void *cookie, lu_printer_t p), cookie, printer);
	(*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);

/**
 * Cancel a page which is still in a transfer.
 */
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
	return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
			      (const struct lu_env *,
			       const struct cl_page_slice *));
}

/**
 * Converts a page index within object \a obj into a byte offset.
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
	return (loff_t)idx << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
	return offset >> PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);

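/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12),
 * cl_offset(obj, 3) == 3 << 12 == 12288 and
 * cl_index(obj, 12288) == 12288 >> 12 == 3,
 * so the two conversions are inverses on page-aligned offsets.
 */
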
size_t cl_page_size(const struct cl_object *obj)
{
	return 1UL << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);

/**
 * Adds page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add a
 * per-layer state to the page. New state is added at the end of
 * cl_page::cp_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
		       struct cl_object *obj, pgoff_t index,
		       const struct cl_page_operations *ops)
{
	list_add_tail(&slice->cpl_linkage, &page->cp_layers);
	slice->cpl_obj = obj;
	slice->cpl_index = index;
	slice->cpl_ops = ops;
	slice->cpl_page = page;
}
EXPORT_SYMBOL(cl_page_slice_add);

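/*
 * Example (illustrative sketch, not in the original file): a layer's
 * coo_page_init() method would locate its per-page slice and register it.
 * "my_page", "mp_cl" and "my_page_ops" are hypothetical names standing in
 * for a layer's real types.
 *
 *	static int my_page_init(const struct lu_env *env,
 *				struct cl_object *obj,
 *				struct cl_page *page, pgoff_t index)
 *	{
 *		struct my_page *mp = cl_object_page_slice(obj, page);
 *
 *		cl_page_slice_add(page, &mp->mp_cl, obj, index, &my_page_ops);
 *		return 0;
 *	}
 */
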
/**
 * Allocate and initialize cl_cache, called by ll_init_sbi().
 */
struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
{
	struct cl_client_cache *cache = NULL;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return NULL;

	/* Initialize cache data */
	atomic_set(&cache->ccc_users, 1);
	cache->ccc_lru_max = lru_page_max;
	atomic_long_set(&cache->ccc_lru_left, lru_page_max);
	spin_lock_init(&cache->ccc_lru_lock);
	INIT_LIST_HEAD(&cache->ccc_lru);

	atomic_long_set(&cache->ccc_unstable_nr, 0);
	init_waitqueue_head(&cache->ccc_unstable_waitq);

	return cache;
}
EXPORT_SYMBOL(cl_cache_init);

/**
 * Increase cl_cache refcount
 */
void cl_cache_incref(struct cl_client_cache *cache)
{
	atomic_inc(&cache->ccc_users);
}
EXPORT_SYMBOL(cl_cache_incref);

/**
 * Decrease cl_cache refcount and free the cache if refcount=0.
 * Since llite, lov and osc all hold cl_cache refcount,
 * the free will not cause race. (LU-6173)
 */
void cl_cache_decref(struct cl_client_cache *cache)
{
	if (atomic_dec_and_test(&cache->ccc_users))
		kfree(cache);
}
EXPORT_SYMBOL(cl_cache_decref);
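
/*
 * Example (illustrative sketch, not in the original file): cache lifetime is
 * plain reference counting. cl_cache_init() returns the cache with one
 * reference held, and each further user takes and drops its own:
 *
 *	struct cl_client_cache *cache = cl_cache_init(lru_page_max);
 *
 *	if (!cache)
 *		return -ENOMEM;
 *	cl_cache_incref(cache);		second user, e.g. another layer
 *	...
 *	cl_cache_decref(cache);		second user done
 *	cl_cache_decref(cache);		last reference: cache is freed
 */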