/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * osc cache management.
 *
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"
#include "osc_internal.h"
static int extent_debug; /* set it to be true for more debug */

static void osc_update_pending(struct osc_object *obj, int cmd, int delta);
static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
			   int state);
static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
			      struct osc_async_page *oap, int sent, int rc);
static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
			  int cmd);
static int osc_refresh_count(const struct lu_env *env,
			     struct osc_async_page *oap, int cmd);
static int osc_io_unplug_async(const struct lu_env *env,
			       struct client_obd *cli, struct osc_object *osc);
static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
			   unsigned int lost_grant);

static void osc_extent_tree_dump0(int level, struct osc_object *obj,
				  const char *func, int line);
#define osc_extent_tree_dump(lvl, obj) \
	osc_extent_tree_dump0(lvl, obj, __func__, __LINE__)
/* ------------------ osc extent ------------------ */
static inline char *ext_flags(struct osc_extent *ext, char *flags)
{
	char *buf = flags;

	*buf++ = ext->oe_rw ? 'r' : 'w';
	if (ext->oe_intree)
		*buf++ = 'i';
	if (ext->oe_srvlock)
		*buf++ = 's';
	if (ext->oe_hp)
		*buf++ = 'h';
	if (ext->oe_urgent)
		*buf++ = 'u';
	if (ext->oe_memalloc)
		*buf++ = 'm';
	if (ext->oe_trunc_pending)
		*buf++ = 't';
	if (ext->oe_fsync_wait)
		*buf++ = 'Y';
	*buf = 0;
	return flags;
}

static inline char list_empty_marker(struct list_head *list)
{
	return list_empty(list) ? '-' : '+';
}

#define EXTSTR       "[%lu -> %lu/%lu]"
#define EXTPARA(ext) (ext)->oe_start, (ext)->oe_end, (ext)->oe_max_end
static const char *oes_strings[] = {
	"inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL };
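
/*
 * Informal sketch of the extent state machine implied by oes_strings and
 * the code below: a freshly allocated extent is OES_INV; osc_extent_find()
 * inserts it as OES_CACHE; holding it for IO makes it OES_ACTIVE and
 * releasing it returns it to OES_CACHE; write-out moves it through
 * OES_LOCKING to OES_RPC (reads arrive already OES_LOCK_DONE); truncate
 * parks it in OES_TRUNC; and __osc_extent_remove() sends it back to
 * OES_INV before it is freed.
 */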
#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do {			\
	struct osc_extent *__ext = (extent);				\
	char __buf[16];							\
									\
	CDEBUG(lvl,							\
	       "extent %p@{" EXTSTR ", "				\
	       "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt,	\
	       /* ----- extent part 0 ----- */				\
	       __ext, EXTPARA(__ext),					\
	       /* ----- part 1 ----- */					\
	       atomic_read(&__ext->oe_refc),				\
	       atomic_read(&__ext->oe_users),				\
	       list_empty_marker(&__ext->oe_link),			\
	       oes_strings[__ext->oe_state], ext_flags(__ext, __buf),	\
	       __ext->oe_obj,						\
	       /* ----- part 2 ----- */					\
	       __ext->oe_grants, __ext->oe_nr_pages,			\
	       list_empty_marker(&__ext->oe_pages),			\
	       waitqueue_active(&__ext->oe_waitq) ? '+' : '-',		\
	       __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner,	\
	       /* ----- part 4 ----- */					\
	       ##__VA_ARGS__);						\
} while (0)

#define EASSERTF(expr, ext, fmt, args...) do {				\
	if (!(expr)) {							\
		OSC_EXTENT_DUMP(D_ERROR, (ext), fmt, ##args);		\
		osc_extent_tree_dump(D_ERROR, (ext)->oe_obj);		\
		LASSERT(expr);						\
	}								\
} while (0)

#define EASSERT(expr, ext) \
	EASSERTF(expr, ext, "\n")

static inline struct osc_extent *rb_extent(struct rb_node *n)
{
	if (n == NULL)
		return NULL;

	return container_of(n, struct osc_extent, oe_node);
}

static inline struct osc_extent *next_extent(struct osc_extent *ext)
{
	if (ext == NULL)
		return NULL;

	LASSERT(ext->oe_intree);
	return rb_extent(rb_next(&ext->oe_node));
}

static inline struct osc_extent *prev_extent(struct osc_extent *ext)
{
	if (ext == NULL)
		return NULL;

	LASSERT(ext->oe_intree);
	return rb_extent(rb_prev(&ext->oe_node));
}

static inline struct osc_extent *first_extent(struct osc_object *obj)
{
	return rb_extent(rb_first(&obj->oo_root));
}
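
/*
 * Note that rb_extent() maps a NULL rb_node back to NULL, so the
 * next_extent()/prev_extent()/first_extent() walks above terminate
 * naturally when they run off either end of the tree.
 */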
/* object must be locked by caller. */
static int osc_extent_sanity_check0(struct osc_extent *ext,
				    const char *func, const int line)
{
	struct osc_object *obj = ext->oe_obj;
	struct osc_async_page *oap;
	int page_count;
	int rc = 0;

	if (!osc_object_is_locked(obj)) {
		rc = 9;
		goto out;
	}

	if (ext->oe_state >= OES_STATE_MAX) {
		rc = 10;
		goto out;
	}

	if (atomic_read(&ext->oe_refc) <= 0) {
		rc = 20;
		goto out;
	}

	if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users)) {
		rc = 30;
		goto out;
	}

	switch (ext->oe_state) {
	case OES_INV:
		if (ext->oe_nr_pages > 0 || !list_empty(&ext->oe_pages))
			rc = 35;
		else
			rc = 0;
		goto out;
	case OES_ACTIVE:
		if (atomic_read(&ext->oe_users) == 0) {
			rc = 40;
			goto out;
		}
		if (ext->oe_hp) {
			rc = 50;
			goto out;
		}
		if (ext->oe_fsync_wait && !ext->oe_urgent) {
			rc = 55;
			goto out;
		}
		break;
	case OES_CACHE:
		if (ext->oe_grants == 0) {
			rc = 60;
			goto out;
		}
		if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp) {
			rc = 65;
			goto out;
		}
	default:
		if (atomic_read(&ext->oe_users) > 0) {
			rc = 70;
			goto out;
		}
	}

	if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start) {
		rc = 80;
		goto out;
	}

	if (ext->oe_osclock == NULL && ext->oe_grants > 0) {
		rc = 90;
		goto out;
	}

	if (ext->oe_osclock) {
		struct cl_lock_descr *descr;

		descr = &ext->oe_osclock->cll_descr;
		if (!(descr->cld_start <= ext->oe_start &&
		      descr->cld_end >= ext->oe_max_end)) {
			rc = 100;
			goto out;
		}
	}

	if (ext->oe_nr_pages > ext->oe_mppr) {
		rc = 105;
		goto out;
	}

	/* Do not verify page list if extent is in RPC. This is because an
	 * in-RPC extent is supposed to be exclusively accessible w/o lock. */
	if (ext->oe_state > OES_CACHE) {
		rc = 0;
		goto out;
	}

	if (!extent_debug) {
		rc = 0;
		goto out;
	}

	page_count = 0;
	list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
		pgoff_t index = oap2cl_page(oap)->cp_index;

		++page_count;
		if (index > ext->oe_end || index < ext->oe_start) {
			rc = 110;
			goto out;
		}
	}
	if (page_count != ext->oe_nr_pages) {
		rc = 120;
		goto out;
	}

out:
	if (rc != 0)
		OSC_EXTENT_DUMP(D_ERROR, ext,
				"%s:%d sanity check %p failed with rc = %d\n",
				func, line, ext, rc);
	return rc;
}

#define sanity_check_nolock(ext) \
	osc_extent_sanity_check0(ext, __func__, __LINE__)

#define sanity_check(ext) ({						\
	int __res;							\
	osc_object_lock((ext)->oe_obj);					\
	__res = sanity_check_nolock(ext);				\
	osc_object_unlock((ext)->oe_obj);				\
	__res;								\
})
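
/*
 * Two flavours of the check: sanity_check() takes and drops the object
 * lock around the call, while sanity_check_nolock() assumes the caller
 * already holds it.
 */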
/**
 * sanity check - to make sure there is no overlapped extent in the tree.
 */
static int osc_extent_is_overlapped(struct osc_object *obj,
				    struct osc_extent *ext)
{
	struct osc_extent *tmp;

	LASSERT(osc_object_is_locked(obj));

	if (!extent_debug)
		return 0;

	for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) {
		if (tmp == ext)
			continue;
		if (tmp->oe_end >= ext->oe_start &&
		    tmp->oe_start <= ext->oe_end)
			return 1;
	}
	return 0;
}

static void osc_extent_state_set(struct osc_extent *ext, int state)
{
	LASSERT(osc_object_is_locked(ext->oe_obj));
	LASSERT(state >= OES_INV && state < OES_STATE_MAX);

	/* Never try to sanity check a state changing extent :-) */
	/* LASSERT(sanity_check_nolock(ext) == 0); */

	/* TODO: validate the state machine */
	ext->oe_state = state;
	wake_up_all(&ext->oe_waitq);
}
static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
{
	struct osc_extent *ext;

	OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, GFP_IOFS);
	if (ext == NULL)
		return NULL;

	RB_CLEAR_NODE(&ext->oe_node);
	ext->oe_obj = obj;
	atomic_set(&ext->oe_refc, 1);
	atomic_set(&ext->oe_users, 0);
	INIT_LIST_HEAD(&ext->oe_link);
	ext->oe_state = OES_INV;
	INIT_LIST_HEAD(&ext->oe_pages);
	init_waitqueue_head(&ext->oe_waitq);
	ext->oe_osclock = NULL;

	return ext;
}

static void osc_extent_free(struct osc_extent *ext)
{
	OBD_SLAB_FREE_PTR(ext, osc_extent_kmem);
}

static struct osc_extent *osc_extent_get(struct osc_extent *ext)
{
	LASSERT(atomic_read(&ext->oe_refc) >= 0);
	atomic_inc(&ext->oe_refc);
	return ext;
}

static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
{
	LASSERT(atomic_read(&ext->oe_refc) > 0);
	if (atomic_dec_and_test(&ext->oe_refc)) {
		LASSERT(list_empty(&ext->oe_link));
		LASSERT(atomic_read(&ext->oe_users) == 0);
		LASSERT(ext->oe_state == OES_INV);
		LASSERT(!ext->oe_intree);

		if (ext->oe_osclock) {
			cl_lock_put(env, ext->oe_osclock);
			ext->oe_osclock = NULL;
		}
		osc_extent_free(ext);
	}
}
/**
 * osc_extent_put_trust() is a special version of osc_extent_put() when
 * it's known that the caller is not the last user. This is to address the
 * problem of lacking of lu_env ;-).
 */
static void osc_extent_put_trust(struct osc_extent *ext)
{
	LASSERT(atomic_read(&ext->oe_refc) > 1);
	LASSERT(osc_object_is_locked(ext->oe_obj));
	atomic_dec(&ext->oe_refc);
}

/**
 * Return the extent which includes pgoff @index, or return the greatest
 * previous extent in the tree.
 */
static struct osc_extent *osc_extent_search(struct osc_object *obj,
					    pgoff_t index)
{
	struct rb_node *n = obj->oo_root.rb_node;
	struct osc_extent *tmp, *p = NULL;

	LASSERT(osc_object_is_locked(obj));
	while (n != NULL) {
		tmp = rb_extent(n);
		if (index < tmp->oe_start) {
			n = n->rb_left;
		} else if (index > tmp->oe_end) {
			p = tmp;
			n = n->rb_right;
		} else {
			return tmp;
		}
	}
	return p;
}

/*
 * Return the extent covering @index, otherwise return NULL.
 * caller must have held object lock.
 */
static struct osc_extent *osc_extent_lookup(struct osc_object *obj,
					    pgoff_t index)
{
	struct osc_extent *ext;

	ext = osc_extent_search(obj, index);
	if (ext != NULL && ext->oe_start <= index && index <= ext->oe_end)
		return osc_extent_get(ext);
	return NULL;
}
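
/*
 * The rbtree at oo_root holds disjoint extents ordered by page index.
 * osc_extent_search() above returns the extent containing @index or, if
 * there is none, its closest predecessor - which is why osc_extent_lookup()
 * re-checks the [oe_start, oe_end] bounds before taking a reference.
 */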
/* caller must have held object lock. */
static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext)
{
	struct rb_node **n = &obj->oo_root.rb_node;
	struct rb_node *parent = NULL;
	struct osc_extent *tmp;

	LASSERT(ext->oe_intree == 0);
	LASSERT(ext->oe_obj == obj);
	LASSERT(osc_object_is_locked(obj));
	while (*n != NULL) {
		tmp = rb_extent(*n);
		parent = *n;

		if (ext->oe_end < tmp->oe_start)
			n = &(*n)->rb_left;
		else if (ext->oe_start > tmp->oe_end)
			n = &(*n)->rb_right;
		else
			EASSERTF(0, tmp, EXTSTR, EXTPARA(ext));
	}
	rb_link_node(&ext->oe_node, parent, n);
	rb_insert_color(&ext->oe_node, &obj->oo_root);
	osc_extent_get(ext);
	ext->oe_intree = 1;
}

/* caller must have held object lock. */
static void osc_extent_erase(struct osc_extent *ext)
{
	struct osc_object *obj = ext->oe_obj;

	LASSERT(osc_object_is_locked(obj));
	if (ext->oe_intree) {
		rb_erase(&ext->oe_node, &obj->oo_root);
		ext->oe_intree = 0;
		/* rbtree held a refcount */
		osc_extent_put_trust(ext);
	}
}

static struct osc_extent *osc_extent_hold(struct osc_extent *ext)
{
	struct osc_object *obj = ext->oe_obj;

	LASSERT(osc_object_is_locked(obj));
	LASSERT(ext->oe_state == OES_ACTIVE || ext->oe_state == OES_CACHE);
	if (ext->oe_state == OES_CACHE) {
		osc_extent_state_set(ext, OES_ACTIVE);
		osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages);
	}
	atomic_inc(&ext->oe_users);
	list_del_init(&ext->oe_link);
	return osc_extent_get(ext);
}

static void __osc_extent_remove(struct osc_extent *ext)
{
	LASSERT(osc_object_is_locked(ext->oe_obj));
	LASSERT(list_empty(&ext->oe_pages));
	osc_extent_erase(ext);
	list_del_init(&ext->oe_link);
	osc_extent_state_set(ext, OES_INV);
	OSC_EXTENT_DUMP(D_CACHE, ext, "destroyed.\n");
}

static void osc_extent_remove(struct osc_extent *ext)
{
	struct osc_object *obj = ext->oe_obj;

	osc_object_lock(obj);
	__osc_extent_remove(ext);
	osc_object_unlock(obj);
}
/**
 * This function is used to merge extents to get better performance. It checks
 * if @cur and @victim are contiguous at chunk level.
 */
static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
			    struct osc_extent *victim)
{
	struct osc_object *obj = cur->oe_obj;
	pgoff_t chunk_start;
	pgoff_t chunk_end;
	int ppc_bits;

	LASSERT(cur->oe_state == OES_CACHE);
	LASSERT(osc_object_is_locked(obj));
	if (victim == NULL)
		return -EINVAL;

	if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait)
		return -EBUSY;

	if (cur->oe_max_end != victim->oe_max_end)
		return -ERANGE;

	LASSERT(cur->oe_osclock == victim->oe_osclock);
	ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
	chunk_start = cur->oe_start >> ppc_bits;
	chunk_end = cur->oe_end >> ppc_bits;
	if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
	    chunk_end + 1 != victim->oe_start >> ppc_bits)
		return -ERANGE;

	OSC_EXTENT_DUMP(D_CACHE, victim, "will be merged by %p.\n", cur);

	cur->oe_start = min(cur->oe_start, victim->oe_start);
	cur->oe_end = max(cur->oe_end, victim->oe_end);
	cur->oe_grants += victim->oe_grants;
	cur->oe_nr_pages += victim->oe_nr_pages;
	/* only the following bits are needed to merge */
	cur->oe_urgent |= victim->oe_urgent;
	cur->oe_memalloc |= victim->oe_memalloc;
	list_splice_init(&victim->oe_pages, &cur->oe_pages);
	list_del_init(&victim->oe_link);
	victim->oe_nr_pages = 0;

	osc_extent_get(victim);
	__osc_extent_remove(victim);
	osc_extent_put(env, victim);

	OSC_EXTENT_DUMP(D_CACHE, cur, "after merging %p.\n", victim);
	return 0;
}
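
/*
 * Example of the chunk-adjacency test above (numbers for illustration
 * only; the real chunk size comes from cl_chunkbits): with 4 KiB pages
 * and 64 KiB chunks, ppc_bits = 4.  An extent @cur covering pages [32, 47]
 * lives in chunk 2, so a @victim ending at page 31 (chunk 1) or starting
 * at page 48 (chunk 3) is mergeable; anything farther away is not.
 */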
/**
 * Drop user count of osc_extent, and unplug IO asynchronously.
 */
void osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
{
	struct osc_object *obj = ext->oe_obj;

	LASSERT(atomic_read(&ext->oe_users) > 0);
	LASSERT(sanity_check(ext) == 0);
	LASSERT(ext->oe_grants > 0);

	if (atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) {
		LASSERT(ext->oe_state == OES_ACTIVE);
		if (ext->oe_trunc_pending) {
			/* a truncate process is waiting for this extent.
			 * This may happen due to a race, check
			 * osc_cache_truncate_start(). */
			osc_extent_state_set(ext, OES_TRUNC);
			ext->oe_trunc_pending = 0;
		} else {
			osc_extent_state_set(ext, OES_CACHE);
			osc_update_pending(obj, OBD_BRW_WRITE,
					   ext->oe_nr_pages);

			/* try to merge the previous and next extent. */
			osc_extent_merge(env, ext, prev_extent(ext));
			osc_extent_merge(env, ext, next_extent(ext));

			if (ext->oe_urgent)
				list_move_tail(&ext->oe_link,
					       &obj->oo_urgent_exts);
		}
		osc_object_unlock(obj);

		osc_io_unplug_async(env, osc_cli(obj), obj);
	}
	osc_extent_put(env, ext);
}

static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
{
	return !(ex1->oe_end < ex2->oe_start || ex2->oe_end < ex1->oe_start);
}
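
/*
 * A worked example of the rounding done in osc_extent_find() below
 * (illustrative values): with ppc_bits = 4, chunk_mask = ~15.  For
 * index = 35, cur->oe_start = 35 & ~15 = 32 and
 * cur->oe_end = ((35 + 16) & ~15) - 1 = 47, i.e. the whole 16-page chunk
 * containing the page, clipped afterwards to the lock extent and max_end.
 */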
/**
 * Find or create an extent which includes @index, core function to manage
 * extent tree.
 */
struct osc_extent *osc_extent_find(const struct lu_env *env,
				   struct osc_object *obj, pgoff_t index,
				   int *grants)
{
	struct client_obd *cli = osc_cli(obj);
	struct cl_lock *lock;
	struct osc_extent *cur;
	struct osc_extent *ext;
	struct osc_extent *conflict = NULL;
	struct osc_extent *found = NULL;
	pgoff_t chunk;
	pgoff_t max_end;
	int max_pages; /* max_pages_per_rpc */
	int chunksize;
	int ppc_bits; /* pages per chunk bits */
	int chunk_mask;
	int rc;

	cur = osc_extent_alloc(obj);
	if (cur == NULL)
		return ERR_PTR(-ENOMEM);

	lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
	LASSERT(lock != NULL);
	LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);

	LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
	ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
	chunk_mask = ~((1 << ppc_bits) - 1);
	chunksize = 1 << cli->cl_chunkbits;
	chunk = index >> ppc_bits;

	/* align end to rpc edge, rpc size may not be a power 2 integer. */
	max_pages = cli->cl_max_pages_per_rpc;
	LASSERT((max_pages & ~chunk_mask) == 0);
	max_end = index - (index % max_pages) + max_pages - 1;
	max_end = min_t(pgoff_t, max_end, lock->cll_descr.cld_end);

	/* initialize new extent by parameters so far */
	cur->oe_max_end = max_end;
	cur->oe_start = index & chunk_mask;
	cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
	if (cur->oe_start < lock->cll_descr.cld_start)
		cur->oe_start = lock->cll_descr.cld_start;
	if (cur->oe_end > max_end)
		cur->oe_end = max_end;
	cur->oe_osclock = lock;
	cur->oe_grants = 0;
	cur->oe_mppr = max_pages;

	/* grants has been allocated by caller */
	LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
		 "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax);
	LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur));

restart:
	osc_object_lock(obj);
	ext = osc_extent_search(obj, cur->oe_start);
	if (ext == NULL)
		ext = first_extent(obj);
	while (ext != NULL) {
		loff_t ext_chk_start = ext->oe_start >> ppc_bits;
		loff_t ext_chk_end = ext->oe_end >> ppc_bits;

		LASSERT(sanity_check_nolock(ext) == 0);
		if (chunk > ext_chk_end + 1)
			break;

		/* if covering by different locks, no chance to match */
		if (lock != ext->oe_osclock) {
			EASSERTF(!overlapped(ext, cur), ext,
				 EXTSTR, EXTPARA(cur));

			ext = next_extent(ext);
			continue;
		}

		/* discontiguous chunks? */
		if (chunk + 1 < ext_chk_start) {
			ext = next_extent(ext);
			continue;
		}

		/* ok, from now on, ext and cur have these attrs:
		 * 1. covered by the same lock
		 * 2. contiguous at chunk level or overlapping. */

		if (overlapped(ext, cur)) {
			/* cur is the minimum unit, so overlapping means
			 * full contain. */
			EASSERTF((ext->oe_start <= cur->oe_start &&
				  ext->oe_end >= cur->oe_end),
				 ext, EXTSTR, EXTPARA(cur));

			if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) {
				/* for simplicity, we wait for this extent to
				 * finish before going forward. */
				conflict = osc_extent_get(ext);
				break;
			}

			found = osc_extent_hold(ext);
			break;
		}

		/* non-overlapped extent */
		if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) {
			/* we can't do anything for a non OES_CACHE extent, or
			 * if there is someone waiting for this extent to be
			 * flushed, try next one. */
			ext = next_extent(ext);
			continue;
		}

		/* check if they belong to the same rpc slot before trying to
		 * merge. the extents are not overlapped and contiguous at
		 * chunk level to get here. */
		if (ext->oe_max_end != max_end) {
			/* if they don't belong to the same RPC slot or
			 * max_pages_per_rpc has ever changed, do not merge. */
			ext = next_extent(ext);
			continue;
		}

		/* it's required that an extent must be contiguous at chunk
		 * level so that we know the whole extent is covered by grant
		 * (the pages in the extent are NOT required to be contiguous).
		 * Otherwise, it will be too much difficult to know which
		 * chunks have grants allocated. */

		/* try to do front merge - extend ext's start */
		if (chunk + 1 == ext_chk_start) {
			/* ext must be chunk size aligned */
			EASSERT((ext->oe_start & ~chunk_mask) == 0, ext);

			/* pull ext's start back to cover cur */
			ext->oe_start = cur->oe_start;
			ext->oe_grants += chunksize;
			*grants -= chunksize;

			found = osc_extent_hold(ext);
		} else if (chunk == ext_chk_end + 1) {
			/* rear merge */
			ext->oe_end = cur->oe_end;
			ext->oe_grants += chunksize;
			*grants -= chunksize;

			/* try to merge with the next one because we just fill
			 * in a gap */
			if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
				/* we can save extent tax from next extent */
				*grants += cli->cl_extent_tax;

			found = osc_extent_hold(ext);
		}
		if (found != NULL)
			break;

		ext = next_extent(ext);
	}

	osc_extent_tree_dump(D_CACHE, obj);
	if (found != NULL) {
		LASSERT(conflict == NULL);
		if (!IS_ERR(found)) {
			LASSERT(found->oe_osclock == cur->oe_osclock);
			OSC_EXTENT_DUMP(D_CACHE, found,
					"found caching ext for %lu.\n", index);
		}
	} else if (conflict == NULL) {
		/* create a new extent */
		EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur);
		cur->oe_grants = chunksize + cli->cl_extent_tax;
		*grants -= cur->oe_grants;
		LASSERT(*grants >= 0);

		cur->oe_state = OES_CACHE;
		found = osc_extent_hold(cur);
		osc_extent_insert(obj, cur);
		OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
				index, lock->cll_descr.cld_end);
	}
	osc_object_unlock(obj);

	if (conflict != NULL) {
		LASSERT(found == NULL);

		/* waiting for IO to finish. Please notice that it's impossible
		 * to be an OES_TRUNC extent. */
		rc = osc_extent_wait(env, conflict, OES_INV);
		osc_extent_put(env, conflict);
		conflict = NULL;
		if (rc < 0) {
			found = ERR_PTR(rc);
			goto out;
		}

		goto restart;
	}

out:
	osc_extent_put(env, cur);
	LASSERT(*grants >= 0);
	return found;
}
/**
 * Called when IO is finished to an extent.
 */
int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
		      int sent, int rc)
{
	struct client_obd *cli = osc_cli(ext->oe_obj);
	struct osc_async_page *oap;
	struct osc_async_page *tmp;
	int nr_pages = ext->oe_nr_pages;
	int lost_grant = 0;
	int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
	__u64 last_off = 0;
	int last_count = -1;

	OSC_EXTENT_DUMP(D_CACHE, ext, "extent finished.\n");

	ext->oe_rc = rc ?: ext->oe_nr_pages;
	EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
	list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
				 oap_pending_item) {
		list_del_init(&oap->oap_rpc_item);
		list_del_init(&oap->oap_pending_item);
		if (last_off <= oap->oap_obj_off) {
			last_off = oap->oap_obj_off;
			last_count = oap->oap_count;
		}

		--ext->oe_nr_pages;
		osc_ap_completion(env, cli, oap, sent, rc);
	}
	EASSERT(ext->oe_nr_pages == 0, ext);

	if (!sent) {
		lost_grant = ext->oe_grants;
	} else if (blocksize < PAGE_CACHE_SIZE &&
		   last_count != PAGE_CACHE_SIZE) {
		/* For short writes we shouldn't count parts of pages that
		 * span a whole chunk on the OST side, or our accounting goes
		 * wrong. Should match the code in filter_grant_check. */
		int offset = oap->oap_page_off & ~CFS_PAGE_MASK;
		int count = oap->oap_count + (offset & (blocksize - 1));
		int end = (offset + oap->oap_count) & (blocksize - 1);

		if (end)
			count += blocksize - end;

		lost_grant = PAGE_CACHE_SIZE - count;
	}
	if (ext->oe_grants > 0)
		osc_free_grant(cli, nr_pages, lost_grant);

	osc_extent_remove(ext);
	/* put the refcount for RPC */
	osc_extent_put(env, ext);

	return 0;
}
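
/*
 * Predicate for the l_wait_event() calls in osc_extent_wait() below: it
 * samples oe_state under the object lock, pairing with the wake_up_all()
 * in osc_extent_state_set().
 */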
static int extent_wait_cb(struct osc_extent *ext, int state)
{
	int ret;

	osc_object_lock(ext->oe_obj);
	ret = ext->oe_state == state;
	osc_object_unlock(ext->oe_obj);

	return ret;
}

/**
 * Wait for the extent's state to become @state.
 */
static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
			   int state)
{
	struct osc_object *obj = ext->oe_obj;
	struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
						  LWI_ON_SIGNAL_NOOP, NULL);
	int rc = 0;

	osc_object_lock(obj);
	LASSERT(sanity_check_nolock(ext) == 0);
	/* `Kick' this extent only if the caller is waiting for it to be
	 * written out. */
	if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp &&
	    !ext->oe_trunc_pending) {
		if (ext->oe_state == OES_ACTIVE) {
			ext->oe_urgent = 1;
		} else if (ext->oe_state == OES_CACHE) {
			ext->oe_urgent = 1;
			osc_extent_hold(ext);
			rc = 1;
		}
	}
	osc_object_unlock(obj);
	if (rc == 1)
		osc_extent_release(env, ext);

	/* wait for the extent until its state becomes @state */
	rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), &lwi);
	if (rc == -ETIMEDOUT) {
		OSC_EXTENT_DUMP(D_ERROR, ext,
				"%s: wait ext to %d timedout, recovery in progress?\n",
				osc_export(obj)->exp_obd->obd_name, state);

		lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
		rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
				  &lwi);
	}
	if (rc == 0 && ext->oe_rc < 0)
		rc = ext->oe_rc;
	return rc;
}
/**
 * Discard pages with index greater than @size. If @ext is overlapped with
 * @size, then partial truncate happens.
 */
static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
			       bool partial)
{
	struct cl_env_nest nest;
	struct lu_env *env;
	struct cl_io *io;
	struct osc_object *obj = ext->oe_obj;
	struct client_obd *cli = osc_cli(obj);
	struct osc_async_page *oap;
	struct osc_async_page *tmp;
	int pages_in_chunk = 0;
	int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
	__u64 trunc_chunk = trunc_index >> ppc_bits;
	int grants = 0;
	int nr_pages = 0;
	int rc = 0;

	LASSERT(sanity_check(ext) == 0);
	EASSERT(ext->oe_state == OES_TRUNC, ext);
	EASSERT(!ext->oe_urgent, ext);

	/* Request new lu_env.
	 * We can't use that env from osc_cache_truncate_start() because
	 * it's from lov_io_sub and not fully initialized. */
	env = cl_env_nested_get(&nest);
	io  = &osc_env_info(env)->oti_io;
	io->ci_obj = cl_object_top(osc2cl(obj));
	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (rc < 0)
		goto out;

	/* discard all pages with index greater then trunc_index */
	list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
				 oap_pending_item) {
		struct cl_page *sub = oap2cl_page(oap);
		struct cl_page *page = cl_page_top(sub);

		LASSERT(list_empty(&oap->oap_rpc_item));

		/* only discard the pages with their index greater than
		 * trunc_index, and ... */
		if (sub->cp_index < trunc_index ||
		    (sub->cp_index == trunc_index && partial)) {
			/* accounting how many pages remaining in the chunk
			 * so that we can calculate grants correctly. */
			if (sub->cp_index >> ppc_bits == trunc_chunk)
				++pages_in_chunk;
			continue;
		}

		list_del_init(&oap->oap_pending_item);

		cl_page_get(page);
		lu_ref_add(&page->cp_reference, "truncate", current);

		if (cl_page_own(env, io, page) == 0) {
			cl_page_unmap(env, io, page);
			cl_page_discard(env, io, page);
			cl_page_disown(env, io, page);
		} else {
			LASSERT(page->cp_state == CPS_FREEING);
			LASSERT(0);
		}

		lu_ref_del(&page->cp_reference, "truncate", current);
		cl_page_put(env, page);

		--ext->oe_nr_pages;
		++nr_pages;
	}
	EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
		      ext->oe_nr_pages == 0),
		 ext, "trunc_index %lu, partial %d\n", trunc_index, partial);

	osc_object_lock(obj);
	if (ext->oe_nr_pages == 0) {
		LASSERT(pages_in_chunk == 0);
		grants = ext->oe_grants;
		ext->oe_grants = 0;
	} else { /* calculate how many grants we can free */
		int chunks = (ext->oe_end >> ppc_bits) - trunc_chunk;
		pgoff_t last_index;

		/* if there is no pages in this chunk, we can also free grants
		 * for the last chunk */
		if (pages_in_chunk == 0) {
			/* if this is the 1st chunk and no pages in this chunk,
			 * ext->oe_nr_pages must be zero, so we should be in
			 * the other if-clause. */
			LASSERT(trunc_chunk > 0);
			--trunc_chunk;
			++chunks;
		}

		/* this is what we can free from this extent */
		grants = chunks << cli->cl_chunkbits;
		ext->oe_grants -= grants;
		last_index = ((trunc_chunk + 1) << ppc_bits) - 1;
		ext->oe_end = min(last_index, ext->oe_max_end);
		LASSERT(ext->oe_end >= ext->oe_start);
		LASSERT(ext->oe_grants > 0);
	}
	osc_object_unlock(obj);

	if (grants > 0 || nr_pages > 0)
		osc_free_grant(cli, nr_pages, grants);

out:
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);
	return rc;
}
/**
 * This function is used to make the extent prepared for transfer.
 * A race with flushing page - ll_writepage() has to be handled cautiously.
 */
static int osc_extent_make_ready(const struct lu_env *env,
				 struct osc_extent *ext)
{
	struct osc_async_page *oap;
	struct osc_async_page *last = NULL;
	struct osc_object *obj = ext->oe_obj;
	int page_count = 0;
	int rc;

	/* we're going to grab page lock, so object lock must not be taken. */
	LASSERT(sanity_check(ext) == 0);
	/* in locking state, any process should not touch this extent. */
	EASSERT(ext->oe_state == OES_LOCKING, ext);
	EASSERT(ext->oe_owner != NULL, ext);

	OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n");

	list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
		++page_count;
		if (last == NULL || last->oap_obj_off < oap->oap_obj_off)
			last = oap;

		/* checking ASYNC_READY is race safe */
		if ((oap->oap_async_flags & ASYNC_READY) != 0)
			continue;

		rc = osc_make_ready(env, oap, OBD_BRW_WRITE);
		switch (rc) {
		case 0:
			spin_lock(&oap->oap_lock);
			oap->oap_async_flags |= ASYNC_READY;
			spin_unlock(&oap->oap_lock);
			break;
		case -EALREADY:
			LASSERT((oap->oap_async_flags & ASYNC_READY) != 0);
			break;
		default:
			LASSERTF(0, "unknown return code: %d\n", rc);
		}
	}

	LASSERT(page_count == ext->oe_nr_pages);
	LASSERT(last != NULL);
	/* the last page is the only one we need to refresh its count by
	 * the size of file. */
	if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
		last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
		LASSERT(last->oap_count > 0);
		LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
		last->oap_async_flags |= ASYNC_COUNT_STABLE;
	}

	/* for the rest of pages, we don't need to call osf_refresh_count()
	 * because it's known they are not the last page */
	list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
		if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
			oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
			oap->oap_async_flags |= ASYNC_COUNT_STABLE;
		}
	}

	osc_object_lock(obj);
	osc_extent_state_set(ext, OES_RPC);
	osc_object_unlock(obj);
	/* get a refcount for RPC. */
	osc_extent_get(ext);

	return 0;
}
/**
 * Quick and simple version of osc_extent_find(). This function is frequently
 * called to expand the extent for the same IO. To expand the extent, the
 * page index must be in the same or next chunk of ext->oe_end.
 */
static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
{
	struct osc_object *obj = ext->oe_obj;
	struct client_obd *cli = osc_cli(obj);
	struct osc_extent *next;
	int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
	pgoff_t chunk = index >> ppc_bits;
	pgoff_t end_chunk;
	pgoff_t end_index;
	int chunksize = 1 << cli->cl_chunkbits;
	int rc = 0;

	LASSERT(ext->oe_max_end >= index && ext->oe_start <= index);
	osc_object_lock(obj);
	LASSERT(sanity_check_nolock(ext) == 0);
	end_chunk = ext->oe_end >> ppc_bits;
	if (chunk > end_chunk + 1) {
		rc = -ERANGE;
		goto out;
	}

	if (end_chunk >= chunk) {
		rc = 0;
		goto out;
	}

	LASSERT(end_chunk + 1 == chunk);
	/* try to expand this extent to cover @index */
	end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);

	next = next_extent(ext);
	if (next != NULL && next->oe_start <= end_index) {
		/* complex mode - overlapped with the next extent,
		 * this case will be handled by osc_extent_find() */
		rc = -EAGAIN;
		goto out;
	}

	ext->oe_end = end_index;
	ext->oe_grants += chunksize;
	*grants -= chunksize;
	LASSERT(*grants >= 0);
	EASSERTF(osc_extent_is_overlapped(obj, ext) == 0, ext,
		 "overlapped after expanding for %lu.\n", index);

out:
	osc_object_unlock(obj);
	return rc;
}
static void osc_extent_tree_dump0(int level, struct osc_object *obj,
				  const char *func, int line)
{
	struct osc_extent *ext;
	int cnt;

	CDEBUG(level, "Dump object %p extents at %s:%d, mppr: %u.\n",
	       obj, func, line, osc_cli(obj)->cl_max_pages_per_rpc);

	/* osc_object_lock(obj); */
	cnt = 1;
	for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext))
		OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++);

	cnt = 1;
	list_for_each_entry(ext, &obj->oo_hp_exts, oe_link)
		OSC_EXTENT_DUMP(level, ext, "hp %d.\n", cnt++);

	cnt = 1;
	list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link)
		OSC_EXTENT_DUMP(level, ext, "urgent %d.\n", cnt++);

	cnt = 1;
	list_for_each_entry(ext, &obj->oo_reading_exts, oe_link)
		OSC_EXTENT_DUMP(level, ext, "reading %d.\n", cnt++);
	/* osc_object_unlock(obj); */
}

/* ------------------ osc extent end ------------------ */

static inline int osc_is_ready(struct osc_object *osc)
{
	return !list_empty(&osc->oo_ready_item) ||
	       !list_empty(&osc->oo_hp_ready_item);
}

#define OSC_IO_DEBUG(OSC, STR, args...)					       \
	CDEBUG(D_CACHE, "obj %p ready %d|%c|%c wr %d|%c|%c rd %d|%c " STR,     \
	       (OSC), osc_is_ready(OSC),				       \
	       list_empty_marker(&(OSC)->oo_hp_ready_item),		       \
	       list_empty_marker(&(OSC)->oo_ready_item),		       \
	       atomic_read(&(OSC)->oo_nr_writes),			       \
	       list_empty_marker(&(OSC)->oo_hp_exts),			       \
	       list_empty_marker(&(OSC)->oo_urgent_exts),		       \
	       atomic_read(&(OSC)->oo_nr_reads),			       \
	       list_empty_marker(&(OSC)->oo_reading_exts),		       \
	       ##args)
static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
			  int cmd)
{
	struct osc_page *opg = oap2osc_page(oap);
	struct cl_page  *page = cl_page_top(oap2cl_page(oap));
	int result;

	LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */

	result = cl_page_make_ready(env, page, CRT_WRITE);
	if (result == 0)
		opg->ops_submit_time = cfs_time_current();
	return result;
}
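
/*
 * Example for osc_refresh_count() below (illustrative values): with
 * 4096-byte pages and kms (known minimum size) = 10000, a write to page
 * index 2 (file bytes 8192..12287) returns 10000 % 4096 = 1808, so only
 * the bytes below kms are transferred; a page wholly beyond kms returns 0.
 */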
static int osc_refresh_count(const struct lu_env *env,
			     struct osc_async_page *oap, int cmd)
{
	struct osc_page *opg = oap2osc_page(oap);
	struct cl_page *page = oap2cl_page(oap);
	struct cl_object *obj;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;

	int result;
	loff_t kms;

	/* readpage queues with _COUNT_STABLE, shouldn't get here. */
	LASSERT(!(cmd & OBD_BRW_READ));
	LASSERT(opg != NULL);
	obj = opg->ops_cl.cpl_obj;

	cl_object_attr_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);
	if (result < 0)
		return result;
	kms = attr->cat_kms;
	if (cl_offset(obj, page->cp_index) >= kms)
		/* catch race with truncate */
		return 0;
	else if (cl_offset(obj, page->cp_index + 1) > kms)
		/* catch sub-page write at end of file */
		return kms % PAGE_CACHE_SIZE;
	else
		return PAGE_CACHE_SIZE;
}
static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
			  int cmd, int rc)
{
	struct osc_page *opg = oap2osc_page(oap);
	struct cl_page *page = cl_page_top(oap2cl_page(oap));
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
	enum cl_req_type crt;
	int srvlock;

	cmd &= ~OBD_BRW_NOQUOTA;
	LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
	LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
	LASSERT(opg->ops_transfer_pinned);

	/*
	 * page->cp_req can be NULL if io submission failed before
	 * cl_req was allocated.
	 */
	if (page->cp_req != NULL)
		cl_req_page_done(env, page);
	LASSERT(page->cp_req == NULL);

	crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
	/* Clear opg->ops_transfer_pinned before VM lock is released. */
	opg->ops_transfer_pinned = 0;

	spin_lock(&obj->oo_seatbelt);
	LASSERT(opg->ops_submitter != NULL);
	LASSERT(!list_empty(&opg->ops_inflight));
	list_del_init(&opg->ops_inflight);
	opg->ops_submitter = NULL;
	spin_unlock(&obj->oo_seatbelt);

	opg->ops_submit_time = 0;
	srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;

	/* statistic */
	if (rc == 0 && srvlock) {
		struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev;
		struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
		int bytes = oap->oap_count;

		if (crt == CRT_READ)
			stats->os_lockless_reads += bytes;
		else
			stats->os_lockless_writes += bytes;
	}

	/*
	 * This has to be the last operation with the page, as locks are
	 * released in cl_page_completion() and nothing except for the
	 * reference counter protects page from concurrent reclaim.
	 */
	lu_ref_del(&page->cp_reference, "transfer", page);

	cl_page_completion(env, page, crt, rc);

	return 0;
}

#define OSC_DUMP_GRANT(cli, fmt, args...) do {				      \
	struct client_obd *__tmp = (cli);				      \
	CDEBUG(D_CACHE, "%s: { dirty: %ld/%ld dirty_pages: %d/%d "	      \
	       "dropped: %ld avail: %ld, reserved: %ld, flight: %d } " fmt,   \
	       __tmp->cl_import->imp_obd->obd_name,			      \
	       __tmp->cl_dirty, __tmp->cl_dirty_max,			      \
	       atomic_read(&obd_dirty_pages), obd_max_dirty_pages,	      \
	       __tmp->cl_lost_grant, __tmp->cl_avail_grant,		      \
	       __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, ##args);      \
} while (0)
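
/*
 * Grant accounting in brief: cl_avail_grant is grant the OST has promised
 * but the client has not yet tied to anything; osc_reserve_grant() moves
 * bytes from it into cl_reserved_grant while an extent is being set up;
 * grant actually backing dirty pages is tracked through cl_dirty and
 * obd_dirty_pages by osc_consume_write_grant(); and cl_lost_grant
 * accumulates grant that must be handed back to the OST (truncate,
 * sub-block writes).
 */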
/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
				    struct brw_page *pga)
{
	assert_spin_locked(&cli->cl_loi_list_lock.lock);
	LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
	atomic_inc(&obd_dirty_pages);
	cli->cl_dirty += PAGE_CACHE_SIZE;
	pga->flag |= OBD_BRW_FROM_GRANT;
	CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
	       PAGE_CACHE_SIZE, pga, pga->pg);
	osc_update_next_shrink(cli);
}

/* the companion to osc_consume_write_grant, called when a brw has completed.
 * must be called with the loi lock held. */
static void osc_release_write_grant(struct client_obd *cli,
				    struct brw_page *pga)
{
	assert_spin_locked(&cli->cl_loi_list_lock.lock);
	if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
		return;
	}

	pga->flag &= ~OBD_BRW_FROM_GRANT;
	atomic_dec(&obd_dirty_pages);
	cli->cl_dirty -= PAGE_CACHE_SIZE;
	if (pga->flag & OBD_BRW_NOCACHE) {
		pga->flag &= ~OBD_BRW_NOCACHE;
		atomic_dec(&obd_dirty_transit_pages);
		cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
	}
}

/**
 * To avoid sleeping with object lock held, it's good for us allocate enough
 * grants before entering into critical section.
 *
 * client_obd_list_lock held by caller
 */
static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes)
{
	int rc = -EDQUOT;

	if (cli->cl_avail_grant >= bytes) {
		cli->cl_avail_grant -= bytes;
		cli->cl_reserved_grant += bytes;
		rc = 0;
	}
	return rc;
}

static void __osc_unreserve_grant(struct client_obd *cli,
				  unsigned int reserved, unsigned int unused)
{
	/* it's quite normal for us to get more grant than reserved.
	 * Thinking about a case that two extents merged by adding a new
	 * chunk, we can save one extent tax. If extent tax is greater than
	 * one chunk, we can save more grant by adding a new chunk */
	cli->cl_reserved_grant -= reserved;
	if (unused > reserved) {
		cli->cl_avail_grant += reserved;
		cli->cl_lost_grant += unused - reserved;
	} else {
		cli->cl_avail_grant += unused;
	}
}
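
/*
 * Numeric example for __osc_unreserve_grant() above (values illustrative):
 * if 64K was reserved but a merge saved one 4K extent tax, unused = 68K
 * exceeds reserved = 64K, so 64K goes back to cl_avail_grant and the
 * extra 4K is booked as cl_lost_grant to be returned to the OST.
 */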
void osc_unreserve_grant(struct client_obd *cli,
			 unsigned int reserved, unsigned int unused)
{
	client_obd_list_lock(&cli->cl_loi_list_lock);
	__osc_unreserve_grant(cli, reserved, unused);
	if (unused > 0)
		osc_wake_cache_waiters(cli);
	client_obd_list_unlock(&cli->cl_loi_list_lock);
}

/**
 * Free grant after IO is finished or canceled.
 *
 * @lost_grant is used to remember how many grants we have allocated but not
 * used, we should return these grants to OST. There're two cases where grants
 * can be lost:
 * 1. truncate;
 * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
 *    written. In this case OST may use less chunks to serve this partial
 *    write. OSTs don't actually know the page size on the client side. so
 *    clients have to calculate lost grant by the blocksize on the OST.
 *    See filter_grant_check() for details.
 */
static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
			   unsigned int lost_grant)
{
	int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;

	client_obd_list_lock(&cli->cl_loi_list_lock);
	atomic_sub(nr_pages, &obd_dirty_pages);
	cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
	cli->cl_lost_grant += lost_grant;
	if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
		/* borrow some grant from truncate to avoid the case that
		 * truncate uses up all avail grant */
		cli->cl_lost_grant -= grant;
		cli->cl_avail_grant += grant;
	}
	osc_wake_cache_waiters(cli);
	client_obd_list_unlock(&cli->cl_loi_list_lock);
	CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
	       lost_grant, cli->cl_lost_grant,
	       cli->cl_avail_grant, cli->cl_dirty);
}

/**
 * The companion to osc_enter_cache(), called when @oap is no longer part of
 * the dirty accounting due to error.
 */
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
{
	client_obd_list_lock(&cli->cl_loi_list_lock);
	osc_release_write_grant(cli, &oap->oap_brw_page);
	client_obd_list_unlock(&cli->cl_loi_list_lock);
}

/**
 * Non-blocking version of osc_enter_cache() that consumes grant only when it
 * has enough grant.
 */
static int osc_enter_cache_try(struct client_obd *cli,
			       struct osc_async_page *oap,
			       int bytes, int transient)
{
	int rc;

	OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);

	rc = osc_reserve_grant(cli, bytes);
	if (rc < 0)
		return 0;

	if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
	    atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
		osc_consume_write_grant(cli, &oap->oap_brw_page);
		if (transient) {
			cli->cl_dirty_transit += PAGE_CACHE_SIZE;
			atomic_inc(&obd_dirty_transit_pages);
			oap->oap_brw_flags |= OBD_BRW_NOCACHE;
		}
		rc = 1;
	} else {
		__osc_unreserve_grant(cli, bytes, bytes);
		rc = 0;
	}
	return rc;
}

static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
{
	int rc;

	client_obd_list_lock(&cli->cl_loi_list_lock);
	rc = list_empty(&ocw->ocw_entry);
	client_obd_list_unlock(&cli->cl_loi_list_lock);
	return rc;
}
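
/*
 * Cache-waiter handshake used by osc_enter_cache() below: the writer
 * queues an osc_cache_waiter on cl_cache_waiters and sleeps until
 * ocw_granted() observes the entry unlinked; osc_wake_cache_waiters()
 * unlinks it, retries osc_enter_cache_try() on the waiter's behalf,
 * records the verdict in ocw_rc and wakes the waiter.
 */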
/**
 * The main entry to reserve dirty page accounting. Usually the grant reserved
 * in this function will be freed in bulk in osc_free_grant() unless it fails
 * to add osc cache, in that case, it will be freed in osc_exit_cache().
 *
 * The process will be put into sleep if it's already run out of grant.
 */
static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
			   struct osc_async_page *oap, int bytes)
{
	struct osc_object *osc = oap->oap_obj;
	struct lov_oinfo *loi = osc->oo_oinfo;
	struct osc_cache_waiter ocw;
	struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
	int rc = -EDQUOT;

	OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);

	client_obd_list_lock(&cli->cl_loi_list_lock);

	/* force the caller to try sync io. this can jump the list
	 * of queued writes and create a discontiguous rpc stream */
	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
	    cli->cl_dirty_max < PAGE_CACHE_SIZE ||
	    cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
		rc = -EDQUOT;
		goto out;
	}

	/* Hopefully normal case - cache space and write credits available */
	if (osc_enter_cache_try(cli, oap, bytes, 0)) {
		rc = 0;
		goto out;
	}

	/* We can get here for two reasons: too many dirty pages in cache, or
	 * run out of grants. In both cases we should write dirty pages out.
	 * Adding a cache waiter will trigger urgent write-out no matter what
	 * RPC size will be.
	 * The exiting condition is no avail grants and no dirty pages caching,
	 * that really means there is no space on the OST. */
	init_waitqueue_head(&ocw.ocw_waitq);
	ocw.ocw_oap   = oap;
	ocw.ocw_grant = bytes;
	while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
		list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
		ocw.ocw_rc = 0;
		client_obd_list_unlock(&cli->cl_loi_list_lock);

		osc_io_unplug_async(env, cli, NULL);

		CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
		       cli->cl_import->imp_obd->obd_name, &ocw, oap);

		rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);

		client_obd_list_lock(&cli->cl_loi_list_lock);

		/* l_wait_event is interrupted by signal */
		if (rc < 0) {
			list_del_init(&ocw.ocw_entry);
			goto out;
		}

		LASSERT(list_empty(&ocw.ocw_entry));
		rc = ocw.ocw_rc;

		if (rc != -EDQUOT)
			goto out;
		if (osc_enter_cache_try(cli, oap, bytes, 0)) {
			rc = 0;
			goto out;
		}
	}
out:
	client_obd_list_unlock(&cli->cl_loi_list_lock);
	OSC_DUMP_GRANT(cli, "returned %d.\n", rc);
	return rc;
}
/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
{
	struct list_head *l, *tmp;
	struct osc_cache_waiter *ocw;

	list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
		ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
		list_del_init(&ocw->ocw_entry);

		ocw->ocw_rc = -EDQUOT;
		/* we can't dirty more */
		if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
		    (atomic_read(&obd_dirty_pages) + 1 >
		     obd_max_dirty_pages)) {
			CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
			       cli->cl_dirty,
			       cli->cl_dirty_max, obd_max_dirty_pages);
			goto wakeup;
		}

		ocw->ocw_rc = 0;
		if (!osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0))
			ocw->ocw_rc = -EDQUOT;

wakeup:
		CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n",
		       ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc);

		wake_up(&ocw->ocw_waitq);
	}
}

static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc)
{
	int hprpc = !!list_empty(&osc->oo_hp_exts);

	return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
}

/* This maintains the lists of pending pages to read/write for a given object
 * (lop).  This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint()
 * to quickly find objects that are ready to send an RPC. */
static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
			 int cmd)
{
	int invalid_import = 0;

	/* if we have an invalid import we want to drain the queued pages
	 * by forcing them through rpcs that immediately fail and complete
	 * the pages. recovery relies on this to empty the queued pages
	 * before canceling the locks and evicting down the llite pages */
	if ((cli->cl_import == NULL || cli->cl_import->imp_invalid))
		invalid_import = 1;

	if (cmd & OBD_BRW_WRITE) {
		if (atomic_read(&osc->oo_nr_writes) == 0)
			return 0;
		if (invalid_import) {
			CDEBUG(D_CACHE, "invalid import forcing RPC\n");
			return 1;
		}
		if (!list_empty(&osc->oo_hp_exts)) {
			CDEBUG(D_CACHE, "high prio request forcing RPC\n");
			return 1;
		}
		if (!list_empty(&osc->oo_urgent_exts)) {
			CDEBUG(D_CACHE, "urgent request forcing RPC\n");
			return 1;
		}
		/* trigger a write rpc stream as long as there are dirtiers
		 * waiting for space. as they're waiting, they're not going to
		 * create more pages to coalesce with what's waiting.. */
		if (!list_empty(&cli->cl_cache_waiters)) {
			CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
			return 1;
		}
		if (atomic_read(&osc->oo_nr_writes) >=
		    cli->cl_max_pages_per_rpc)
			return 1;
	} else {
		if (atomic_read(&osc->oo_nr_reads) == 0)
			return 0;
		if (invalid_import) {
			CDEBUG(D_CACHE, "invalid import forcing RPC\n");
			return 1;
		}
		/* all read are urgent. */
		if (!list_empty(&osc->oo_reading_exts))
			return 1;
	}

	return 0;
}

static void osc_update_pending(struct osc_object *obj, int cmd, int delta)
{
	struct client_obd *cli = osc_cli(obj);

	if (cmd & OBD_BRW_WRITE) {
		atomic_add(delta, &obj->oo_nr_writes);
		atomic_add(delta, &cli->cl_pending_w_pages);
		LASSERT(atomic_read(&obj->oo_nr_writes) >= 0);
	} else {
		atomic_add(delta, &obj->oo_nr_reads);
		atomic_add(delta, &cli->cl_pending_r_pages);
		LASSERT(atomic_read(&obj->oo_nr_reads) >= 0);
	}
	OSC_IO_DEBUG(obj, "update pending cmd %d delta %d.\n", cmd, delta);
}

static int osc_makes_hprpc(struct osc_object *obj)
{
	return !list_empty(&obj->oo_hp_exts);
}

static void on_list(struct list_head *item, struct list_head *list,
		    int should_be_on)
{
	if (list_empty(item) && should_be_on)
		list_add_tail(item, list);
	else if (!list_empty(item) && !should_be_on)
		list_del_init(item);
}

/* maintain the osc's cli list membership invariants so that osc_send_oap_rpc
 * can find pages to build into rpcs quickly */
static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc)
{
	if (osc_makes_hprpc(osc)) {
		/* HP rpc */
		on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list, 0);
		on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
	} else {
		on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
		on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list,
			osc_makes_rpc(cli, osc, OBD_BRW_WRITE) ||
			osc_makes_rpc(cli, osc, OBD_BRW_READ));
	}

	on_list(&osc->oo_write_item, &cli->cl_loi_write_list,
		atomic_read(&osc->oo_nr_writes) > 0);

	on_list(&osc->oo_read_item, &cli->cl_loi_read_list,
		atomic_read(&osc->oo_nr_reads) > 0);

	return osc_is_ready(osc);
}

static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
{
	int is_ready;

	client_obd_list_lock(&cli->cl_loi_list_lock);
	is_ready = __osc_list_maint(cli, osc);
	client_obd_list_unlock(&cli->cl_loi_list_lock);

	return is_ready;
}
/* this is trying to propagate async writeback errors back up to the
 * application.  As an async write fails we record the error code for later if
 * the app does an fsync.  As long as errors persist we force future rpcs to be
 * sync so that the app can get a sync error and break the cycle of queueing
 * pages for which writeback will fail. */
static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
			   int rc)
{
	if (rc) {
		if (!ar->ar_rc)
			ar->ar_rc = rc;

		ar->ar_force_sync = 1;
		ar->ar_min_xid = ptlrpc_sample_next_xid();
		return;
	}

	if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
		ar->ar_force_sync = 0;
}

/* this must be called holding the loi list lock to give coverage to exit_cache,
 * async_flag maintenance, and oap_request */
static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
			      struct osc_async_page *oap, int sent, int rc)
{
	struct osc_object *osc = oap->oap_obj;
	struct lov_oinfo *loi = osc->oo_oinfo;
	__u64 xid = 0;

	if (oap->oap_request != NULL) {
		xid = ptlrpc_req_xid(oap->oap_request);
		ptlrpc_req_finished(oap->oap_request);
		oap->oap_request = NULL;
	}

	/* As the transfer for this page is being done, clear the flags */
	spin_lock(&oap->oap_lock);
	oap->oap_async_flags = 0;
	spin_unlock(&oap->oap_lock);
	oap->oap_interrupted = 0;

	if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
		client_obd_list_lock(&cli->cl_loi_list_lock);
		osc_process_ar(&cli->cl_ar, xid, rc);
		osc_process_ar(&loi->loi_ar, xid, rc);
		client_obd_list_unlock(&cli->cl_loi_list_lock);
	}

	rc = osc_completion(env, oap, oap->oap_cmd, rc);
	if (rc)
		CERROR("completion on oap %p obj %p returns %d.\n",
		       oap, osc, rc);
}

/**
 * Try to add extent to one RPC. We need to think about the following things:
 * - # of pages must not be over max_pages_per_rpc
 * - extent must be compatible with previous ones
 */
static int try_to_add_extent_for_io(struct client_obd *cli,
				    struct osc_extent *ext,
				    struct list_head *rpclist,
				    int *pc, unsigned int *max_pages)
{
	struct osc_extent *tmp;
	struct osc_async_page *oap = list_first_entry(&ext->oe_pages,
						      struct osc_async_page,
						      oap_pending_item);

	EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
		ext);

	*max_pages = max(ext->oe_mppr, *max_pages);
	if (*pc + ext->oe_nr_pages > *max_pages)
		return 0;

	list_for_each_entry(tmp, rpclist, oe_link) {
		struct osc_async_page *oap2;

		oap2 = list_first_entry(&tmp->oe_pages, struct osc_async_page,
					oap_pending_item);
		EASSERT(tmp->oe_owner == current, tmp);

		if (overlapped(tmp, ext)) {
			OSC_EXTENT_DUMP(D_ERROR, tmp, "overlapped %p.\n", ext);
			EASSERT(0, ext);
		}

		if (oap2cl_page(oap)->cp_type != oap2cl_page(oap2)->cp_type) {
			CDEBUG(D_CACHE, "Do not permit different type of IO"
					" for a same RPC\n");
			return 0;
		}

		if (tmp->oe_srvlock != ext->oe_srvlock ||
		    !tmp->oe_grants != !ext->oe_grants)
			return 0;

		/* remove break for strict check */
		break;
	}

	*pc += ext->oe_nr_pages;
	list_move_tail(&ext->oe_link, rpclist);
	ext->oe_owner = current;
	return 1;
}
/**
 * In order to prevent multiple ptlrpcd from breaking contiguous extents,
 * get_write_extent() takes all appropriate extents in atomic.
 *
 * The following policy is used to collect extents for IO:
 * 1. Add as many HP extents as possible;
 * 2. Add the first urgent extent in urgent extent list and take it out of
 *    urgent list;
 * 3. Add subsequent extents of this urgent extent;
 * 4. If urgent list is not empty, goto 2;
 * 5. Traverse the extent tree from the 1st extent;
 * 6. Above steps exit if there is no space in this RPC.
 */
static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
{
	struct client_obd *cli = osc_cli(obj);
	struct osc_extent *ext;
	int page_count = 0;
	unsigned int max_pages = cli->cl_max_pages_per_rpc;

	LASSERT(osc_object_is_locked(obj));
	while (!list_empty(&obj->oo_hp_exts)) {
		ext = list_entry(obj->oo_hp_exts.next, struct osc_extent,
				 oe_link);
		LASSERT(ext->oe_state == OES_CACHE);
		if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
					      &max_pages))
			return page_count;
		EASSERT(ext->oe_nr_pages <= max_pages, ext);
	}
	if (page_count == max_pages)
		return page_count;

	while (!list_empty(&obj->oo_urgent_exts)) {
		ext = list_entry(obj->oo_urgent_exts.next,
				 struct osc_extent, oe_link);
		if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
					      &max_pages))
			return page_count;

		if (!ext->oe_intree)
			continue;

		while ((ext = next_extent(ext)) != NULL) {
			if ((ext->oe_state != OES_CACHE) ||
			    (!list_empty(&ext->oe_link) &&
			     ext->oe_owner != NULL))
				continue;

			if (!try_to_add_extent_for_io(cli, ext, rpclist,
						      &page_count, &max_pages))
				return page_count;
		}
	}
	if (page_count == max_pages)
		return page_count;

	ext = first_extent(obj);
	while (ext != NULL) {
		if ((ext->oe_state != OES_CACHE) ||
		    /* this extent may be already in current rpclist */
		    (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) {
			ext = next_extent(ext);
			continue;
		}

		if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
					      &max_pages))
			return page_count;

		ext = next_extent(ext);
	}
	return page_count;
}

static int
osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
		   struct osc_object *osc, pdl_policy_t pol)
{
	LIST_HEAD(rpclist);
	struct osc_extent *ext;
	struct osc_extent *tmp;
	struct osc_extent *first = NULL;
	unsigned int page_count = 0;
	int srvlock = 0;
	int rc = 0;

	LASSERT(osc_object_is_locked(osc));

	page_count = get_write_extents(osc, &rpclist);
	LASSERT(equi(page_count == 0, list_empty(&rpclist)));

	if (list_empty(&rpclist))
		return 0;

	osc_update_pending(osc, OBD_BRW_WRITE, -page_count);

	list_for_each_entry(ext, &rpclist, oe_link) {
		LASSERT(ext->oe_state == OES_CACHE ||
			ext->oe_state == OES_LOCK_DONE);
		if (ext->oe_state == OES_CACHE)
			osc_extent_state_set(ext, OES_LOCKING);
		else
			osc_extent_state_set(ext, OES_RPC);
	}

	/* we're going to grab page lock, so release object lock because
	 * lock order is page lock -> object lock. */
	osc_object_unlock(osc);

	list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) {
		if (ext->oe_state == OES_LOCKING) {
			rc = osc_extent_make_ready(env, ext);
			if (unlikely(rc < 0)) {
				list_del_init(&ext->oe_link);
				osc_extent_finish(env, ext, 0, rc);
				continue;
			}
		}
		if (first == NULL) {
			first = ext;
			srvlock = ext->oe_srvlock;
		} else {
			LASSERT(srvlock == ext->oe_srvlock);
		}
	}

	if (!list_empty(&rpclist)) {
		LASSERT(page_count > 0);
		rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE, pol);
		LASSERT(list_empty(&rpclist));
	}

	osc_object_lock(osc);
	return rc;
}

/**
 * prepare pages for ASYNC io and put pages in send queue.
 *
 * \param cmd OBD_BRW_* macroses
 * \param lop pending pages
 *
 * \return zero if no page added to send queue.
 * \return 1 if pages successfully added to send queue.
 * \return negative on errors.
 */
static int
osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
		  struct osc_object *osc, pdl_policy_t pol)
{
	struct osc_extent *ext;
	struct osc_extent *next;
	LIST_HEAD(rpclist);
	int page_count = 0;
	unsigned int max_pages = cli->cl_max_pages_per_rpc;
	int rc = 0;

	LASSERT(osc_object_is_locked(osc));
	list_for_each_entry_safe(ext, next,
				 &osc->oo_reading_exts, oe_link) {
		EASSERT(ext->oe_state == OES_LOCK_DONE, ext);
		if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count,
					      &max_pages))
			break;
		osc_extent_state_set(ext, OES_RPC);
		EASSERT(ext->oe_nr_pages <= max_pages, ext);
	}
	LASSERT(page_count <= max_pages);

	osc_update_pending(osc, OBD_BRW_READ, -page_count);

	if (!list_empty(&rpclist)) {
		osc_object_unlock(osc);

		LASSERT(page_count > 0);
		rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ, pol);
		LASSERT(list_empty(&rpclist));

		osc_object_lock(osc);
	}
	return rc;
}
2050 #define list_to_obj(list, item) ({ \
2051 struct list_head *__tmp = (list)->next; \
2052 list_del_init(__tmp); \
2053 list_entry(__tmp, struct osc_object, oo_##item); \
2056 /* This is called by osc_check_rpcs() to find which objects have pages that
2057 * we could be sending. These lists are maintained by osc_makes_rpc(). */
2058 static struct osc_object
*osc_next_obj(struct client_obd
*cli
)
2060 /* First return objects that have blocked locks so that they
2061 * will be flushed quickly and other clients can get the lock,
2062 * then objects which have pages ready to be stuffed into RPCs */
2063 if (!list_empty(&cli
->cl_loi_hp_ready_list
))
2064 return list_to_obj(&cli
->cl_loi_hp_ready_list
, hp_ready_item
);
2065 if (!list_empty(&cli
->cl_loi_ready_list
))
2066 return list_to_obj(&cli
->cl_loi_ready_list
, ready_item
);
2068 /* then if we have cache waiters, return all objects with queued
2069 * writes. This is especially important when many small files
2070 * have filled up the cache and not been fired into rpcs because
2071 * they don't pass the nr_pending/object threshold */
2072 if (!list_empty(&cli
->cl_cache_waiters
) &&
2073 !list_empty(&cli
->cl_loi_write_list
))
2074 return list_to_obj(&cli
->cl_loi_write_list
, write_item
);
2076 /* then return all queued objects when we have an invalid import
2077 * so that they get flushed */
2078 if (cli
->cl_import
== NULL
|| cli
->cl_import
->imp_invalid
) {
2079 if (!list_empty(&cli
->cl_loi_write_list
))
2080 return list_to_obj(&cli
->cl_loi_write_list
, write_item
);
2081 if (!list_empty(&cli
->cl_loi_read_list
))
2082 return list_to_obj(&cli
->cl_loi_read_list
, read_item
);
2087 /* called with the loi list lock held */
2088 static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli,
2091     struct osc_object *osc;
2094     while ((osc = osc_next_obj(cli)) != NULL) {
2095         struct cl_object *obj = osc2cl(osc);
2096         struct lu_ref_link link;
2098         OSC_IO_DEBUG(osc, "%lu in flight\n", rpcs_in_flight(cli));
2100         if (osc_max_rpc_in_flight(cli, osc)) {
2101             __osc_list_maint(cli, osc);
2106         client_obd_list_unlock(&cli->cl_loi_list_lock);
2107         lu_object_ref_add_at(&obj->co_lu, &link, "check",
2110         /* attempt some read/write balancing by alternating between
2111          * reads and writes in an object. The makes_rpc checks here
2112          * would be redundant if we were getting read/write work items
2113          * instead of objects. We don't want send_oap_rpc to drain a
2114          * partial read pending queue when we're given this object to
2115          * do io on writes while there are cache waiters. */
2116         osc_object_lock(osc);
2117         if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
2118             rc = osc_send_write_rpc(env, cli, osc, pol);
2120                 CERROR("Write request failed with %d\n", rc);
2122                 /* osc_send_write_rpc failed, mostly because of
2125                  * It can't break here, because if:
2126                  *  - a page was submitted by osc_io_submit, so
2128                  *  - no request in flight
2129                  *  - no subsequent request
2130                  * then the system would be in a live-lock state,
2131                  * because there would be no chance to call
2132                  * osc_io_unplug() and osc_check_rpcs() any
2133                  * more. pdflush can't help in this case,
2134                  * because it might be blocked at grabbing
2135                  * the page lock as mentioned above.
2137                  * Anyway, continue to drain pages. */
2141         if (osc_makes_rpc(cli, osc, OBD_BRW_READ)) {
2142             rc = osc_send_read_rpc(env, cli, osc, pol);
2144                 CERROR("Read request failed with %d\n", rc);
2146         osc_object_unlock(osc);
2148         osc_list_maint(cli, osc);
2149         lu_object_ref_del_at(&obj->co_lu, &link, "check",
2151         cl_object_put(env, obj);
2153         client_obd_list_lock(&cli->cl_loi_list_lock);
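/* Note: osc_check_rpcs() releases cl_loi_list_lock while it services
 * each object (line 2106) and re-takes it at line 2153, so
 * osc_next_obj() always runs under the lock while the actual RPC
 * sends happen outside it; a reference on the object, released via
 * cl_object_put() at line 2151, keeps it alive across that window. */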
2157 static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
2158                           struct osc_object *osc, pdl_policy_t pol, int async)
2162     if (osc != NULL && osc_list_maint(cli, osc) == 0)
2166         /* disable osc_lru_shrink() temporarily to avoid
2167          * potential stack overrun problem. LU-2859 */
2168         atomic_inc(&cli->cl_lru_shrinkers);
2169         client_obd_list_lock(&cli->cl_loi_list_lock);
2170         osc_check_rpcs(env, cli, pol);
2171         client_obd_list_unlock(&cli->cl_loi_list_lock);
2172         atomic_dec(&cli->cl_lru_shrinkers);
2174         CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
2175         LASSERT(cli->cl_writeback_work != NULL);
2176         rc = ptlrpcd_queue_work(cli->cl_writeback_work);
2181 static int osc_io_unplug_async(const struct lu_env *env,
2182                                struct client_obd *cli, struct osc_object *osc)
2184     /* XXX: the policy argument is not actually used. */
2185     return osc_io_unplug0(env, cli, osc, PDL_POLICY_ROUND, 1);
2188 void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
2189                    struct osc_object *osc, pdl_policy_t pol)
2191     (void)osc_io_unplug0(env, cli, osc, pol, 0);
2194 int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
2195                         struct page *page, loff_t offset)
2197     struct obd_export *exp = osc_export(osc);
2198     struct osc_async_page *oap = &ops->ops_oap;
2201         return cfs_size_round(sizeof(*oap));
2203     oap->oap_magic = OAP_MAGIC;
2204     oap->oap_cli = &exp->exp_obd->u.cli;
2207     oap->oap_page = page;
2208     oap->oap_obj_off = offset;
2209     LASSERT(!(offset & ~CFS_PAGE_MASK));
2211     if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
2212         oap->oap_brw_flags = OBD_BRW_NOQUOTA;
2214     INIT_LIST_HEAD(&oap->oap_pending_item);
2215     INIT_LIST_HEAD(&oap->oap_rpc_item);
2217     spin_lock_init(&oap->oap_lock);
2218     CDEBUG(D_INFO, "oap %p page %p obj off %llu\n",
2219            oap, page, oap->oap_obj_off);
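/* A freshly prepared oap thus carries: OAP_MAGIC for later sanity
 * checks, the owning client_obd, the backing page with its
 * page-aligned object offset, empty pending/rpc list heads, and
 * OBD_BRW_NOQUOTA when a CAP_SYS_RESOURCE-capable local client
 * prepares it. */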
2223 int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
2224                        struct osc_page *ops)
2226     struct osc_io *oio = osc_env_io(env);
2227     struct osc_extent *ext = NULL;
2228     struct osc_async_page *oap = &ops->ops_oap;
2229     struct client_obd *cli = oap->oap_cli;
2230     struct osc_object *osc = oap->oap_obj;
2233     int brw_flags = OBD_BRW_ASYNC;
2234     int cmd = OBD_BRW_WRITE;
2235     int need_release = 0;
2238     if (oap->oap_magic != OAP_MAGIC)
2241     if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2244     if (!list_empty(&oap->oap_pending_item) ||
2245         !list_empty(&oap->oap_rpc_item))
2248     /* Set the OBD_BRW_SRVLOCK flag before the page is queued. */
2249     brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
2250     if (!client_is_remote(osc_export(osc)) &&
2251         capable(CFS_CAP_SYS_RESOURCE)) {
2252         brw_flags |= OBD_BRW_NOQUOTA;
2253         cmd |= OBD_BRW_NOQUOTA;
2256     /* check if the file's owner/group is over quota */
2257     if (!(cmd & OBD_BRW_NOQUOTA)) {
2258         struct cl_object *obj;
2259         struct cl_attr *attr;
2260         unsigned int qid[MAXQUOTAS];
2262         obj = cl_object_top(&osc->oo_cl);
2263         attr = &osc_env_info(env)->oti_attr;
2265         cl_object_attr_lock(obj);
2266         rc = cl_object_attr_get(env, obj, attr);
2267         cl_object_attr_unlock(obj);
2269         qid[USRQUOTA] = attr->cat_uid;
2270         qid[GRPQUOTA] = attr->cat_gid;
2271         if (rc == 0 && osc_quota_chkdq(cli, qid) == NO_QUOTA)
2278     oap->oap_page_off = ops->ops_from;
2279     oap->oap_count = ops->ops_to - ops->ops_from;
2280     oap->oap_async_flags = 0;
2281     oap->oap_brw_flags = brw_flags;
2283     OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
2284                  oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
2286     index = oap2cl_page(oap)->cp_index;
2288     /* Add this page into extent by the following steps:
2289      * 1. if there exists an active extent for this IO, mostly this page
2290      *    can be added to the active extent and sometimes we need to
2291      *    expand the extent to accommodate this page;
2292      * 2. otherwise, a new extent will be allocated. */
2294     ext = oio->oi_active;
2295     if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) {
2296         /* one chunk plus extent overhead must be enough to write this
2297          * page */
2298         grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
2299         if (ext->oe_end >= index)
2302         /* it doesn't need any grant to dirty this page */
2303         client_obd_list_lock(&cli->cl_loi_list_lock);
2304         rc = osc_enter_cache_try(cli, oap, grants, 0);
2305         client_obd_list_unlock(&cli->cl_loi_list_lock);
2306         if (rc == 0) { /* try failed */
2309         } else if (ext->oe_end < index) {
2311             /* try to expand this extent */
2312             rc = osc_extent_expand(ext, index, &tmp);
2315                 /* don't free the reserved grant */
2317                 OSC_EXTENT_DUMP(D_CACHE, ext,
2318                                 "expanded for %lu.\n", index);
2319                 osc_unreserve_grant(cli, grants, tmp);
2324     } else if (ext != NULL) {
2325         /* index is located outside of the active extent */
2329         osc_extent_release(env, ext);
2330         oio->oi_active = NULL;
2335         int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
2337         /* try to find a new extent to cover this page */
2338         LASSERT(oio->oi_active == NULL);
2339         /* we may have allocated grant for this page if we failed
2340          * to expand the previous active extent. */
2341         LASSERT(ergo(grants > 0, grants >= tmp));
2345             /* we haven't allocated grant for this page. */
2346             rc = osc_enter_cache(env, cli, oap, tmp);
2353         ext = osc_extent_find(env, osc, index, &tmp);
2355             LASSERT(tmp == grants);
2356             osc_exit_cache(cli, oap);
2360             oio->oi_active = ext;
2364         osc_unreserve_grant(cli, grants, tmp);
2367     LASSERT(ergo(rc == 0, ext != NULL));
2369         EASSERTF(ext->oe_end >= index && ext->oe_start <= index,
2370                  ext, "index = %lu.\n", index);
2371         LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0);
2373         osc_object_lock(osc);
2374         if (ext->oe_nr_pages == 0)
2375             ext->oe_srvlock = ops->ops_srvlock;
2377             LASSERT(ext->oe_srvlock == ops->ops_srvlock);
2379         list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
2380         osc_object_unlock(osc);
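/* Grant handling in the path above, in brief: a page landing inside
 * the active extent reserves one chunk (1 << cl_chunkbits) plus
 * cl_extent_tax up front via osc_enter_cache_try(); per the comment
 * at line 2302, no grant is needed when the page is already covered
 * (oe_end >= index), and a successful osc_extent_expand() apparently
 * returns the unused reservation through osc_unreserve_grant(). */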
2385 int osc_teardown_async_page(const struct lu_env *env,
2386                             struct osc_object *obj, struct osc_page *ops)
2388     struct osc_async_page *oap = &ops->ops_oap;
2389     struct osc_extent *ext = NULL;
2392     LASSERT(oap->oap_magic == OAP_MAGIC);
2394     CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
2395            oap, ops, oap2cl_page(oap)->cp_index);
2397     osc_object_lock(obj);
2398     if (!list_empty(&oap->oap_rpc_item)) {
2399         CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
2401     } else if (!list_empty(&oap->oap_pending_item)) {
2402         ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index);
2403         /* only truncated pages are allowed to be taken out.
2404          * See osc_extent_truncate() and osc_cache_truncate_start()
2405          * for details. */
2406         if (ext != NULL && ext->oe_state != OES_TRUNC) {
2407             OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
2408                             oap2cl_page(oap)->cp_index);
2412     osc_object_unlock(obj);
2414         osc_extent_put(env, ext);
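/* So teardown has three outcomes: the page is already queued in an
 * RPC (oap_rpc_item busy, nothing to take out), it is pending inside
 * an extent (legal only while that extent sits in OES_TRUNC), or it
 * belongs to no extent at all and there is nothing to do. */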
2419  * This is called when a page is picked up by the kernel to write out.
2421  * We should find the corresponding extent and add the whole extent
2422  * into the urgent list. The extent may be being truncated or used; handle it
2423  * with care.
2424  */
2425 int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
2426                          struct osc_page *ops)
2428     struct osc_extent *ext = NULL;
2429     struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj);
2430     struct cl_page *cp = ops->ops_cl.cpl_page;
2431     pgoff_t index = cp->cp_index;
2432     struct osc_async_page *oap = &ops->ops_oap;
2433     bool unplug = false;
2436     osc_object_lock(obj);
2437     ext = osc_extent_lookup(obj, index);
2439         osc_extent_tree_dump(D_ERROR, obj);
2440         LASSERTF(0, "page index %lu is NOT covered.\n", index);
2443     switch (ext->oe_state) {
2446         CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(cp),
2447                       "flush an in-rpc page?\n");
2451         /* If we know this extent is being written out, we should abort
2452          * so that the writer can make this page ready. Otherwise there
2453          * is a deadlock problem, because another process can wait on the
2454          * page writeback bit while holding the page lock, and meanwhile
2455          * vvp_page_make_ready() needs to grab the page lock before
2456          * really sending the RPC. */
2458         /* race with truncate, page will be redirtied */
2460         /* The extent is active, so we need to abort and let the caller
2461          * re-dirty the page. If we continued on here, and we were the
2462          * one making the extent active, we could deadlock waiting for
2463          * the page writeback to clear, but it won't because the extent
2464          * is active and won't be written out. */
2471     rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE);
2475     spin_lock(&oap->oap_lock);
2476     oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
2477     spin_unlock(&oap->oap_lock);
2479     if (memory_pressure_get())
2480         ext->oe_memalloc = 1;
2483     if (ext->oe_state == OES_CACHE) {
2484         OSC_EXTENT_DUMP(D_CACHE, ext,
2485                         "flush page %p make it urgent.\n", oap);
2486         if (list_empty(&ext->oe_link))
2487             list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
2493     osc_object_unlock(obj);
2494     osc_extent_put(env, ext);
2496         osc_io_unplug_async(env, osc_cli(obj), obj);
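/* In the switch above, flushing an in-rpc page is flagged as an
 * error, a truncate race leaves the page to be redirtied, and a
 * being-written or active extent makes the caller abort and retry;
 * otherwise the oap is marked ASYNC_READY|ASYNC_URGENT, and an extent
 * still in OES_CACHE is additionally promoted to oo_urgent_exts
 * before the client is unplugged asynchronously. */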
2501  * This is called when a sync waiter receives an interruption. Its job is to
2502  * get the caller woken as soon as possible. If its page hasn't been put in an
2503  * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
2504  * desiring interruption, which will forcefully complete the rpc once the rpc
2507 int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
2509     struct osc_async_page *oap = &ops->ops_oap;
2510     struct osc_object *obj = oap->oap_obj;
2511     struct client_obd *cli = osc_cli(obj);
2512     struct osc_extent *ext;
2513     struct osc_extent *found = NULL;
2514     struct list_head *plist;
2515     pgoff_t index = oap2cl_page(oap)->cp_index;
2519     LASSERT(!oap->oap_interrupted);
2520     oap->oap_interrupted = 1;
2522     /* Find out the caching extent */
2523     osc_object_lock(obj);
2524     if (oap->oap_cmd & OBD_BRW_WRITE) {
2525         plist = &obj->oo_urgent_exts;
2526         cmd = OBD_BRW_WRITE;
2528         plist = &obj->oo_reading_exts;
2531     list_for_each_entry(ext, plist, oe_link) {
2532         if (ext->oe_start <= index && ext->oe_end >= index) {
2533             LASSERT(ext->oe_state == OES_LOCK_DONE);
2534             /* An extent in OES_LOCK_DONE state already holds
2535              * a refcount for the RPC. */
2536             found = osc_extent_get(ext);
2540     if (found != NULL) {
2541         list_del_init(&found->oe_link);
2542         osc_update_pending(obj, cmd, -found->oe_nr_pages);
2543         osc_object_unlock(obj);
2545         osc_extent_finish(env, found, 0, -EINTR);
2546         osc_extent_put(env, found);
2549         osc_object_unlock(obj);
2550         /* ok, it's been put in an rpc. only one oap gets a request
2552         if (oap->oap_request != NULL) {
2553             ptlrpc_mark_interrupted(oap->oap_request);
2554             ptlrpcd_wake(oap->oap_request);
2555             ptlrpc_req_finished(oap->oap_request);
2556             oap->oap_request = NULL;
2560     osc_list_maint(cli, obj);
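/* Cancellation therefore has two paths: an extent still queued as
 * OES_LOCK_DONE on the urgent/reading list is pulled off and finished
 * immediately with -EINTR, while an oap that already owns a request
 * has its RPC marked interrupted and ptlrpcd woken so the RPC itself
 * completes the pages. */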
2564 int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
2565                          struct list_head *list, int cmd, int brw_flags)
2567     struct client_obd *cli = osc_cli(obj);
2568     struct osc_extent *ext;
2569     struct osc_async_page *oap, *tmp;
2571     int mppr = cli->cl_max_pages_per_rpc;
2572     pgoff_t start = CL_PAGE_EOF;
2575     list_for_each_entry(oap, list, oap_pending_item) {
2576         struct cl_page *cp = oap2cl_page(oap);
2577         if (cp->cp_index > end)
2579         if (cp->cp_index < start)
2580             start = cp->cp_index;
2582         mppr <<= (page_count > mppr);
2585     ext = osc_extent_alloc(obj);
2587         list_for_each_entry_safe(oap, tmp, list, oap_pending_item) {
2588             list_del_init(&oap->oap_pending_item);
2589             osc_ap_completion(env, cli, oap, 0, -ENOMEM);
2594     ext->oe_rw = !!(cmd & OBD_BRW_READ);
2596     ext->oe_start = start;
2597     ext->oe_end = ext->oe_max_end = end;
2599     ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
2600     ext->oe_nr_pages = page_count;
2601     ext->oe_mppr = mppr;
2602     list_splice_init(list, &ext->oe_pages);
2604     osc_object_lock(obj);
2605     /* Reuse the initial refcount for RPC, don't drop it */
2606     osc_extent_state_set(ext, OES_LOCK_DONE);
2607     if (cmd & OBD_BRW_WRITE) {
2608         list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
2609         osc_update_pending(obj, OBD_BRW_WRITE, page_count);
2611         list_add_tail(&ext->oe_link, &obj->oo_reading_exts);
2612         osc_update_pending(obj, OBD_BRW_READ, page_count);
2614     osc_object_unlock(obj);
2616     osc_io_unplug_async(env, cli, obj);
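/* Note on oe_mppr above: mppr is shifted left by one for each page
 * once page_count exceeds it, so it appears to end up as
 * cl_max_pages_per_rpc scaled by a power of two large enough to
 * cover the whole list of sync pages. */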
2621  * Called by osc_io_setattr_start() to freeze and destroy covering extents.
2623 int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
2624                              struct osc_object *obj, __u64 size)
2626     struct client_obd *cli = osc_cli(obj);
2627     struct osc_extent *ext;
2628     struct osc_extent *waiting = NULL;
2634     /* pages with index greater than or equal to 'index' will be truncated. */
2635     index = cl_index(osc2cl(obj), size);
2636     partial = size > cl_offset(osc2cl(obj), index);
2639     osc_object_lock(obj);
2640     ext = osc_extent_search(obj, index);
2642         ext = first_extent(obj);
2643     else if (ext->oe_end < index)
2644         ext = next_extent(ext);
2645     while (ext != NULL) {
2646         EASSERT(ext->oe_state != OES_TRUNC, ext);
2648         if (ext->oe_state > OES_CACHE || ext->oe_urgent) {
2649             /* if ext is in urgent state, there must exist
2650              * a page that has already been flushed by write_page().
2651              * We have to wait for this extent because we can't
2652              * truncate that page. */
2653             LASSERT(!ext->oe_hp);
2654             OSC_EXTENT_DUMP(D_CACHE, ext,
2655                             "waiting for busy extent\n");
2656             waiting = osc_extent_get(ext);
2660         OSC_EXTENT_DUMP(D_CACHE, ext, "try to trunc:%llu.\n", size);
2662         osc_extent_get(ext);
2663         if (ext->oe_state == OES_ACTIVE) {
2664             /* we grab the inode mutex for the write path, but
2665              * release it before releasing the extent (in osc_io_end()),
2666              * so there is a race window where an extent is still
2667              * in OES_ACTIVE when truncate starts. */
2668             LASSERT(!ext->oe_trunc_pending);
2669             ext->oe_trunc_pending = 1;
2671             EASSERT(ext->oe_state == OES_CACHE, ext);
2672             osc_extent_state_set(ext, OES_TRUNC);
2673             osc_update_pending(obj, OBD_BRW_WRITE,
2676         EASSERT(list_empty(&ext->oe_link), ext);
2677         list_add_tail(&ext->oe_link, &list);
2679         ext = next_extent(ext);
2681     osc_object_unlock(obj);
2683     osc_list_maint(cli, obj);
2685     while (!list_empty(&list)) {
2688         ext = list_entry(list.next, struct osc_extent, oe_link);
2689         list_del_init(&ext->oe_link);
2691         /* the extent may be in OES_ACTIVE state because the inode mutex
2692          * is released before osc_io_end() in the file write case */
2693         if (ext->oe_state != OES_TRUNC)
2694             osc_extent_wait(env, ext, OES_TRUNC);
2696         rc = osc_extent_truncate(ext, index, partial);
2701             OSC_EXTENT_DUMP(D_ERROR, ext,
2702                             "truncate error %d\n", rc);
2703         } else if (ext->oe_nr_pages == 0) {
2704             osc_extent_remove(ext);
2706             /* this must be an overlapped extent, which means only
2707              * part of the pages in this extent have been truncated.
2708              */
2709             EASSERTF(ext->oe_start <= index, ext,
2710                      "trunc index = %lu/%d.\n", index, partial);
2711             /* fix index to skip this partially truncated extent */
2712             index = ext->oe_end + 1;
2715             /* we need to hold this extent in OES_TRUNC state so
2716              * that no writeback will happen. This is to avoid
2718             LASSERT(oio->oi_trunc == NULL);
2719             oio->oi_trunc = osc_extent_get(ext);
2720             OSC_EXTENT_DUMP(D_CACHE, ext,
2721                             "trunc at %llu\n", size);
2723         osc_extent_put(env, ext);
2725     if (waiting != NULL) {
2728         /* ignore the result of osc_extent_wait; the write initiator
2729          * should take care of it. */
2730         rc = osc_extent_wait(env, waiting, OES_INV);
2732             OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc);
2734         osc_extent_put(env, waiting);
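/* The overall protocol of osc_cache_truncate_start(): every covering
 * extent is frozen into OES_TRUNC (busy or urgent extents are waited
 * out first, ACTIVE ones via oe_trunc_pending), each is truncated,
 * emptied extents are removed, and a partially truncated extent is
 * pinned in oio->oi_trunc until osc_cache_truncate_end() puts it
 * back into cache. */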
2742  * Called after osc_io_setattr_end to add oio->oi_trunc back to cache.
2744 void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio,
2745                             struct osc_object *obj)
2747     struct osc_extent *ext = oio->oi_trunc;
2749     oio->oi_trunc = NULL;
2751         bool unplug = false;
2753         EASSERT(ext->oe_nr_pages > 0, ext);
2754         EASSERT(ext->oe_state == OES_TRUNC, ext);
2755         EASSERT(!ext->oe_urgent, ext);
2757         OSC_EXTENT_DUMP(D_CACHE, ext, "trunc -> cache.\n");
2758         osc_object_lock(obj);
2759         osc_extent_state_set(ext, OES_CACHE);
2760         if (ext->oe_fsync_wait && !ext->oe_urgent) {
2762             list_move_tail(&ext->oe_link, &obj->oo_urgent_exts);
2765         osc_update_pending(obj, OBD_BRW_WRITE, ext->oe_nr_pages);
2766         osc_object_unlock(obj);
2767         osc_extent_put(env, ext);
2770         osc_io_unplug_async(env, osc_cli(obj), obj);
2775  * Wait for extents in a specific range to be written out.
2776  * The caller must have called osc_cache_writeback_range() to issue IO,
2777  * otherwise it will take a long time for this function to finish.
2779  * The caller must hold the inode mutex, or cancel the exclusive DLM lock, so
2780  * that nobody else can dirty this range of the file while we're waiting for
2781  * extents to be written.
2783 int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
2784                          pgoff_t start, pgoff_t end)
2786     struct osc_extent *ext;
2787     pgoff_t index = start;
2791     osc_object_lock(obj);
2792     ext = osc_extent_search(obj, index);
2794         ext = first_extent(obj);
2795     else if (ext->oe_end < index)
2796         ext = next_extent(ext);
2797     while (ext != NULL) {
2800         if (ext->oe_start > end)
2803         if (!ext->oe_fsync_wait) {
2804             ext = next_extent(ext);
2808         EASSERT(ergo(ext->oe_state == OES_CACHE,
2809                      ext->oe_hp || ext->oe_urgent), ext);
2810         EASSERT(ergo(ext->oe_state == OES_ACTIVE,
2811                      !ext->oe_hp && ext->oe_urgent), ext);
2813         index = ext->oe_end + 1;
2814         osc_extent_get(ext);
2815         osc_object_unlock(obj);
2817         rc = osc_extent_wait(env, ext, OES_INV);
2820         osc_extent_put(env, ext);
2823     osc_object_unlock(obj);
2825     OSC_IO_DEBUG(obj, "sync file range.\n");
2830  * Called to write out a range of an osc object.
2832  * @hp     : should be set if this is caused by lock cancellation;
2833  * @discard: is set if dirty pages should be dropped; the file will be deleted
2834  *           or truncated, which implies there are no partially discarded extents.
2836  * Return how many pages will be issued, or an error code on error.
2838 int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
2839                               pgoff_t start, pgoff_t end, int hp, int discard)
2841     struct osc_extent *ext;
2842     LIST_HEAD(discard_list);
2843     bool unplug = false;
2846     osc_object_lock(obj);
2847     ext = osc_extent_search(obj, start);
2849         ext = first_extent(obj);
2850     else if (ext->oe_end < start)
2851         ext = next_extent(ext);
2852     while (ext != NULL) {
2853         if (ext->oe_start > end)
2856         ext->oe_fsync_wait = 1;
2857         switch (ext->oe_state) {
2859             result += ext->oe_nr_pages;
2861                 struct list_head *list = NULL;
2863                 EASSERT(!ext->oe_hp, ext);
2865                     list = &obj->oo_hp_exts;
2866                 } else if (!ext->oe_urgent) {
2868                     list = &obj->oo_urgent_exts;
2871                     list_move_tail(&ext->oe_link, list);
2874                 /* the only discarder is lock cancellation, so
2875                  * [start, end] must contain this extent */
2876                 EASSERT(ext->oe_start >= start &&
2877                         ext->oe_max_end <= end, ext);
2878                 osc_extent_state_set(ext, OES_LOCKING);
2879                 ext->oe_owner = current;
2880                 list_move_tail(&ext->oe_link,
2882                 osc_update_pending(obj, OBD_BRW_WRITE,
2887             /* It's pretty bad to wait for ACTIVE extents, because
2888              * we don't know how long we will wait for them to be
2889              * flushed, since they may be blocked waiting for more
2890              * grants. We do this for the correctness of fsync. */
2891             LASSERT(hp == 0 && discard == 0);
2895             /* this extent is being truncated, so nothing can be done
2896              * for it now. It will be made urgent once truncate
2897              * finishes in osc_cache_truncate_end(). */
2901         ext = next_extent(ext);
2903     osc_object_unlock(obj);
2905     LASSERT(ergo(!discard, list_empty(&discard_list)));
2906     if (!list_empty(&discard_list)) {
2907         struct osc_extent *tmp;
2910         osc_list_maint(osc_cli(obj), obj);
2911         list_for_each_entry_safe(ext, tmp, &discard_list, oe_link) {
2912             list_del_init(&ext->oe_link);
2913             EASSERT(ext->oe_state == OES_LOCKING, ext);
2915             /* Discard caching pages. We don't actually write this
2916              * extent out, but we complete it as if we did. */
2917             rc = osc_extent_make_ready(env, ext);
2918             if (unlikely(rc < 0)) {
2919                 OSC_EXTENT_DUMP(D_ERROR, ext,
2920                                 "make_ready returned %d\n", rc);
2925             /* finish the extent as if the pages were sent */
2926             osc_extent_finish(env, ext, 0, 0);
2931         osc_io_unplug(env, osc_cli(obj), obj, PDL_POLICY_ROUND);
2933     if (hp || discard) {
2935         rc = osc_cache_wait_range(env, obj, start, end);
2936         if (result >= 0 && rc < 0)
2940     OSC_IO_DEBUG(obj, "cache page out.\n");