4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * Implementation of cl_io for LOV layer.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
42 #define DEBUG_SUBSYSTEM S_LOV
44 #include "lov_cl_internal.h"
/* Begin using a per-stripe sub-io; paired with lov_sub_exit().
 * NOTE(review): the function body was elided by this extraction; upstream it
 * only adjusts a re-entry counter on @sub — confirm against the original. */
static inline void lov_sub_enter(struct lov_io_sub *sub)
/* End use of a per-stripe sub-io; paired with lov_sub_enter().
 * NOTE(review): the function body was elided by this extraction — confirm
 * against the original file. */
static inline void lov_sub_exit(struct lov_io_sub *sub)
/*
 * Tear down one per-stripe sub-io: finalize the child cl_io if it was
 * initialized, reset the single-subio index if this stripe owned it, and
 * release the sub-environment unless it was borrowed from the device's
 * emergency pool (see lov_io_sub_init()).
 *
 * NOTE(review): several original lines (braces, a free of sub->sub_io,
 * pointer resets) were elided by this extraction; gap markers below.
 */
static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
                            struct lov_io_sub *sub)
        if (sub->sub_io != NULL) {
                if (sub->sub_io_initialized) {
                        /* [elided: orig. l.64 — presumably lov_sub_enter(sub)] */
                        cl_io_fini(sub->sub_env, sub->sub_io);
                        /* [elided: orig. l.66 — presumably lov_sub_exit(sub)] */
                        sub->sub_io_initialized = 0;
                        lio->lis_active_subios--;       /* one fewer live sub-io */
                /* [elided: orig. l.69 — closing brace] */
                if (sub->sub_stripe == lio->lis_single_subio_index)
                        lio->lis_single_subio_index = -1;
                else if (!sub->sub_borrowed)
                        /* [elided: orig. l.73-75 — presumably free of
                         * sub->sub_io and sub->sub_io = NULL] */
        if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) {
                /* borrowed envs belong to the device emergency pool — never put */
                if (!sub->sub_borrowed)
                        cl_env_put(sub->sub_env, &sub->sub_refcheck);
                /* [elided: orig. l.79+ — presumably sub->sub_env = NULL and
                 * closing braces] */
/*
 * Copy io-type-specific state from the parent (top-level) cl_io into a
 * per-stripe sub-io, translating file-level offsets into in-stripe offsets
 * where needed (truncate size, fault index, read/write extent).
 *
 * NOTE(review): the `case CIT_*:' labels, `break' statements and closing
 * braces of this switch were elided by the extraction; the hypothesized
 * labels below are inferred from the fields touched — confirm upstream.
 */
static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
                               int stripe, loff_t start, loff_t end)
        struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
        struct cl_io *parent = lio->lis_cl.cis_io;

        switch (io->ci_type) {
        /* [elided: orig. l.90 — presumably `case CIT_SETATTR: {'] */
                io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
                io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
                io->u.ci_setattr.sa_capa = parent->u.ci_setattr.sa_capa;
                if (cl_io_is_trunc(io)) {
                        loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;

                        /* map the file-level truncate size onto this stripe */
                        new_size = lov_size_to_stripe(lsm, new_size, stripe);
                        io->u.ci_setattr.sa_attr.lvb_size = new_size;
        /* [elided: orig. l.99-102 — presumably break and `case CIT_FAULT: {'] */
                struct cl_object *obj = parent->ci_obj;
                loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);

                io->u.ci_fault = parent->u.ci_fault;
                /* translate fault offset to the stripe-local page index */
                off = lov_size_to_stripe(lsm, off, stripe);
                io->u.ci_fault.ft_index = cl_index(obj, off);
        /* [elided: orig. l.109-111 — presumably break and `case CIT_FSYNC: {'] */
                io->u.ci_fsync.fi_start = start;
                io->u.ci_fsync.fi_end = end;
                io->u.ci_fsync.fi_capa = parent->u.ci_fsync.fi_capa;
                io->u.ci_fsync.fi_fid = parent->u.ci_fsync.fi_fid;
                io->u.ci_fsync.fi_mode = parent->u.ci_fsync.fi_mode;
        /* [elided: orig. l.117-120 — presumably break and the
         * `case CIT_READ:' / `case CIT_WRITE:' labels] */
                io->u.ci_wr.wr_sync = cl_io_is_sync_write(parent);
                if (cl_io_is_append(parent)) {
                        io->u.ci_wr.wr_append = 1;
                /* [elided: orig. l.124 — presumably `} else {'] */
                io->u.ci_rw.crw_pos = start;
                io->u.ci_rw.crw_count = end - start;
        /* [elided: remaining lines of the switch and function] */
/*
 * Initialize a per-stripe sub-io: obtain an environment and a child cl_io —
 * borrowing both from the device's preallocated emergency pool when memory
 * is frozen during pageout (lis_mem_frozen) — wire the child to the parent
 * io, and run cl_io_sub_init() against the stripe's sub-object.  On failure
 * the partially-initialized sub-io is torn down via lov_io_sub_fini().
 */
static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
                           struct lov_io_sub *sub)
        struct lov_object *lov = lio->lis_object;
        struct lov_device *ld = lu2lov_dev(lov2cl(lov)->co_lu.lo_dev);
        struct cl_io *sub_io;
        struct cl_object *sub_obj;
        struct cl_io *io = lio->lis_cl.cis_io;
        /* [elided: orig. l.143 — presumably `int result;' used below] */
        int stripe = sub->sub_stripe;

        LASSERT(sub->sub_io == NULL);
        LASSERT(sub->sub_env == NULL);
        LASSERT(sub->sub_stripe < lio->lis_stripe_count);

        /* a stripe with no backing sub-object cannot do io */
        if (unlikely(lov_r0(lov)->lo_sub[stripe] == NULL))
                /* [elided: orig. l.152-153 — error return] */

        sub->sub_io_initialized = 0;
        sub->sub_borrowed = 0;

        if (lio->lis_mem_frozen) {
                /* memory-cleansing path: use preallocated emergency resources
                 * instead of allocating; ld_mutex must already be held */
                LASSERT(mutex_is_locked(&ld->ld_mutex));
                sub->sub_io = &ld->ld_emrg[stripe]->emrg_subio;
                sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
                sub->sub_borrowed = 1;
        /* [elided: orig. l.163-165 — presumably `} else {' and a
         * `void *cookie;' declaration] */
                /* obtain new environment */
                cookie = cl_env_reenter();
                sub->sub_env = cl_env_get(&sub->sub_refcheck);
                cl_env_reexit(cookie);
                if (IS_ERR(sub->sub_env))
                        result = PTR_ERR(sub->sub_env);

                /* [elided: orig. l.172-174 — presumably `if (result == 0) {'] */
                /*
                 * First sub-io. Use ->lis_single_subio to
                 * avoid dynamic allocation.
                 */
                if (lio->lis_active_subios == 0) {
                        sub->sub_io = &lio->lis_single_subio;
                        lio->lis_single_subio_index = stripe;
                /* [elided: orig. l.181 — presumably `} else {'] */
                        sub->sub_io = kzalloc(sizeof(*sub->sub_io),
                        /* [elided: orig. l.183 — gfp flags argument] */
                        if (sub->sub_io == NULL)
                                /* [elided: orig. l.185-189 — presumably -ENOMEM
                                 * handling and closing braces] */

        /* [elided: orig. l.190 — presumably `if (result == 0) {'] */
        sub_obj = lovsub2cl(lov_r0(lov)->lo_sub[stripe]);
        sub_io = sub->sub_io;

        sub_io->ci_obj = sub_obj;
        sub_io->ci_result = 0;
        sub_io->ci_parent = io;
        sub_io->ci_lockreq = io->ci_lockreq;
        sub_io->ci_type = io->ci_type;
        sub_io->ci_no_srvlock = io->ci_no_srvlock;
        sub_io->ci_noatime = io->ci_noatime;

        /* [elided: orig. l.203 — presumably lov_sub_enter(sub)] */
        result = cl_io_sub_init(sub->sub_env, sub_io,
                                io->ci_type, sub_obj);
        /* [elided: orig. l.206-207 — presumably lov_sub_exit() and a
         * `if (result >= 0) {' success guard] */
        lio->lis_active_subios++;
        sub->sub_io_initialized = 1;
        /* [elided: orig. l.210-213 — presumably result reset, closing braces,
         * and an `if (result != 0)' failure guard] */
        lov_io_sub_fini(env, lio, sub);
        /* [elided: orig. l.215+ — return statement] */
/*
 * Look up the sub-io for @stripe, lazily initializing it on first use.
 * NOTE(review): the declaration of the rc variable, the else-branch, and
 * the tail (error conversion and return of the sub pointer) were elided by
 * this extraction — confirm against the original.
 */
struct lov_io_sub *lov_sub_get(const struct lu_env *env,
                               struct lov_io *lio, int stripe)
        struct lov_io_sub *sub = &lio->lis_subs[stripe];

        LASSERT(stripe < lio->lis_stripe_count);

        if (!sub->sub_io_initialized) {
                sub->sub_stripe = stripe;
                rc = lov_io_sub_init(env, lio, sub);
        /* [elided: orig. l.229+ — presumably else lov_sub_enter(sub),
         * error conversion to ERR_PTR, and `return sub;'] */
/* Release a sub-io obtained with lov_sub_get().
 * NOTE(review): the function body was elided by this extraction; upstream it
 * is the lov_sub_exit() counterpart — confirm against the original. */
void lov_sub_put(struct lov_io_sub *sub)
243 /*****************************************************************************
/*
 * Return the stripe index a cl_page belongs to, by locating the lovsub
 * object in the page's child object header.
 */
static int lov_page_stripe(const struct cl_page *page)
        struct lovsub_object *subobj;

        /* [elided: orig. l.253 — presumably `subobj = lu2lovsub(' opening
         * the call completed on the next two lines] */
                lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
                                 &lovsub_device_type));
        LASSERT(subobj != NULL);
        return subobj->lso_index;
/*
 * Map a page slice to the sub-io of the stripe that owns its page.
 * Returns the (possibly just-initialized) lov_io_sub via lov_sub_get().
 */
struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
                                  const struct cl_page_slice *slice)
        struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
        struct cl_page *page = slice->cpl_page;
        /* [elided: orig. l.265-266 — presumably `int stripe;'] */

        LASSERT(lio->lis_cl.cis_io != NULL);
        LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object);
        LASSERT(lsm != NULL);
        LASSERT(lio->lis_nr_subios > 0);

        stripe = lov_page_stripe(page);
        return lov_sub_get(env, lio, stripe);
/*
 * Allocate the per-stripe sub-io array (lis_subs) for this lov_io and reset
 * the bookkeeping fields.  Sized by the layout's stripe count.
 * NOTE(review): the rest of the parameter list (orig. l.278-279), the
 * assignment target of the allocation, its gfp argument, and the
 * failure/return tail were elided by this extraction.
 */
static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
        /* [elided: orig. l.278-279 — remaining parameter(s), presumably
         * `struct cl_io *io)'] */
        struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;

        LASSERT(lio->lis_object != NULL);

        /*
         * Need to be optimized, we can't afford to allocate a piece of memory
         * when writing a page. -jay
         */
        /* [elided: orig. l.289 — presumably `lio->lis_subs ='] */
                libcfs_kvzalloc(lsm->lsm_stripe_count *
                                sizeof(lio->lis_subs[0]),
                                /* [elided: orig. l.292 — gfp flags] */
        if (lio->lis_subs != NULL) {
                lio->lis_nr_subios = lio->lis_stripe_count;
                lio->lis_single_subio_index = -1;
                lio->lis_active_subios = 0;
        /* [elided: orig. l.297+ — failure branch and return] */
/*
 * Record the object and compute the file-level [lis_pos, lis_endpos) extent
 * covered by this io, per io type.  For appending writes and setattr the
 * end is forced to OBD_OBJECT_EOF.
 *
 * NOTE(review): the `case CIT_*:' labels, `break' statements and closing
 * braces of this switch were elided by the extraction; hypothesized labels
 * are inferred from the fields touched — confirm upstream.
 */
static void lov_io_slice_init(struct lov_io *lio,
                              struct lov_object *obj, struct cl_io *io)
        lio->lis_object = obj;

        LASSERT(obj->lo_lsm != NULL);
        lio->lis_stripe_count = obj->lo_lsm->lsm_stripe_count;

        switch (io->ci_type) {
        /* [elided: orig. l.313-314 — presumably `case CIT_READ:' and
         * `case CIT_WRITE:'] */
                lio->lis_pos = io->u.ci_rw.crw_pos;
                lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
                lio->lis_io_endpos = lio->lis_endpos;
                if (cl_io_is_append(io)) {
                        LASSERT(io->ci_type == CIT_WRITE);
                        /* [elided: orig. l.320 — presumably zeroing lis_pos] */
                        lio->lis_endpos = OBD_OBJECT_EOF;
        /* [elided: orig. l.322-325 — presumably closing braces, break and
         * `case CIT_SETATTR:'] */
                if (cl_io_is_trunc(io))
                        lio->lis_pos = io->u.ci_setattr.sa_attr.lvb_size;
                /* [elided: orig. l.328-329 — presumably the else branch] */
                lio->lis_endpos = OBD_OBJECT_EOF;
        /* [elided: orig. l.331-333 — presumably break and `case CIT_FAULT: {'] */
                pgoff_t index = io->u.ci_fault.ft_index;
                /* a fault covers exactly one page */
                lio->lis_pos = cl_offset(io->ci_obj, index);
                lio->lis_endpos = cl_offset(io->ci_obj, index + 1);
        /* [elided: orig. l.337-340 — presumably break and `case CIT_FSYNC: {'] */
                lio->lis_pos = io->u.ci_fsync.fi_start;
                lio->lis_endpos = io->u.ci_fsync.fi_end;
        /* [elided: orig. l.343-347 — presumably break and `case CIT_MISC:'] */
                lio->lis_endpos = OBD_OBJECT_EOF;
        /* [elided: orig. l.349+ — presumably break, default case, closing] */
/*
 * cio_fini handler: finalize every sub-io, free the sub-io array, and drop
 * this io's reference on the object's active-io count, waking any waiter
 * (e.g. layout change) when it reaches zero.
 */
static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_object *lov = cl2lov(ios->cis_obj);
        /* [elided: orig. l.360-361 — presumably `int i;'] */
        if (lio->lis_subs != NULL) {
                for (i = 0; i < lio->lis_nr_subios; i++)
                        lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
                kvfree(lio->lis_subs);
                lio->lis_nr_subios = 0;
        /* [elided: orig. l.367-368 — closing brace] */
        LASSERT(atomic_read(&lov->lo_active_ios) > 0);
        if (atomic_dec_and_test(&lov->lo_active_ios))
                wake_up_all(&lov->lo_waitq);
/*
 * Adjust an offset by @delta unless it is the EOF sentinel, which must stay
 * untouched.  NOTE(review): the adjustment statement and return were elided
 * by this extraction — presumably `val += delta; return val;'.
 */
static u64 lov_offset_mod(u64 val, int delta)
        if (val != OBD_OBJECT_EOF)
/*
 * cio_iter_init handler: for each stripe intersecting the io extent,
 * initialize its sub-io, inherit the parent state into it, start its
 * iteration, and link it onto the active list.  Stripes without a backing
 * sub-object make data ios fail; other io types skip them.
 */
static int lov_io_iter_init(const struct lu_env *env,
                            const struct cl_io_slice *ios)
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
        struct lov_io_sub *sub;
        /* [elided: orig. l.387-392 — presumably declarations of endpos,
         * start, end, stripe and rc] */
        endpos = lov_offset_mod(lio->lis_endpos, -1);
        for (stripe = 0; stripe < lio->lis_stripe_count; stripe++) {
                if (!lov_stripe_intersects(lsm, stripe, lio->lis_pos,
                                           endpos, &start, &end))
                        /* [elided: orig. l.397 — presumably continue] */

                /* a hole in the layout: fatal for data ios, skipped otherwise */
                if (unlikely(lov_r0(lio->lis_object)->lo_sub[stripe] == NULL)) {
                        if (ios->cis_io->ci_type == CIT_READ ||
                            ios->cis_io->ci_type == CIT_WRITE ||
                            ios->cis_io->ci_type == CIT_FAULT)
                                /* [elided: orig. l.403 — error return] */
                        /* [elided: orig. l.405-407 — presumably continue and
                         * closing brace] */
                end = lov_offset_mod(end, 1);
                sub = lov_sub_get(env, lio, stripe);
                /* [elided: orig. l.410 — presumably an IS_ERR(sub) guard] */
                lov_io_sub_inherit(sub->sub_io, lio, stripe,
                /* [elided: orig. l.412 — remaining args: start, end] */
                rc = cl_io_iter_init(sub->sub_env, sub->sub_io);
                /* [elided: orig. l.414 — presumably lov_sub_put(sub)] */
                CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
                /* [elided: orig. l.416-420 — debug args and rc handling] */
                list_add_tail(&sub->sub_linkage, &lio->lis_active);
        /* [elided: orig. l.422+ — loop close and return] */
/*
 * cio_iter_init for read/write: shrink the current chunk so it does not
 * cross a stripe boundary (so each iteration touches exactly one stripe),
 * set ci_continue if more of the io remains, then delegate to
 * lov_io_iter_init().  Appending writes and the single-stripe case skip the
 * shrinking.
 */
static int lov_io_rw_iter_init(const struct lu_env *env,
                               const struct cl_io_slice *ios)
        struct lov_io *lio = cl2lov_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
        __u64 start = io->u.ci_rw.crw_pos;
        /* [elided: orig. l.435 — presumably `loff_t next;'] */
        unsigned long ssize = lsm->lsm_stripe_size;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        /* fast path for common case. */
        if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
                /* start becomes the stripe index containing crw_pos */
                lov_do_div64(start, ssize);
                next = (start + 1) * ssize;
                if (next <= start * ssize)
                        /* [elided: orig. l.446-447 — overflow clamp of next] */
                io->ci_continue = next < lio->lis_io_endpos;
                io->u.ci_rw.crw_count = min_t(loff_t, lio->lis_io_endpos,
                                              next) - io->u.ci_rw.crw_pos;
                lio->lis_pos = io->u.ci_rw.crw_pos;
                lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
                CDEBUG(D_VFSTRACE, "stripe: %llu chunk: [%llu, %llu) %llu\n",
                       (__u64)start, lio->lis_pos, lio->lis_endpos,
                       (__u64)lio->lis_io_endpos);
        /* [elided: orig. l.456-457 — closing brace] */
        /*
         * XXX The following call should be optimized: we know, that
         * [lio->lis_pos, lio->lis_endpos) intersects with exactly one stripe.
         */
        return lov_io_iter_init(env, ios);
/*
 * Apply @iofunc to every active sub-io, propagating the first sub-io error
 * into the parent's ci_result.
 */
static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
                       int (*iofunc)(const struct lu_env *, struct cl_io *))
        struct cl_io *parent = lio->lis_cl.cis_io;
        struct lov_io_sub *sub;
        /* [elided: orig. l.469-470 — presumably `int rc = 0;'] */
        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                /* [elided: orig. l.472 — presumably lov_sub_enter(sub)] */
                rc = iofunc(sub->sub_env, sub->sub_io);
                /* [elided: orig. l.474-477 — presumably lov_sub_exit() and an
                 * early break on rc != 0] */
                if (parent->ci_result == 0)
                        parent->ci_result = sub->sub_io->ci_result;
        /* [elided: orig. l.480+ — loop close and `return rc;'] */
/* cio_lock handler: lock every active sub-io. */
static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios)
        return lov_io_call(env, cl2lov_io(env, ios), cl_io_lock);
/* cio_start handler: start every active sub-io. */
static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
        return lov_io_call(env, cl2lov_io(env, ios), cl_io_start);
/*
 * Per-sub-io wrapper used by lov_io_end()/lov_io_fsync_end().
 * It's possible that lov_io_start() wasn't called against this sub-io,
 * either because a previous sub-io failed, or the upper layer never got
 * that far; in that case just advance the state instead of ending the io.
 */
static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
        if (io->ci_state == CIS_IO_GOING)
                /* [elided: orig. l.502-503 — presumably cl_io_end() and else] */
        io->ci_state = CIS_IO_FINISHED;
        /* [elided: orig. l.505-506 — presumably `return 0;'] */
/* Per-sub-io wrapper: finish the iteration of one child io.
 * [elided: return statement — presumably `return 0;'] */
static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
        cl_io_iter_fini(env, io);
/* Per-sub-io wrapper: unlock one child io.
 * [elided: return statement — presumably `return 0;'] */
static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io)
        cl_io_unlock(env, io);
/* cio_end handler: end every active sub-io via lov_io_end_wrapper().
 * [elided: declaration of rc and its check — presumably LASSERT(rc == 0)] */
static void lov_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
        rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_end_wrapper);
/*
 * cio_iter_fini handler: finish iteration on all active sub-ios, then empty
 * the active list so the next iteration starts fresh.
 */
static void lov_io_iter_fini(const struct lu_env *env,
                             const struct cl_io_slice *ios)
        struct lov_io *lio = cl2lov_io(env, ios);
        /* [elided: orig. l.532-533 — presumably `int rc;'] */
        rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
        /* [elided: orig. l.535 — presumably LASSERT(rc == 0)] */
        while (!list_empty(&lio->lis_active))
                list_del_init(lio->lis_active.next);
/* cio_unlock handler: unlock every active sub-io.
 * [elided: declaration of rc and its check — presumably LASSERT(rc == 0)] */
static void lov_io_unlock(const struct lu_env *env,
                          const struct cl_io_slice *ios)
        rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper);
/*
 * Pick the per-stripe input queue for lov_io_submit(): the freshly
 * allocated array when allocation succeeded, otherwise the device's
 * preallocated emergency page list (memory-cleansing path).
 * NOTE(review): trailing parameters on orig. l.552-553 (the index and the
 * alloc flag used below) were elided by this extraction.
 */
static struct cl_page_list *lov_io_submit_qin(struct lov_device *ld,
                                              struct cl_page_list *qin,
        return alloc ? &qin[idx] : &ld->ld_emrg[idx]->emrg_page_list;
/*
 * lov implementation of cl_operations::cio_submit() method. It takes a list
 * of pages in \a queue, splits it into per-stripe sub-lists, invokes
 * cl_io_submit() on underlying devices to submit sub-lists, and then splices
 * the per-stripe results back into the caller's queues.
 *
 * Major complication of this function is a need to handle memory cleansing:
 * cl_io_submit() is called to write out pages as a part of VM memory
 * reclamation, and hence it may not fail due to memory shortages (system
 * dead-locks otherwise). To deal with this, some resources (sub-lists,
 * sub-environment, etc.) are allocated per-device on "startup" (i.e., in a
 * not-memory cleansing context), and in case of memory shortage, these
 * pre-allocated resources are used by lov_io_submit() under
 * lov_device::ld_mutex mutex.
 */
static int lov_io_submit(const struct lu_env *env,
                         const struct cl_io_slice *ios,
                         enum cl_req_type crt, struct cl_2queue *queue)
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_object *obj = lio->lis_object;
        struct lov_device *ld = lu2lov_dev(lov2cl(obj)->co_lu.lo_dev);
        struct cl_page_list *qin = &queue->c2_qin;
        struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
        struct cl_page_list *stripes_qin = NULL;
        struct cl_page *page;
        /* [elided: orig. l.583-585 — presumably declarations of tmp, stripe,
         * rc and an `alloc' flag] */
#define QIN(stripe) lov_io_submit_qin(ld, stripes_qin, stripe, alloc)
        /* [elided: orig. l.587-589 — start of the `alloc' computation; the
         * PF_MEMALLOC test below is its tail: don't allocate while in
         * memory-reclaim context] */
        !(current->flags & PF_MEMALLOC);

        /* single-stripe fast path: submit directly on the lone sub-io */
        if (lio->lis_active_subios == 1) {
                int idx = lio->lis_single_subio_index;
                struct lov_io_sub *sub;

                LASSERT(idx < lio->lis_nr_subios);
                sub = lov_sub_get(env, lio, idx);
                LASSERT(!IS_ERR(sub));
                LASSERT(sub->sub_io == &lio->lis_single_subio);
                rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
                /* [elided: orig. l.601-604 — remaining args, lov_sub_put()
                 * and early return] */

        LASSERT(lio->lis_subs != NULL);
        /* [elided: orig. l.607-608 — presumably `if (alloc) {' and the
         * `stripes_qin =' assignment target] */
                libcfs_kvzalloc(sizeof(*stripes_qin) *
                /* [elided: orig. l.610-611 — count and gfp arguments] */
        if (stripes_qin == NULL)
                /* [elided: orig. l.613-614 — -ENOMEM return] */

        for (stripe = 0; stripe < lio->lis_nr_subios; stripe++)
                cl_page_list_init(&stripes_qin[stripe]);
        /* [elided: orig. l.617-618 — presumably `} else {'] */
        /*
         * If we get here, it means pageout & swap doesn't help.
         * In order to not make things worse, even don't try to
         * allocate the memory with __GFP_NOWARN. -jay
         */
        mutex_lock(&ld->ld_mutex);
        lio->lis_mem_frozen = 1;
        /* [elided: orig. l.625-626 — closing brace] */

        cl_2queue_init(cl2q);
        /* bin every incoming page into its stripe's queue */
        cl_page_list_for_each_safe(page, tmp, qin) {
                stripe = lov_page_stripe(page);
                cl_page_list_move(QIN(stripe), qin, page);
        /* [elided: orig. l.631-632 — closing brace] */

        for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
                struct lov_io_sub *sub;
                struct cl_page_list *sub_qin = QIN(stripe);

                if (list_empty(&sub_qin->pl_pages))
                        /* [elided: orig. l.638-639 — presumably continue] */
                cl_page_list_splice(sub_qin, &cl2q->c2_qin);
                sub = lov_sub_get(env, lio, stripe);
                /* [elided: orig. l.642 — presumably an IS_ERR(sub) guard] */
                rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
                /* [elided: orig. l.644-647 — remaining args, lov_sub_put()
                 * and error branch] */
                cl_page_list_splice(&cl2q->c2_qin, &queue->c2_qin);
                cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
                /* [elided: orig. l.650-653 — presumably break on error and
                 * closing brace] */

        /* return any pages that were never submitted to the caller's qin */
        for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
                struct cl_page_list *sub_qin = QIN(stripe);

                if (list_empty(&sub_qin->pl_pages))
                        /* [elided: orig. l.658-659 — presumably continue] */
                cl_page_list_splice(sub_qin, qin);
        /* [elided: orig. l.661-667 — presumably loop close, the `if (alloc)'
         * free of stripes_qin, and the borrowed-resources else-branch] */

        /* memory-cleansing path: release the borrowed emergency sub-ios */
        for (i = 0; i < lio->lis_nr_subios; i++) {
                struct cl_io *cio = lio->lis_subs[i].sub_io;

                if (cio && cio == &ld->ld_emrg[i]->emrg_subio)
                        lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
        /* [elided: orig. l.673 — closing brace] */
        lio->lis_mem_frozen = 0;
        mutex_unlock(&ld->ld_mutex);
        /* [elided: orig. l.676+ — closing brace, return and #undef QIN] */
/*
 * cio_prepare_write handler: delegate prepare_write to the sub-io of the
 * stripe owning the page slice.
 */
static int lov_io_prepare_write(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                const struct cl_page_slice *slice,
                                unsigned from, unsigned to)
        struct lov_io *lio = cl2lov_io(env, ios);
        struct cl_page *sub_page = lov_sub_page(slice);
        struct lov_io_sub *sub;
        /* [elided: orig. l.690-691 — presumably `int result;'] */
        sub = lov_page_subio(env, lio, slice);
        /* [elided: orig. l.693 — presumably an IS_ERR(sub) guard] */
        result = cl_io_prepare_write(sub->sub_env, sub->sub_io,
        /* [elided: orig. l.695-697 — remaining args (sub_page, from, to),
         * lov_sub_put() and else branch] */
        result = PTR_ERR(sub);
        /* [elided: orig. l.699+ — `return result;'] */
/*
 * cio_commit_write handler: delegate commit_write to the sub-io of the
 * stripe owning the page slice.
 */
static int lov_io_commit_write(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               const struct cl_page_slice *slice,
                               unsigned from, unsigned to)
        struct lov_io *lio = cl2lov_io(env, ios);
        struct cl_page *sub_page = lov_sub_page(slice);
        struct lov_io_sub *sub;
        /* [elided: orig. l.710-711 — presumably `int result;'] */
        sub = lov_page_subio(env, lio, slice);
        /* [elided: orig. l.713 — presumably an IS_ERR(sub) guard] */
        result = cl_io_commit_write(sub->sub_env, sub->sub_io,
        /* [elided: orig. l.715-717 — remaining args (sub_page, from, to),
         * lov_sub_put() and else branch] */
        result = PTR_ERR(sub);
        /* [elided: orig. l.719+ — `return result;'] */
/*
 * cio_start for fault io: copy the requested byte count into the sub-io of
 * the stripe owning the faulting page, then start all sub-ios normally.
 */
static int lov_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
        struct cl_fault_io *fio;
        /* [elided: orig. l.726 — presumably `struct lov_io *lio;'] */
        struct lov_io_sub *sub;

        fio = &ios->cis_io->u.ci_fault;
        lio = cl2lov_io(env, ios);
        sub = lov_sub_get(env, lio, lov_page_stripe(fio->ft_page));
        sub->sub_io->u.ci_fault.ft_nob = fio->ft_nob;
        /* [elided: orig. l.733 — presumably lov_sub_put(sub)] */
        return lov_io_start(env, ios);
/*
 * cio_end for fsync: end each active sub-io and accumulate the number of
 * pages each successful sub-io wrote back into the parent's fi_nr_written.
 */
static void lov_io_fsync_end(const struct lu_env *env,
                             const struct cl_io_slice *ios)
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_io_sub *sub;
        unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written;
        /* [elided: orig. l.743-744 — presumably `*written = 0;'] */
        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                struct cl_io *subio = sub->sub_io;
                /* [elided: orig. l.747-748 — presumably lov_sub_enter(sub)] */
                lov_io_end_wrapper(sub->sub_env, subio);
                /* [elided: orig. l.750-751 — presumably lov_sub_exit(sub)] */
                if (subio->ci_result == 0)
                        *written += subio->u.ci_fsync.fi_nr_written;
        /* [elided: orig. l.754+ — loop close] */
/*
 * cl_io operations vector for striped (raid0) objects, indexed by io type.
 * NOTE(review): the structural lines of this initializer (the `.op = {'
 * opener, the `[CIT_READ] = { .op = {' style sub-headers for each io type,
 * the req-op and page-ops sub-tables' headers, and closing braces) were
 * elided by this extraction; the groups below are separated by comments
 * inferred from their distinctive handlers — confirm against the original.
 */
static const struct cl_io_operations lov_io_ops = {
                /* [presumably CIT_READ] */
                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_rw_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_start,
                .cio_end = lov_io_end
                /* [presumably CIT_WRITE] */
                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_rw_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_start,
                .cio_end = lov_io_end
                /* [presumably CIT_SETATTR] */
                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_start,
                .cio_end = lov_io_end
                /* [presumably CIT_FAULT — note the dedicated start handler] */
                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_fault_start,
                .cio_end = lov_io_end
                /* [presumably CIT_FSYNC — note the dedicated end handler] */
                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_start,
                .cio_end = lov_io_fsync_end
                /* [presumably CIT_MISC — fini only] */
                .cio_fini = lov_io_fini
        /* [presumably the req-op (CRT_READ / CRT_WRITE) sub-table] */
                .cio_submit = lov_io_submit
                .cio_submit = lov_io_submit
        /* [presumably the page-operations sub-table] */
        .cio_prepare_write = lov_io_prepare_write,
        .cio_commit_write = lov_io_commit_write
820 /*****************************************************************************
822 * Empty lov io operations.
/*
 * cio_fini for ios on files without stripes: just drop the active-io
 * reference and wake any waiter (e.g. a pending layout change).
 */
static void lov_empty_io_fini(const struct lu_env *env,
                              const struct cl_io_slice *ios)
        struct lov_object *lov = cl2lov(ios->cis_obj);

        if (atomic_dec_and_test(&lov->lo_active_ios))
                wake_up_all(&lov->lo_waitq);
/*
 * Handler installed for io operations that must never be reached on a
 * stripeless file.  NOTE(review): the body was elided by this extraction;
 * presumably it asserts/BUGs — confirm against the original.
 */
static void lov_empty_impossible(const struct lu_env *env,
                                 struct cl_io_slice *ios)

/* cast so the same impossible handler fills slots of differing signatures */
#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible)
844 * An io operation vector for files without stripes.
/*
 * An io operation vector for files without stripes.
 * NOTE(review): as with lov_io_ops, the per-CIT sub-headers and closing
 * braces of this initializer were elided by this extraction; group
 * boundaries below are inferred — confirm against the original.
 */
static const struct cl_io_operations lov_empty_io_ops = {
                /* [read-type group] */
                .cio_fini = lov_empty_io_fini,
                .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                .cio_start = LOV_EMPTY_IMPOSSIBLE,
                .cio_end = LOV_EMPTY_IMPOSSIBLE
                /* [write-type group] */
                .cio_fini = lov_empty_io_fini,
                .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                .cio_start = LOV_EMPTY_IMPOSSIBLE,
                .cio_end = LOV_EMPTY_IMPOSSIBLE
                /* [setattr-type group] */
                .cio_fini = lov_empty_io_fini,
                .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                .cio_start = LOV_EMPTY_IMPOSSIBLE,
                .cio_end = LOV_EMPTY_IMPOSSIBLE
                /* [fault-type group] */
                .cio_fini = lov_empty_io_fini,
                .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                .cio_start = LOV_EMPTY_IMPOSSIBLE,
                .cio_end = LOV_EMPTY_IMPOSSIBLE
                /* [fsync / misc groups — fini only] */
                .cio_fini = lov_empty_io_fini
                .cio_fini = lov_empty_io_fini
        /* [req-op sub-table] */
                .cio_submit = LOV_EMPTY_IMPOSSIBLE
                .cio_submit = LOV_EMPTY_IMPOSSIBLE
        /* [page-operations sub-table] */
        .cio_commit_write = LOV_EMPTY_IMPOSSIBLE
/*
 * Entry point: initialize the lov layer of a cl_io on a striped object.
 * Computes the io extent, allocates the sub-io array, installs lov_io_ops,
 * and takes an active-io reference (balanced in lov_io_fini()).
 * NOTE(review): the remainder of the parameter list (orig. l.897 —
 * presumably `struct cl_io *io)') was elided by this extraction.
 */
int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
        struct lov_io *lio = lov_env_io(env);
        struct lov_object *lov = cl2lov(obj);

        INIT_LIST_HEAD(&lio->lis_active);
        lov_io_slice_init(lio, lov, io);
        if (io->ci_result == 0) {
                io->ci_result = lov_io_subio_init(env, lio, io);
                if (io->ci_result == 0) {
                        cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops);
                        atomic_inc(&lov->lo_active_ios);
                /* [elided: orig. l.909-910 — closing braces] */
        return io->ci_result;
/*
 * Entry point: initialize a cl_io on a file without stripes.  Most io types
 * are either trivially allowed or rejected; page faults are an error since
 * there is no data to fault in.  Installs lov_empty_io_ops on success.
 * NOTE(review): the remainder of the parameter list (orig. l.915 —
 * presumably `struct cl_io *io)'), the switch's case labels, the result
 * handling and the final return were elided by this extraction.
 */
int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
        struct lov_object *lov = cl2lov(obj);
        struct lov_io *lio = lov_env_io(env);
        /* [elided: orig. l.919-920 — presumably `int result;'] */
        lio->lis_object = lov;
        switch (io->ci_type) {
        /* [elided: orig. l.923-937 — the per-CIT cases setting `result';
         * presumably ending with `case CIT_FAULT:'] */
                CERROR("Page fault on a file without stripes: "DFID"\n",
                       PFID(lu_object_fid(&obj->co_lu)));
        /* [elided: orig. l.940-942 — presumably error result, break, switch
         * close and a `if (result == 0) {' success guard] */
        cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
        atomic_inc(&lov->lo_active_ios);
        /* [elided: orig. l.945-946 — closing brace] */
        io->ci_result = result < 0 ? result : 0;
        /* [elided: orig. l.948+ — `return io->ci_result;'] */
951 int lov_io_init_released(const struct lu_env
*env
, struct cl_object
*obj
,
954 struct lov_object
*lov
= cl2lov(obj
);
955 struct lov_io
*lio
= lov_env_io(env
);
958 LASSERT(lov
->lo_lsm
!= NULL
);
959 lio
->lis_object
= lov
;
961 switch (io
->ci_type
) {
963 LASSERTF(0, "invalid type %d\n", io
->ci_type
);
969 /* the truncate to 0 is managed by MDT:
970 * - in open, for open O_TRUNC
971 * - in setattr, for truncate
973 /* the truncate is for size > 0 so triggers a restore */
974 if (cl_io_is_trunc(io
))
975 io
->ci_restore_needed
= 1;
981 io
->ci_restore_needed
= 1;
986 cl_io_slice_add(io
, &lio
->lis_cl
, obj
, &lov_empty_io_ops
);
987 atomic_inc(&lov
->lo_active_ios
);
990 io
->ci_result
= result
< 0 ? result
: 0;