/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation. Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/slab.h>
#include <asm/div64.h>
#include <linux/lcm.h>

#include "ore_raid.h"

MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
MODULE_DESCRIPTION("Objects Raid Engine ore.ko");
MODULE_LICENSE("GPL");
/* ore_verify_layout does a couple of things:
 * 1. Given a minimum number of needed parameters, fixes up the rest of the
 *    members to be operational for the ore. The needed parameters are those
 *    that are defined by the pnfs-objects layout STD.
 * 2. Check to see if the current ore code actually supports these parameters,
 *    for example stripe_unit must be a multiple of the system PAGE_SIZE,
 *    and so on.
 * 3. Cache some heavily used calculations that will be needed by users.
 */
enum { BIO_MAX_PAGES_KMALLOC =
	(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec), };
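/* For a sense of scale (illustrative numbers only; both sizeof() values
 * vary by kernel version and config): with PAGE_SIZE == 4096,
 * sizeof(struct bio) around 100 bytes and sizeof(struct bio_vec) == 16,
 * this works out to roughly (4096 - 100) / 16 == ~249 pages per
 * kmalloc'ed bio.
 */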
int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
{
	u64 stripe_length;

	/* FIXME: Only raid0 is supported for now. */
	if (layout->raid_algorithm != PNFS_OSD_RAID_0) {
		ORE_ERR("Only RAID_0 for now\n");
		return -EINVAL;
	}
	if (0 != (layout->stripe_unit & ~PAGE_MASK)) {
		ORE_ERR("Stripe Unit(0x%llx)"
			" must be Multiples of PAGE_SIZE(0x%lx)\n",
			_LLU(layout->stripe_unit), PAGE_SIZE);
		return -EINVAL;
	}

	if (layout->group_width) {
		if (!layout->group_depth) {
			ORE_ERR("group_depth == 0 && group_width != 0\n");
			return -EINVAL;
		}
		if (total_comps < (layout->group_width * layout->mirrors_p1)) {
			ORE_ERR("Data Map wrong, "
				"numdevs=%d < group_width=%d * mirrors=%d\n",
				total_comps, layout->group_width,
				layout->mirrors_p1);
			return -EINVAL;
		}
		layout->group_count = total_comps / layout->mirrors_p1 /
						layout->group_width;
	} else {
		if (layout->group_depth) {
			printk(KERN_NOTICE "Warning: group_depth ignored "
				"group_width == 0 && group_depth == %lld\n",
				_LLU(layout->group_depth));
		}
		layout->group_width = total_comps / layout->mirrors_p1;
		layout->group_depth = -1;
		layout->group_count = 1;
	}

	stripe_length = (u64)layout->group_width * layout->stripe_unit;
	if (stripe_length >= (1ULL << 32)) {
		ORE_ERR("Stripe_length(0x%llx) >= 32bit is not supported\n",
			_LLU(stripe_length));
		return -EINVAL;
	}

	layout->max_io_length =
		(BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
							layout->group_width;
	if (layout->parity) {
		unsigned stripe_length =
				(layout->group_width - layout->parity) *
				layout->stripe_unit;

		layout->max_io_length /= stripe_length;
		layout->max_io_length *= stripe_length;
	}
	return 0;
}
EXPORT_SYMBOL(ore_verify_layout);
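/* Usage sketch (hypothetical mount-time caller; where the values come from
 * is made up here, but this is the intended calling pattern):
 *
 *	sbi->layout.stripe_unit = opts->stripe_unit;
 *	sbi->layout.group_width = opts->group_width;
 *	sbi->layout.group_depth = opts->group_depth;
 *	sbi->layout.mirrors_p1  = opts->mirrors + 1;
 *	sbi->layout.raid_algorithm = PNFS_OSD_RAID_0;
 *	ret = ore_verify_layout(numdevs, &sbi->layout);
 *	if (unlikely(ret))
 *		return ret;
 *	// layout->group_count and layout->max_io_length are now valid
 */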
static u8 *_ios_cred(struct ore_io_state *ios, unsigned index)
{
	return ios->oc->comps[index & ios->oc->single_comp].cred;
}
static struct osd_obj_id *_ios_obj(struct ore_io_state *ios, unsigned index)
{
	return &ios->oc->comps[index & ios->oc->single_comp].obj;
}
static struct osd_dev *_ios_od(struct ore_io_state *ios, unsigned index)
{
	ORE_DBGMSG2("oc->first_dev=%d oc->numdevs=%d i=%d oc->ods=%p\n",
		    ios->oc->first_dev, ios->oc->numdevs, index,
		    ios->oc->ods);

	return ore_comp_dev(ios->oc, index);
}
int _ore_get_io_state(struct ore_layout *layout,
		      struct ore_components *oc, unsigned numdevs,
		      unsigned sgs_per_dev, unsigned num_par_pages,
		      struct ore_io_state **pios)
{
	struct ore_io_state *ios;
	struct page **pages;
	struct osd_sg_entry *sgilist;
	struct __alloc_all_io_state {
		struct ore_io_state ios;
		struct ore_per_dev_state per_dev[numdevs];
		union {
			struct osd_sg_entry sglist[sgs_per_dev * numdevs];
			struct page *pages[num_par_pages];
		};
	} *_aios;

	if (likely(sizeof(*_aios) <= PAGE_SIZE)) {
		_aios = kzalloc(sizeof(*_aios), GFP_KERNEL);
		if (unlikely(!_aios)) {
			ORE_DBGMSG("Failed kzalloc bytes=%zd\n",
				   sizeof(*_aios));
			*pios = NULL;
			return -ENOMEM;
		}
		pages = num_par_pages ? _aios->pages : NULL;
		sgilist = sgs_per_dev ? _aios->sglist : NULL;
		ios = &_aios->ios;
	} else {
		struct __alloc_small_io_state {
			struct ore_io_state ios;
			struct ore_per_dev_state per_dev[numdevs];
		} *_aio_small;
		union __extra_part {
			struct osd_sg_entry sglist[sgs_per_dev * numdevs];
			struct page *pages[num_par_pages];
		} *extra_part;

		_aio_small = kzalloc(sizeof(*_aio_small), GFP_KERNEL);
		if (unlikely(!_aio_small)) {
			ORE_DBGMSG("Failed alloc first part bytes=%zd\n",
				   sizeof(*_aio_small));
			*pios = NULL;
			return -ENOMEM;
		}
		extra_part = kzalloc(sizeof(*extra_part), GFP_KERNEL);
		if (unlikely(!extra_part)) {
			ORE_DBGMSG("Failed alloc second part bytes=%zd\n",
				   sizeof(*extra_part));
			kfree(_aio_small);
			*pios = NULL;
			return -ENOMEM;
		}

		pages = num_par_pages ? extra_part->pages : NULL;
		sgilist = sgs_per_dev ? extra_part->sglist : NULL;
		/* In this case the per_dev[0].sglist holds the pointer to
		 * be freed
		 */
		ios = &_aio_small->ios;
		ios->extra_part_alloc = true;
	}

	if (pages) {
		ios->parity_pages = pages;
		ios->max_par_pages = num_par_pages;
	}
	if (sgilist) {
		unsigned d;

		for (d = 0; d < numdevs; ++d) {
			ios->per_dev[d].sglist = sgilist;
			sgilist += sgs_per_dev;
		}
		ios->sgs_per_dev = sgs_per_dev;
	}

	ios->layout = layout;
	ios->oc = oc;
	*pios = ios;
	return 0;
}
/* Allocate an io_state for only a single group of devices
 *
 * If a user needs to call ore_read/write() this version must be used,
 * because it allocates extra stuff for striping and raid.
 * The ore might decide to only IO less than @length bytes due to
 * alignments and constraints as follows:
 * - The IO cannot cross group boundary.
 * - In raid5/6 the end of the IO must align at end of a stripe, e.g.
 *   (@offset + @length) % strip_size == 0. Or the complete range is within a
 *   single stripe.
 * - Memory condition only permitted a shorter IO. (A user can use @length=~0
 *   and check the returned ios->length for max_io_size.)
 *
 * The caller must check returned ios->length (and/or ios->nr_pages) and
 * re-issue these pages that fall outside of ios->length
 */
int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
		     bool is_reading, u64 offset, u64 length,
		     struct ore_io_state **pios)
{
	struct ore_io_state *ios;
	unsigned numdevs = layout->group_width * layout->mirrors_p1;
	unsigned sgs_per_dev = 0, max_par_pages = 0;
	int ret;

	if (layout->parity && length) {
		unsigned data_devs = layout->group_width - layout->parity;
		unsigned stripe_size = layout->stripe_unit * data_devs;
		unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
		u32 remainder;
		u64 num_stripes;
		u64 num_raid_units;

		num_stripes = div_u64_rem(length, stripe_size, &remainder);
		if (remainder)
			++num_stripes;

		num_raid_units = num_stripes * layout->parity;

		if (is_reading) {
			/* For reads add per_dev sglist array */
			/* TODO: Raid 6 we need twice more. Actually:
			 *	num_stripes / LCMdP(W,P);
			 *	if (W%P != 0) num_stripes *= parity;
			 */

			/* first/last seg is split */
			num_raid_units += layout->group_width;
			sgs_per_dev = div_u64(num_raid_units, data_devs);
		} else {
			/* For Writes add parity pages array. */
			max_par_pages = num_raid_units * pages_in_unit *
						sizeof(struct page *);
		}
	}

	ret = _ore_get_io_state(layout, oc, numdevs, sgs_per_dev,
				max_par_pages, pios);
	if (unlikely(ret))
		return ret;

	ios = *pios;
	ios->reading = is_reading;
	ios->offset = offset;

	if (length) {
		ore_calc_stripe_info(layout, offset, length, &ios->si);
		ios->length = ios->si.length;
		ios->nr_pages = (ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
		if (layout->parity)
			_ore_post_alloc_raid_stuff(ios);
	}

	return 0;
}
EXPORT_SYMBOL(ore_get_rw_state);
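/* Read-path usage sketch (hypothetical caller; locals and error handling
 * are illustrative, and advancing the page array by nr_pages assumes the
 * offset stays page-aligned). The outer loop implements the re-issue rule
 * from the comment above, since ore_read() only IOs ios->length bytes:
 *
 *	while (length) {
 *		ret = ore_get_rw_state(layout, oc, true, offset, length, &ios);
 *		if (unlikely(ret))
 *			return ret;
 *		ios->pages = pages;
 *		ret = ore_read(ios);
 *		done = ios->length;
 *		pages_done = ios->nr_pages;
 *		ore_put_io_state(ios);
 *		if (unlikely(ret))
 *			return ret;
 *		offset += done;
 *		length -= done;
 *		pages += pages_done;
 *	}
 */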
/* Allocate an io_state for all the devices in the comps array
 *
 * This version of io_state allocation is used mostly by create/remove
 * and trunc where we currently need all the devices. The only wasteful
 * bit is the read/write_attributes with no IO. Those sites should
 * be converted to use ore_get_rw_state() with length=0
 */
int ore_get_io_state(struct ore_layout *layout, struct ore_components *oc,
		     struct ore_io_state **pios)
{
	return _ore_get_io_state(layout, oc, oc->numdevs, 0, 0, pios);
}
EXPORT_SYMBOL(ore_get_io_state);
void ore_put_io_state(struct ore_io_state *ios)
{
	if (ios) {
		unsigned i;

		for (i = 0; i < ios->numdevs; i++) {
			struct ore_per_dev_state *per_dev = &ios->per_dev[i];

			if (per_dev->or)
				osd_end_request(per_dev->or);
			if (per_dev->bio)
				bio_put(per_dev->bio);
		}

		_ore_free_raid_stuff(ios);
	}
}
EXPORT_SYMBOL(ore_put_io_state);
static void _sync_done(struct ore_io_state *ios, void *p)
{
	struct completion *waiting = p;

	complete(waiting);
}

static void _last_io(struct kref *kref)
{
	struct ore_io_state *ios = container_of(
					kref, struct ore_io_state, kref);

	ios->done(ios, ios->private);
}

static void _done_io(struct osd_request *or, void *p)
{
	struct ore_io_state *ios = p;

	kref_put(&ios->kref, _last_io);
}
int ore_io_execute(struct ore_io_state *ios)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	bool sync = (ios->done == NULL);
	int i, ret;

	if (sync) {
		ios->done = _sync_done;
		ios->private = &wait;
	}

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;

		if (unlikely(!or))
			continue;

		ret = osd_finalize_request(or, 0, _ios_cred(ios, i), NULL);
		if (unlikely(ret)) {
			ORE_DBGMSG("Failed to osd_finalize_request() => %d\n",
				   ret);
			return ret;
		}
	}

	kref_init(&ios->kref);

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;

		if (unlikely(!or))
			continue;

		kref_get(&ios->kref);
		osd_execute_request_async(or, _done_io, ios);
	}

	kref_put(&ios->kref, _last_io);
	ret = 0;

	if (sync) {
		wait_for_completion(&wait);
		ret = ore_check_io(ios, NULL);
	}
	return ret;
}
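/* Async usage sketch (hypothetical caller callback and context struct;
 * synchronous callers simply leave ios->done == NULL and get the blocking
 * path above):
 *
 *	static void my_io_done(struct ore_io_state *ios, void *private)
 *	{
 *		struct my_ctx *ctx = private;
 *
 *		ctx->ret = ore_check_io(ios, NULL);
 *		ore_put_io_state(ios);
 *		complete(&ctx->done);
 *	}
 *
 *	ios->done = my_io_done;
 *	ios->private = ctx;
 *	ret = ore_io_execute(ios);
 */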
static void _clear_bio(struct bio *bio)
{
	struct bio_vec *bv;
	unsigned i;

	__bio_for_each_segment(bv, bio, i, 0) {
		unsigned this_count = bv->bv_len;

		if (likely(PAGE_SIZE == this_count))
			clear_highpage(bv->bv_page);
		else
			zero_user(bv->bv_page, bv->bv_offset, this_count);
	}
}
int ore_check_io(struct ore_io_state *ios, ore_on_dev_error on_dev_error)
{
	enum osd_err_priority accumulated_osd_err = 0;
	int accumulated_lin_err = 0;
	int i;

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_sense_info osi;
		struct ore_per_dev_state *per_dev = &ios->per_dev[i];
		struct osd_request *or = per_dev->or;
		int ret;

		if (unlikely(!or))
			continue;

		ret = osd_req_decode_sense(or, &osi);
		if (likely(!ret))
			continue;

		if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
			/* start read offset passed end of file */
			_clear_bio(per_dev->bio);
			ORE_DBGMSG("start read offset passed end of file "
				"offset=0x%llx, length=0x%llx\n",
				_LLU(per_dev->offset),
				_LLU(per_dev->length));

			continue; /* we recovered */
		}

		if (on_dev_error) {
			u64 residual = ios->reading ?
					or->in.residual : or->out.residual;
			u64 offset = (ios->offset + ios->length) - residual;
			struct ore_dev *od = ios->oc->ods[
					per_dev->dev - ios->oc->first_dev];

			on_dev_error(ios, od, per_dev->dev, osi.osd_err_pri,
				     offset, residual);
		}
		if (osi.osd_err_pri >= accumulated_osd_err) {
			accumulated_osd_err = osi.osd_err_pri;
			accumulated_lin_err = ret;
		}
	}

	return accumulated_lin_err;
}
EXPORT_SYMBOL(ore_check_io);
/*
 * L - logical offset into the file
 *
 * D - number of Data devices
 *	D = group_width - parity
 *
 * U - The number of bytes in a stripe within a group
 *	U = stripe_unit * D
 *
 * T - The number of bytes striped within a group of component objects
 *	(before advancing to the next group)
 *	T = U * group_depth
 *
 * S - The number of bytes striped across all component objects
 *	before the pattern repeats
 *	S = T * group_count
 *
 * M - The "major" (i.e., across all components) cycle number
 *	M = L / S
 *
 * G - Counts the groups from the beginning of the major cycle
 *	G = (L - (M * S)) / T	[or (L % S) / T]
 *
 * H - The byte offset within the group
 *	H = (L - (M * S)) % T	[or (L % S) % T]
 *
 * N - The "minor" (i.e., across the group) stripe number
 *	N = H / U
 *
 * C - The component index corresponding to L
 *
 *	C = (H - (N * U)) / stripe_unit + G * D
 *	[or (L % U) / stripe_unit + G * D]
 *
 * O - The component offset corresponding to L
 *	O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
 *
 * LCMdP - Parity cycle: Lowest Common Multiple of group_width, parity
 *	LCMdP = lcm(group_width, parity) / parity
 *
 * R - The parity Rotation stripe
 *	(Note parity cycle always starts at a group's boundary)
 *	R = N % LCMdP
 *
 * I = the first parity device index
 *	I = (group_width + group_width - R*parity - parity) % group_width
 *
 * Craid - The component index Rotated
 *	Craid = (group_width + C - R*parity) % group_width
 *	(We add the group_width to avoid negative numbers modulo math)
 */
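/* Worked example (hypothetical numbers, for illustration only):
 * stripe_unit = 64K, group_width = 4, parity = 0 (so D = 4),
 * group_depth = 2, group_count = 2, and L = 640K. Then:
 *	U = 4 * 64K = 256K,  T = 256K * 2 = 512K,  S = 512K * 2 = 1M
 *	M = 640K / 1M = 0
 *	G = (640K % 1M) / 512K = 1
 *	H = (640K % 1M) % 512K = 128K
 *	N = 128K / 256K = 0
 *	C = (128K - 0) / 64K + 1 * 4 = 6
 *	O = 640K % 64K + 0 + 0 = 0
 * i.e. byte 640K of the file lives at offset 0 of component object 6,
 * the third device of the second group.
 */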
void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
			  u64 length, struct ore_striping_info *si)
{
	u32	stripe_unit = layout->stripe_unit;
	u32	group_width = layout->group_width;
	u64	group_depth = layout->group_depth;
	u32	parity = layout->parity;

	u32	D = group_width - parity;
	u32	U = D * stripe_unit;
	u64	T = U * group_depth;
	u64	S = T * layout->group_count;
	u64	M = div64_u64(file_offset, S);

	/*
	G = (L - (M * S)) / T
	H = (L - (M * S)) % T
	*/
	u64	LmodS = file_offset - M * S;
	u32	G = div64_u64(LmodS, T);
	u64	H = LmodS - G * T;

	u32	N = div_u64(H, U);

	/* "H - (N * U)" is just "H % U" so it's bound to u32 */
	u32	C = (u32)(H - (N * U)) / stripe_unit + G * group_width;

	div_u64_rem(file_offset, stripe_unit, &si->unit_off);

	si->obj_offset = si->unit_off + (N * stripe_unit) +
				(M * group_depth * stripe_unit);

	if (parity) {
		u32 LCMdP = lcm(group_width, parity) / parity;
		/* R = N % LCMdP; */
		u32 RxP = (N % LCMdP) * parity;
		u32 first_dev = C - C % group_width;

		si->par_dev = (group_width + group_width - parity - RxP) %
			      group_width + first_dev;
		si->dev = (group_width + C - RxP) % group_width + first_dev;
		si->bytes_in_stripe = U;
		si->first_stripe_start = M * S + G * T + N * U;
	} else {
		/* Make the math correct see _prepare_one_group */
		si->par_dev = group_width;
		si->dev = C;
	}

	si->dev *= layout->mirrors_p1;
	si->par_dev *= layout->mirrors_p1;
	si->offset = file_offset;
	si->length = T - H;
	if (si->length > length)
		si->length = length;
	si->M = M;
}
EXPORT_SYMBOL(ore_calc_stripe_info);
int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
			 unsigned pgbase, struct page **pages,
			 struct ore_per_dev_state *per_dev, int cur_len)
{
	unsigned pg = *cur_pg;
	struct request_queue *q =
			osd_request_queue(_ios_od(ios, per_dev->dev));
	unsigned len = cur_len;
	int ret;

	if (per_dev->bio == NULL) {
		unsigned pages_in_stripe = ios->layout->group_width *
					(ios->layout->stripe_unit / PAGE_SIZE);
		unsigned nr_pages = ios->nr_pages * ios->layout->group_width /
					(ios->layout->group_width -
					 ios->layout->parity);
		unsigned bio_size = (nr_pages + pages_in_stripe) /
					ios->layout->group_width;

		per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
		if (unlikely(!per_dev->bio)) {
			ORE_DBGMSG("Failed to allocate BIO size=%u\n",
				   bio_size);
			ret = -ENOMEM;
			goto out;
		}
	}

	while (cur_len > 0) {
		unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
		unsigned added_len;

		cur_len -= pglen;

		added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
					    pglen, pgbase);
		if (unlikely(pglen != added_len)) {
			ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=%u\n",
				   per_dev->bio->bi_vcnt);
			ret = -ENOMEM;
			goto out;
		}
		_add_stripe_page(ios->sp2d, &ios->si, pages[pg]);

		pgbase = 0;
		++pg;
	}
	BUG_ON(cur_len);

	per_dev->length += len;
	*cur_pg = pg;
	ret = 0;
out:	/* we fail the complete unit on an error, e.g. don't advance
	 * per_dev->length and cur_pg. This means that we might have a bigger
	 * bio than the CDB requested length (per_dev->length). That's fine,
	 * only the opposite is fatal.
	 */
	return ret;
}
static int _prepare_for_striping(struct ore_io_state *ios)
{
	struct ore_striping_info *si = &ios->si;
	unsigned stripe_unit = ios->layout->stripe_unit;
	unsigned mirrors_p1 = ios->layout->mirrors_p1;
	unsigned group_width = ios->layout->group_width;
	unsigned devs_in_group = group_width * mirrors_p1;
	unsigned dev = si->dev;
	unsigned first_dev = dev - (dev % devs_in_group);
	unsigned dev_order;
	unsigned cur_pg = ios->pages_consumed;
	u64 length = ios->length;
	int ret = 0;

	if (!ios->pages) {
		ios->numdevs = ios->layout->mirrors_p1;
		return 0;
	}

	BUG_ON(length > si->length);

	dev_order = _dev_order(devs_in_group, mirrors_p1, si->par_dev, dev);
	si->cur_comp = dev_order;
	si->cur_pg = si->unit_off / PAGE_SIZE;

	while (length) {
		unsigned comp = dev - first_dev;
		struct ore_per_dev_state *per_dev = &ios->per_dev[comp];
		unsigned cur_len, page_off = 0;

		if (!per_dev->length) {
			per_dev->dev = dev;
			if (dev == si->dev) {
				WARN_ON(dev == si->par_dev);
				per_dev->offset = si->obj_offset;
				cur_len = stripe_unit - si->unit_off;
				page_off = si->unit_off & ~PAGE_MASK;
				BUG_ON(page_off && (page_off != ios->pgbase));
			} else {
				if (si->cur_comp > dev_order)
					per_dev->offset =
						si->obj_offset - si->unit_off;
				else /* si->cur_comp < dev_order */
					per_dev->offset =
						si->obj_offset + stripe_unit -
								si->unit_off;
				cur_len = stripe_unit;
			}
		} else {
			cur_len = stripe_unit;
		}
		if (cur_len >= length)
			cur_len = length;

		ret = _ore_add_stripe_unit(ios, &cur_pg, page_off, ios->pages,
					   per_dev, cur_len);
		if (unlikely(ret))
			goto out;

		dev += mirrors_p1;
		dev = (dev % devs_in_group) + first_dev;

		length -= cur_len;

		si->cur_comp = (si->cur_comp + 1) % group_width;
		if (unlikely((dev == si->par_dev) || (!length && ios->sp2d))) {
			if (!length && ios->sp2d) {
				/* If we are writing and this is the very last
				 * stripe, then operate on parity dev.
				 */
				dev = si->par_dev;
			}
			if (ios->sp2d)
				/* In writes cur_len just means if it's the
				 * last one. See _ore_add_parity_unit.
				 */
				cur_len = length;
			per_dev = &ios->per_dev[dev - first_dev];
			if (!per_dev->length) {
				/* Only/always the parity unit of the first
				 * stripe will be empty. So this is a chance to
				 * initialize the per_dev info.
				 */
				per_dev->dev = dev;
				per_dev->offset = si->obj_offset -
						  si->unit_off;
			}

			ret = _ore_add_parity_unit(ios, si, per_dev, cur_len);
			if (unlikely(ret))
				goto out;

			/* Rotate next par_dev backwards with wrapping */
			si->par_dev = (devs_in_group + si->par_dev -
				       ios->layout->parity * mirrors_p1) %
				      devs_in_group + first_dev;
			/* Next stripe, start fresh */
			si->cur_comp = 0;
			si->cur_pg = 0;
		}
	}
out:
	ios->numdevs = devs_in_group;
	ios->pages_consumed = cur_pg;
	if (unlikely(ret)) {
		if (length == ios->length)
			return ret;
		else
			ios->length -= length;
	}
	return 0;
}
int ore_create(struct ore_io_state *ios)
{
	int i, ret;

	for (i = 0; i < ios->oc->numdevs; i++) {
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		ios->per_dev[i].or = or;
		ios->numdevs++;

		osd_req_create_object(or, _ios_obj(ios, i));
	}
	ret = ore_io_execute(ios);

out:
	return ret;
}
EXPORT_SYMBOL(ore_create);
int ore_remove(struct ore_io_state *ios)
{
	int i, ret;

	for (i = 0; i < ios->oc->numdevs; i++) {
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		ios->per_dev[i].or = or;
		ios->numdevs++;

		osd_req_remove_object(or, _ios_obj(ios, i));
	}
	ret = ore_io_execute(ios);

out:
	return ret;
}
EXPORT_SYMBOL(ore_remove);
static int _write_mirror(struct ore_io_state *ios, int cur_comp)
{
	struct ore_per_dev_state *master_dev = &ios->per_dev[cur_comp];
	unsigned dev = ios->per_dev[cur_comp].dev;
	unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
	int ret = 0;

	if (ios->pages && !master_dev->length)
		return 0; /* Just an empty slot */

	for (; cur_comp < last_comp; ++cur_comp, ++dev) {
		struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, dev), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		per_dev->or = or;

		if (ios->pages) {
			struct bio *bio;

			if (per_dev != master_dev) {
				bio = bio_kmalloc(GFP_KERNEL,
						  master_dev->bio->bi_max_vecs);
				if (unlikely(!bio)) {
					ORE_DBGMSG(
					      "Failed to allocate BIO size=%u\n",
					      master_dev->bio->bi_max_vecs);
					ret = -ENOMEM;
					goto out;
				}

				__bio_clone(bio, master_dev->bio);
				per_dev->offset = master_dev->offset;
				per_dev->length = master_dev->length;
				per_dev->bio = bio;
				per_dev->dev = dev;
			} else {
				bio = master_dev->bio;
				/* FIXME: bio_set_dir() */
				bio->bi_rw |= REQ_WRITE;
			}

			osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
				      bio, per_dev->length);
			ORE_DBGMSG("write(0x%llx) offset=0x%llx "
				   "length=0x%llx dev=%d\n",
				   _LLU(_ios_obj(ios, dev)->id),
				   _LLU(per_dev->offset),
				   _LLU(per_dev->length), dev);
		} else if (ios->kern_buff) {
			per_dev->offset = ios->si.obj_offset;
			per_dev->dev = ios->si.dev + dev;

			/* no cross device without page array */
			BUG_ON((ios->layout->group_width > 1) &&
			       (ios->si.unit_off + ios->length >
				ios->layout->stripe_unit));

			ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
						 per_dev->offset,
						 ios->kern_buff, ios->length);
			if (unlikely(ret))
				goto out;
			ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
				    "length=0x%llx dev=%d\n",
				    _LLU(_ios_obj(ios, dev)->id),
				    _LLU(per_dev->offset),
				    _LLU(ios->length), per_dev->dev);
		} else {
			osd_req_set_attributes(or, _ios_obj(ios, dev));
			ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
				    _LLU(_ios_obj(ios, dev)->id),
				    ios->out_attr_len, dev);
		}

		if (ios->out_attr)
			osd_req_add_set_attr_list(or, ios->out_attr,
						  ios->out_attr_len);

		if (ios->in_attr)
			osd_req_add_get_attr_list(or, ios->in_attr,
						  ios->in_attr_len);
	}

out:
	return ret;
}
int ore_write(struct ore_io_state *ios)
{
	int i;
	int ret;

	if (unlikely(ios->sp2d && !ios->r4w)) {
		/* A library is attempting a RAID-write without providing
		 * a pages lock interface.
		 */
		WARN_ON_ONCE(1);
		return -ENOTSUPP;
	}

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
		ret = _write_mirror(ios, i);
		if (unlikely(ret))
			return ret;
	}

	ret = ore_io_execute(ios);
	return ret;
}
EXPORT_SYMBOL(ore_write);
int _ore_read_mirror(struct ore_io_state *ios, unsigned cur_comp)
{
	struct osd_request *or;
	struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
	struct osd_obj_id *obj = _ios_obj(ios, cur_comp);
	unsigned first_dev = (unsigned)obj->id;

	if (ios->pages && !per_dev->length)
		return 0; /* Just an empty slot */

	first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
	or = osd_start_request(_ios_od(ios, first_dev), GFP_KERNEL);
	if (unlikely(!or)) {
		ORE_ERR("%s: osd_start_request failed\n", __func__);
		return -ENOMEM;
	}
	per_dev->or = or;

	if (ios->pages) {
		if (per_dev->cur_sg) {
			/* finalize the last sg_entry */
			_ore_add_sg_seg(per_dev, 0, false);
			if (unlikely(!per_dev->cur_sg))
				return 0; /* Skip parity only device */

			osd_req_read_sg(or, obj, per_dev->bio,
					per_dev->sglist, per_dev->cur_sg);
		} else {
			/* The no raid case */
			osd_req_read(or, obj, per_dev->offset,
				     per_dev->bio, per_dev->length);
		}

		ORE_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
			   " dev=%d sg_len=%d\n", _LLU(obj->id),
			   _LLU(per_dev->offset), _LLU(per_dev->length),
			   first_dev, per_dev->cur_sg);
	} else {
		BUG_ON(ios->kern_buff);

		osd_req_get_attributes(or, obj);
		ORE_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
			    _LLU(obj->id),
			    ios->in_attr_len, first_dev);
	}

	if (ios->out_attr)
		osd_req_add_set_attr_list(or, ios->out_attr,
					  ios->out_attr_len);

	if (ios->in_attr)
		osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);

	return 0;
}
int ore_read(struct ore_io_state *ios)
{
	int i;
	int ret;

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
		ret = _ore_read_mirror(ios, i);
		if (unlikely(ret))
			return ret;
	}

	ret = ore_io_execute(ios);
	return ret;
}
EXPORT_SYMBOL(ore_read);
int extract_attr_from_ios(struct ore_io_state *ios, struct osd_attr *attr)
{
	struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
	void *iter = NULL;
	int nelem;

	do {
		nelem = 1;
		osd_req_decode_get_attr_list(ios->per_dev[0].or,
					     &cur_attr, &nelem, &iter);
		if ((cur_attr.attr_page == attr->attr_page) &&
		    (cur_attr.attr_id == attr->attr_id)) {
			attr->len = cur_attr.len;
			attr->val_ptr = cur_attr.val_ptr;
			return 0;
		}
	} while (iter);

	return -EIO;
}
EXPORT_SYMBOL(extract_attr_from_ios);
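/* Usage sketch (hypothetical caller; this mirrors how a filesystem would
 * read back an object's logical length after a get-attributes IO):
 *
 *	struct osd_attr attr = g_attr_logical_length;
 *
 *	attr.val_ptr = NULL;
 *	ret = extract_attr_from_ios(ios, &attr);
 *	if (!ret)
 *		obj_size = get_unaligned_be64(attr.val_ptr);
 */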
static int _truncate_mirrors(struct ore_io_state *ios, unsigned cur_comp,
			     struct osd_attr *attr)
{
	int last_comp = cur_comp + ios->layout->mirrors_p1;

	for (; cur_comp < last_comp; ++cur_comp) {
		struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, cur_comp), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			return -ENOMEM;
		}
		per_dev->or = or;

		osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
		osd_req_add_set_attr_list(or, attr, 1);
	}

	return 0;
}
struct _trunc_info {
	struct ore_striping_info si;
	u64 prev_group_obj_off;
	u64 next_group_obj_off;

	unsigned first_group_dev;
	unsigned nex_group_dev;
};

static void _calc_trunk_info(struct ore_layout *layout, u64 file_offset,
			     struct _trunc_info *ti)
{
	unsigned stripe_unit = layout->stripe_unit;

	ore_calc_stripe_info(layout, file_offset, 0, &ti->si);

	ti->prev_group_obj_off = ti->si.M * stripe_unit;
	ti->next_group_obj_off = ti->si.M ? (ti->si.M - 1) * stripe_unit : 0;

	ti->first_group_dev = ti->si.dev - (ti->si.dev % layout->group_width);
	ti->nex_group_dev = ti->first_group_dev + layout->group_width;
}
int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
		 u64 size)
{
	struct ore_io_state *ios;
	struct exofs_trunc_attr {
		struct osd_attr attr;
		__be64 newsize;
	} *size_attrs;
	struct _trunc_info ti;
	int i, ret;

	ret = ore_get_io_state(layout, oc, &ios);
	if (unlikely(ret))
		return ret;

	_calc_trunk_info(ios->layout, size, &ti);

	size_attrs = kcalloc(ios->oc->numdevs, sizeof(*size_attrs),
			     GFP_KERNEL);
	if (unlikely(!size_attrs)) {
		ret = -ENOMEM;
		goto out;
	}

	ios->numdevs = ios->oc->numdevs;

	for (i = 0; i < ios->numdevs; ++i) {
		struct exofs_trunc_attr *size_attr = &size_attrs[i];
		u64 obj_size;

		if (i < ti.first_group_dev)
			obj_size = ti.prev_group_obj_off;
		else if (i >= ti.nex_group_dev)
			obj_size = ti.next_group_obj_off;
		else if (i < ti.si.dev) /* dev within this group */
			obj_size = ti.si.obj_offset +
				   ios->layout->stripe_unit - ti.si.unit_off;
		else if (i == ti.si.dev)
			obj_size = ti.si.obj_offset;
		else /* i > ti.si.dev */
			obj_size = ti.si.obj_offset - ti.si.unit_off;

		size_attr->newsize = cpu_to_be64(obj_size);
		size_attr->attr = g_attr_logical_length;
		size_attr->attr.val_ptr = &size_attr->newsize;

		ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
			   _LLU(oc->comps->obj.id), _LLU(obj_size), i);
		ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
					&size_attr->attr);
		if (unlikely(ret))
			goto out;
	}
	ret = ore_io_execute(ios);

out:
	kfree(size_attrs);
	ore_put_io_state(ios);
	return ret;
}
EXPORT_SYMBOL(ore_truncate);
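/* Worked truncate example (hypothetical numbers, single group, no parity,
 * no mirrors): group_width = 4, stripe_unit = 64K, size = 160K. Then
 * ore_calc_stripe_info() yields si.dev = 2, si.unit_off = 32K,
 * si.obj_offset = 32K, and the loop above sets the per-object sizes to:
 *	dev 0: 32K + 64K - 32K = 64K	(full last unit, i < si.dev)
 *	dev 1: 64K			(same)
 *	dev 2: 32K			(i == si.dev, cut mid-unit)
 *	dev 3: 0			(i > si.dev, unit not reached)
 * which is exactly bytes 0..160K laid out round-robin in 64K units.
 */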
const struct osd_attr g_attr_logical_length = ATTR_DEF(
	OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
EXPORT_SYMBOL(g_attr_logical_length);