/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/pipe_fs_i.h>

#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"

#include "buffer_head_io.h"

static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
		   (unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
				  OCFS2_I(inode)->ip_blkno,
				  &bh, OCFS2_BH_CACHED, inode);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if (!OCFS2_IS_VALID_DINODE(fe)) {
		mlog(ML_ERROR, "Invalid dinode #%llu: signature = %.*s\n",
		     (unsigned long long)le64_to_cpu(fe->i_blkno), 7,
		     fe->i_signature);
		goto bail;
	}

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	if (bh)
		brelse(bh);

	mlog_exit(err);
	return err;
}

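/*
 * ->get_block() in the generic buffer-head sense: translate logical
 * block 'iblock' of the file into a disk block and describe it in
 * bh_result via map_bh(). ocfs2 never allocates here; 'create' only
 * influences BH_New tagging on non-sparse file systems (see the
 * comments in the body below).
 */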
static int ocfs2_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 p_blkno, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
		   (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, NULL,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows block_prepare_write() to zero.
	 */
	mlog_bug_on_msg(create && p_blkno == 0 && ocfs2_sparse_alloc(osb),
			"ino %lu, iblock %llu\n", inode->i_ino,
			(unsigned long long)iblock);

	/* Treat the unwritten extent as a hole for zeroing purposes. */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	if (!ocfs2_sparse_alloc(osb)) {
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n",
			     (unsigned long long)i_size_read(inode),
			     OCFS2_I(inode)->ip_clusters);
			dump_stack();
		}

		past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
		mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
		     (unsigned long long)past_eof);

		if (create && (iblock >= past_eof))
			set_buffer_new(bh_result);
	}

bail:
	if (err < 0)
		err = -EIO;

	mlog_exit(err);
	return err;
}

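/*
 * Lock ordering in ->readpage() is: meta (inode metadata) cluster lock,
 * then ip_alloc_sem, then the data cluster lock. A return of
 * AOP_TRUNCATED_PAGE from the *_with_page() lock variants means the
 * page was unlocked while we slept on the cluster lock, so we hand it
 * back to the VFS still unlocked (unlock = 0) for a retry.
 */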
static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem) == 0) {
		ret = AOP_TRUNCATED_PAGE;
		goto out_meta_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock.  We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here.  Callers
	 * (generic_file_read, fault->nopage) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user_page(page, 0, PAGE_SIZE, KM_USER0);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	ret = ocfs2_data_lock_with_page(inode, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out_alloc;
	}

	ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

	ocfs2_data_unlock(inode, 0);
out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_meta_unlock:
	ocfs2_meta_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	mlog_exit(ret);
	return ret;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * while doing block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	mlog_entry("(0x%p)\n", page);

	ret = block_write_full_page(page, ocfs2_get_block, wbc);

	mlog_exit(ret);

	return ret;
}

/*
 * This is called from ocfs2_write_zero_page() which has handled its
 * own cluster locking and has ensured allocation exists for those
 * blocks to be written.
 */
int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
			       unsigned from, unsigned to)
{
	int ret;

	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	ret = block_prepare_write(page, from, to, ocfs2_get_block);

	up_read(&OCFS2_I(inode)->ip_alloc_sem);

	return ret;
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(	handle_t *handle,
			struct buffer_head *head,
			unsigned from,
			unsigned to,
			int *partial,
			int (*fn)(	handle_t *handle,
					struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

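	/*
	 * The buffer_heads attached to a page form a circular list
	 * linked through b_this_page, so the loop below terminates
	 * when it wraps back around to 'head' with a non-zero
	 * block_start.
	 */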
	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
				      struct page *page,
				      unsigned from,
				      unsigned to)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	int ret = 0;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(ret);
		goto out;
	}

	if (ocfs2_should_order_data(inode)) {
		ret = walk_page_buffers(handle,
					page_buffers(page),
					from, to, NULL,
					ocfs2_journal_dirty_data);
		if (ret < 0)
			mlog_errno(ret);
	}
out:
	if (ret) {
		if (handle)
			ocfs2_commit_trans(osb, handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}

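/*
 * ->bmap() has no way to report an error - the sector_t return value
 * is the mapping itself - so any failure below is reported to the
 * caller as block 0 (unmapped).
 */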
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	mlog_entry("(block = %llu)\n", (unsigned long long)block);

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_meta_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL, NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_meta_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	mlog_exit((int)status);

	return status;
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 * 					fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
				      struct buffer_head *bh_result, int create)
{
	int ret;
	u64 p_blkno, inode_blocks, contig_blocks;
	unsigned int ext_flags;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;

	/* This function won't even be called if the request isn't all
	 * nicely aligned and of the right size, so there's no need
	 * for us to check any of that. */

	inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	/*
	 * Any write past EOF is not allowed because we'd be extending.
	 */
	if (create && (iblock + max_blocks) > inode_blocks) {
		ret = -EIO;
		goto bail;
	}

	/* This figures out the size of the next contiguous block, and
	 * our logical offset */
	ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
					  &contig_blocks, &ext_flags);
	if (ret) {
		mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
		     (unsigned long long)iblock);
		ret = -EIO;
		goto bail;
	}

	if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)) && !p_blkno) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu has a hole at block %llu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)iblock);
		ret = -EROFS;
		goto bail;
	}

	/*
	 * get_more_blocks() expects us to describe a hole by clearing
	 * the mapped bit on bh_result().
	 *
	 * Consider an unwritten extent as a hole.
	 */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);
	else {
		/*
		 * ocfs2_prepare_inode_for_write() should have caught
		 * the case where we'd be filling a hole and triggered
		 * a buffered write instead.
		 */
		if (create) {
			ret = -EIO;
			mlog_errno(ret);
			goto bail;
		}

		clear_buffer_mapped(bh_result);
	}

	/* make sure we don't map more than max_blocks blocks here as
	   that's all the kernel will handle at this point. */
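	/*
	 * b_size is the channel back to the dio core here: on entry it
	 * holds the maximum I/O size the core will accept, and on exit
	 * it describes how much contiguous I/O we can actually offer
	 * (see the do_direct_io quote above this function).
	 */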
	if (max_blocks < contig_blocks)
		contig_blocks = max_blocks;
	bh_result->b_size = contig_blocks << blocksize_bits;
bail:
	return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
 * particularly interested in the aio/dio case.  Like the core uses
 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
 * truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
			     loff_t offset,
			     ssize_t bytes,
			     void *private)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	int level;

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

	ocfs2_iocb_clear_rw_locked(iocb);

	level = ocfs2_iocb_rw_locked_level(iocb);
	if (!level)
		up_read(&inode->i_alloc_sem);
	ocfs2_rw_unlock(inode, level);
}

/*
 * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen
 * from ext3.  PageChecked() bits have been removed as OCFS2 does not
 * do journalled data.
 */
static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

	journal_invalidatepage(journal, page, offset);
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

	if (!page_has_buffers(page))
		return 0;
	return journal_try_to_free_buffers(journal, page, wait);
}

static ssize_t ocfs2_direct_IO(int rw,
			       struct kiocb *iocb,
			       const struct iovec *iov,
			       loff_t offset,
			       unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
	int ret;

	mlog_entry_void();

	if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
		/*
		 * We get PR data locks even for O_DIRECT.  This
		 * allows concurrent O_DIRECT I/O but doesn't let
		 * O_DIRECT with extending and buffered zeroing writes
		 * race.  If they did race then the buffered zeroing
		 * could be written back after the O_DIRECT I/O.  It's
		 * one thing to tell people not to mix buffered and
		 * O_DIRECT writes, but expecting them to understand
		 * that file extension is also an implicit buffered
		 * write is too much.  By getting the PR we force
		 * writeback of the buffered zeroing before
		 * proceeding.
		 */
		ret = ocfs2_data_lock(inode, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		ocfs2_data_unlock(inode, 0);
	}

	ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
					    inode->i_sb->s_bdev, iov, offset,
					    nr_segs,
					    ocfs2_direct_IO_get_blocks,
					    ocfs2_dio_end_io);
out:
	mlog_exit(ret);
	return ret;
}

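/*
 * Compute, in bytes within a page, where the cluster containing
 * logical cluster 'cpos' starts and ends. Worked example (assuming
 * 64KB pages and 4KB clusters): cpp = 1 << (16 - 12) = 16 clusters
 * per page, so cpos 35 is cluster 35 % 16 = 3 within its page,
 * giving [12288, 16384). When clusters are at least page sized, the
 * answer is always the whole page, [0, PAGE_CACHE_SIZE).
 */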
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
					    u32 cpos,
					    unsigned int *start,
					    unsigned int *end)
{
	unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
		unsigned int cpp;

		cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);

		cluster_start = cpos % cpp;
		cluster_start = cluster_start << osb->s_clustersize_bits;

		cluster_end = cluster_start + osb->s_clustersize;
	}

	BUG_ON(cluster_start > PAGE_SIZE);
	BUG_ON(cluster_end > PAGE_SIZE);

	if (start)
		*start = cluster_start;
	if (end)
		*end = cluster_end;
}

/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
				     struct ocfs2_super *osb, u32 cpos,
				     unsigned from, unsigned to)
{
	void *kaddr;
	unsigned int cluster_start, cluster_end;

	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

	kaddr = kmap_atomic(page, KM_USER0);

	if (from || to) {
		if (from > cluster_start)
			memset(kaddr + cluster_start, 0, from - cluster_start);
		if (to < cluster_end)
			memset(kaddr + to, 0, cluster_end - to);
	} else {
		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
	}

	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Some of this taken from block_prepare_write().  We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
			  struct inode *inode, unsigned int from,
			  unsigned int to, int new)
{
	int ret = 0;
	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
	unsigned int block_end, block_start;
	unsigned int bsize = 1 << inode->i_blkbits;

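	/*
	 * At most two buffers ever need reading in below - the ones
	 * straddling 'from' and 'to' - which is why wait[] only has
	 * two slots.
	 */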
	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	head = page_buffers(page);
	for (bh = head, block_start = 0; bh != head || !block_start;
	     bh = bh->b_this_page, block_start += bsize) {
		block_end = block_start + bsize;

		clear_buffer_new(bh);

		/*
		 * Ignore blocks outside of our i/o range -
		 * they may belong to unallocated clusters.
		 */
		if (block_start >= to || block_end <= from) {
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			continue;
		}

		/*
		 * For an allocating write with cluster size >= page
		 * size, we always write the entire page.
		 */
		if (new)
			set_buffer_new(bh);

		if (!buffer_mapped(bh)) {
			map_bh(bh, inode->i_sb, *p_blkno);
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		}

		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
			   !buffer_new(bh) &&
			   (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}

		*p_blkno = *p_blkno + 1;
	}

	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			ret = -EIO;
	}

	if (ret == 0 || !new)
		return ret;

	/*
	 * If we get -EIO above, zero out any newly allocated blocks
	 * to avoid exposing stale data.
	 */
	bh = head;
	block_start = 0;
	do {
		void *kaddr;

		block_end = block_start + bsize;
		if (block_end <= from)
			goto next_bh;
		if (block_start >= to)
			break;

		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr+block_start, 0, bh->b_size);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);

next_bh:
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}

#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES	1
#else
#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)

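/*
 * Worked example, assuming 4KB pages and ocfs2's 1MB maximum / 4KB
 * minimum cluster size: OCFS2_MAX_CTXT_PAGES = 256 and
 * OCFS2_MAX_CLUSTERS_PER_PAGE = 1. These are compile-time worst
 * cases; at runtime a given file system has either multiple pages per
 * cluster or multiple clusters per page, never both.
 */
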
/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
	u32		c_cpos;
	u32		c_phys;
	/*
	 * Give this a unique field because c_phys eventually gets
	 * filled.
	 */
	unsigned	c_new;
};

struct ocfs2_write_ctxt {
	/* Logical cluster position / len of write */
	u32				w_cpos;
	u32				w_clen;

	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

	/*
	 * This is true if page_size > cluster_size.
	 *
	 * It triggers a set of special cases during write which might
	 * have to deal with allocating writes to partial pages.
	 */
	unsigned int			w_large_pages;

	/*
	 * Pages involved in this write.
	 *
	 * w_target_page is the page being written to by the user.
	 *
	 * w_pages is an array of pages which always contains
	 * w_target_page, and in the case of an allocating write with
	 * page_size < cluster size, it will contain zero'd and mapped
	 * pages adjacent to w_target_page which need to be written
	 * out so that future reads from that region will get zeros.
	 */
	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
	unsigned int			w_num_pages;
	struct page			*w_target_page;

	/*
	 * ocfs2_write_end() uses this to know what the real range to
	 * write in the target should be.
	 */
	unsigned int			w_target_from;
	unsigned int			w_target_to;

	/*
	 * We could use journal_current_handle() but this is cleaner,
	 * IMHO -Mark
	 */
	handle_t			*w_handle;

	struct buffer_head		*w_di_bh;
};

static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
{
	int i;

	for(i = 0; i < wc->w_num_pages; i++) {
		if (wc->w_pages[i] == NULL)
			continue;

		unlock_page(wc->w_pages[i]);
		mark_page_accessed(wc->w_pages[i]);
		page_cache_release(wc->w_pages[i]);
	}

	brelse(wc->w_di_bh);
	kfree(wc);
}

static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
				  struct ocfs2_super *osb, loff_t pos,
				  unsigned len, struct buffer_head *di_bh)
{
	struct ocfs2_write_ctxt *wc;

	wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
	if (!wc)
		return -ENOMEM;

	wc->w_cpos = pos >> osb->s_clustersize_bits;
	wc->w_clen = ocfs2_clusters_for_bytes(osb->sb, len);
	get_bh(di_bh);
	wc->w_di_bh = di_bh;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
		wc->w_large_pages = 1;
	else
		wc->w_large_pages = 0;

	*wcp = wc;

	return 0;
}

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking).  And clear the new bit.
 */
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, end;
					void *kaddr;

					start = max(from, block_start);
					end = min(to, block_end);

					kaddr = kmap_atomic(page, KM_USER0);
					memset(kaddr+start, 0, end - start);
					flush_dcache_page(page);
					kunmap_atomic(kaddr, KM_USER0);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Only called when we have a failure during an allocating write, to
 * write zeros to the newly allocated region.
 */
static void ocfs2_write_failure(struct inode *inode,
				struct ocfs2_write_ctxt *wc,
				loff_t user_pos, unsigned user_len)
{
	int i;
	unsigned from, to;
	struct page *tmppage;

	ocfs2_zero_new_buffers(wc->w_target_page, user_pos, user_len);

	if (wc->w_large_pages) {
		from = wc->w_target_from;
		to = wc->w_target_to;
	} else {
		from = 0;
		to = PAGE_CACHE_SIZE;
	}

	for(i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (ocfs2_should_order_data(inode))
			walk_page_buffers(wc->w_handle, page_buffers(tmppage),
					  from, to, NULL,
					  ocfs2_journal_dirty_data);

		block_commit_write(tmppage, from, to);
	}
}

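/*
 * Map the buffers in a single page for writing, recording (on the
 * target page) the byte range that ocfs2_write_end() should later
 * commit, and zero the parts of a freshly allocated cluster that the
 * user's copy won't cover.
 */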
static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
					struct ocfs2_write_ctxt *wc,
					struct page *page, u32 cpos,
					loff_t user_pos, unsigned user_len,
					int new)
{
	int ret;
	unsigned int map_from = 0, map_to = 0;
	unsigned int cluster_start, cluster_end;
	unsigned int user_data_from = 0, user_data_to = 0;

	ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
					&cluster_start, &cluster_end);

	if (page == wc->w_target_page) {
		map_from = user_pos & (PAGE_CACHE_SIZE - 1);
		map_to = map_from + user_len;

		if (new)
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    cluster_start, cluster_end,
						    new);
		else
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    map_from, map_to, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		user_data_from = map_from;
		user_data_to = map_to;
		if (new) {
			map_from = cluster_start;
			map_to = cluster_end;
		}

		wc->w_target_from = map_from;
		wc->w_target_to = map_to;
	} else {
		/*
		 * If we haven't allocated the new page yet, we
		 * shouldn't be writing it out without copying user
		 * data. This is likely a math error from the caller.
		 */
		BUG_ON(!new);

		map_from = cluster_start;
		map_to = cluster_end;

		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
					    cluster_start, cluster_end, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Parts of newly allocated pages need to be zero'd.
	 *
	 * Above, we have also rewritten 'to' and 'from' - as far as
	 * the rest of the function is concerned, the entire cluster
	 * range inside of a page needs to be written.
	 *
	 * We can skip this if the page is up to date - it's already
	 * been zero'd from being read in as a hole.
	 */
	if (new && !PageUptodate(page))
		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
					 cpos, user_data_from, user_data_to);

	flush_dcache_page(page);

out:
	return ret;
}

/*
 * This function will only grab one cluster's worth of pages.
 */
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
				      struct ocfs2_write_ctxt *wc,
				      u32 cpos, loff_t user_pos, int new,
				      struct page *mmap_page)
{
	int ret = 0, i;
	unsigned long start, target_index, index;
	struct inode *inode = mapping->host;

	target_index = user_pos >> PAGE_CACHE_SHIFT;

	/*
	 * Figure out how many pages we'll be manipulating here. For
	 * a non-allocating write, we just change the one
	 * page. Otherwise, we'll need a whole cluster's worth.
	 */
	if (new) {
		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
	} else {
		wc->w_num_pages = 1;
		start = target_index;
	}

	for(i = 0; i < wc->w_num_pages; i++) {
		index = start + i;

		if (index == target_index && mmap_page) {
			/*
			 * ocfs2_pagemkwrite() is a little different
			 * and wants us to directly use the page
			 * passed in.
			 */
			lock_page(mmap_page);

			if (mmap_page->mapping != mapping) {
				unlock_page(mmap_page);
				/*
				 * Sanity check - the locking in
				 * ocfs2_pagemkwrite() should ensure
				 * that this code doesn't trigger.
				 */
				ret = -EINVAL;
				mlog_errno(ret);
				goto out;
			}

			page_cache_get(mmap_page);
			wc->w_pages[i] = mmap_page;
		} else {
			wc->w_pages[i] = find_or_create_page(mapping, index,
							     GFP_NOFS);
			if (!wc->w_pages[i]) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}
		}

		if (index == target_index)
			wc->w_target_page = wc->w_pages[i];
	}
out:
	return ret;
}

/*
 * Prepare a single cluster's worth of pages for a write into the file.
 */
static int ocfs2_write_cluster(struct address_space *mapping,
			       u32 phys, struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       struct ocfs2_write_ctxt *wc, u32 cpos,
			       loff_t user_pos, unsigned user_len)
{
	int ret, i, new;
	u64 v_blkno, p_blkno;
	struct inode *inode = mapping->host;

	new = phys == 0 ? 1 : 0;

	if (new) {
		u32 tmp_pos;

		/*
		 * This is safe to call with the page locks - it won't take
		 * any additional semaphores or cluster locks.
		 */
		tmp_pos = cpos;
		ret = ocfs2_do_extend_allocation(OCFS2_SB(inode->i_sb), inode,
						 &tmp_pos, 1, wc->w_di_bh,
						 wc->w_handle, data_ac,
						 meta_ac, NULL);
		/*
		 * This shouldn't happen because we must have already
		 * calculated the correct meta data allocation required. The
		 * internal tree allocation code should know how to increase
		 * transaction credits itself.
		 *
		 * If need be, we could handle -EAGAIN for a
		 * RESTART_TRANS here.
		 */
		mlog_bug_on_msg(ret == -EAGAIN,
				"Inode %llu: EAGAIN return during allocation.\n",
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos);
	} else {
		v_blkno = user_pos >> inode->i_sb->s_blocksize_bits;
	}

	/*
	 * The only reason this should fail is due to an inability to
	 * find the extent added.
	 */
	ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
					  NULL);
	if (ret < 0) {
		ocfs2_error(inode->i_sb, "Corrupt extent map for inode %llu, "
			    "at logical block %llu",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)v_blkno);
		goto out;
	}

	BUG_ON(p_blkno == 0);

	for(i = 0; i < wc->w_num_pages; i++) {
		int tmpret;

		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
						      wc->w_pages[i], cpos,
						      user_pos, user_len, new);
		if (tmpret) {
			mlog_errno(tmpret);
			if (ret == 0)
				ret = tmpret;
		}
	}

	/*
	 * We only have cleanup to do in case of allocating write.
	 */
	if (ret && new)
		ocfs2_write_failure(inode, wc, user_pos, user_len);

out:

	return ret;
}

/*
 * ocfs2_write_end() wants to know which parts of the target page it
 * should complete the write on. It's easiest to compute them ahead of
 * time when a more complete view of the write is available.
 */
static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
					struct ocfs2_write_ctxt *wc,
					loff_t pos, unsigned len, int alloc)
{
	struct ocfs2_write_cluster_desc *desc;

	wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
	wc->w_target_to = wc->w_target_from + len;

	if (alloc == 0)
		return;

	/*
	 * Allocating write - we may have different boundaries based
	 * on page size and cluster size.
	 *
	 * NOTE: We can no longer compute one value from the other as
	 * the actual write length and user provided length may be
	 * different.
	 */

	if (wc->w_large_pages) {
		/*
		 * We only care about the 1st and last cluster within
		 * our range and whether they are holes or not. Either
		 * value may be extended out to the start/end of a
		 * newly allocated cluster.
		 */
		desc = &wc->w_desc[0];
		if (desc->c_new)
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							&wc->w_target_from,
							NULL);

		desc = &wc->w_desc[wc->w_clen - 1];
		if (desc->c_new)
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							NULL,
							&wc->w_target_to);
	} else {
		wc->w_target_from = 0;
		wc->w_target_to = PAGE_CACHE_SIZE;
	}
}

int ocfs2_write_begin_nolock(struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata,
			     struct buffer_head *di_bh, struct page *mmap_page)
{
	int ret, i, credits = OCFS2_INODE_UPDATE_CREDITS;
	unsigned int num_clusters = 0, clusters_to_alloc = 0;
	u32 phys = 0;
	struct ocfs2_write_ctxt *wc;
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle;
	struct ocfs2_write_cluster_desc *desc;

	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

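	/*
	 * Build one descriptor per logical cluster in the write range.
	 * ocfs2_get_clusters() reports how many contiguous clusters an
	 * extent covers, so we only re-query once num_clusters runs
	 * out; inside a mapped extent the physical cluster simply
	 * increments. A physical address of zero marks a hole that
	 * needs allocation.
	 */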
	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];
		desc->c_cpos = wc->w_cpos + i;

		if (num_clusters == 0) {
			ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
						 &num_clusters, NULL);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		} else if (phys) {
			/*
			 * Only increment phys if it doesn't describe
			 * a hole.
			 */
			phys++;
		}

		desc->c_phys = phys;
		if (phys == 0) {
			desc->c_new = 1;
			clusters_to_alloc++;
		}

		num_clusters--;
	}

	/*
	 * We set w_target_from, w_target_to here so that
	 * ocfs2_write_end() knows which range in the target page to
	 * write out. An allocation requires that we write the entire
	 * cluster range.
	 */
	if (clusters_to_alloc > 0) {
		/*
		 * XXX: We are stretching the limits of
		 * ocfs2_lock_allocators(). It greatly over-estimates
		 * the work to be done.
		 */
		ret = ocfs2_lock_allocators(inode, di, clusters_to_alloc,
					    &data_ac, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		credits = ocfs2_calc_extend_credits(inode->i_sb, di,
						    clusters_to_alloc);

	}

	ocfs2_set_target_boundaries(osb, wc, pos, len, clusters_to_alloc);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	wc->w_handle = handle;

	/*
	 * We don't want this to fail in ocfs2_write_end(), so do it
	 * here.
	 */
	ret = ocfs2_journal_access(handle, inode, wc->w_di_bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Fill our page array first. That way we've grabbed enough so
	 * that we can zero and flush if we error after adding the
	 * extent.
	 */
	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
					 clusters_to_alloc, mmap_page);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];

		ret = ocfs2_write_cluster(mapping, desc->c_phys, data_ac,
					  meta_ac, wc, desc->c_cpos, pos, len);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}
	}

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

	*pagep = wc->w_target_page;
	*fsdata = wc;
	return 0;
out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	ocfs2_free_write_ctxt(wc);

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}

int ocfs2_write_begin(struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned len, unsigned flags,
		      struct page **pagep, void **fsdata)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct inode *inode = mapping->host;

	ret = ocfs2_meta_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/*
	 * Take alloc sem here to prevent concurrent lookups. That way
	 * the mapping, zeroing and tree manipulation within
	 * ocfs2_write() will be safe against ->readpage(). This
	 * should also serve to lock out allocation from a shared
	 * writeable region.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = ocfs2_data_lock(inode, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_fail;
	}

	ret = ocfs2_write_begin_nolock(mapping, pos, len, flags, pagep,
				       fsdata, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_fail_data;
	}

	brelse(di_bh);

	return 0;

out_fail_data:
	ocfs2_data_unlock(inode, 1);
out_fail:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_meta_unlock(inode, 1);

	return ret;
}

int ocfs2_write_end_nolock(struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	int i;
	unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_write_ctxt *wc = fsdata;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	handle_t *handle = wc->w_handle;
	struct page *tmppage;

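	/*
	 * A short copy from userspace (copied < len) can leave freshly
	 * allocated buffers without data; zero them rather than
	 * exposing stale blocks. If the target page wasn't uptodate we
	 * can't trust any of the copied data, so treat it as a
	 * zero-length copy.
	 */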
	if (unlikely(copied < len)) {
		if (!PageUptodate(wc->w_target_page))
			copied = 0;

		ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
				       start+len);
	}
	flush_dcache_page(wc->w_target_page);

	for(i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (tmppage == wc->w_target_page) {
			from = wc->w_target_from;
			to = wc->w_target_to;

			BUG_ON(from > PAGE_CACHE_SIZE ||
			       to > PAGE_CACHE_SIZE ||
			       to < from);
		} else {
			/*
			 * Pages adjacent to the target (if any) imply
			 * a hole-filling write in which case we want
			 * to flush their entire range.
			 */
			from = 0;
			to = PAGE_CACHE_SIZE;
		}

		if (ocfs2_should_order_data(inode))
			walk_page_buffers(wc->w_handle, page_buffers(tmppage),
					  from, to, NULL,
					  ocfs2_journal_dirty_data);

		block_commit_write(tmppage, from, to);
	}

	pos += copied;
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	di->i_size = cpu_to_le64((u64)i_size_read(inode));
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
	di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);

	ocfs2_journal_dirty(handle, wc->w_di_bh);

	ocfs2_commit_trans(osb, handle);
	ocfs2_free_write_ctxt(wc);

	return copied;
}

int ocfs2_write_end(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned copied,
		    struct page *page, void *fsdata)
{
	int ret;
	struct inode *inode = mapping->host;

	ret = ocfs2_write_end_nolock(mapping, pos, len, copied, page, fsdata);

	ocfs2_data_unlock(inode, 1);
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	ocfs2_meta_unlock(inode, 1);

	return ret;
}

const struct address_space_operations ocfs2_aops = {
	.readpage	= ocfs2_readpage,
	.writepage	= ocfs2_writepage,
	.bmap		= ocfs2_bmap,
	.sync_page	= block_sync_page,
	.direct_IO	= ocfs2_direct_IO,
	.invalidatepage	= ocfs2_invalidatepage,
	.releasepage	= ocfs2_releasepage,
	.migratepage	= buffer_migrate_page,
};