/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
21 | ||
22 | #include <linux/fs.h> | |
23 | #include <linux/slab.h> | |
24 | #include <linux/highmem.h> | |
25 | #include <linux/pagemap.h> | |
26 | #include <asm/byteorder.h> | |
27 | #include <linux/swap.h> | |
28 | #include <linux/pipe_fs_i.h> | |
29 | #include <linux/mpage.h> | |
30 | #include <linux/quotaops.h> | |
31 | #include <linux/blkdev.h> | |
32 | #include <linux/uio.h> | |
33 | ||
34 | #include <cluster/masklog.h> | |
35 | ||
36 | #include "ocfs2.h" | |
37 | ||
38 | #include "alloc.h" | |
39 | #include "aops.h" | |
40 | #include "dlmglue.h" | |
41 | #include "extent_map.h" | |
42 | #include "file.h" | |
43 | #include "inode.h" | |
44 | #include "journal.h" | |
45 | #include "suballoc.h" | |
46 | #include "super.h" | |
47 | #include "symlink.h" | |
48 | #include "refcounttree.h" | |
49 | #include "ocfs2_trace.h" | |
50 | ||
51 | #include "buffer_head_io.h" | |
52 | #include "dir.h" | |
53 | #include "namei.h" | |
54 | #include "sysfile.h" | |
55 | ||
static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	trace_ocfs2_symlink_get_block(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		err = -ENOMEM;
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			err = -ENOMEM;
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	brelse(bh);

	return err;
}

int ocfs2_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
	u64 p_blkno, count, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
			      (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	if (max_blocks < count)
		count = max_blocks;

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows __block_write_begin() to zero.
	 *
	 * If we see this on a sparse file system, then a truncate has
	 * raced us and removed the cluster. In this case, we clear
	 * the buffers dirty and uptodate bits and let the buffer code
	 * ignore it as a hole.
	 */
	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
		clear_buffer_dirty(bh_result);
		clear_buffer_uptodate(bh_result);
		goto bail;
	}

	/* Treat the unwritten extent as a hole for zeroing purposes. */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	bh_result->b_size = count << inode->i_blkbits;

	if (!ocfs2_sparse_alloc(osb)) {
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n",
			     (unsigned long long)i_size_read(inode),
			     OCFS2_I(inode)->ip_clusters);
			dump_stack();
			goto bail;
		}
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)past_eof);
	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	return err;
}

int ocfs2_read_inline_data(struct inode *inode, struct page *page,
			   struct buffer_head *di_bh)
{
	void *kaddr;
	loff_t size;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
		return -EROFS;
	}

	size = i_size_read(inode);

	if (size > PAGE_SIZE ||
	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu with inline data has bad size: %Lu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)size);
		return -EROFS;
	}

	kaddr = kmap_atomic(page);
	if (size)
		memcpy(kaddr, di->id2.i_data.id_data, size);
	/* Clear the remaining part of the page */
	memset(kaddr + size, 0, PAGE_SIZE - size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	SetPageUptodate(page);

	return 0;
}

static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
	unlock_page(page);

	brelse(di_bh);
	return ret;
}

static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_SHIFT;
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
			     (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		/*
		 * Unlock the page and cycle ip_alloc_sem so that we don't
		 * busyloop waiting for ip_alloc_sem to unlock
		 */
		ret = AOP_TRUNCATED_PAGE;
		unlock_page(page);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock. We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here. Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	return ret;
}

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	int ret, err = -EIO;
	struct inode *inode = mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start;
	struct page *last;

	/*
	 * Use the nonblocking flag for the dlm code to avoid page
	 * lock inversion, but don't bother with retrying.
	 */
	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return err;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ocfs2_inode_unlock(inode, 0);
		return err;
	}

	/*
	 * Don't bother with inline-data. There isn't anything
	 * to read-ahead in that case anyway...
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_unlock;

	/*
	 * Check whether a remote node truncated this file - we just
	 * drop out in that case as it's not worth handling here.
	 */
	last = list_entry(pages->prev, struct page, lru);
	start = (loff_t)last->index << PAGE_SHIFT;
	if (start >= i_size_read(inode))
		goto out_unlock;

	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
	up_read(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 0);

	return err;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing. It can't block on any cluster locks
 * during block mapping. It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	trace_ocfs2_writepage(
		(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
		page->index);

	return block_write_full_page(page, ocfs2_get_block, wbc);
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(handle_t *handle,
		      struct buffer_head *head,
		      unsigned from,
		      unsigned to,
		      int *partial,
		      int (*fn)(handle_t *handle,
				struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
			 (unsigned long long)block);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O. We really can't allow
	 * that on refcounted inodes, so we have to skip out here. And yes,
	 * 0 is the magic code for a bmap error.
	 */
	if (ocfs2_is_refcount_inode(inode))
		return 0;

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	return status;
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
	if (!page_has_buffers(page))
		return 0;
	return try_to_free_buffers(page);
}

static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
					    u32 cpos,
					    unsigned int *start,
					    unsigned int *end)
{
	unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;

	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
		unsigned int cpp;

		cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);

		cluster_start = cpos % cpp;
		cluster_start = cluster_start << osb->s_clustersize_bits;

		cluster_end = cluster_start + osb->s_clustersize;
	}

	BUG_ON(cluster_start > PAGE_SIZE);
	BUG_ON(cluster_end > PAGE_SIZE);

	if (start)
		*start = cluster_start;
	if (end)
		*end = cluster_end;
}

/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
				     struct ocfs2_super *osb, u32 cpos,
				     unsigned from, unsigned to)
{
	void *kaddr;
	unsigned int cluster_start, cluster_end;

	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

	kaddr = kmap_atomic(page);

	if (from || to) {
		if (from > cluster_start)
			memset(kaddr + cluster_start, 0, from - cluster_start);
		if (to < cluster_end)
			memset(kaddr + to, 0, cluster_end - to);
	} else {
		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
	}

	kunmap_atomic(kaddr);
}

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read-in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
 */
static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
				 unsigned int block_start)
{
	u64 offset = page_offset(page) + block_start;

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		return 1;

	if (i_size_read(inode) > offset)
		return 1;

	return 0;
}

/*
 * Some of this taken from __block_write_begin(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
			  struct inode *inode, unsigned int from,
			  unsigned int to, int new)
{
	int ret = 0;
	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
	unsigned int block_end, block_start;
	unsigned int bsize = i_blocksize(inode);
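	/*
	 * Note: wait[2] is large enough because at most the first and
	 * last buffers in [from, to) can be partially covered and thus
	 * need a read below; fully covered buffers are overwritten.
	 */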
612 | ||
613 | if (!page_has_buffers(page)) | |
614 | create_empty_buffers(page, bsize, 0); | |
615 | ||
616 | head = page_buffers(page); | |
617 | for (bh = head, block_start = 0; bh != head || !block_start; | |
618 | bh = bh->b_this_page, block_start += bsize) { | |
619 | block_end = block_start + bsize; | |
620 | ||
621 | clear_buffer_new(bh); | |
622 | ||
623 | /* | |
624 | * Ignore blocks outside of our i/o range - | |
625 | * they may belong to unallocated clusters. | |
626 | */ | |
627 | if (block_start >= to || block_end <= from) { | |
628 | if (PageUptodate(page)) | |
629 | set_buffer_uptodate(bh); | |
630 | continue; | |
631 | } | |
632 | ||
633 | /* | |
634 | * For an allocating write with cluster size >= page | |
635 | * size, we always write the entire page. | |
636 | */ | |
637 | if (new) | |
638 | set_buffer_new(bh); | |
639 | ||
640 | if (!buffer_mapped(bh)) { | |
641 | map_bh(bh, inode->i_sb, *p_blkno); | |
642 | clean_bdev_bh_alias(bh); | |
643 | } | |
644 | ||
645 | if (PageUptodate(page)) { | |
646 | if (!buffer_uptodate(bh)) | |
647 | set_buffer_uptodate(bh); | |
648 | } else if (!buffer_uptodate(bh) && !buffer_delay(bh) && | |
649 | !buffer_new(bh) && | |
650 | ocfs2_should_read_blk(inode, page, block_start) && | |
651 | (block_start < from || block_end > to)) { | |
652 | ll_rw_block(REQ_OP_READ, 0, 1, &bh); | |
653 | *wait_bh++=bh; | |
654 | } | |
655 | ||
656 | *p_blkno = *p_blkno + 1; | |
657 | } | |
658 | ||
659 | /* | |
660 | * If we issued read requests - let them complete. | |
661 | */ | |
662 | while(wait_bh > wait) { | |
663 | wait_on_buffer(*--wait_bh); | |
664 | if (!buffer_uptodate(*wait_bh)) | |
665 | ret = -EIO; | |
666 | } | |
667 | ||
668 | if (ret == 0 || !new) | |
669 | return ret; | |
670 | ||
671 | /* | |
672 | * If we get -EIO above, zero out any newly allocated blocks | |
673 | * to avoid exposing stale data. | |
674 | */ | |
675 | bh = head; | |
676 | block_start = 0; | |
677 | do { | |
678 | block_end = block_start + bsize; | |
679 | if (block_end <= from) | |
680 | goto next_bh; | |
681 | if (block_start >= to) | |
682 | break; | |
683 | ||
684 | zero_user(page, block_start, bh->b_size); | |
685 | set_buffer_uptodate(bh); | |
686 | mark_buffer_dirty(bh); | |
687 | ||
688 | next_bh: | |
689 | block_start = block_end; | |
690 | bh = bh->b_this_page; | |
691 | } while (bh != head); | |
692 | ||
693 | return ret; | |
694 | } | |
695 | ||
#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES	1
#else
#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)

struct ocfs2_unwritten_extent {
	struct list_head	ue_node;
	struct list_head	ue_ip_node;
	u32			ue_cpos;
	u32			ue_phys;
};

/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
	u32		c_cpos;
	u32		c_phys;
	/*
	 * Give this a unique field because c_phys eventually gets
	 * filled.
	 */
	unsigned	c_new;
	unsigned	c_clear_unwritten;
	unsigned	c_needs_zero;
};

struct ocfs2_write_ctxt {
	/* Logical cluster position / len of write */
	u32				w_cpos;
	u32				w_clen;

	/* First cluster allocated in a nonsparse extend */
	u32				w_first_new_cpos;

	/* Type of caller. Must be one of buffer, mmap, direct. */
	ocfs2_write_type_t		w_type;

	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

	/*
	 * This is true if page_size > cluster_size.
	 *
	 * It triggers a set of special cases during write which might
	 * have to deal with allocating writes to partial pages.
	 */
	unsigned int			w_large_pages;

	/*
	 * Pages involved in this write.
	 *
	 * w_target_page is the page being written to by the user.
	 *
	 * w_pages is an array of pages which always contains
	 * w_target_page, and in the case of an allocating write with
	 * page_size < cluster size, it will contain zero'd and mapped
	 * pages adjacent to w_target_page which need to be written
	 * out so that future reads from that region will get zeros.
	 */
	unsigned int			w_num_pages;
	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
	struct page			*w_target_page;

	/*
	 * w_target_locked is used by the page_mkwrite path to indicate
	 * that w_target_page must not be unlocked in
	 * ocfs2_write_end_nolock().
	 */
	unsigned int			w_target_locked:1;

	/*
	 * ocfs2_write_end() uses this to know what the real range to
	 * write in the target should be.
	 */
	unsigned int			w_target_from;
	unsigned int			w_target_to;

	/*
	 * We could use journal_current_handle() but this is cleaner,
	 * IMHO -Mark
	 */
	handle_t			*w_handle;

	struct buffer_head		*w_di_bh;

	struct ocfs2_cached_dealloc_ctxt w_dealloc;

	struct list_head		w_unwritten_list;
};

void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			mark_page_accessed(pages[i]);
			put_page(pages[i]);
		}
	}
}

static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
{
	int i;

	/*
	 * w_target_locked is only set to true in the page_mkwrite() case.
	 * The intent is to allow us to lock the target page from write_begin()
	 * to write_end(). The caller must hold a ref on w_target_page.
	 */
	if (wc->w_target_locked) {
		BUG_ON(!wc->w_target_page);
		for (i = 0; i < wc->w_num_pages; i++) {
			if (wc->w_target_page == wc->w_pages[i]) {
				wc->w_pages[i] = NULL;
				break;
			}
		}
		mark_page_accessed(wc->w_target_page);
		put_page(wc->w_target_page);
	}
	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
}

static void ocfs2_free_unwritten_list(struct inode *inode,
				      struct list_head *head)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL;

	list_for_each_entry_safe(ue, tmp, head, ue_node) {
		list_del(&ue->ue_node);
		spin_lock(&oi->ip_lock);
		list_del(&ue->ue_ip_node);
		spin_unlock(&oi->ip_lock);
		kfree(ue);
	}
}

static void ocfs2_free_write_ctxt(struct inode *inode,
				  struct ocfs2_write_ctxt *wc)
{
	ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
	ocfs2_unlock_pages(wc);
	brelse(wc->w_di_bh);
	kfree(wc);
}

static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
				  struct ocfs2_super *osb, loff_t pos,
				  unsigned len, ocfs2_write_type_t type,
				  struct buffer_head *di_bh)
{
	u32 cend;
	struct ocfs2_write_ctxt *wc;

	wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
	if (!wc)
		return -ENOMEM;

	wc->w_cpos = pos >> osb->s_clustersize_bits;
	wc->w_first_new_cpos = UINT_MAX;
	cend = (pos + len - 1) >> osb->s_clustersize_bits;
	wc->w_clen = cend - wc->w_cpos + 1;
	get_bh(di_bh);
	wc->w_di_bh = di_bh;
	wc->w_type = type;

	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
		wc->w_large_pages = 1;
	else
		wc->w_large_pages = 0;

	ocfs2_init_dealloc_ctxt(&wc->w_dealloc);
	INIT_LIST_HEAD(&wc->w_unwritten_list);

	*wcp = wc;

	return 0;
}

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, end;

					start = max(from, block_start);
					end = min(to, block_end);

					zero_user_segment(page, start, end);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Only called when we have a failure during an allocating write, to
 * write zeros to the newly allocated region.
 */
static void ocfs2_write_failure(struct inode *inode,
				struct ocfs2_write_ctxt *wc,
				loff_t user_pos, unsigned user_len)
{
	int i;
	unsigned from = user_pos & (PAGE_SIZE - 1),
		to = user_pos + user_len;
	struct page *tmppage;

	if (wc->w_target_page)
		ocfs2_zero_new_buffers(wc->w_target_page, from, to);

	for (i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (tmppage && page_has_buffers(tmppage)) {
			if (ocfs2_should_order_data(inode))
				ocfs2_jbd2_file_inode(wc->w_handle, inode);

			block_commit_write(tmppage, from, to);
		}
	}
}

static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
					struct ocfs2_write_ctxt *wc,
					struct page *page, u32 cpos,
					loff_t user_pos, unsigned user_len,
					int new)
{
	int ret;
	unsigned int map_from = 0, map_to = 0;
	unsigned int cluster_start, cluster_end;
	unsigned int user_data_from = 0, user_data_to = 0;

	ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
					&cluster_start, &cluster_end);

	/* treat the write as new if a hole/lseek spanned across
	 * the page boundary.
	 */
	new = new | ((i_size_read(inode) <= page_offset(page)) &&
		     (page_offset(page) <= user_pos));

	if (page == wc->w_target_page) {
		map_from = user_pos & (PAGE_SIZE - 1);
		map_to = map_from + user_len;

		if (new)
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    cluster_start, cluster_end,
						    new);
		else
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    map_from, map_to, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		user_data_from = map_from;
		user_data_to = map_to;
		if (new) {
			map_from = cluster_start;
			map_to = cluster_end;
		}
	} else {
		/*
		 * If we haven't allocated the new page yet, we
		 * shouldn't be writing it out without copying user
		 * data. This is likely a math error from the caller.
		 */
		BUG_ON(!new);

		map_from = cluster_start;
		map_to = cluster_end;

		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
					    cluster_start, cluster_end, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Parts of newly allocated pages need to be zero'd.
	 *
	 * Above, we have also rewritten 'to' and 'from' - as far as
	 * the rest of the function is concerned, the entire cluster
	 * range inside of a page needs to be written.
	 *
	 * We can skip this if the page is up to date - it's already
	 * been zero'd from being read in as a hole.
	 */
	if (new && !PageUptodate(page))
		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
					 cpos, user_data_from, user_data_to);

	flush_dcache_page(page);

out:
	return ret;
}

/*
 * This function will only grab one cluster's worth of pages.
 */
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
				      struct ocfs2_write_ctxt *wc,
				      u32 cpos, loff_t user_pos,
				      unsigned user_len, int new,
				      struct page *mmap_page)
{
	int ret = 0, i;
	unsigned long start, target_index, end_index, index;
	struct inode *inode = mapping->host;
	loff_t last_byte;

	target_index = user_pos >> PAGE_SHIFT;

	/*
	 * Figure out how many pages we'll be manipulating here. For
	 * a non-allocating write, we just change the one
	 * page. Otherwise, we'll need a whole cluster's worth. If we're
	 * writing past i_size, we only need enough pages to cover the
	 * last page of the write.
	 */
	if (new) {
		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
		/*
		 * We need the index *past* the last page we could possibly
		 * touch. This is the page past the end of the write or
		 * i_size, whichever is greater.
		 */
		last_byte = max(user_pos + user_len, i_size_read(inode));
		BUG_ON(last_byte < 1);
		end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
		if ((start + wc->w_num_pages) > end_index)
			wc->w_num_pages = end_index - start;
	} else {
		wc->w_num_pages = 1;
		start = target_index;
	}
	end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;

	for (i = 0; i < wc->w_num_pages; i++) {
		index = start + i;

		if (index >= target_index && index <= end_index &&
		    wc->w_type == OCFS2_WRITE_MMAP) {
			/*
			 * ocfs2_pagemkwrite() is a little different
			 * and wants us to directly use the page
			 * passed in.
			 */
			lock_page(mmap_page);

			/* Exit and let the caller retry */
			if (mmap_page->mapping != mapping) {
				WARN_ON(mmap_page->mapping);
				unlock_page(mmap_page);
				ret = -EAGAIN;
				goto out;
			}

			get_page(mmap_page);
			wc->w_pages[i] = mmap_page;
			wc->w_target_locked = true;
		} else if (index >= target_index && index <= end_index &&
			   wc->w_type == OCFS2_WRITE_DIRECT) {
			/* Direct write has no mapping page. */
			wc->w_pages[i] = NULL;
			continue;
		} else {
			wc->w_pages[i] = find_or_create_page(mapping, index,
							     GFP_NOFS);
			if (!wc->w_pages[i]) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}
		}
		wait_for_stable_page(wc->w_pages[i]);

		if (index == target_index)
			wc->w_target_page = wc->w_pages[i];
	}
out:
	if (ret)
		wc->w_target_locked = false;
	return ret;
}

/*
 * Prepare a single cluster to be written into the file.
 */
static int ocfs2_write_cluster(struct address_space *mapping,
			       u32 *phys, unsigned int new,
			       unsigned int clear_unwritten,
			       unsigned int should_zero,
			       struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       struct ocfs2_write_ctxt *wc, u32 cpos,
			       loff_t user_pos, unsigned user_len)
{
	int ret, i;
	u64 p_blkno;
	struct inode *inode = mapping->host;
	struct ocfs2_extent_tree et;
	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);

	if (new) {
		u32 tmp_pos;

		/*
		 * This is safe to call with the page locks - it won't take
		 * any additional semaphores or cluster locks.
		 */
		tmp_pos = cpos;
		ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
					   &tmp_pos, 1, !clear_unwritten,
					   wc->w_di_bh, wc->w_handle,
					   data_ac, meta_ac, NULL);
		/*
		 * This shouldn't happen because we must have already
		 * calculated the correct meta data allocation required. The
		 * internal tree allocation code should know how to increase
		 * transaction credits itself.
		 *
		 * If need be, we could handle -EAGAIN for a
		 * RESTART_TRANS here.
		 */
		mlog_bug_on_msg(ret == -EAGAIN,
				"Inode %llu: EAGAIN return during allocation.\n",
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	} else if (clear_unwritten) {
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_mark_extent_written(inode, &et,
						wc->w_handle, cpos, 1, *phys,
						meta_ac, &wc->w_dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * The only reason this should fail is due to an inability to
	 * find the extent added.
	 */
	ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL);
	if (ret < 0) {
		mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
		     "at logical cluster %u",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
		goto out;
	}

	BUG_ON(*phys == 0);

	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys);
	if (!should_zero)
		p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);

	for (i = 0; i < wc->w_num_pages; i++) {
		int tmpret;

		/* This is the direct io target page. */
		if (wc->w_pages[i] == NULL) {
			p_blkno++;
			continue;
		}

		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
						      wc->w_pages[i], cpos,
						      user_pos, user_len,
						      should_zero);
		if (tmpret) {
			mlog_errno(tmpret);
			if (ret == 0)
				ret = tmpret;
		}
	}

	/*
	 * We only have cleanup to do in case of allocating write.
	 */
	if (ret && new)
		ocfs2_write_failure(inode, wc, user_pos, user_len);

out:

	return ret;
}

static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
				       struct ocfs2_alloc_context *data_ac,
				       struct ocfs2_alloc_context *meta_ac,
				       struct ocfs2_write_ctxt *wc,
				       loff_t pos, unsigned len)
{
	int ret, i;
	loff_t cluster_off;
	unsigned int local_len = len;
	struct ocfs2_write_cluster_desc *desc;
	struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];

		/*
		 * We have to make sure that the total write passed in
		 * doesn't extend past a single cluster.
		 */
		local_len = len;
		cluster_off = pos & (osb->s_clustersize - 1);
		if ((cluster_off + local_len) > osb->s_clustersize)
			local_len = osb->s_clustersize - cluster_off;

		ret = ocfs2_write_cluster(mapping, &desc->c_phys,
					  desc->c_new,
					  desc->c_clear_unwritten,
					  desc->c_needs_zero,
					  data_ac, meta_ac,
					  wc, desc->c_cpos, pos, local_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		len -= local_len;
		pos += local_len;
	}

	ret = 0;
out:
	return ret;
}

/*
 * ocfs2_write_end() wants to know which parts of the target page it
 * should complete the write on. It's easiest to compute them ahead of
 * time when a more complete view of the write is available.
 */
static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
					struct ocfs2_write_ctxt *wc,
					loff_t pos, unsigned len, int alloc)
{
	struct ocfs2_write_cluster_desc *desc;

	wc->w_target_from = pos & (PAGE_SIZE - 1);
	wc->w_target_to = wc->w_target_from + len;

	if (alloc == 0)
		return;

	/*
	 * Allocating write - we may have different boundaries based
	 * on page size and cluster size.
	 *
	 * NOTE: We can no longer compute one value from the other as
	 * the actual write length and user provided length may be
	 * different.
	 */

	if (wc->w_large_pages) {
		/*
		 * We only care about the 1st and last cluster within
		 * our range and whether they should be zero'd or not. Either
		 * value may be extended out to the start/end of a
		 * newly allocated cluster.
		 */
		desc = &wc->w_desc[0];
		if (desc->c_needs_zero)
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							&wc->w_target_from,
							NULL);

		desc = &wc->w_desc[wc->w_clen - 1];
		if (desc->c_needs_zero)
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							NULL,
							&wc->w_target_to);
	} else {
		wc->w_target_from = 0;
		wc->w_target_to = PAGE_SIZE;
	}
}

/*
 * Check if this extent is marked UNWRITTEN by direct I/O. If so, we
 * need not do the zeroing work here, and should not clear UNWRITTEN
 * either, since both will be handled by the direct I/O procedure.
 * If this is a new extent allocated by direct I/O, we should mark it
 * in the ip_unwritten_list.
 */
static int ocfs2_unwritten_check(struct inode *inode,
				 struct ocfs2_write_ctxt *wc,
				 struct ocfs2_write_cluster_desc *desc)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_unwritten_extent *ue = NULL, *new = NULL;
	int ret = 0;

	if (!desc->c_needs_zero)
		return 0;

retry:
	spin_lock(&oi->ip_lock);
	/* No need to zero, whether buffered or direct: whoever queued
	 * the zeroing of this cluster is doing it, and will clear
	 * UNWRITTEN once all cluster I/O has finished. */
	list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) {
		if (desc->c_cpos == ue->ue_cpos) {
			BUG_ON(desc->c_new);
			desc->c_needs_zero = 0;
			desc->c_clear_unwritten = 0;
			goto unlock;
		}
	}

	if (wc->w_type != OCFS2_WRITE_DIRECT)
		goto unlock;

	if (new == NULL) {
		spin_unlock(&oi->ip_lock);
		new = kmalloc(sizeof(struct ocfs2_unwritten_extent),
			      GFP_NOFS);
		if (new == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		goto retry;
	}
	/* This direct write will do the zeroing. */
	new->ue_cpos = desc->c_cpos;
	new->ue_phys = desc->c_phys;
	desc->c_clear_unwritten = 0;
	list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list);
	list_add_tail(&new->ue_node, &wc->w_unwritten_list);
	new = NULL;
unlock:
	spin_unlock(&oi->ip_lock);
out:
	kfree(new);
	return ret;
}

/*
 * Populate each single-cluster write descriptor in the write context
 * with information about the i/o to be done.
 *
 * Returns the number of clusters that will have to be allocated, as
 * well as a worst case estimate of the number of extent records that
 * would have to be created during a write to an unwritten region.
 */
static int ocfs2_populate_write_desc(struct inode *inode,
				     struct ocfs2_write_ctxt *wc,
				     unsigned int *clusters_to_alloc,
				     unsigned int *extents_to_split)
{
	int ret;
	struct ocfs2_write_cluster_desc *desc;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;
	u32 phys = 0;
	int i;

	*clusters_to_alloc = 0;
	*extents_to_split = 0;

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];
		desc->c_cpos = wc->w_cpos + i;

		if (num_clusters == 0) {
			/*
			 * Need to look up the next extent record.
			 */
			ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
						 &num_clusters, &ext_flags);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/* We should have already CoWed the refcounted extent. */
			BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

			/*
			 * Assume worst case - that we're writing in
			 * the middle of the extent.
			 *
			 * We can assume that the write proceeds from
			 * left to right, in which case the extent
			 * insert code is smart enough to coalesce the
			 * next splits into the previous records created.
			 */
			if (ext_flags & OCFS2_EXT_UNWRITTEN)
				*extents_to_split = *extents_to_split + 2;
		} else if (phys) {
			/*
			 * Only increment phys if it doesn't describe
			 * a hole.
			 */
			phys++;
		}

		/*
		 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
		 * file that got extended. w_first_new_cpos tells us
		 * where the newly allocated clusters are so we can
		 * zero them.
		 */
		if (desc->c_cpos >= wc->w_first_new_cpos) {
			BUG_ON(phys == 0);
			desc->c_needs_zero = 1;
		}

		desc->c_phys = phys;
		if (phys == 0) {
			desc->c_new = 1;
			desc->c_needs_zero = 1;
			desc->c_clear_unwritten = 1;
			*clusters_to_alloc = *clusters_to_alloc + 1;
		}

		if (ext_flags & OCFS2_EXT_UNWRITTEN) {
			desc->c_clear_unwritten = 1;
			desc->c_needs_zero = 1;
		}

		ret = ocfs2_unwritten_check(inode, wc, desc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		num_clusters--;
	}

	ret = 0;
out:
	return ret;
}

static int ocfs2_write_begin_inline(struct address_space *mapping,
				    struct inode *inode,
				    struct ocfs2_write_ctxt *wc)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct page *page;
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	page = find_or_create_page(mapping, 0, GFP_NOFS);
	if (!page) {
		ocfs2_commit_trans(osb, handle);
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	/*
	 * If we don't set w_num_pages then this page won't get unlocked
	 * and freed on cleanup of the write context.
	 */
	wc->w_pages[0] = wc->w_target_page = page;
	wc->w_num_pages = 1;

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		ocfs2_commit_trans(osb, handle);

		mlog_errno(ret);
		goto out;
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		ocfs2_set_inode_data_inline(inode, di);

	if (!PageUptodate(page)) {
		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
		if (ret) {
			ocfs2_commit_trans(osb, handle);

			goto out;
		}
	}

	wc->w_handle = handle;
out:
	return ret;
}

int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
		return 1;
	return 0;
}

static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
					  struct inode *inode, loff_t pos,
					  unsigned len, struct page *mmap_page,
					  struct ocfs2_write_ctxt *wc)
{
	int ret, written = 0;
	loff_t end = pos + len;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = NULL;

	trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
					     len, (unsigned long long)pos,
					     oi->ip_dyn_features);

	/*
	 * Handle inodes which already have inline data 1st.
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		if (mmap_page == NULL &&
		    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
			goto do_inline_write;

		/*
		 * The write won't fit - we have to give this inode an
		 * inline extent list now.
		 */
		ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/*
	 * Check whether the inode can accept inline data.
	 */
	if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
		return 0;

	/*
	 * Check whether the write can fit.
	 */
	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	if (mmap_page ||
	    end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
		return 0;

do_inline_write:
	ret = ocfs2_write_begin_inline(mapping, inode, wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * This signals to the caller that the data can be written
	 * inline.
	 */
	written = 1;
out:
	return written ? written : ret;
}

/*
 * This function only does anything for file systems which can't
 * handle sparse files.
 *
 * What we want to do here is fill in any hole between the current end
 * of allocation and the end of our write. That way the rest of the
 * write path can treat it as a non-allocating write, which has no
 * special case code for sparse/nonsparse files.
 */
static int ocfs2_expand_nonsparse_inode(struct inode *inode,
					struct buffer_head *di_bh,
					loff_t pos, unsigned len,
					struct ocfs2_write_ctxt *wc)
{
	int ret;
	loff_t newsize = pos + len;

	BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));

	if (newsize <= i_size_read(inode))
		return 0;

	ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
	if (ret)
		mlog_errno(ret);

	/* There is no wc if this is called from direct I/O. */
	if (wc)
		wc->w_first_new_cpos =
			ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));

	return ret;
}

static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
			   loff_t pos)
{
	int ret = 0;

	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
	if (pos > i_size_read(inode))
		ret = ocfs2_zero_extend(inode, di_bh, pos);

	return ret;
}

int ocfs2_write_begin_nolock(struct address_space *mapping,
			     loff_t pos, unsigned len, ocfs2_write_type_t type,
			     struct page **pagep, void **fsdata,
			     struct buffer_head *di_bh, struct page *mmap_page)
{
	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
	unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
	struct ocfs2_write_ctxt *wc;
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle;
	struct ocfs2_extent_tree et;
	int try_free = 1, ret1;

try_again:
	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	if (ocfs2_supports_inline_data(osb)) {
		ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
						     mmap_page, wc);
		if (ret == 1) {
			ret = 0;
			goto success;
		}
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Direct IO changes i_size late, so we should not zero the tail here. */
	if (type != OCFS2_WRITE_DIRECT) {
		if (ocfs2_sparse_alloc(osb))
			ret = ocfs2_zero_tail(inode, di_bh, pos);
		else
			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
							   len, wc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_check_range_for_refcount(inode, pos, len);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	} else if (ret == 1) {
		clusters_need = wc->w_clen;
		ret = ocfs2_refcount_cow(inode, di_bh,
					 wc->w_cpos, wc->w_clen, UINT_MAX);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
					&extents_to_split);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	clusters_need += clusters_to_alloc;

	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	trace_ocfs2_write_begin_nolock(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(long long)i_size_read(inode),
			le32_to_cpu(di->i_clusters),
			pos, len, type, mmap_page,
			clusters_to_alloc, extents_to_split);

	/*
	 * We set w_target_from, w_target_to here so that
	 * ocfs2_write_end() knows which range in the target page to
	 * write out. An allocation requires that we write the entire
	 * cluster range.
	 */
	if (clusters_to_alloc || extents_to_split) {
		/*
		 * XXX: We are stretching the limits of
		 * ocfs2_lock_allocators(). It greatly over-estimates
		 * the work to be done.
		 */
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_lock_allocators(inode, &et,
					    clusters_to_alloc, extents_to_split,
					    &data_ac, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (data_ac)
			data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;

		credits = ocfs2_calc_extend_credits(inode->i_sb,
						    &di->id2.i_list);
	} else if (type == OCFS2_WRITE_DIRECT)
		/* A direct write need not start a transaction if no extent is allocated. */
		goto success;

	/*
	 * We have to zero sparse allocated clusters, unwritten extent clusters,
	 * and non-sparse clusters we just extended. For non-sparse writes,
	 * we know zeros will only be needed in the first and/or last cluster.
	 */
	if (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
			   wc->w_desc[wc->w_clen - 1].c_needs_zero))
		cluster_of_pages = 1;
	else
		cluster_of_pages = 0;

	ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	wc->w_handle = handle;

	if (clusters_to_alloc) {
		ret = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
		if (ret)
			goto out_commit;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	/*
	 * Fill our page array first. That way we've grabbed enough so
	 * that we can zero and flush if we error after adding the
	 * extent.
	 */
	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
					 cluster_of_pages, mmap_page);
	if (ret && ret != -EAGAIN) {
		mlog_errno(ret);
		goto out_quota;
	}

	/*
	 * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
	 * the target page. In this case, we exit with no error and no target
	 * page. This will trigger the caller, page_mkwrite(), to re-try
	 * the operation.
	 */
	if (ret == -EAGAIN) {
		BUG_ON(wc->w_target_page);
		ret = 0;
		goto out_quota;
	}

	ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
					  len);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

success:
	if (pagep)
		*pagep = wc->w_target_page;
	*fsdata = wc;
	return 0;
out_quota:
	if (clusters_to_alloc)
		dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	/*
	 * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
	 * even on an error here such as ENOSPC or ENOMEM. So we have to
	 * unlock the target page manually, to prevent deadlocks when
	 * retrying on ENOSPC, or when returning non-VM_FAULT_LOCKED
	 * to the VM.
	 */
	if (wc->w_target_locked)
		unlock_page(mmap_page);

	ocfs2_free_write_ctxt(inode, wc);

	if (data_ac) {
		ocfs2_free_alloc_context(data_ac);
		data_ac = NULL;
	}
	if (meta_ac) {
		ocfs2_free_alloc_context(meta_ac);
		meta_ac = NULL;
	}

	if (ret == -ENOSPC && try_free) {
		/*
		 * Try to free some truncate log so that we can have enough
		 * clusters to allocate.
		 */
		try_free = 0;

		ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
		if (ret1 == 1)
			goto try_again;

		if (ret1 < 0)
			mlog_errno(ret1);
	}

	return ret;
}

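/*
 * Lock ordering in the buffered write path, for reference:
 *
 *	ocfs2_inode_lock()		(cluster lock)
 *	 -> down_write(ip_alloc_sem)
 *	  -> page locks / journal transaction
 *
 * ocfs2_write_end() releases them in the reverse order.
 */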
static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct inode *inode = mapping->host;

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/*
	 * Take alloc sem here to prevent concurrent lookups. That way
	 * the mapping, zeroing and tree manipulation within
	 * ocfs2_write() will be safe against ->readpage(). This
	 * should also serve to lock out allocation from a shared
	 * writeable region.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
				       pagep, fsdata, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_fail;
	}

	brelse(di_bh);

	return 0;

out_fail:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

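/*
 * Copy what was just written in the target page back into the inline
 * data area of the dinode. A short copy into a page that is not
 * uptodate is discarded entirely, mirroring the page-based path in
 * ocfs2_write_end_nolock().
 */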
static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
				   unsigned len, unsigned *copied,
				   struct ocfs2_dinode *di,
				   struct ocfs2_write_ctxt *wc)
{
	void *kaddr;

	if (unlikely(*copied < len)) {
		if (!PageUptodate(wc->w_target_page)) {
			*copied = 0;
			return;
		}
	}

	kaddr = kmap_atomic(wc->w_target_page);
	memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
	kunmap_atomic(kaddr);

	trace_ocfs2_write_end_inline(
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     (unsigned long long)pos, *copied,
	     le16_to_cpu(di->id2.i_data.id_count),
	     le16_to_cpu(di->i_dyn_features));
}

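/*
 * Finish a write prepared by ocfs2_write_begin_nolock(): commit the
 * dirtied buffers, update i_size and the timestamps, and tear down the
 * write context. Returns the number of bytes copied, or a negative
 * errno if the dinode could not be journalled.
 */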
int ocfs2_write_end_nolock(struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied, void *fsdata)
{
	int i, ret;
	unsigned from, to, start = pos & (PAGE_SIZE - 1);
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_write_ctxt *wc = fsdata;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	handle_t *handle = wc->w_handle;
	struct page *tmppage;

	BUG_ON(!list_empty(&wc->w_unwritten_list));

	if (handle) {
		ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
					      wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			copied = ret;
			mlog_errno(ret);
			goto out;
		}
	}

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
		goto out_write_size;
	}

	if (unlikely(copied < len) && wc->w_target_page) {
		if (!PageUptodate(wc->w_target_page))
			copied = 0;

		ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
				       start+len);
	}
	if (wc->w_target_page)
		flush_dcache_page(wc->w_target_page);

	for (i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		/* This is the direct IO target page. */
		if (tmppage == NULL)
			continue;

		if (tmppage == wc->w_target_page) {
			from = wc->w_target_from;
			to = wc->w_target_to;

			BUG_ON(from > PAGE_SIZE ||
			       to > PAGE_SIZE ||
			       to < from);
		} else {
			/*
			 * Pages adjacent to the target (if any) imply
			 * a hole-filling write in which case we want
			 * to flush their entire range.
			 */
			from = 0;
			to = PAGE_SIZE;
		}

		if (page_has_buffers(tmppage)) {
			if (handle && ocfs2_should_order_data(inode))
				ocfs2_jbd2_file_inode(handle, inode);
			block_commit_write(tmppage, from, to);
		}
	}

out_write_size:
	/* Direct IO does not update i_size here. */
	if (wc->w_type != OCFS2_WRITE_DIRECT) {
		pos += copied;
		if (pos > i_size_read(inode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		inode->i_blocks = ocfs2_inode_sector_count(inode);
		di->i_size = cpu_to_le64((u64)i_size_read(inode));
		inode->i_mtime = inode->i_ctime = current_time(inode);
		di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
		di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
		ocfs2_update_inode_fsync_trans(handle, inode, 1);
	}
	if (handle)
		ocfs2_journal_dirty(handle, wc->w_di_bh);

out:
	/*
	 * Unlock the pages before running the deallocs, since dealloc
	 * needs to acquire the j_trans_barrier lock; otherwise we can
	 * deadlock, because the journal commit thread holds that lock
	 * and will ask for the page lock when flushing the data.
	 * Doing it here preserves the unlock order.
	 */
	ocfs2_unlock_pages(wc);

	if (handle)
		ocfs2_commit_trans(osb, handle);

	ocfs2_run_deallocs(osb, &wc->w_dealloc);

	brelse(wc->w_di_bh);
	kfree(wc);

	return copied;
}

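/*
 * ->write_end counterpart of ocfs2_write_begin(): finish the write,
 * then drop ip_alloc_sem and the inode cluster lock taken there.
 */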
static int ocfs2_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	int ret;
	struct inode *inode = mapping->host;

	ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

struct ocfs2_dio_write_ctxt {
	struct list_head dw_zero_list;
	unsigned dw_zero_count;
	int dw_orphaned;
	pid_t dw_writer_pid;
};

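/*
 * The write context is stashed in bh_result->b_private so that it
 * survives the repeated get_block calls a single direct IO may make;
 * *alloc tells the caller whether this invocation created it.
 */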
static struct ocfs2_dio_write_ctxt *
ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc)
{
	struct ocfs2_dio_write_ctxt *dwc = NULL;

	if (bh->b_private)
		return bh->b_private;

	dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS);
	if (dwc == NULL)
		return NULL;
	INIT_LIST_HEAD(&dwc->dw_zero_list);
	dwc->dw_zero_count = 0;
	dwc->dw_orphaned = 0;
	dwc->dw_writer_pid = task_pid_nr(current);
	bh->b_private = dwc;
	*alloc = 1;

	return dwc;
}

static void ocfs2_dio_free_write_ctx(struct inode *inode,
				     struct ocfs2_dio_write_ctxt *dwc)
{
	ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list);
	kfree(dwc);
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 * "So what we do is to permit the ->get_blocks function to populate
 * bh.b_size with the size of IO which is permitted at this offset and
 * this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 *					fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_write_ctxt *wc;
	struct ocfs2_write_cluster_desc *desc = NULL;
	struct ocfs2_dio_write_ctxt *dwc = NULL;
	struct buffer_head *di_bh = NULL;
	u64 p_blkno;
	loff_t pos = iblock << inode->i_sb->s_blocksize_bits;
	unsigned len, total_len = bh_result->b_size;
	int ret = 0, first_get_block = 0;

	len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
	len = min(total_len, len);

	mlog(0, "get block of %lu at %llu:%u req %u\n",
	     inode->i_ino, pos, len, total_len);

	/*
	 * Because we may need to change the file size in
	 * ocfs2_dio_end_io_write(), or add the inode to the orphan dir
	 * there, we cannot take the fast path when the file size will
	 * change.
	 */
	if (pos + total_len <= i_size_read(inode)) {
		down_read(&oi->ip_alloc_sem);
		/* This is the fast path for re-write. */
		ret = ocfs2_get_block(inode, iblock, bh_result, create);

		up_read(&oi->ip_alloc_sem);

		if (buffer_mapped(bh_result) &&
		    !buffer_new(bh_result) &&
		    ret == 0)
			goto out;

		/* Clear state set by ocfs2_get_block. */
		bh_result->b_state = 0;
	}

	dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block);
	if (unlikely(dwc == NULL)) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) >
	    ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) &&
	    !dwc->dw_orphaned) {
		/*
		 * When we are going to allocate extents beyond the file
		 * size, add the inode to the orphan dir first, so that we
		 * can reclaim that space if the system crashes during the
		 * write.
		 */
		ret = ocfs2_add_inode_to_orphan(osb, inode);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		dwc->dw_orphaned = 1;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	down_write(&oi->ip_alloc_sem);

	if (first_get_block) {
		if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
			ret = ocfs2_zero_tail(inode, di_bh, pos);
		else
			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
							   total_len, NULL);
		if (ret < 0) {
			mlog_errno(ret);
			goto unlock;
		}
	}

	ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len,
				       OCFS2_WRITE_DIRECT, NULL,
				       (void **)&wc, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto unlock;
	}

	desc = &wc->w_desc[0];

	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys);
	BUG_ON(p_blkno == 0);
	p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1);

	map_bh(bh_result, inode->i_sb, p_blkno);
	bh_result->b_size = len;
	if (desc->c_needs_zero)
		set_buffer_new(bh_result);

	/*
	 * May sleep in end_io, which should not happen in irq context,
	 * so defer it to the dio work queue.
	 */
	set_buffer_defer_completion(bh_result);

	if (!list_empty(&wc->w_unwritten_list)) {
		struct ocfs2_unwritten_extent *ue = NULL;

		ue = list_first_entry(&wc->w_unwritten_list,
				      struct ocfs2_unwritten_extent,
				      ue_node);
		BUG_ON(ue->ue_cpos != desc->c_cpos);
		/* The physical address may be 0; fill it in. */
		ue->ue_phys = desc->c_phys;

		list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
		dwc->dw_zero_count++;
	}

	ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc);
	BUG_ON(ret != len);
	ret = 0;
unlock:
	up_write(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);
	brelse(di_bh);
out:
	if (ret < 0)
		ret = -EIO;
	return ret;
}

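/*
 * Called once a direct write completes. Any extents allocated in
 * ocfs2_dio_get_block() were left unwritten, so mark the written ones
 * here, extend i_size if the write went past it, and remove the inode
 * from the orphan dir again if we added it there.
 */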
static int ocfs2_dio_end_io_write(struct inode *inode,
				  struct ocfs2_dio_write_ctxt *dwc,
				  loff_t offset,
				  ssize_t bytes)
{
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_extent_tree et;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_unwritten_extent *ue = NULL;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle = NULL;
	loff_t end = offset + bytes;
	int ret = 0, credits = 0, locked = 0;

	ocfs2_init_dealloc_ctxt(&dealloc);

	/*
	 * We clear unwritten extents, delete the orphan entry and change
	 * i_size here. If none of these is needed, we can skip all this.
	 */
	if (list_empty(&dwc->dw_zero_list) &&
	    end <= i_size_read(inode) &&
	    !dwc->dw_orphaned)
		goto out;

	/*
	 * ocfs2_file_write_iter will take i_mutex, so we need not lock
	 * if we are in that context.
	 */
	if (dwc->dw_writer_pid != task_pid_nr(current)) {
		inode_lock(inode);
		locked = 1;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	down_write(&oi->ip_alloc_sem);

	/* Delete the orphan entry before acquiring i_mutex. */
	if (dwc->dw_orphaned) {
		BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));

		end = end > i_size_read(inode) ? end : 0;

		ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
				!!end, end);
		if (ret < 0)
			mlog_errno(ret);
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);

	ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2,
				    &data_ac, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto unlock;
	}

	credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto unlock;
	}
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto commit;
	}

	list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
		ret = ocfs2_mark_extent_written(inode, &et, handle,
						ue->ue_cpos, 1,
						ue->ue_phys,
						meta_ac, &dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	if (end > i_size_read(inode)) {
		ret = ocfs2_set_inode_size(handle, inode, di_bh, end);
		if (ret < 0)
			mlog_errno(ret);
	}
commit:
	ocfs2_commit_trans(osb, handle);
unlock:
	up_write(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);
	brelse(di_bh);
out:
	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	ocfs2_run_deallocs(osb, &dealloc);
	if (locked)
		inode_unlock(inode);
	ocfs2_dio_free_write_ctx(inode, dwc);

	return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
 * particularly interested in the aio/dio case. We use the rw_lock DLM lock
 * to protect io on one node from truncation on another.
 */
static int ocfs2_dio_end_io(struct kiocb *iocb,
			    loff_t offset,
			    ssize_t bytes,
			    void *private)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int level;
	int ret = 0;

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

	if (bytes > 0 && private)
		ret = ocfs2_dio_end_io_write(inode, private, offset, bytes);

	ocfs2_iocb_clear_rw_locked(iocb);

	level = ocfs2_iocb_rw_locked_level(iocb);
	ocfs2_rw_unlock(inode, level);
	return ret;
}

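/*
 * Returning 0 from ->direct_IO makes the generic code fall back to
 * buffered IO, which is what both early-exit paths below rely on.
 */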
static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	get_block_t *get_block;

	/*
	 * Fall back to buffered I/O if we see an inode without
	 * extents.
	 */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	/* Fall back to buffered I/O if we do not support append dio. */
	if (iocb->ki_pos + iter->count > i_size_read(inode) &&
	    !ocfs2_supports_append_dio(osb))
		return 0;

	if (iov_iter_rw(iter) == READ)
		get_block = ocfs2_get_block;
	else
		get_block = ocfs2_dio_get_block;

	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
				    iter, get_block,
				    ocfs2_dio_end_io, NULL, 0);
}

const struct address_space_operations ocfs2_aops = {
	.readpage		= ocfs2_readpage,
	.readpages		= ocfs2_readpages,
	.writepage		= ocfs2_writepage,
	.write_begin		= ocfs2_write_begin,
	.write_end		= ocfs2_write_end,
	.bmap			= ocfs2_bmap,
	.direct_IO		= ocfs2_direct_IO,
	.invalidatepage		= block_invalidatepage,
	.releasepage		= ocfs2_releasepage,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};