fs/ocfs2/aops.c
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public
17 * License along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 19 * Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/fs.h>
23#include <linux/slab.h>
24#include <linux/highmem.h>
25#include <linux/pagemap.h>
26#include <asm/byteorder.h>
 27#include <linux/swap.h>
28
29#define MLOG_MASK_PREFIX ML_FILE_IO
30#include <cluster/masklog.h>
31
32#include "ocfs2.h"
33
34#include "alloc.h"
35#include "aops.h"
36#include "dlmglue.h"
37#include "extent_map.h"
38#include "file.h"
39#include "inode.h"
40#include "journal.h"
 41#include "suballoc.h"
42#include "super.h"
43#include "symlink.h"
44
45#include "buffer_head_io.h"
46
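/*
 * get_block callback for symlinks whose target is stored in an
 * allocated extent (fast symlinks are rejected by the BUG_ON below).
 * Validates the dinode, bounds-checks iblock against PATH_MAX and the
 * allocated clusters, copies data over from the buffer cache for newly
 * created inodes, and maps bh_result to the symlink's first extent.
 */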
47static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
48 struct buffer_head *bh_result, int create)
49{
50 int err = -EIO;
51 int status;
52 struct ocfs2_dinode *fe = NULL;
53 struct buffer_head *bh = NULL;
54 struct buffer_head *buffer_cache_bh = NULL;
55 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
56 void *kaddr;
57
58 mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
59 (unsigned long long)iblock, bh_result, create);
60
61 BUG_ON(ocfs2_inode_is_fast_symlink(inode));
62
63 if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
64 mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
65 (unsigned long long)iblock);
66 goto bail;
67 }
68
69 status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
70 OCFS2_I(inode)->ip_blkno,
71 &bh, OCFS2_BH_CACHED, inode);
72 if (status < 0) {
73 mlog_errno(status);
74 goto bail;
75 }
76 fe = (struct ocfs2_dinode *) bh->b_data;
77
78 if (!OCFS2_IS_VALID_DINODE(fe)) {
79 mlog(ML_ERROR, "Invalid dinode #%llu: signature = %.*s\n",
80 (unsigned long long)fe->i_blkno, 7, fe->i_signature);
81 goto bail;
82 }
83
84 if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
85 le32_to_cpu(fe->i_clusters))) {
86 mlog(ML_ERROR, "block offset is outside the allocated size: "
87 "%llu\n", (unsigned long long)iblock);
88 goto bail;
89 }
90
91 /* We don't use the page cache to create symlink data, so if
92 * need be, copy it over from the buffer cache. */
93 if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
94 u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
95 iblock;
96 buffer_cache_bh = sb_getblk(osb->sb, blkno);
97 if (!buffer_cache_bh) {
98 mlog(ML_ERROR, "couldn't getblock for symlink!\n");
99 goto bail;
100 }
101
102 /* we haven't locked out transactions, so a commit
103 * could've happened. Since we've got a reference on
104 * the bh, even if it commits while we're doing the
105 * copy, the data is still good. */
106 if (buffer_jbd(buffer_cache_bh)
107 && ocfs2_inode_is_new(inode)) {
108 kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
109 if (!kaddr) {
110 mlog(ML_ERROR, "couldn't kmap!\n");
111 goto bail;
112 }
113 memcpy(kaddr + (bh_result->b_size * iblock),
114 buffer_cache_bh->b_data,
115 bh_result->b_size);
116 kunmap_atomic(kaddr, KM_USER0);
117 set_buffer_uptodate(bh_result);
118 }
119 brelse(buffer_cache_bh);
120 }
121
122 map_bh(bh_result, inode->i_sb,
123 le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);
124
125 err = 0;
126
127bail:
128 if (bh)
129 brelse(bh);
130
131 mlog_exit(err);
132 return err;
133}
134
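/*
 * Generic get_block callback for the ocfs2 address space operations:
 * hands symlinks off to ocfs2_symlink_get_block(), resolves the logical
 * block through the extent map, and marks the buffer new when a write
 * maps blocks past the current end of file.
 */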
135static int ocfs2_get_block(struct inode *inode, sector_t iblock,
136 struct buffer_head *bh_result, int create)
137{
138 int err = 0;
139 u64 p_blkno, past_eof;
140
141 mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
142 (unsigned long long)iblock, bh_result, create);
143
144 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
145 mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
146 inode, inode->i_ino);
147
148 if (S_ISLNK(inode->i_mode)) {
149 /* this always does I/O for some reason. */
150 err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
151 goto bail;
152 }
153
 154	/* this can happen if another node truncates after our extend! */
155 spin_lock(&OCFS2_I(inode)->ip_lock);
156 if (iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
157 OCFS2_I(inode)->ip_clusters))
158 err = -EIO;
159 spin_unlock(&OCFS2_I(inode)->ip_lock);
160 if (err)
161 goto bail;
162
 163	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, NULL);
164 if (err) {
165 mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
166 "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
167 (unsigned long long)p_blkno);
168 goto bail;
169 }
170
171 map_bh(bh_result, inode->i_sb, p_blkno);
172
173 if (bh_result->b_blocknr == 0) {
174 err = -EIO;
175 mlog(ML_ERROR, "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
176 (unsigned long long)iblock,
177 (unsigned long long)p_blkno,
178 (unsigned long long)OCFS2_I(inode)->ip_blkno);
179 }
180
181 past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
182 mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
183 (unsigned long long)past_eof);
184
185 if (create && (iblock >= past_eof))
186 set_buffer_new(bh_result);
187
188bail:
189 if (err < 0)
190 err = -EIO;
191
192 mlog_exit(err);
193 return err;
194}
195
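/*
 * ->readpage: takes the cluster meta and data locks (propagating
 * AOP_TRUNCATED_PAGE when the locking code had to drop the page lock),
 * zero-fills pages that now lie past i_size, and otherwise lets
 * block_read_full_page() do the read via ocfs2_get_block().
 */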
196static int ocfs2_readpage(struct file *file, struct page *page)
197{
198 struct inode *inode = page->mapping->host;
199 loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
200 int ret, unlock = 1;
201
202 mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));
203
 204	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
205 if (ret != 0) {
206 if (ret == AOP_TRUNCATED_PAGE)
207 unlock = 0;
208 mlog_errno(ret);
209 goto out;
210 }
211
212 down_read(&OCFS2_I(inode)->ip_alloc_sem);
213
214 /*
 215	 * i_size might have just been updated as we grabbed the meta lock. We
216 * might now be discovering a truncate that hit on another node.
217 * block_read_full_page->get_block freaks out if it is asked to read
218 * beyond the end of a file, so we check here. Callers
219 * (generic_file_read, fault->nopage) are clever enough to check i_size
220 * and notice that the page they just read isn't needed.
221 *
222 * XXX sys_readahead() seems to get that wrong?
223 */
224 if (start >= i_size_read(inode)) {
225 char *addr = kmap(page);
226 memset(addr, 0, PAGE_SIZE);
227 flush_dcache_page(page);
228 kunmap(page);
229 SetPageUptodate(page);
230 ret = 0;
231 goto out_alloc;
232 }
233
234 ret = ocfs2_data_lock_with_page(inode, 0, page);
235 if (ret != 0) {
236 if (ret == AOP_TRUNCATED_PAGE)
237 unlock = 0;
238 mlog_errno(ret);
239 goto out_alloc;
240 }
241
242 ret = block_read_full_page(page, ocfs2_get_block);
243 unlock = 0;
244
245 ocfs2_data_unlock(inode, 0);
246out_alloc:
247 up_read(&OCFS2_I(inode)->ip_alloc_sem);
248 ocfs2_meta_unlock(inode, 0);
249out:
250 if (unlock)
251 unlock_page(page);
252 mlog_exit(ret);
253 return ret;
254}
255
256/* Note: Because we don't support holes, our allocation has
257 * already happened (allocation writes zeros to the file data)
258 * so we don't have to worry about ordered writes in
259 * ocfs2_writepage.
260 *
261 * ->writepage is called during the process of invalidating the page cache
262 * during blocked lock processing. It can't block on any cluster locks
 263 * during block mapping. It's relying on the fact that the block
264 * mapping can't have disappeared under the dirty pages that it is
265 * being asked to write back.
266 */
267static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
268{
269 int ret;
270
271 mlog_entry("(0x%p)\n", page);
272
273 ret = block_write_full_page(page, ocfs2_get_block, wbc);
274
275 mlog_exit(ret);
276
277 return ret;
278}
279
280/*
 281 * This is called from ocfs2_write_zero_page() which has handled its
282 * own cluster locking and has ensured allocation exists for those
283 * blocks to be written.
284 */
285int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
286 unsigned from, unsigned to)
287{
288 int ret;
289
290 down_read(&OCFS2_I(inode)->ip_alloc_sem);
291
292 ret = block_prepare_write(page, from, to, ocfs2_get_block);
293
294 up_read(&OCFS2_I(inode)->ip_alloc_sem);
295
296 return ret;
297}
298
299/* Taken from ext3. We don't necessarily need the full blown
300 * functionality yet, but IMHO it's better to cut and paste the whole
301 * thing so we can avoid introducing our own bugs (and easily pick up
302 * their fixes when they happen) --Mark */
303static int walk_page_buffers( handle_t *handle,
304 struct buffer_head *head,
305 unsigned from,
306 unsigned to,
307 int *partial,
308 int (*fn)( handle_t *handle,
309 struct buffer_head *bh))
310{
311 struct buffer_head *bh;
312 unsigned block_start, block_end;
313 unsigned blocksize = head->b_size;
314 int err, ret = 0;
315 struct buffer_head *next;
316
317 for ( bh = head, block_start = 0;
318 ret == 0 && (bh != head || !block_start);
319 block_start = block_end, bh = next)
320 {
321 next = bh->b_this_page;
322 block_end = block_start + blocksize;
323 if (block_end <= from || block_start >= to) {
324 if (partial && !buffer_uptodate(bh))
325 *partial = 1;
326 continue;
327 }
328 err = (*fn)(handle, bh);
329 if (!ret)
330 ret = err;
331 }
332 return ret;
333}
334
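/*
 * Start a transaction for a page update. In ordered-data mode the
 * page's buffers are passed to ocfs2_journal_dirty_data() so their
 * contents are written out before the transaction commits. Returns an
 * ERR_PTR on failure.
 */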
 335handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
336 struct page *page,
337 unsigned from,
338 unsigned to)
339{
340 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 341	handle_t *handle = NULL;
342 int ret = 0;
343
 344	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
345 if (!handle) {
346 ret = -ENOMEM;
347 mlog_errno(ret);
348 goto out;
349 }
350
351 if (ocfs2_should_order_data(inode)) {
 352		ret = walk_page_buffers(handle,
353 page_buffers(page),
354 from, to, NULL,
355 ocfs2_journal_dirty_data);
356 if (ret < 0)
357 mlog_errno(ret);
358 }
359out:
360 if (ret) {
361 if (handle)
 362			ocfs2_commit_trans(osb, handle);
363 handle = ERR_PTR(ret);
364 }
365 return handle;
366}
367
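/*
 * ->bmap: resolve a logical block to its physical block number via the
 * extent map, taking the meta lock and ip_alloc_sem except for journal
 * system files. Returns 0 on any error.
 */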
368static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
369{
370 sector_t status;
371 u64 p_blkno = 0;
372 int err = 0;
373 struct inode *inode = mapping->host;
374
375 mlog_entry("(block = %llu)\n", (unsigned long long)block);
376
377 /* We don't need to lock journal system files, since they aren't
378 * accessed concurrently from multiple nodes.
379 */
380 if (!INODE_JOURNAL(inode)) {
 381		err = ocfs2_meta_lock(inode, NULL, 0);
382 if (err) {
383 if (err != -ENOENT)
384 mlog_errno(err);
385 goto bail;
386 }
387 down_read(&OCFS2_I(inode)->ip_alloc_sem);
388 }
389
 390	err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL);
391
392 if (!INODE_JOURNAL(inode)) {
393 up_read(&OCFS2_I(inode)->ip_alloc_sem);
394 ocfs2_meta_unlock(inode, 0);
395 }
396
397 if (err) {
398 mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
399 (unsigned long long)block);
400 mlog_errno(err);
401 goto bail;
402 }
403
404
405bail:
406 status = err ? 0 : p_blkno;
407
408 mlog_exit((int)status);
409
410 return status;
411}
412
413/*
414 * TODO: Make this into a generic get_blocks function.
415 *
416 * From do_direct_io in direct-io.c:
417 * "So what we do is to permit the ->get_blocks function to populate
418 * bh.b_size with the size of IO which is permitted at this offset and
419 * this i_blkbits."
420 *
421 * This function is called directly from get_more_blocks in direct-io.c.
422 *
423 * called like this: dio->get_blocks(dio->inode, fs_startblk,
424 * fs_count, map_bh, dio->rw == WRITE);
425 */
426static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
427 struct buffer_head *bh_result, int create)
428{
429 int ret;
 430	u64 p_blkno, inode_blocks;
 431	int contig_blocks;
 432	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
 433	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
 434
435 /* This function won't even be called if the request isn't all
436 * nicely aligned and of the right size, so there's no need
437 * for us to check any of that. */
438
 439	spin_lock(&OCFS2_I(inode)->ip_lock);
440 inode_blocks = ocfs2_clusters_to_blocks(inode->i_sb,
441 OCFS2_I(inode)->ip_clusters);
442
443 /*
444 * For a read which begins past the end of file, we return a hole.
445 */
446 if (!create && (iblock >= inode_blocks)) {
447 spin_unlock(&OCFS2_I(inode)->ip_lock);
448 ret = 0;
449 goto bail;
450 }
451
452 /*
453 * Any write past EOF is not allowed because we'd be extending.
454 */
455 if (create && (iblock + max_blocks) > inode_blocks) {
456 spin_unlock(&OCFS2_I(inode)->ip_lock);
457 ret = -EIO;
458 goto bail;
459 }
460 spin_unlock(&OCFS2_I(inode)->ip_lock);
461
462 /* This figures out the size of the next contiguous block, and
463 * our logical offset */
 464	ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
465 &contig_blocks);
466 if (ret) {
467 mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
468 (unsigned long long)iblock);
469 ret = -EIO;
470 goto bail;
471 }
472
473 map_bh(bh_result, inode->i_sb, p_blkno);
474
475 /* make sure we don't map more than max_blocks blocks here as
476 that's all the kernel will handle at this point. */
477 if (max_blocks < contig_blocks)
478 contig_blocks = max_blocks;
479 bh_result->b_size = contig_blocks << blocksize_bits;
480bail:
481 return ret;
482}
483
484/*
485 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
486 * particularly interested in the aio/dio case. Like the core uses
487 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
488 * truncation on another.
489 */
490static void ocfs2_dio_end_io(struct kiocb *iocb,
491 loff_t offset,
492 ssize_t bytes,
493 void *private)
494{
 495	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
496
497 /* this io's submitter should not have unlocked this before we could */
498 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
499 ocfs2_iocb_clear_rw_locked(iocb);
500 up_read(&inode->i_alloc_sem);
501 ocfs2_rw_unlock(inode, 0);
502}
503
504/*
505 * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen
506 * from ext3. PageChecked() bits have been removed as OCFS2 does not
507 * do journalled data.
508 */
509static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
510{
511 journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
512
513 journal_invalidatepage(journal, page, offset);
514}
515
516static int ocfs2_releasepage(struct page *page, gfp_t wait)
517{
518 journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
519
520 if (!page_has_buffers(page))
521 return 0;
522 return journal_try_to_free_buffers(journal, page, wait);
523}
524
525static ssize_t ocfs2_direct_IO(int rw,
526 struct kiocb *iocb,
527 const struct iovec *iov,
528 loff_t offset,
529 unsigned long nr_segs)
530{
531 struct file *file = iocb->ki_filp;
 532	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
533 int ret;
534
535 mlog_entry_void();
 536
537 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
538 /*
539 * We get PR data locks even for O_DIRECT. This
540 * allows concurrent O_DIRECT I/O but doesn't let
541 * O_DIRECT with extending and buffered zeroing writes
542 * race. If they did race then the buffered zeroing
543 * could be written back after the O_DIRECT I/O. It's
544 * one thing to tell people not to mix buffered and
545 * O_DIRECT writes, but expecting them to understand
546 * that file extension is also an implicit buffered
547 * write is too much. By getting the PR we force
548 * writeback of the buffered zeroing before
549 * proceeding.
550 */
551 ret = ocfs2_data_lock(inode, 0);
552 if (ret < 0) {
553 mlog_errno(ret);
554 goto out;
555 }
556 ocfs2_data_unlock(inode, 0);
 557	}
 558
559 ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
560 inode->i_sb->s_bdev, iov, offset,
561 nr_segs,
562 ocfs2_direct_IO_get_blocks,
563 ocfs2_dio_end_io);
 564out:
565 mlog_exit(ret);
566 return ret;
567}
568
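/*
 * Compute the byte range [*start, *end) within a page that belongs to
 * the cluster containing cpos. When the cluster size is at least the
 * page size, the whole page is covered.
 */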
569static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
570 u32 cpos,
571 unsigned int *start,
572 unsigned int *end)
573{
574 unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;
575
576 if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
577 unsigned int cpp;
578
579 cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);
580
581 cluster_start = cpos % cpp;
582 cluster_start = cluster_start << osb->s_clustersize_bits;
583
584 cluster_end = cluster_start + osb->s_clustersize;
585 }
586
587 BUG_ON(cluster_start > PAGE_SIZE);
588 BUG_ON(cluster_end > PAGE_SIZE);
589
590 if (start)
591 *start = cluster_start;
592 if (end)
593 *end = cluster_end;
594}
595
596/*
597 * 'from' and 'to' are the region in the page to avoid zeroing.
598 *
599 * If pagesize > clustersize, this function will avoid zeroing outside
600 * of the cluster boundary.
601 *
602 * from == to == 0 is code for "zero the entire cluster region"
603 */
604static void ocfs2_clear_page_regions(struct page *page,
605 struct ocfs2_super *osb, u32 cpos,
606 unsigned from, unsigned to)
607{
608 void *kaddr;
609 unsigned int cluster_start, cluster_end;
610
611 ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
612
613 kaddr = kmap_atomic(page, KM_USER0);
614
615 if (from || to) {
616 if (from > cluster_start)
617 memset(kaddr + cluster_start, 0, from - cluster_start);
618 if (to < cluster_end)
619 memset(kaddr + to, 0, cluster_end - to);
620 } else {
621 memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
622 }
623
624 kunmap_atomic(kaddr, KM_USER0);
625}
626
627/*
628 * Some of this taken from block_prepare_write(). We already have our
629 * mapping by now though, and the entire write will be allocating or
630 * it won't, so not much need to use BH_New.
631 *
632 * This will also skip zeroing, which is handled externally.
633 */
634static int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
635 struct inode *inode, unsigned int from,
636 unsigned int to, int new)
637{
638 int ret = 0;
639 struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
640 unsigned int block_end, block_start;
641 unsigned int bsize = 1 << inode->i_blkbits;
642
643 if (!page_has_buffers(page))
644 create_empty_buffers(page, bsize, 0);
645
646 head = page_buffers(page);
647 for (bh = head, block_start = 0; bh != head || !block_start;
648 bh = bh->b_this_page, block_start += bsize) {
649 block_end = block_start + bsize;
650
651 /*
652 * Ignore blocks outside of our i/o range -
653 * they may belong to unallocated clusters.
654 */
655 if (block_start >= to ||
656 (block_start + bsize) <= from) {
657 if (PageUptodate(page))
658 set_buffer_uptodate(bh);
659 continue;
660 }
661
662 /*
663 * For an allocating write with cluster size >= page
664 * size, we always write the entire page.
665 */
666
667 if (buffer_new(bh))
668 clear_buffer_new(bh);
669
670 if (!buffer_mapped(bh)) {
671 map_bh(bh, inode->i_sb, *p_blkno);
672 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
673 }
674
675 if (PageUptodate(page)) {
676 if (!buffer_uptodate(bh))
677 set_buffer_uptodate(bh);
678 } else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
679 (block_start < from || block_end > to)) {
680 ll_rw_block(READ, 1, &bh);
681 *wait_bh++=bh;
682 }
683
684 *p_blkno = *p_blkno + 1;
685 }
686
687 /*
688 * If we issued read requests - let them complete.
689 */
690 while(wait_bh > wait) {
691 wait_on_buffer(*--wait_bh);
692 if (!buffer_uptodate(*wait_bh))
693 ret = -EIO;
694 }
695
696 if (ret == 0 || !new)
697 return ret;
698
699 /*
700 * If we get -EIO above, zero out any newly allocated blocks
701 * to avoid exposing stale data.
702 */
703 bh = head;
704 block_start = 0;
705 do {
706 void *kaddr;
707
708 block_end = block_start + bsize;
709 if (block_end <= from)
710 goto next_bh;
711 if (block_start >= to)
712 break;
713
714 kaddr = kmap_atomic(page, KM_USER0);
715 memset(kaddr+block_start, 0, bh->b_size);
716 flush_dcache_page(page);
717 kunmap_atomic(kaddr, KM_USER0);
718 set_buffer_uptodate(bh);
719 mark_buffer_dirty(bh);
720
721next_bh:
722 block_start = block_end;
723 bh = bh->b_this_page;
724 } while (bh != head);
725
726 return ret;
727}
728
729/*
730 * This will copy user data from the iovec in the buffered write
731 * context.
732 */
733int ocfs2_map_and_write_user_data(struct inode *inode,
734 struct ocfs2_write_ctxt *wc, u64 *p_blkno,
735 unsigned int *ret_from, unsigned int *ret_to)
736{
737 int ret;
738 unsigned int to, from, cluster_start, cluster_end;
739 unsigned long bytes, src_from;
740 char *dst;
741 struct ocfs2_buffered_write_priv *bp = wc->w_private;
742 const struct iovec *cur_iov = bp->b_cur_iov;
743 char __user *buf;
744 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
745
746 ocfs2_figure_cluster_boundaries(osb, wc->w_cpos, &cluster_start,
747 &cluster_end);
748
749 buf = cur_iov->iov_base + bp->b_cur_off;
750 src_from = (unsigned long)buf & ~PAGE_CACHE_MASK;
751
752 from = wc->w_pos & (PAGE_CACHE_SIZE - 1);
753
754 /*
755 * This is a lot of comparisons, but it reads quite
756 * easily, which is important here.
757 */
758 /* Stay within the src page */
759 bytes = PAGE_SIZE - src_from;
760 /* Stay within the vector */
761 bytes = min(bytes,
762 (unsigned long)(cur_iov->iov_len - bp->b_cur_off));
763 /* Stay within count */
764 bytes = min(bytes, (unsigned long)wc->w_count);
765 /*
766 * For clustersize > page size, just stay within
767 * target page, otherwise we have to calculate pos
768 * within the cluster and obey the rightmost
769 * boundary.
770 */
771 if (wc->w_large_pages) {
772 /*
773 * For cluster size < page size, we have to
774 * calculate pos within the cluster and obey
775 * the rightmost boundary.
776 */
777 bytes = min(bytes, (unsigned long)(osb->s_clustersize
778 - (wc->w_pos & (osb->s_clustersize - 1))));
779 } else {
780 /*
781 * cluster size > page size is the most common
782 * case - we just stay within the target page
783 * boundary.
784 */
785 bytes = min(bytes, PAGE_CACHE_SIZE - from);
786 }
787
788 to = from + bytes;
789
790 if (wc->w_this_page_new)
791 ret = ocfs2_map_page_blocks(wc->w_this_page, p_blkno, inode,
792 cluster_start, cluster_end, 1);
793 else
794 ret = ocfs2_map_page_blocks(wc->w_this_page, p_blkno, inode,
795 from, to, 0);
796 if (ret) {
797 mlog_errno(ret);
798 goto out;
799 }
800
801 BUG_ON(from > PAGE_CACHE_SIZE);
802 BUG_ON(to > PAGE_CACHE_SIZE);
803 BUG_ON(from > osb->s_clustersize);
804 BUG_ON(to > osb->s_clustersize);
805
806 dst = kmap(wc->w_this_page);
807 memcpy(dst + from, bp->b_src_buf + src_from, bytes);
808 kunmap(wc->w_this_page);
809
810 /*
811 * XXX: This is slow, but simple. The caller of
812 * ocfs2_buffered_write_cluster() is responsible for
813 * passing through the iovecs, so it's difficult to
814 * predict what our next step is in here after our
815 * initial write. A future version should be pushing
816 * that iovec manipulation further down.
817 *
818 * By setting this, we indicate that a copy from user
819 * data was done, and subsequent calls for this
820 * cluster will skip copying more data.
821 */
822 wc->w_finished_copy = 1;
823
824 *ret_from = from;
825 *ret_to = to;
826out:
827
828 return bytes ? (unsigned int)bytes : ret;
829}
830
831/*
832 * Map, fill and write a page to disk.
833 *
834 * The work of copying data is done via callback. Newly allocated
835 * pages which don't take user data will be zero'd (set 'new' to
836 * indicate an allocating write)
837 *
838 * Returns a negative error code or the number of bytes copied into
839 * the page.
840 */
841int ocfs2_write_data_page(struct inode *inode, handle_t *handle,
842 u64 *p_blkno, struct page *page,
843 struct ocfs2_write_ctxt *wc, int new)
844{
845 int ret, copied = 0;
846 unsigned int from = 0, to = 0;
847 unsigned int cluster_start, cluster_end;
848 unsigned int zero_from = 0, zero_to = 0;
849
850 ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), wc->w_cpos,
851 &cluster_start, &cluster_end);
852
853 if ((wc->w_pos >> PAGE_CACHE_SHIFT) == page->index
854 && !wc->w_finished_copy) {
855
856 wc->w_this_page = page;
857 wc->w_this_page_new = new;
858 ret = wc->w_write_data_page(inode, wc, p_blkno, &from, &to);
859 if (ret < 0) {
860 mlog_errno(ret);
861 goto out;
862 }
863
864 copied = ret;
865
866 zero_from = from;
867 zero_to = to;
868 if (new) {
869 from = cluster_start;
870 to = cluster_end;
871 }
872 } else {
873 /*
874 * If we haven't allocated the new page yet, we
875 * shouldn't be writing it out without copying user
876 * data. This is likely a math error from the caller.
877 */
878 BUG_ON(!new);
879
880 from = cluster_start;
881 to = cluster_end;
882
883 ret = ocfs2_map_page_blocks(page, p_blkno, inode,
884 cluster_start, cluster_end, 1);
885 if (ret) {
886 mlog_errno(ret);
887 goto out;
888 }
889 }
890
891 /*
892 * Parts of newly allocated pages need to be zero'd.
893 *
894 * Above, we have also rewritten 'to' and 'from' - as far as
895 * the rest of the function is concerned, the entire cluster
896 * range inside of a page needs to be written.
897 *
898 * We can skip this if the page is up to date - it's already
899 * been zero'd from being read in as a hole.
900 */
901 if (new && !PageUptodate(page))
902 ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
903 wc->w_cpos, zero_from, zero_to);
904
905 flush_dcache_page(page);
906
907 if (ocfs2_should_order_data(inode)) {
908 ret = walk_page_buffers(handle,
909 page_buffers(page),
910 from, to, NULL,
911 ocfs2_journal_dirty_data);
912 if (ret < 0)
913 mlog_errno(ret);
914 }
915
916 /*
917 * We don't use generic_commit_write() because we need to
918 * handle our own i_size update.
919 */
920 ret = block_commit_write(page, from, to);
921 if (ret)
922 mlog_errno(ret);
923out:
924
925 return copied ? copied : ret;
926}
927
928/*
929 * Do the actual write of some data into an inode. Optionally allocate
930 * in order to fulfill the write.
931 *
932 * cpos is the logical cluster offset within the file to write at
933 *
934 * 'phys' is the physical mapping of that offset. a 'phys' value of
935 * zero indicates that allocation is required. In this case, data_ac
936 * and meta_ac should be valid (meta_ac can be null if metadata
937 * allocation isn't required).
938 */
939static ssize_t ocfs2_write(struct file *file, u32 phys, handle_t *handle,
940 struct buffer_head *di_bh,
941 struct ocfs2_alloc_context *data_ac,
942 struct ocfs2_alloc_context *meta_ac,
943 struct ocfs2_write_ctxt *wc)
944{
945 int ret, i, numpages = 1, new;
946 unsigned int copied = 0;
947 u32 tmp_pos;
948 u64 v_blkno, p_blkno;
949 struct address_space *mapping = file->f_mapping;
950 struct inode *inode = mapping->host;
951 unsigned int cbits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
952 unsigned long index, start;
953 struct page **cpages;
954
955 new = phys == 0 ? 1 : 0;
956
957 /*
958 * Figure out how many pages we'll be manipulating here. For
959 * non-allocating write, or any writes where cluster size is
960 * less than page size, we only need one page. Otherwise,
 961	 * allocating writes where the cluster size is larger than the page
 962	 * size need a full cluster's worth of pages.
963 */
964 if (new && !wc->w_large_pages)
965 numpages = (1 << cbits) / PAGE_SIZE;
966
967 cpages = kzalloc(sizeof(*cpages) * numpages, GFP_NOFS);
968 if (!cpages) {
969 ret = -ENOMEM;
970 mlog_errno(ret);
971 return ret;
972 }
973
974 /*
975 * Fill our page array first. That way we've grabbed enough so
976 * that we can zero and flush if we error after adding the
977 * extent.
978 */
979 if (new) {
980 start = ocfs2_align_clusters_to_page_index(inode->i_sb,
981 wc->w_cpos);
982 v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, wc->w_cpos);
983 } else {
984 start = wc->w_pos >> PAGE_CACHE_SHIFT;
985 v_blkno = wc->w_pos >> inode->i_sb->s_blocksize_bits;
986 }
987
988 for(i = 0; i < numpages; i++) {
989 index = start + i;
990
991 cpages[i] = grab_cache_page(mapping, index);
992 if (!cpages[i]) {
993 ret = -ENOMEM;
994 mlog_errno(ret);
995 goto out;
996 }
997 }
998
999 if (new) {
1000 /*
1001 * This is safe to call with the page locks - it won't take
1002 * any additional semaphores or cluster locks.
1003 */
1004 tmp_pos = wc->w_cpos;
1005 ret = ocfs2_do_extend_allocation(OCFS2_SB(inode->i_sb), inode,
1006 &tmp_pos, 1, di_bh, handle,
1007 data_ac, meta_ac, NULL);
1008 /*
1009 * This shouldn't happen because we must have already
1010 * calculated the correct meta data allocation required. The
1011 * internal tree allocation code should know how to increase
1012 * transaction credits itself.
1013 *
1014 * If need be, we could handle -EAGAIN for a
1015 * RESTART_TRANS here.
1016 */
1017 mlog_bug_on_msg(ret == -EAGAIN,
1018 "Inode %llu: EAGAIN return during allocation.\n",
1019 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1020 if (ret < 0) {
1021 mlog_errno(ret);
1022 goto out;
1023 }
1024 }
1025
1026 ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL);
1027 if (ret < 0) {
1028
1029 /*
1030 * XXX: Should we go readonly here?
1031 */
1032
1033 mlog_errno(ret);
1034 goto out;
1035 }
1036
1037 BUG_ON(p_blkno == 0);
1038
1039 for(i = 0; i < numpages; i++) {
1040 ret = ocfs2_write_data_page(inode, handle, &p_blkno, cpages[i],
1041 wc, new);
1042 if (ret < 0) {
1043 mlog_errno(ret);
1044 goto out;
1045 }
1046
1047 copied += ret;
1048 }
1049
1050out:
1051 for(i = 0; i < numpages; i++) {
1052 unlock_page(cpages[i]);
1053 mark_page_accessed(cpages[i]);
1054 page_cache_release(cpages[i]);
1055 }
1056 kfree(cpages);
1057
1058 return copied ? copied : ret;
1059}
1060
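/*
 * Initialize a write context for ocfs2_buffered_write_cluster():
 * records the file position, byte count and target cluster offset,
 * notes whether pages are larger than clusters (w_large_pages), and
 * stores the per-page copy callback and its private data.
 */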
1061static void ocfs2_write_ctxt_init(struct ocfs2_write_ctxt *wc,
1062 struct ocfs2_super *osb, loff_t pos,
1063 size_t count, ocfs2_page_writer *cb,
1064 void *cb_priv)
1065{
1066 wc->w_count = count;
1067 wc->w_pos = pos;
1068 wc->w_cpos = wc->w_pos >> osb->s_clustersize_bits;
1069 wc->w_finished_copy = 0;
1070
1071 if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
1072 wc->w_large_pages = 1;
1073 else
1074 wc->w_large_pages = 0;
1075
1076 wc->w_write_data_page = cb;
1077 wc->w_private = cb_priv;
1078}
1079
1080/*
1081 * Write a cluster to an inode. The cluster may not be allocated yet,
1082 * in which case it will be. This only exists for buffered writes -
1083 * O_DIRECT takes a more "traditional" path through the kernel.
1084 *
1085 * The caller is responsible for incrementing pos, written counts, etc
1086 *
1087 * For file systems that don't support sparse files, pre-allocation
1088 * and page zeroing up until cpos should be done prior to this
1089 * function call.
1090 *
1091 * Callers should be holding i_sem, and the rw cluster lock.
1092 *
1093 * Returns the number of user bytes written, or less than zero for
1094 * error.
1095 */
1096ssize_t ocfs2_buffered_write_cluster(struct file *file, loff_t pos,
1097 size_t count, ocfs2_page_writer *actor,
1098 void *priv)
1099{
1100 int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
1101 ssize_t written = 0;
1102 u32 phys;
1103 struct inode *inode = file->f_mapping->host;
1104 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1105 struct buffer_head *di_bh = NULL;
1106 struct ocfs2_dinode *di;
1107 struct ocfs2_alloc_context *data_ac = NULL;
1108 struct ocfs2_alloc_context *meta_ac = NULL;
1109 handle_t *handle;
1110 struct ocfs2_write_ctxt wc;
1111
1112 ocfs2_write_ctxt_init(&wc, osb, pos, count, actor, priv);
1113
1114 ret = ocfs2_meta_lock(inode, &di_bh, 1);
1115 if (ret) {
1116 mlog_errno(ret);
1117 goto out;
1118 }
1119 di = (struct ocfs2_dinode *)di_bh->b_data;
1120
1121 /*
1122 * Take alloc sem here to prevent concurrent lookups. That way
1123 * the mapping, zeroing and tree manipulation within
1124 * ocfs2_write() will be safe against ->readpage(). This
1125 * should also serve to lock out allocation from a shared
1126 * writeable region.
1127 */
1128 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1129
1130 ret = ocfs2_get_clusters(inode, wc.w_cpos, &phys, NULL);
1131 if (ret) {
1132 mlog_errno(ret);
1133 goto out_meta;
1134 }
1135
1136 /* phys == 0 means that allocation is required. */
1137 if (phys == 0) {
1138 ret = ocfs2_lock_allocators(inode, di, 1, &data_ac, &meta_ac);
1139 if (ret) {
1140 mlog_errno(ret);
1141 goto out_meta;
1142 }
1143
1144 credits = ocfs2_calc_extend_credits(inode->i_sb, di, 1);
1145 }
1146
1147 ret = ocfs2_data_lock(inode, 1);
1148 if (ret) {
1149 mlog_errno(ret);
1150 goto out_meta;
1151 }
1152
1153 handle = ocfs2_start_trans(osb, credits);
1154 if (IS_ERR(handle)) {
1155 ret = PTR_ERR(handle);
1156 mlog_errno(ret);
1157 goto out_data;
1158 }
1159
1160 written = ocfs2_write(file, phys, handle, di_bh, data_ac,
1161 meta_ac, &wc);
1162 if (written < 0) {
1163 ret = written;
1164 mlog_errno(ret);
1165 goto out_commit;
1166 }
1167
1168 ret = ocfs2_journal_access(handle, inode, di_bh,
1169 OCFS2_JOURNAL_ACCESS_WRITE);
1170 if (ret) {
1171 mlog_errno(ret);
1172 goto out_commit;
1173 }
1174
1175 pos += written;
1176 if (pos > inode->i_size) {
1177 i_size_write(inode, pos);
1178 mark_inode_dirty(inode);
1179 }
1180 inode->i_blocks = ocfs2_align_bytes_to_sectors((u64)(i_size_read(inode)));
1181 di->i_size = cpu_to_le64((u64)i_size_read(inode));
1182 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1183 di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
1184 di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
1185
1186 ret = ocfs2_journal_dirty(handle, di_bh);
1187 if (ret)
1188 mlog_errno(ret);
1189
1190out_commit:
1191 ocfs2_commit_trans(osb, handle);
1192
1193out_data:
1194 ocfs2_data_unlock(inode, 1);
1195
1196out_meta:
1197 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1198 ocfs2_meta_unlock(inode, 1);
1199
1200out:
1201 brelse(di_bh);
1202 if (data_ac)
1203 ocfs2_free_alloc_context(data_ac);
1204 if (meta_ac)
1205 ocfs2_free_alloc_context(meta_ac);
1206
1207 return written ? written : ret;
1208}
1209
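/*
 * Address space operations for ocfs2 inodes.
 */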
1210const struct address_space_operations ocfs2_aops = {
1211 .readpage = ocfs2_readpage,
1212 .writepage = ocfs2_writepage,
1213 .bmap = ocfs2_bmap,
1214 .sync_page = block_sync_page,
1215 .direct_IO = ocfs2_direct_IO,
1216 .invalidatepage = ocfs2_invalidatepage,
1217 .releasepage = ocfs2_releasepage,
1218 .migratepage = buffer_migrate_page,
1219};