/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"

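/*
 * gfs2_page_add_databufs - add a page's buffers to the current transaction
 * @ip: The GFS2 inode
 * @page: The (locked) page
 * @from: First byte offset of the range being written
 * @to: Last byte offset of the range being written
 *
 * Walks the buffers backing @page and, for each buffer overlapping the
 * byte range [from, to), adds it to the current transaction as a data
 * buffer. For data=journal inodes the buffer is also marked uptodate.
 */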
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

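/*
 * gfs2_get_block_direct - get_block callback used for O_DIRECT I/O
 *
 * Like gfs2_get_block_noalloc except that an unmapped result is not
 * treated as an error: gfs2_block_map() is simply called with
 * create == 0, so no blocks are ever allocated on this path.
 */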
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writeback_writepage(struct page *page,
				    struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
	if (ret == -EAGAIN)
		ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	return ret;
}

/**
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control
 *
 */

static int gfs2_ordered_writepage(struct page *page,
				  struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;
	int done_trans = 0;

	error = gfs2_writepage_common(page, wbc);
	if (error <= 0)
		return error;

	if (PageChecked(page)) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out_ignore;
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		done_trans = 1;
	}
	error = __gfs2_jdata_writepage(page, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writeback_writepages(struct address_space *mapping,
				     struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index to write (when not range_cyclic)
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int i;
	int ret;

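	/*
	 * One transaction covers the whole pagevec: reserve enough journal
	 * space for every filesystem block these pages could dirty.
	 */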
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			continue;
		}

		if (!wbc->range_cyclic && page->index > end) {
			ret = 1;
			unlock_page(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			continue;
		}

		/* Is the page fully outside i_size? (truncate in progress) */
		if (page->index > end_index || (page->index == end_index && !offset)) {
			page->mapping->a_ops->invalidatepage(page, 0);
			unlock_page(page);
			continue;
		}

		ret = __gfs2_jdata_writepage(page, wbc);

		if (ret || (--(wbc->nr_to_write) <= 0))
			ret = 1;
		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			ret = 1;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;
	int scanned = 0;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}

retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		scanned = 1;
		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;

		pagevec_release(&pvec);
		cond_resched();
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->nopage(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_CACHE_SIZE);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

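	/*
	 * Stuffed files keep their data inline in the dinode block: copy
	 * everything that follows the struct gfs2_dinode header into the
	 * page and zero the remainder of the page.
	 */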
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_di.di_size);
	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. It is also
 * called by gfs2_readpage() once the required lock has been granted.
 *
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We use a trylock in order to
 * avoid the page lock / glock ordering problems, returning AOP_TRUNCATED_PAGE
 * in the event that we are unable to get the lock.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_holder *gh;
	int error;

	gh = gfs2_glock_is_locked_by_me(ip->i_gl);
	if (!gh) {
		gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS);
		if (!gh)
			return -ENOBUFS;
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh);
		unlock_page(page);
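		/*
		 * The page lock has been dropped, so it is now safe to wait
		 * for the glock. On success we return AOP_TRUNCATED_PAGE so
		 * the VFS retries the read; the retry finds the glock already
		 * held by this task and takes the __gfs2_readpage() path.
		 */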
		error = gfs2_glock_nq_atime(gh);
		if (likely(error != 0))
			goto out;
		return AOP_TRUNCATED_PAGE;
	}
	error = __gfs2_readpage(file, page);
	gfs2_glock_dq(gh);
out:
	gfs2_holder_uninit(gh);
	kfree(gh);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @ra_state: The readahead state (or NULL for no readahead)
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
		       char *buf, loff_t *pos, unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_CACHE_SIZE;
	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_CACHE_SIZE)
			amt = PAGE_CACHE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page, KM_USER0);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p, KM_USER0);
		mark_page_accessed(page);
		page_cache_release(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	ret = gfs2_glock_nq_atime(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required;
	int error = 0;
	struct gfs2_alloc *al;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
	error = gfs2_glock_nq_atime(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	if (error)
		goto out_unlock;

	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
		if (error)
			goto out_alloc_put;

		error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
		if (error)
			goto out_qunlock;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

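	/*
	 * Journal reservation: the dinode, any new indirect blocks, the data
	 * blocks themselves when journaling data, and the statfs/quota
	 * changes that go with a block allocation.
	 */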
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	page = __grab_cache_page(mapping, index);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	page_cache_release(page);
	if (pos + len > ip->i_inode.i_size)
		vmtruncate(&ip->i_inode, ip->i_inode.i_size);
out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
out_alloc_put:
		gfs2_alloc_put(ip);
	}
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);

	spin_lock(&sdp->sd_statfs_spin);
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
	struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);

	if (inode->i_size < to) {
		i_size_write(inode, to);
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh;
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_dinode *di;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (likely(ret >= 0) && (inode->i_size > ip->i_di.di_size)) {
		di = (struct gfs2_dinode *)dibh->b_data;
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
failed:
	if (al) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
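	/*
	 * Mark the page PageChecked as well as dirty: gfs2_jdata_writepage()
	 * uses this flag to tell that the page's buffers have not yet been
	 * added to the journal, so it starts a transaction and adds them
	 * before writing the page back.
	 */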
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

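/*
 * gfs2_discard - forget about a buffer that is being invalidated
 *
 * Clear the dirty and mapped state of @bh and unhook its gfs2_bufdata from
 * the log lists: if the buffer is on a log list and not pinned it is simply
 * unlinked, otherwise gfs2_remove_from_journal() is asked to drop it from
 * the current transaction. Called from gfs2_invalidatepage() for each
 * buffer in the range being invalidated.
 */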
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_le.le_list);
		else
			gfs2_remove_from_journal(bh, current->journal_info, 0);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (offset == 0)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (offset == 0)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset > i_size_read(&ip->i_inode))
		return 0;
	return 1;
}


static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation
	 * on this path. All we need to change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
	rv = gfs2_glock_nq_atime(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
					   iov, offset, nr_segs,
					   gfs2_get_block_direct, NULL);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_ail)
			goto cannot_release;
		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));
		bh = bh->b_this_page;
	} while(bh != head);
	gfs2_log_unlock(sdp);

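	/*
	 * Every buffer on the page can be released: walk the page a second
	 * time, unhook each buffer's gfs2_bufdata from the log lists and
	 * free it (buffers still pinned in the log keep their bufdata),
	 * then let try_to_free_buffers() do the rest.
	 */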
	head = bh = page_buffers(page);
	do {
		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			if (!list_empty(&bd->bd_le.le_list)) {
				if (!buffer_pinned(bh))
					list_del_init(&bd->bd_le.le_list);
				else
					bd = NULL;
			}
			if (bd)
				bd->bd_bh = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

	return try_to_free_buffers(page);
cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}

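/*
 * Three sets of address space operations, one per data journaling mode:
 * data=writeback, data=ordered and data=journal. gfs2_set_aops() picks
 * the right one for each inode.
 */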
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writeback_writepage,
	.writepages = gfs2_writeback_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_ordered_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
};

void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}