/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "bmap.h"
#include "inode.h"
#include "page.h"
#include "trans.h"
#include "ops_address.h"
#include "util.h"

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	inode = gfs2_ip2v_lookup(ip);
	if (inode) {
		unmap_shared_mapping_range(inode->i_mapping, 0, 0);
		iput(inode);

		if (test_bit(GIF_SW_PAGED, &ip->i_flags))
			set_bit(GLF_DIRTY, &gl->gl_flags);
	}

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}
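
/*
 * Illustrative sketch only, not part of the original file: gfs2_pte_inval()
 * is meant to be driven from the inode glock operations before a glock is
 * synced or demoted, so that dirty PTEs are flushed back into the page cache
 * first.  The callback name and signature below are assumptions made for
 * the example.
 */
#if 0
static void example_inode_go_xmote_th(struct gfs2_glock *gl,
				      unsigned int state, int flags)
{
	/* Tear down any shared/writable mappings of the inode; later faults
	   will have to reacquire the glock and so revalidate the data. */
	gfs2_pte_inval(gl);
}
#endif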

/**
 * gfs2_page_inval - Invalidate all pages associated with a glock
 * @gl: the glock
 *
 */

void gfs2_page_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;

	inode = gfs2_ip2v_lookup(ip);
	if (inode) {
		struct address_space *mapping = inode->i_mapping;

		truncate_inode_pages(mapping, 0);
		gfs2_assert_withdraw(ip->i_sbd, !mapping->nrpages);

		iput(inode);
	}

	clear_bit(GIF_PAGED, &ip->i_flags);
}
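
/*
 * Illustrative sketch only, not part of the original file: once the pages
 * can no longer be kept (for example when the glock is being released to
 * another node), the usual order is to invalidate the PTEs, write the data
 * back, and only then drop the page cache.  The helper name below is an
 * assumption for the example.
 */
#if 0
static void example_drop_cached_data(struct gfs2_glock *gl)
{
	gfs2_pte_inval(gl);				/* flush dirty PTEs */
	gfs2_page_sync(gl, DIO_START | DIO_WAIT);	/* write pages back */
	gfs2_page_inval(gl);				/* discard the page cache */
}
#endif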

/**
 * gfs2_page_sync - Sync the data pages (not metadata) associated with a glock
 * @gl: the glock
 * @flags: DIO_START | DIO_WAIT
 *
 * Syncs data (not metadata) for a regular file.
 * No-op for all other types.
 */

void gfs2_page_sync(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;

	inode = gfs2_ip2v_lookup(ip);
	if (inode) {
		struct address_space *mapping = inode->i_mapping;
		int error = 0;

		if (flags & DIO_START)
			error = filemap_fdatawrite(mapping);
		if (!error && (flags & DIO_WAIT))
			error = filemap_fdatawait(mapping);

		/* Put back any errors cleared by filemap_fdatawait()
		   so they can be caught by someone who can pass them
		   up to user space. */

		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else if (error)
			set_bit(AS_EIO, &mapping->flags);

		iput(inode);
	}
}
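
/*
 * Illustrative sketch only, not part of the original file: the two flags can
 * also be used separately, e.g. to start writeback early and wait for it
 * later.  The wrapper name below is an assumption for the example.
 */
#if 0
static void example_two_stage_sync(struct gfs2_glock *gl)
{
	gfs2_page_sync(gl, DIO_START);	/* kick off writeback of dirty pages */
	/* ... other work can overlap with the I/O here ... */
	gfs2_page_sync(gl, DIO_WAIT);	/* wait; I/O errors are re-flagged on
					   the mapping for a later fsync() */
}
#endif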

/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @private: any locked page held by the caller process
 *
 * Returns: errno
 */

int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			uint64_t block, void *private)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct inode *inode = ip->i_vnode;
	struct page *page = (struct page *)private;
	struct buffer_head *bh;
	int release = 0;

	if (!page || page->index) {
		page = grab_cache_page(inode->i_mapping, 0);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
		       ip->i_di.di_size);
		memset(kaddr + ip->i_di.di_size, 0,
		       PAGE_CACHE_SIZE - ip->i_di.di_size);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if ((sdp->sd_args.ar_data == GFS2_DATA_ORDERED) || gfs2_is_jdata(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	mark_buffer_dirty(bh);

	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}
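
/*
 * Illustrative sketch only, not part of the original file: this function is
 * shaped as an unstuff callback.  Assuming gfs2_unstuff_dinode() and
 * gfs2_is_stuffed() in bmap.c exist with roughly this shape (an assumption
 * about the bmap interface, not confirmed here), a write path that already
 * holds a locked page could unstuff the inode like this:
 */
#if 0
static int example_unstuff_for_write(struct gfs2_inode *ip,
				     struct page *locked_page)
{
	if (!gfs2_is_stuffed(ip))
		return 0;
	/* Copy the inline dinode data out into block 0 of the file,
	   reusing the caller's locked page when it covers index 0. */
	return gfs2_unstuff_dinode(ip, gfs2_unstuffer_page, locked_page);
}
#endif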

/**
 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
 * @mapping: the address space of the inode being truncated
 *
 * Zeroes the part of the final block that lies beyond the new i_size.
 * This is partly borrowed from ext3.
 *
 * Returns: errno
 */
int gfs2_block_truncate_page(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;
	loff_t from = inode->i_size;
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	int err;

	page = grab_cache_page(mapping, index);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	if (!buffer_mapped(bh)) {
		gfs2_get_block(inode, iblock, bh, 0);
		/* Unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date. */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		err = 0;
	}

	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
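
/*
 * Illustrative sketch only, not part of the original file: the natural
 * caller is the truncate path.  When the new size does not fall on a block
 * boundary, the tail of the final block must be zeroed so stale data is not
 * exposed if the file grows again.  The helper name below is an assumption
 * for the example; it presumes i_size has already been updated.
 */
#if 0
static int example_zero_truncated_tail(struct gfs2_inode *ip)
{
	struct inode *inode = ip->i_vnode;
	int error = 0;

	if (inode->i_size & (u64)(inode->i_sb->s_blocksize - 1))
		error = gfs2_block_truncate_page(inode->i_mapping);

	return error;
}
#endif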

/**
 * gfs2_page_add_databufs - add a page's data buffers to the current transaction
 * @ip: the inode
 * @page: the (locked) page being written
 * @from: the starting byte offset within the page
 * @to: the ending byte offset within the page
 *
 * Adds every buffer head that overlaps the byte range [@from, @to) to the
 * current transaction as a data buffer.
 */

void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0;
	     bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}
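
/*
 * Illustrative sketch only, not part of the original file: this is meant
 * for the journaled-data write path, where the buffers touched by a write
 * must be added to the current transaction before the page is marked dirty.
 * The function name and surrounding logic below are assumptions for the
 * example, not a copy of ops_address.c.
 */
#if 0
static void example_journal_written_range(struct page *page,
					  unsigned int from, unsigned int to)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = inode->u.generic_ip;

	/* Only journaled data needs its buffers in the transaction. */
	if (gfs2_is_jdata(ip))
		gfs2_page_add_databufs(ip, page, from, to);
}
#endif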