/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN   ((__u64)1)
#define NILFS_CNO_MAX   (~(__u64)0)

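/*
 * A DAT entry maps a virtual block number to a disk block number and
 * records the checkpoint interval [de_start, de_end) over which that
 * mapping is live.  A freshly allocated entry covers the whole range
 * [NILFS_CNO_MIN, NILFS_CNO_MAX); a de_blocknr of zero means that no
 * disk block is currently assigned to the virtual block number.
 */
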
static int nilfs_dat_prepare_entry(struct inode *dat,
                                   struct nilfs_palloc_req *req, int create)
{
        return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
                                            create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
                                   struct nilfs_palloc_req *req)
{
        nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
        nilfs_mdt_mark_dirty(dat);
        brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
                                  struct nilfs_palloc_req *req)
{
        brelse(req->pr_entry_bh);
}

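/*
 * Updates to a DAT entry follow a prepare/commit/abort protocol:
 * nilfs_dat_prepare_entry() reads in and pins the block holding the
 * entry, nilfs_dat_commit_entry() marks it dirty and releases it, and
 * nilfs_dat_abort_entry() releases it unchanged.  A caller pairs each
 * prepare with exactly one commit or abort, e.g. (sketch of a
 * hypothetical caller):
 *
 *      struct nilfs_palloc_req req = { .pr_entry_nr = vblocknr };
 *      int err;
 *
 *      err = nilfs_dat_prepare_start(dat, &req);
 *      if (likely(!err))
 *              nilfs_dat_commit_start(dat, &req, blocknr);
 */
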
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
        int ret;

        ret = nilfs_palloc_prepare_alloc_entry(dat, req);
        if (ret < 0)
                return ret;

        ret = nilfs_dat_prepare_entry(dat, req, 1);
        if (ret < 0)
                nilfs_palloc_abort_alloc_entry(dat, req);

        return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
        struct nilfs_dat_entry *entry;
        void *kaddr;

        kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
        entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
        entry->de_blocknr = cpu_to_le64(0);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_palloc_commit_alloc_entry(dat, req);
        nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
        nilfs_dat_abort_entry(dat, req);
        nilfs_palloc_abort_alloc_entry(dat, req);
}

void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req)
{
        struct nilfs_dat_entry *entry;
        void *kaddr;

        kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
        entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
        entry->de_blocknr = cpu_to_le64(0);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_dat_commit_entry(dat, req);
        nilfs_palloc_commit_free_entry(dat, req);
}

void nilfs_dat_abort_free(struct inode *dat, struct nilfs_palloc_req *req)
{
        nilfs_dat_abort_entry(dat, req);
        nilfs_palloc_abort_free_entry(dat, req);
}

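/*
 * The start/end pairs below open and close the lifetime of a DAT
 * entry: nilfs_dat_commit_start() stamps the current checkpoint
 * number into de_start and records the assigned disk block number,
 * and nilfs_dat_commit_end() stamps the closing checkpoint number
 * into de_end (or the start number if the block is dead).  Ending an
 * entry whose disk block number is still zero also frees the virtual
 * block number itself.
 */
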
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
        int ret;

        ret = nilfs_dat_prepare_entry(dat, req, 0);
        WARN_ON(ret == -ENOENT);
        return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
                            sector_t blocknr)
{
        struct nilfs_dat_entry *entry;
        void *kaddr;

        kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
        if (entry->de_blocknr != cpu_to_le64(0) ||
            entry->de_end != cpu_to_le64(NILFS_CNO_MAX)) {
                printk(KERN_CRIT
                       "%s: vbn = %llu, start = %llu, end = %llu, pbn = %llu\n",
                       __func__, (unsigned long long)req->pr_entry_nr,
                       (unsigned long long)le64_to_cpu(entry->de_start),
                       (unsigned long long)le64_to_cpu(entry->de_end),
                       (unsigned long long)le64_to_cpu(entry->de_blocknr));
        }
        entry->de_blocknr = cpu_to_le64(blocknr);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_start(struct inode *dat, struct nilfs_palloc_req *req)
{
        nilfs_dat_abort_entry(dat, req);
}

int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
        struct nilfs_dat_entry *entry;
        __u64 start;
        sector_t blocknr;
        void *kaddr;
        int ret;

        ret = nilfs_dat_prepare_entry(dat, req, 0);
        if (ret < 0) {
                WARN_ON(ret == -ENOENT);
                return ret;
        }

        kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        start = le64_to_cpu(entry->de_start);
        blocknr = le64_to_cpu(entry->de_blocknr);
        kunmap_atomic(kaddr, KM_USER0);

        if (blocknr == 0) {
                ret = nilfs_palloc_prepare_free_entry(dat, req);
                if (ret < 0) {
                        nilfs_dat_abort_entry(dat, req);
                        return ret;
                }
        }

        return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
                          int dead)
{
        struct nilfs_dat_entry *entry;
        __u64 start, end;
        sector_t blocknr;
        void *kaddr;

        kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        end = start = le64_to_cpu(entry->de_start);
        if (!dead) {
                end = nilfs_mdt_cno(dat);
                WARN_ON(start > end);
        }
        entry->de_end = cpu_to_le64(end);
        blocknr = le64_to_cpu(entry->de_blocknr);
        kunmap_atomic(kaddr, KM_USER0);

        if (blocknr == 0)
                nilfs_dat_commit_free(dat, req);
        else
                nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
        struct nilfs_dat_entry *entry;
        __u64 start;
        sector_t blocknr;
        void *kaddr;

        kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
        start = le64_to_cpu(entry->de_start);
        blocknr = le64_to_cpu(entry->de_blocknr);
        kunmap_atomic(kaddr, KM_USER0);

        if (start == nilfs_mdt_cno(dat) && blocknr == 0)
                nilfs_palloc_abort_free_entry(dat, req);
        nilfs_dat_abort_entry(dat, req);
}

/**
 * nilfs_dat_mark_dirty - mark the DAT entry for a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() loads the entry block containing the
 * DAT entry of @vblocknr and marks it dirty.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
        struct nilfs_palloc_req req;
        int ret;

        req.pr_entry_nr = vblocknr;
        ret = nilfs_dat_prepare_entry(dat, &req, 0);
        if (ret == 0)
                nilfs_dat_commit_entry(dat, &req);
        return ret;
}
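
/*
 * Example (sketch, hypothetical caller): forcing the DAT entry of a
 * virtual block number out with the next metadata writeback:
 *
 *      err = nilfs_dat_mark_dirty(dat, vblocknr);
 *      if (unlikely(err < 0))
 *              return err;
 */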

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified
 * by @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block numbers have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
        return nilfs_palloc_freev(dat, vblocknrs, nitems);
}
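
/*
 * Example (sketch, hypothetical caller): releasing a batch of virtual
 * block numbers that are no longer referenced:
 *
 *      __u64 vbns[2] = { vbn_a, vbn_b };
 *
 *      err = nilfs_dat_freev(dat, vbns, ARRAY_SIZE(vbns));
 */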

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
        struct buffer_head *entry_bh;
        struct nilfs_dat_entry *entry;
        void *kaddr;
        int ret;

        ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
        if (ret < 0)
                return ret;
        kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
        entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
        if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
                printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
                       (unsigned long long)vblocknr,
                       (unsigned long long)le64_to_cpu(entry->de_start),
                       (unsigned long long)le64_to_cpu(entry->de_end));
                kunmap_atomic(kaddr, KM_USER0);
                brelse(entry_bh);
                return -EINVAL;
        }
        WARN_ON(blocknr == 0);
        entry->de_blocknr = cpu_to_le64(blocknr);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_mdt_mark_buffer_dirty(entry_bh);
        nilfs_mdt_mark_dirty(dat);

        brelse(entry_bh);

        return 0;
}
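
/*
 * Example (sketch, hypothetical caller): after copying live data to a
 * new disk block, e.g. during garbage collection, redirect the mapping:
 *
 *      err = nilfs_dat_move(dat, vblocknr, new_blocknr);
 *      if (err < 0)
 *              return err;
 */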

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed to by @blocknrp. On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
        struct buffer_head *entry_bh;
        struct nilfs_dat_entry *entry;
        sector_t blocknr;
        void *kaddr;
        int ret;

        ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
        if (ret < 0)
                return ret;

        kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
        entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
        blocknr = le64_to_cpu(entry->de_blocknr);
        if (blocknr == 0) {
                ret = -ENOENT;
                goto out;
        }
        if (blocknrp != NULL)
                *blocknrp = blocknr;

 out:
        kunmap_atomic(kaddr, KM_USER0);
        brelse(entry_bh);
        return ret;
}
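
/*
 * Example (sketch, hypothetical caller): resolving a virtual block
 * number before submitting I/O:
 *
 *      sector_t pbn;
 *      int err = nilfs_dat_translate(dat, vblocknr, &pbn);
 *
 *      if (err < 0)
 *              return err;
 *
 * -ENOENT here means that no disk block is currently mapped.
 */
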
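/**
 * nilfs_dat_get_vinfo - get information on virtual block numbers
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo structures with vi_vblocknr filled in
 * @visz: size of one element of @buf
 * @nvi: number of elements in @buf
 *
 * Description: nilfs_dat_get_vinfo() looks up the DAT entry of each
 * virtual block number in @buf and fills in its start checkpoint, end
 * checkpoint, and disk block number.  Consecutive requests that fall
 * into the same entry block share a single block lookup.
 *
 * Return Value: On success, the number of entries filled (@nvi) is
 * returned. On error, a negative error code is returned.
 */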
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
                            size_t nvi)
{
        struct buffer_head *entry_bh;
        struct nilfs_dat_entry *entry;
        struct nilfs_vinfo *vinfo = buf;
        __u64 first, last;
        void *kaddr;
        unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
        int i, j, n, ret;

        for (i = 0; i < nvi; i += n) {
                ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
                                                   0, &entry_bh);
                if (ret < 0)
                        return ret;
                kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
                /* first and last virtual block numbers in this entry block */
                first = vinfo->vi_vblocknr;
                do_div(first, entries_per_block);
                first *= entries_per_block;
                last = first + entries_per_block - 1;
                for (j = i, n = 0;
                     j < nvi && vinfo->vi_vblocknr >= first &&
                             vinfo->vi_vblocknr <= last;
                     j++, n++, vinfo = (void *)vinfo + visz) {
                        entry = nilfs_palloc_block_get_entry(
                                dat, vinfo->vi_vblocknr, entry_bh, kaddr);
                        vinfo->vi_start = le64_to_cpu(entry->de_start);
                        vinfo->vi_end = le64_to_cpu(entry->de_end);
                        vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
                }
                kunmap_atomic(kaddr, KM_USER0);
                brelse(entry_bh);
        }

        return nvi;
}