/*
 * partition.c
 *
 * PURPOSE
 *	Partition handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 * (C) 1998-2001 Ben Fennema
 *
 * HISTORY
 *
 * 12/06/98 blf  Created file.
 *
 */

#include "udfdecl.h"
#include "udf_sb.h"
#include "udf_i.h"

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/udf_fs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>

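/*
 * Map a (partition, block, offset) triple to a physical block number.
 * Partition types that need extra translation (virtual or sparable maps)
 * register a per-partition callback; plain partitions simply map to
 * partition root + block + offset.  Returns 0xFFFFFFFF on an invalid
 * partition reference.
 */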
inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
                               uint16_t partition, uint32_t offset)
{
        if (partition >= UDF_SB_NUMPARTS(sb)) {
                udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
                          block, partition, offset);
                return 0xFFFFFFFF;
        }
        if (UDF_SB_PARTFUNC(sb, partition))
                return UDF_SB_PARTFUNC(sb, partition)(sb, block, partition,
                                                      offset);
        else
                return UDF_SB_PARTROOT(sb, partition) + block + offset;
}

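/*
 * Translate a block in a UDF 1.50 virtual partition: look the block up in
 * the Virtual Allocation Table (VAT) and map the result through the
 * partition the VAT file itself lives on.  Returns 0xFFFFFFFF if the block
 * lies beyond the VAT, the VAT block cannot be read, or the lookup would
 * recurse into the same partition.
 */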
uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
                               uint16_t partition, uint32_t offset)
{
        struct buffer_head *bh = NULL;
        uint32_t newblock;
        uint32_t index;
        uint32_t loc;

        index = (sb->s_blocksize -
                 UDF_SB_TYPEVIRT(sb, partition).s_start_offset) /
                sizeof(uint32_t);

        if (block > UDF_SB_TYPEVIRT(sb, partition).s_num_entries) {
                udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
                          block, UDF_SB_TYPEVIRT(sb, partition).s_num_entries);
                return 0xFFFFFFFF;
        }

        if (block >= index) {
                block -= index;
                newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
                index = block % (sb->s_blocksize / sizeof(uint32_t));
        } else {
                newblock = 0;
                index = UDF_SB_TYPEVIRT(sb, partition).s_start_offset /
                        sizeof(uint32_t) + block;
        }

        loc = udf_block_map(UDF_SB_VAT(sb), newblock);

        bh = sb_bread(sb, loc);
        if (!bh) {
                udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
                          sb, block, partition, loc, index);
                return 0xFFFFFFFF;
        }

        loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);

        brelse(bh);

        if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition) {
                udf_debug("recursive call to udf_get_pblock!\n");
                return 0xFFFFFFFF;
        }

        return udf_get_pblock(sb, loc,
                              UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum,
                              offset);
}

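/*
 * UDF 2.00 virtual partitions are translated the same way as 1.50 ones
 * here, so this simply wraps udf_get_pblock_virt15().
 */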
inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
                                      uint16_t partition, uint32_t offset)
{
        return udf_get_pblock_virt15(sb, block, partition, offset);
}

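/*
 * Translate a block in a sparable partition: if the packet containing the
 * block has been remapped in the sparing table, return the matching block
 * inside the mapped packet; otherwise fall back to the plain
 * partition root + block + offset mapping.
 */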
uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
                               uint16_t partition, uint32_t offset)
{
        int i;
        struct sparingTable *st = NULL;
        uint32_t packet = (block + offset) &
                          ~(UDF_SB_TYPESPAR(sb, partition).s_packet_len - 1);

        for (i = 0; i < 4; i++) {
                if (UDF_SB_TYPESPAR(sb, partition).s_spar_map[i] != NULL) {
                        st = (struct sparingTable *)
                                UDF_SB_TYPESPAR(sb, partition).s_spar_map[i]->b_data;
                        break;
                }
        }

        if (st) {
                for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
                        if (le32_to_cpu(st->mapEntry[i].origLocation) >=
                            0xFFFFFFF0) {
                                break;
                        } else if (le32_to_cpu(st->mapEntry[i].origLocation) ==
                                   packet) {
                                return le32_to_cpu(st->mapEntry[i].mappedLocation) +
                                        ((block + offset) &
                                         (UDF_SB_TYPESPAR(sb, partition).s_packet_len - 1));
                        } else if (le32_to_cpu(st->mapEntry[i].origLocation) >
                                   packet) {
                                break;
                        }
                }
        }
        return UDF_SB_PARTROOT(sb, partition) + block + offset;
}

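/*
 * Relocate old_block on a sparable partition: find the partition that
 * contains it, then either reuse the sparing entry already covering its
 * packet or claim a free entry (origLocation == 0xFFFFFFFF), update every
 * copy of the sparing table, and return the replacement block through
 * *new_block.  Returns 0 on success, 1 if the block cannot be relocated.
 */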
int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
{
        struct udf_sparing_data *sdata;
        struct sparingTable *st = NULL;
        struct sparingEntry mapEntry;
        uint32_t packet;
        int i, j, k, l;

        for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
                if (old_block > UDF_SB_PARTROOT(sb, i) &&
                    old_block < UDF_SB_PARTROOT(sb, i) + UDF_SB_PARTLEN(sb, i)) {
                        sdata = &UDF_SB_TYPESPAR(sb, i);
                        packet = (old_block - UDF_SB_PARTROOT(sb, i)) &
                                 ~(sdata->s_packet_len - 1);

                        for (j = 0; j < 4; j++) {
                                if (UDF_SB_TYPESPAR(sb, i).s_spar_map[j] != NULL) {
                                        st = (struct sparingTable *)
                                                sdata->s_spar_map[j]->b_data;
                                        break;
                                }
                        }

                        if (!st)
                                return 1;

                        for (k = 0; k < le16_to_cpu(st->reallocationTableLen); k++) {
                                if (le32_to_cpu(st->mapEntry[k].origLocation) ==
                                    0xFFFFFFFF) {
                                        /* claim this free entry for the packet */
                                        for (; j < 4; j++) {
                                                if (!sdata->s_spar_map[j])
                                                        continue;
                                                st = (struct sparingTable *)
                                                        sdata->s_spar_map[j]->b_data;
                                                st->mapEntry[k].origLocation =
                                                        cpu_to_le32(packet);
                                                udf_update_tag((char *)st,
                                                        sizeof(struct sparingTable) +
                                                        le16_to_cpu(st->reallocationTableLen) *
                                                        sizeof(struct sparingEntry));
                                                mark_buffer_dirty(sdata->s_spar_map[j]);
                                        }
                                        *new_block =
                                                le32_to_cpu(st->mapEntry[k].mappedLocation) +
                                                ((old_block - UDF_SB_PARTROOT(sb, i)) &
                                                 (sdata->s_packet_len - 1));
                                        return 0;
                                } else if (le32_to_cpu(st->mapEntry[k].origLocation) ==
                                           packet) {
                                        /* packet already remapped */
                                        *new_block =
                                                le32_to_cpu(st->mapEntry[k].mappedLocation) +
                                                ((old_block - UDF_SB_PARTROOT(sb, i)) &
                                                 (sdata->s_packet_len - 1));
                                        return 0;
                                } else if (le32_to_cpu(st->mapEntry[k].origLocation) >
                                           packet) {
                                        break;
                                }
                        }

                        for (l = k; l < le16_to_cpu(st->reallocationTableLen); l++) {
                                if (le32_to_cpu(st->mapEntry[l].origLocation) !=
                                    0xFFFFFFFF)
                                        continue;
                                /* move the free entry at l into sorted position k */
                                for (; j < 4; j++) {
                                        if (!sdata->s_spar_map[j])
                                                continue;
                                        st = (struct sparingTable *)
                                                sdata->s_spar_map[j]->b_data;
                                        mapEntry = st->mapEntry[l];
                                        mapEntry.origLocation = cpu_to_le32(packet);
                                        memmove(&st->mapEntry[k + 1], &st->mapEntry[k],
                                                (l - k) * sizeof(struct sparingEntry));
                                        st->mapEntry[k] = mapEntry;
                                        udf_update_tag((char *)st,
                                                sizeof(struct sparingTable) +
                                                le16_to_cpu(st->reallocationTableLen) *
                                                sizeof(struct sparingEntry));
                                        mark_buffer_dirty(sdata->s_spar_map[j]);
                                }
                                *new_block =
                                        le32_to_cpu(st->mapEntry[k].mappedLocation) +
                                        ((old_block - UDF_SB_PARTROOT(sb, i)) &
                                         (sdata->s_packet_len - 1));
                                return 0;
                        }

                        return 1;
                }
        }

        if (i == UDF_SB_NUMPARTS(sb)) {
                /* outside of partitions */
                /* for now, fail =) */
                return 1;
        }

        return 0;
}