]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - fs/reiserfs/objectid.c
reiserfs: make some warnings informational
[mirror_ubuntu-bionic-kernel.git] / fs / reiserfs / objectid.c
CommitLineData
1da177e4
LT
1/*
2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
3 */
4
1da177e4
LT
5#include <linux/string.h>
6#include <linux/random.h>
7#include <linux/time.h>
8#include <linux/reiserfs_fs.h>
9#include <linux/reiserfs_fs_sb.h>
10
/*
 * Find where the objectid map starts inside the on-disk super block
 * buffer.  The map is stored immediately after the super block proper;
 * the old (v1) format has a smaller super block, so the map begins at a
 * different offset depending on the disk format.
 */
#define objectid_map(s,rs) (old_format_only (s) ? \
                         (__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
                         (__le32 *)((rs) + 1))
1da177e4 15
1da177e4
LT
16#ifdef CONFIG_REISERFS_CHECK
17
bd4c625c 18static void check_objectid_map(struct super_block *s, __le32 * map)
1da177e4 19{
bd4c625c
LT
20 if (le32_to_cpu(map[0]) != 1)
21 reiserfs_panic(s,
22 "vs-15010: check_objectid_map: map corrupted: %lx",
23 (long unsigned int)le32_to_cpu(map[0]));
1da177e4 24
bd4c625c 25 // FIXME: add something else here
1da177e4
LT
26}
27
28#else
bd4c625c
LT
29static void check_objectid_map(struct super_block *s, __le32 * map)
30{;
31}
1da177e4
LT
32#endif
33
1da177e4
LT
34/* When we allocate objectids we allocate the first unused objectid.
35 Each sequence of objectids in use (the odd sequences) is followed
36 by a sequence of objectids not in use (the even sequences). We
37 only need to record the last objectid in each of these sequences
38 (both the odd and even sequences) in order to fully define the
39 boundaries of the sequences. A consequence of allocating the first
40 objectid not in use is that under most conditions this scheme is
41 extremely compact. The exception is immediately after a sequence
42 of operations which deletes a large number of objects of
43 non-sequential objectids, and even then it will become compact
44 again as soon as more objects are created. Note that many
45 interesting optimizations of layout could result from complicating
46 objectid assignment, but we have deferred making them for now. */
47
/*
 * Allocate and return a unique, previously unused object identifier.
 * Returns 0 if the objectid space is exhausted.  Must be called from
 * within an active transaction (BUG otherwise); the super block buffer
 * is journaled because the objectid map lives inside it.
 */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	__u32 unused_objectid;

	BUG_ON(!th->t_trans_id);

	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	/* map[1] holds the first unused objectid (the boundary that ends
	   the first in-use sequence -- see the allocation-scheme comment
	   above this function) */
	unused_objectid = le32_to_cpu(map[1]);
	if (unused_objectid == U32_MAX) {
		/* objectid space exhausted: undo the journal preparation
		   and report failure to the caller */
		reiserfs_warning(s, "%s: no more object ids", __func__);
		reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
		return 0;
	}

	/* This incrementation allocates the first unused objectid. That
	   is to say, the first entry on the objectid map is the first
	   unused objectid, and by incrementing it we use it. See below
	   where we check to see if we eliminated a sequence of unused
	   objectids.... */
	map[1] = cpu_to_le32(unused_objectid + 1);

	/* Now we check to see if we eliminated the last remaining member of
	   the first even sequence (and can eliminate the sequence by
	   eliminating its last objectid from oids), and can collapse the
	   first two odd sequences into one sequence. If so, then the net
	   result is to eliminate a pair of objectids from oids. We do this
	   by shifting the entire map to the left. */
	if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
		/* comparing two little-endian values for equality -- safe */
		memmove(map + 1, map + 3,
			(sb_oid_cursize(rs) - 3) * sizeof(__u32));
		set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
	}

	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
	return unused_objectid;
}
91
/*
 * Return an object identifier to the pool of unused ids.  Walks the
 * objectid map (pairs of sequence boundaries, even indices only -- see
 * the allocation-scheme comment above reiserfs_get_unused_objectid) and
 * either adjusts a boundary, shrinks the map when a whole sequence
 * disappears, or inserts a new boundary pair.  If the map is already at
 * its maximum size the objectid is silently leaked (counted in
 * /proc statistics).  Must be called inside an active transaction.
 */
void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
			       __u32 objectid_to_release)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	int i = 0;

	BUG_ON(!th->t_trans_id);
	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));

	/* start at the beginning of the objectid map (i = 0) and go to
	   the end of it (i = disk_sb->s_oid_cursize). Linear search is
	   what we use, though it is possible that binary search would be
	   more efficient after performing lots of deletions (which is
	   when oids is large.) We only check even i's. */
	while (i < sb_oid_cursize(rs)) {
		if (objectid_to_release == le32_to_cpu(map[i])) {
			/* This incrementation unallocates the objectid:
			   the boundary of the in-use sequence moves up
			   past the released id. */
			le32_add_cpu(&map[i], 1);

			/* Did we unallocate the last member of an odd sequence, and can shrink oids? */
			if (map[i] == map[i + 1]) {
				/* shrink objectid map */
				memmove(map + i, map + i + 2,
					(sb_oid_cursize(rs) - i -
					 2) * sizeof(__u32));
				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);

				RFALSE(sb_oid_cursize(rs) < 2 ||
				       sb_oid_cursize(rs) > sb_oid_maxsize(rs),
				       "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
				       sb_oid_cursize(rs), sb_oid_maxsize(rs));
			}
			return;
		}

		if (objectid_to_release > le32_to_cpu(map[i]) &&
		    objectid_to_release < le32_to_cpu(map[i + 1])) {
			/* size of objectid map is not changed */
			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
				/* released id abuts the next boundary:
				   just pull that boundary down by one */
				le32_add_cpu(&map[i + 1], -1);
				return;
			}

			/* JDM comparing two little-endian values for equality -- safe */
			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
				/* objectid map must be expanded, but there is no space */
				PROC_INFO_INC(s, leaked_oid);
				return;
			}

			/* expand the objectid map: insert a new boundary
			   pair marking the single released objectid */
			memmove(map + i + 3, map + i + 1,
				(sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
			map[i + 1] = cpu_to_le32(objectid_to_release);
			map[i + 2] = cpu_to_le32(objectid_to_release + 1);
			set_sb_oid_cursize(rs, sb_oid_cursize(rs) + 2);
			return;
		}

		i += 2;
	}

	reiserfs_warning(s,
			 "vs-15011: reiserfs_release_objectid: tried to free free object id (%lu)",
			 (long unsigned)objectid_to_release);
}
1da177e4 167
bd4c625c
LT
168int reiserfs_convert_objectid_map_v1(struct super_block *s)
169{
170 struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK(s);
171 int cur_size = sb_oid_cursize(disk_sb);
172 int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
173 int old_max = sb_oid_maxsize(disk_sb);
174 struct reiserfs_super_block_v1 *disk_sb_v1;
175 __le32 *objectid_map, *new_objectid_map;
176 int i;
177
178 disk_sb_v1 =
179 (struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
180 objectid_map = (__le32 *) (disk_sb_v1 + 1);
181 new_objectid_map = (__le32 *) (disk_sb + 1);
182
183 if (cur_size > new_size) {
184 /* mark everyone used that was listed as free at the end of the objectid
185 ** map
186 */
187 objectid_map[new_size - 1] = objectid_map[cur_size - 1];
188 set_sb_oid_cursize(disk_sb, new_size);
189 }
190 /* move the smaller objectid map past the end of the new super */
191 for (i = new_size - 1; i >= 0; i--) {
192 objectid_map[i + (old_max - new_size)] = objectid_map[i];
1da177e4 193 }
1da177e4 194
bd4c625c
LT
195 /* set the max size so we don't overflow later */
196 set_sb_oid_maxsize(disk_sb, new_size);
1da177e4 197
bd4c625c
LT
198 /* Zero out label and generate random UUID */
199 memset(disk_sb->s_label, 0, sizeof(disk_sb->s_label));
200 generate_random_uuid(disk_sb->s_uuid);
1da177e4 201
bd4c625c
LT
202 /* finally, zero out the unused chunk of the new super */
203 memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused));
204 return 0;
1da177e4 205}