/* AFS volume management
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include "internal.h"
/* Printable names for the volume types, indexed by volume->type
 * (presumably the AFSVL_RWVOL/AFSVL_ROVOL/AFSVL_BAKVOL ordering —
 * TODO confirm against the enum in internal.h). */
static const char *afs_voltypes[] = { "R/W", "R/O", "BAK" };

/*
 * lookup a volume by name
 * - this can be one of the following:
 *	"%[cell:]volume[.]"		R/W volume
 *	"#[cell:]volume[.]"		R/O or R/W volume (rwparent=0),
 *					 or R/W (rwparent=1) volume
 *	"%[cell:]volume.readonly"	R/O volume
 *	"#[cell:]volume.readonly"	R/O volume
 *	"%[cell:]volume.backup"		Backup volume
 *	"#[cell:]volume.backup"		Backup volume
 *
 * The cell name is optional, and defaults to the current cell.
 *
 * See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin
 * Guide
 * - Rule 1: Explicit type suffix forces access of that type or nothing
 *	     (no suffix, then use Rule 2 & 3)
 * - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W
 *	     if not available
 * - Rule 3: If parent volume is R/W, then only mount R/W volume unless
 *	     explicitly told otherwise
 *
 * Returns the volume with a reference held for the caller (either a fresh
 * record with usage 1 or an existing record with usage bumped), or an
 * ERR_PTR on failure.  params->type may be rewritten here to reflect the
 * type actually selected.
 */
struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
{
	struct afs_vlocation *vlocation = NULL;
	struct afs_volume *volume = NULL;
	struct afs_server *server = NULL;
	char srvtmask;
	int ret, loop;

	_enter("{%*.*s,%d}",
	       params->volnamesz, params->volnamesz, params->volname, params->rwpath);

	/* lookup the volume location record; on success we hold a ref on the
	 * vlocation which is put on every exit path below */
	vlocation = afs_vlocation_lookup(params->cell, params->key,
					 params->volname, params->volnamesz);
	if (IS_ERR(vlocation)) {
		ret = PTR_ERR(vlocation);
		/* clear so the put in the error path is a no-op (assumes
		 * afs_put_vlocation() ignores NULL, as afs_put_volume() does) */
		vlocation = NULL;
		goto error;
	}

	/* make the final decision on the type we want */
	ret = -ENOMEDIUM;
	if (params->force && !(vlocation->vldb.vidmask & (1 << params->type)))
		goto error;

	/* merge the per-server type masks to see what types exist anywhere */
	srvtmask = 0;
	for (loop = 0; loop < vlocation->vldb.nservers; loop++)
		srvtmask |= vlocation->vldb.srvtmask[loop];

	/* forced type must be available; otherwise prefer R/O, then R/W */
	if (params->force) {
		if (!(srvtmask & (1 << params->type)))
			goto error;
	} else if (srvtmask & AFS_VOL_VTM_RO) {
		params->type = AFSVL_ROVOL;
	} else if (srvtmask & AFS_VOL_VTM_RW) {
		params->type = AFSVL_RWVOL;
	} else {
		goto error;
	}

	/* vl_sem serialises us against afs_put_volume()'s dequeue and other
	 * lookups manipulating vlocation->vols[] */
	down_write(&params->cell->vl_sem);

	/* is the volume already active? */
	if (vlocation->vols[params->type]) {
		/* yes - re-use it */
		volume = vlocation->vols[params->type];
		afs_get_volume(volume);
		goto success;
	}

	/* create a new volume record */
	_debug("creating new volume record");

	ret = -ENOMEM;
	volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL);
	if (!volume)
		goto error_up;

	atomic_set(&volume->usage, 1);
	volume->type = params->type;
	volume->type_force = params->force;
	volume->cell = params->cell;
	volume->vid = vlocation->vldb.vid[params->type];

	init_rwsem(&volume->server_sem);

	/* look up all the applicable server records
	 * NOTE(review): 8 looks like the vldb server-slot count — confirm it
	 * matches the size of vlocation->vldb.servers[] */
	for (loop = 0; loop < 8; loop++) {
		if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) {
			server = afs_lookup_server(
			       volume->cell, &vlocation->vldb.servers[loop]);
			if (IS_ERR(server)) {
				ret = PTR_ERR(server);
				goto error_discard;
			}

			/* the volume keeps the server ref; dropped again in
			 * afs_put_volume() or error_discard below */
			volume->servers[volume->nservers] = server;
			volume->nservers++;
		}
	}

	/* attach the cache and volume location */
#ifdef CONFIG_AFS_FSCACHE
	volume->cache = fscache_acquire_cookie(vlocation->cache,
					       &afs_volume_cache_index_def,
					       volume, true);
#endif
	/* the volume takes its own ref on the vlocation */
	afs_get_vlocation(vlocation);
	volume->vlocation = vlocation;

	vlocation->vols[volume->type] = volume;

success:
	_debug("kAFS selected %s volume %08x",
	       afs_voltypes[volume->type], volume->vid);
	up_write(&params->cell->vl_sem);
	afs_put_vlocation(vlocation);
	_leave(" = %p", volume);
	return volume;

	/* clean up */
error_up:
	up_write(&params->cell->vl_sem);
error:
	afs_put_vlocation(vlocation);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

error_discard:
	up_write(&params->cell->vl_sem);

	/* drop the server refs gathered so far and discard the record */
	for (loop = volume->nservers - 1; loop >= 0; loop--)
		afs_put_server(volume->servers[loop]);

	kfree(volume);
	goto error;
}

/*
 * destroy a volume record
 * - drops one reference; the record is only torn down when the usage count
 *   reaches zero
 * - a NULL volume is tolerated as a no-op
 */
void afs_put_volume(struct afs_volume *volume)
{
	struct afs_vlocation *vlocation;
	int loop;

	if (!volume)
		return;

	_enter("%p", volume);

	ASSERTCMP(atomic_read(&volume->usage), >, 0);

	vlocation = volume->vlocation;

	/* to prevent a race, the decrement and the dequeue must be effectively
	 * atomic: the cell's vl_sem serialises us against afs_volume_lookup(),
	 * which re-uses vlocation->vols[] entries under the same lock */
	down_write(&vlocation->cell->vl_sem);

	if (likely(!atomic_dec_and_test(&volume->usage))) {
		/* other references remain - nothing more to do */
		up_write(&vlocation->cell->vl_sem);
		_leave("");
		return;
	}

	/* last ref gone: unhook from the vlocation before unlocking so nobody
	 * can find the dying volume again */
	vlocation->vols[volume->type] = NULL;

	up_write(&vlocation->cell->vl_sem);

	/* finish cleaning up the volume */
#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(volume->cache, 0);
#endif
	afs_put_vlocation(vlocation);

	/* release the server refs taken when the volume was looked up */
	for (loop = volume->nservers - 1; loop >= 0; loop--)
		afs_put_server(volume->servers[loop]);

	kfree(volume);

	_leave(" [destroyed]");
}

/*
 * Pick a server through which to access this vnode's volume.
 *
 * Preference order:
 *  1. the server the vnode is already using, provided it isn't currently
 *     marked dead (fs_state != 0);
 *  2. otherwise the first server in the volume's list that looks healthy.
 *
 * The chosen server is returned with its usage count raised; the caller
 * must release it.  If no server is usable, an ERR_PTR carrying the most
 * significant failure observed is returned instead.
 */

/* Order fs_state errors by how conclusively they condemn a server; a higher
 * rank overrides a lower one when summarising the scan.  -EREMOTEIO and any
 * unrecognised error rank highest. */
static int afs_fs_error_rank(int error)
{
	switch (error) {
	case 0:
		return 0;
	case -ENETUNREACH:
		return 1;
	case -EHOSTUNREACH:
		return 2;
	case -ECONNREFUSED:
		return 3;
	default:
		return 4;
	}
}

struct afs_server *afs_volume_pick_fileserver(struct afs_vnode *vnode)
{
	struct afs_volume *vol = vnode->volume;
	struct afs_server *candidate;
	int best_error, err, index;

	_enter("%s", vol->vlocation->vldb.name);

	/* stick with the server we're already using if we can */
	if (vnode->server && vnode->server->fs_state == 0) {
		afs_get_server(vnode->server);
		_leave(" = %p [current]", vnode->server);
		return vnode->server;
	}

	down_read(&vol->server_sem);

	/* handle the no-server case */
	if (vol->nservers == 0) {
		err = vol->rjservers ? -ENOMEDIUM : -ESTALE;
		up_read(&vol->server_sem);
		_leave(" = %d [no servers]", err);
		return ERR_PTR(err);
	}

	/* scan the list, taking the first apparently healthy server and
	 * otherwise remembering the weightiest error encountered */
	best_error = 0;
	for (index = 0; index < vol->nservers; index++) {
		candidate = vol->servers[index];
		err = candidate->fs_state;

		_debug("consider %d [%d]", index, err);

		if (err == 0) {
			afs_get_server(candidate);
			up_read(&vol->server_sem);
			_leave(" = %p (picked %08x)",
			       candidate, ntohl(candidate->addr.s_addr));
			return candidate;
		}

		if (afs_fs_error_rank(err) > afs_fs_error_rank(best_error))
			best_error = err;
	}

	/* no available servers
	 * - TODO: handle the no active servers case better
	 */
	up_read(&vol->server_sem);
	_leave(" = %d", best_error);
	return ERR_PTR(best_error);
}

/*
 * release a server after use
 * - releases the ref on the server struct that was acquired by picking
 * - records result of using a particular server to access a volume
 * - return 0 to try again, 1 if okay or to issue error
 * - the caller must release the server struct if result was 0
 *   NOTE(review): every path below appears to put the server ref itself,
 *   so this last line looks stale — confirm against the callers.
 */
int afs_volume_release_fileserver(struct afs_vnode *vnode,
				  struct afs_server *server,
				  int result)
{
	struct afs_volume *volume = vnode->volume;
	unsigned loop;

	_enter("%s,%08x,%d",
	       volume->vlocation->vldb.name, ntohl(server->addr.s_addr),
	       result);

	switch (result) {
		/* success */
	case 0:
		server->fs_act_jif = jiffies;
		server->fs_state = 0;
		_leave("");
		return 1;

		/* the fileserver denied all knowledge of the volume */
	case -ENOMEDIUM:
		server->fs_act_jif = jiffies;
		down_write(&volume->server_sem);

		/* firstly, find where the server is in the active list (if it
		 * is) */
		for (loop = 0; loop < volume->nservers; loop++)
			if (volume->servers[loop] == server)
				goto present;

		/* no longer there - may have been discarded by another op */
		goto try_next_server_upw;

	present:
		/* close the gap left in the server list and record the
		 * rejection; this drops the list's ref on the server (the
		 * caller's pick ref is dropped at try_next_server_upw or
		 * the "completely rejected" exit) */
		volume->nservers--;
		memmove(&volume->servers[loop],
			&volume->servers[loop + 1],
			sizeof(volume->servers[loop]) *
			(volume->nservers - loop));
		volume->servers[volume->nservers] = NULL;
		afs_put_server(server);
		volume->rjservers++;

		if (volume->nservers > 0)
			/* another server might acknowledge its existence */
			goto try_next_server_upw;

		/* handle the case where all the fileservers have rejected the
		 * volume
		 * - TODO: try asking the fileservers for volume information
		 * - TODO: contact the VL server again to see if the volume is
		 *   no longer registered
		 */
		up_write(&volume->server_sem);
		afs_put_server(server);
		_leave(" [completely rejected]");
		return 1;

		/* problem reaching the server */
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -ECONNREFUSED:
	case -ETIME:
	case -ETIMEDOUT:
	case -EREMOTEIO:
		/* mark the server as dead
		 * TODO: vary dead timeout depending on error
		 * - only the first failure records the state; a server
		 *   already marked dead keeps its original error */
		spin_lock(&server->fs_lock);
		if (!server->fs_state) {
			server->fs_dead_jif = jiffies + HZ * 10;
			server->fs_state = result;
			printk("kAFS: SERVER DEAD state=%d\n", result);
		}
		spin_unlock(&server->fs_lock);
		goto try_next_server;

		/* miscellaneous error */
	default:
		server->fs_act_jif = jiffies;
		/* deliberate fallthrough: an unrecognised error stamps the
		 * activity time and then shares the -ENOMEM/-ENONET handling
		 * below (which intentionally skips the stamp) */
	case -ENOMEM:
	case -ENONET:
		/* tell the caller to accept the result */
		afs_put_server(server);
		_leave(" [local failure]");
		return 1;
	}

	/* tell the caller to loop around and try the next server */
try_next_server_upw:
	up_write(&volume->server_sem);
try_next_server:
	afs_put_server(server);
	_leave(" [try next server]");
	return 0;
}