]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - fs/afs/volume.c
[AFS]: Add security support.
[mirror_ubuntu-bionic-kernel.git] / fs / afs / volume.c
1 /* AFS volume management
2 *
3 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/fs.h>
17 #include <linux/pagemap.h>
18 #include "internal.h"
19
/* printable names for the volume types, indexed by volume->type as set in
 * afs_volume_lookup() (assumes R/W, R/O, backup occupy slots 0-2 of the
 * AFSVL_*VOL enum — TODO confirm against internal.h) */
static const char *afs_voltypes[] = { "R/W", "R/O", "BAK" };
21
/*
 * lookup a volume by name
 * - this can be one of the following:
 *	"%[cell:]volume[.]"		R/W volume
 *	"#[cell:]volume[.]"		R/O or R/W volume (rwparent=0),
 *					 or R/W (rwparent=1) volume
 *	"%[cell:]volume.readonly"	R/O volume
 *	"#[cell:]volume.readonly"	R/O volume
 *	"%[cell:]volume.backup"		Backup volume
 *	"#[cell:]volume.backup"		Backup volume
 *
 * The cell name is optional, and defaults to the current cell.
 *
 * See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin
 * Guide
 * - Rule 1: Explicit type suffix forces access of that type or nothing
 *           (no suffix, then use Rule 2 & 3)
 * - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W
 *           if not available
 * - Rule 3: If parent volume is R/W, then only mount R/W volume unless
 *           explicitly told otherwise
 *
 * On success, returns a volume record holding one usage reference for the
 * caller (drop with afs_put_volume()); on failure returns an ERR_PTR().
 * May update params->type to the type actually selected.
 */
struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
{
	struct afs_vlocation *vlocation = NULL;
	struct afs_volume *volume = NULL;
	struct afs_server *server = NULL;
	char srvtmask;
	int ret, loop;

	_enter("{%*.*s,%d}",
	       params->volnamesz, params->volnamesz, params->volname, params->rwpath);

	/* lookup the volume location record; we hold a ref on it until the
	 * success/error exits below */
	vlocation = afs_vlocation_lookup(params->cell, params->key,
					 params->volname, params->volnamesz);
	if (IS_ERR(vlocation)) {
		ret = PTR_ERR(vlocation);
		vlocation = NULL;	/* so the put on the error path is a no-op */
		goto error;
	}

	/* make the final decision on the type we want */
	ret = -ENOMEDIUM;
	if (params->force && !(vlocation->vldb.vidmask & (1 << params->type)))
		goto error;

	/* merge the volume-type masks of every server hosting this volume */
	srvtmask = 0;
	for (loop = 0; loop < vlocation->vldb.nservers; loop++)
		srvtmask |= vlocation->vldb.srvtmask[loop];

	if (params->force) {
		/* Rule 1: an explicit type suffix admits only that exact type */
		if (!(srvtmask & (1 << params->type)))
			goto error;
	} else if (srvtmask & AFS_VOL_VTM_RO) {
		/* Rules 2 & 3: prefer an R/O instance when one exists */
		params->type = AFSVL_ROVOL;
	} else if (srvtmask & AFS_VOL_VTM_RW) {
		params->type = AFSVL_RWVOL;
	} else {
		goto error;
	}

	/* serialise volume record creation/destruction within the cell */
	down_write(&params->cell->vl_sem);

	/* is the volume already active? */
	if (vlocation->vols[params->type]) {
		/* yes - re-use it */
		volume = vlocation->vols[params->type];
		afs_get_volume(volume);
		goto success;
	}

	/* create a new volume record */
	_debug("creating new volume record");

	ret = -ENOMEM;
	volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL);
	if (!volume)
		goto error_up;

	atomic_set(&volume->usage, 1);
	volume->type = params->type;
	volume->type_force = params->force;
	volume->cell = params->cell;
	volume->vid = vlocation->vldb.vid[params->type];

	init_rwsem(&volume->server_sem);

	/* look up all the applicable server records; each successful lookup
	 * leaves a ref held via volume->servers[] */
	for (loop = 0; loop < 8; loop++) {
		if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) {
			server = afs_lookup_server(
				volume->cell, &vlocation->vldb.servers[loop]);
			if (IS_ERR(server)) {
				ret = PTR_ERR(server);
				goto error_discard;
			}

			volume->servers[volume->nservers] = server;
			volume->nservers++;
		}
	}

	/* attach the cache and volume location */
#ifdef AFS_CACHING_SUPPORT
	cachefs_acquire_cookie(vlocation->cache,
			       &afs_vnode_cache_index_def,
			       volume,
			       &volume->cache);
#endif

	/* the volume record itself now pins the vlocation... */
	afs_get_vlocation(vlocation);
	volume->vlocation = vlocation;

	/* ...and is published for re-use by later lookups */
	vlocation->vols[volume->type] = volume;

success:
	_debug("kAFS selected %s volume %08x",
	       afs_voltypes[volume->type], volume->vid);
	up_write(&params->cell->vl_sem);
	afs_put_vlocation(vlocation);	/* drop the lookup's own ref */
	_leave(" = %p", volume);
	return volume;

	/* clean up */
error_up:
	up_write(&params->cell->vl_sem);
error:
	afs_put_vlocation(vlocation);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

error_discard:
	up_write(&params->cell->vl_sem);

	/* drop the refs on the servers already attached to the new record;
	 * the server that failed the lookup is an ERR_PTR and needs no put */
	for (loop = volume->nservers - 1; loop >= 0; loop--)
		afs_put_server(volume->servers[loop]);

	kfree(volume);
	goto error;
}
163
164 /*
165 * destroy a volume record
166 */
167 void afs_put_volume(struct afs_volume *volume)
168 {
169 struct afs_vlocation *vlocation;
170 int loop;
171
172 if (!volume)
173 return;
174
175 _enter("%p", volume);
176
177 ASSERTCMP(atomic_read(&volume->usage), >, 0);
178
179 vlocation = volume->vlocation;
180
181 /* to prevent a race, the decrement and the dequeue must be effectively
182 * atomic */
183 down_write(&vlocation->cell->vl_sem);
184
185 if (likely(!atomic_dec_and_test(&volume->usage))) {
186 up_write(&vlocation->cell->vl_sem);
187 _leave("");
188 return;
189 }
190
191 vlocation->vols[volume->type] = NULL;
192
193 up_write(&vlocation->cell->vl_sem);
194
195 /* finish cleaning up the volume */
196 #ifdef AFS_CACHING_SUPPORT
197 cachefs_relinquish_cookie(volume->cache, 0);
198 #endif
199 afs_put_vlocation(vlocation);
200
201 for (loop = volume->nservers - 1; loop >= 0; loop--)
202 afs_put_server(volume->servers[loop]);
203
204 kfree(volume);
205
206 _leave(" [destroyed]");
207 }
208
/*
 * Rank a fileserver error state for afs_volume_pick_fileserver(): when no
 * server is usable, the highest-ranked state seen becomes the error that is
 * reported to the caller.
 */
static int afs_fs_state_rank(int state)
{
	switch (state) {
	case 0:			return 0;
	case -ENETUNREACH:	return 1;
	case -EHOSTUNREACH:	return 2;
	case -ECONNREFUSED:	return 3;
	default:		return 4;	/* -EREMOTEIO and anything else */
	}
}

/*
 * pick a server to use to try accessing this volume
 * - returns with an elevated usage count on the server chosen, or an
 *   ERR_PTR() carrying the most significant error state encountered
 */
struct afs_server *afs_volume_pick_fileserver(struct afs_vnode *vnode)
{
	struct afs_volume *volume = vnode->volume;
	struct afs_server *candidate;
	int best, state, i;

	_enter("%s", volume->vlocation->vldb.name);

	/* stick with the server we're already using if it still looks alive */
	if (vnode->server && vnode->server->fs_state == 0) {
		afs_get_server(vnode->server);
		_leave(" = %p [current]", vnode->server);
		return vnode->server;
	}

	down_read(&volume->server_sem);

	/* handle the no-server case */
	if (volume->nservers == 0) {
		best = volume->rjservers ? -ENOMEDIUM : -ESTALE;
		up_read(&volume->server_sem);
		_leave(" = %d [no servers]", best);
		return ERR_PTR(best);
	}

	/* use the first live server in the list; failing that, remember the
	 * most significant error state seen */
	best = 0;
	for (i = 0; i < volume->nservers; i++) {
		candidate = volume->servers[i];
		state = candidate->fs_state;

		_debug("consider %d [%d]", i, state);

		if (state == 0) {
			/* found an apparently healthy server */
			afs_get_server(candidate);
			up_read(&volume->server_sem);
			_leave(" = %p (picked %08x)",
			       candidate, ntohl(candidate->addr.s_addr));
			return candidate;
		}

		if (afs_fs_state_rank(state) > afs_fs_state_rank(best))
			best = state;
	}

	/* no available servers
	 * - TODO: handle the no active servers case better
	 */
	up_read(&volume->server_sem);
	_leave(" = %d", best);
	return ERR_PTR(best);
}
292
/*
 * release a server after use
 * - releases the ref on the server struct that was acquired by picking
 * - records result of using a particular server to access a volume
 * - return 0 to try again, 1 if okay or to issue error
 *
 * @vnode:  the vnode the access was made on behalf of
 * @server: the server that was used (ref consumed by this function)
 * @result: 0 or the negative error that the access produced
 */
int afs_volume_release_fileserver(struct afs_vnode *vnode,
				  struct afs_server *server,
				  int result)
{
	struct afs_volume *volume = vnode->volume;
	unsigned loop;

	_enter("%s,%08x,%d",
	       volume->vlocation->vldb.name, ntohl(server->addr.s_addr),
	       result);

	switch (result) {
		/* success */
	case 0:
		server->fs_act_jif = jiffies;
		server->fs_state = 0;
		break;

		/* the fileserver denied all knowledge of the volume */
	case -ENOMEDIUM:
		server->fs_act_jif = jiffies;
		down_write(&volume->server_sem);

		/* firstly, find where the server is in the active list (if it
		 * is) */
		for (loop = 0; loop < volume->nservers; loop++)
			if (volume->servers[loop] == server)
				goto present;

		/* no longer there - may have been discarded by another op */
		goto try_next_server_upw;

	present:
		/* remove the server from the volume's list; the put here
		 * drops the list's ref on the server - the caller's ref is
		 * dropped separately on the way out */
		volume->nservers--;
		memmove(&volume->servers[loop],
			&volume->servers[loop + 1],
			sizeof(volume->servers[loop]) *
			(volume->nservers - loop));
		volume->servers[volume->nservers] = NULL;
		afs_put_server(server);
		volume->rjservers++;

		if (volume->nservers > 0)
			/* another server might acknowledge its existence */
			goto try_next_server_upw;

		/* handle the case where all the fileservers have rejected the
		 * volume
		 * - TODO: try asking the fileservers for volume information
		 * - TODO: contact the VL server again to see if the volume is
		 *   no longer registered
		 */
		up_write(&volume->server_sem);
		afs_put_server(server);	/* drop the caller's ref */
		_leave(" [completely rejected]");
		return 1;

		/* problem reaching the server */
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -ECONNREFUSED:
	case -ETIME:
	case -ETIMEDOUT:
	case -EREMOTEIO:
		/* mark the server as dead
		 * TODO: vary dead timeout depending on error
		 */
		spin_lock(&server->fs_lock);
		if (!server->fs_state) {
			server->fs_dead_jif = jiffies + HZ * 10;
			server->fs_state = result;
			printk("kAFS: SERVER DEAD state=%d\n", result);
		}
		spin_unlock(&server->fs_lock);
		goto try_next_server;

		/* miscellaneous error */
	default:
		server->fs_act_jif = jiffies;
		/* deliberate fall-through: ENOMEM/ENONET jump straight to the
		 * break so they don't count as server activity */
	case -ENOMEM:
	case -ENONET:
		break;
	}

	/* tell the caller to accept the result */
	afs_put_server(server);
	_leave("");
	return 1;

	/* tell the caller to loop around and try the next server */
try_next_server_upw:
	up_write(&volume->server_sem);
try_next_server:
	afs_put_server(server);
	_leave(" [try next server]");
	return 0;
}