/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl_device and cl_device_type implementation for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */

/*
 * vvp_ prefix stands for "Vfs Vm Posix". It corresponds to the historical
 * "llite_" (var. "ll_") prefix.
 */

static struct kmem_cache *vvp_thread_kmem;
static struct kmem_cache *vvp_session_kmem;
static struct lu_kmem_descr vvp_caches[] = {
	{
		.ckd_cache = &vvp_thread_kmem,
		.ckd_name  = "vvp_thread_kmem",
		.ckd_size  = sizeof(struct vvp_thread_info),
	},
	{
		.ckd_cache = &vvp_session_kmem,
		.ckd_name  = "vvp_session_kmem",
		.ckd_size  = sizeof(struct vvp_session)
	},
	{
		.ckd_cache = NULL
	}
};
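
/*
 * The entry with a NULL ckd_cache above is the array terminator:
 * lu_kmem_init() and lu_kmem_fini(), called from vvp_global_init() and
 * vvp_global_fini() below, walk vvp_caches[] until they reach it.
 */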

static void *vvp_key_init(const struct lu_context *ctx,
			  struct lu_context_key *key)
{
	struct vvp_thread_info *info;

	OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, GFP_NOFS);
	if (info == NULL)
		info = ERR_PTR(-ENOMEM);
	return info;
}

static void vvp_key_fini(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
	struct vvp_thread_info *info = data;

	OBD_SLAB_FREE_PTR(info, vvp_thread_kmem);
}

static void *vvp_session_key_init(const struct lu_context *ctx,
				  struct lu_context_key *key)
{
	struct vvp_session *session;

	OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, GFP_NOFS);
	if (session == NULL)
		session = ERR_PTR(-ENOMEM);
	return session;
}

static void vvp_session_key_fini(const struct lu_context *ctx,
				 struct lu_context_key *key, void *data)
{
	struct vvp_session *session = data;

	OBD_SLAB_FREE_PTR(session, vvp_session_kmem);
}

struct lu_context_key vvp_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = vvp_key_init,
	.lct_fini = vvp_key_fini
};

struct lu_context_key vvp_session_key = {
	.lct_tags = LCT_SESSION,
	.lct_init = vvp_session_key_init,
	.lct_fini = vvp_session_key_fini
};

/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key);
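
/*
 * Note: LU_TYPE_INIT_FINI() (see lu_object.h) emits the four functions
 * named in the comment above: vvp_type_init()/vvp_type_fini() register and
 * deregister the listed context keys, while vvp_type_start()/vvp_type_stop()
 * revive and quiesce them. All four are wired into vvp_device_type_ops below.
 */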

static const struct lu_device_operations vvp_lu_ops = {
	.ldo_object_alloc = vvp_object_alloc
};

static const struct cl_device_operations vvp_cl_ops = {
	.cdo_req_init = ccc_req_init
};

static struct lu_device *vvp_device_alloc(const struct lu_env *env,
					  struct lu_device_type *t,
					  struct lustre_cfg *cfg)
{
	return ccc_device_alloc(env, t, cfg, &vvp_lu_ops, &vvp_cl_ops);
}

static const struct lu_device_type_operations vvp_device_type_ops = {
	.ldto_init = vvp_type_init,
	.ldto_fini = vvp_type_fini,

	.ldto_start = vvp_type_start,
	.ldto_stop  = vvp_type_stop,

	.ldto_device_alloc = vvp_device_alloc,
	.ldto_device_free  = ccc_device_free,
	.ldto_device_init  = ccc_device_init,
	.ldto_device_fini  = ccc_device_fini
};

struct lu_device_type vvp_device_type = {
	.ldt_tags     = LU_DEVICE_CL,
	.ldt_name     = LUSTRE_VVP_NAME,
	.ldt_ops      = &vvp_device_type_ops,
	.ldt_ctx_tags = LCT_CL_THREAD
};

/**
 * Initialize the vvp layer: set up the slab caches described by
 * vvp_caches[] and register vvp_device_type with the client common code.
 */
int vvp_global_init(void)
{
	int result;

	result = lu_kmem_init(vvp_caches);
	if (result == 0) {
		result = ccc_global_init(&vvp_device_type);
		if (result != 0)
			lu_kmem_fini(vvp_caches);
	}
	return result;
}

void vvp_global_fini(void)
{
	ccc_global_fini(&vvp_device_type);
	lu_kmem_fini(vvp_caches);
}

/*****************************************************************************
 *
 * mirror obd-devices into cl devices.
 *
 */

int cl_sb_init(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct cl_device  *cl;
	struct lu_env     *env;
	int rc = 0;
	int refcheck;

	sbi = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cl = cl_type_setup(env, NULL, &vvp_device_type,
				   sbi->ll_dt_exp->exp_obd->obd_lu_dev);
		if (!IS_ERR(cl)) {
			cl2ccc_dev(cl)->cdv_sb = sb;
			sbi->ll_cl = cl;
			sbi->ll_site = cl2lu_dev(cl)->ld_site;
		}
		cl_env_put(env, &refcheck);
	} else {
		rc = PTR_ERR(env);
	}
	return rc;
}

int cl_sb_fini(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct lu_env     *env;
	struct cl_device  *cld;
	int refcheck;
	int result;

	sbi = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cld = sbi->ll_cl;

		if (cld != NULL) {
			cl_stack_fini(env, cld);
			sbi->ll_cl = NULL;
			sbi->ll_site = NULL;
		}
		cl_env_put(env, &refcheck);
		result = 0;
	} else {
		CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
		result = PTR_ERR(env);
	}
	/*
	 * If the mount failed (sbi->ll_cl == NULL) and there are no other
	 * mounts, stop the device types manually (this usually happens
	 * automatically when the last device is destroyed).
	 */
	lu_types_stop();
	return result;
}

/****************************************************************************
 *
 * /proc/fs/lustre/llite/$MNT/dump_page_cache
 *
 ****************************************************************************/
/*
 * To represent the contents of the page cache as a byte stream, the
 * following information is encoded in a 64-bit offset:
 *
 * - file hash bucket in lu_site::ls_hash[]      28 bits
 *
 * - how far the file is from the bucket head     4 bits
 *
 * - page index                                  32 bits
 *
 * The first two fields identify a file in the cache uniquely.
 */

#define PGC_OBJ_SHIFT (32 + 4)
#define PGC_DEPTH_SHIFT (32)
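
/*
 * Worked example (hypothetical values): a page at index 5 of the file
 * third from the head of hash bucket 7 packs into the offset
 *
 *	pos = (7ULL << PGC_OBJ_SHIFT) | (3ULL << PGC_DEPTH_SHIFT) | 5
 *	    = 0x7300000005
 *
 * and vvp_pgcache_id_unpack() below recovers vpi_bucket = 7,
 * vpi_depth = 3 and vpi_index = 5 from that offset.
 */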
struct vvp_pgcache_id {
	unsigned		 vpi_bucket;
	unsigned		 vpi_depth;
	uint32_t		 vpi_index;

	unsigned		 vpi_curdep;
	struct lu_object_header *vpi_obj;
};

static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id)
{
	CLASSERT(sizeof(pos) == sizeof(__u64));

	id->vpi_index  = pos & 0xffffffff;
	id->vpi_depth  = (pos >> PGC_DEPTH_SHIFT) & 0xf;
	id->vpi_bucket = (unsigned long long)pos >> PGC_OBJ_SHIFT;
}

static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
{
	return
		((__u64)id->vpi_index) |
		((__u64)id->vpi_depth  << PGC_DEPTH_SHIFT) |
		((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
}

static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			       struct hlist_node *hnode, void *data)
{
	struct vvp_pgcache_id   *id  = data;
	struct lu_object_header *hdr = cfs_hash_object(hs, hnode);

	if (id->vpi_curdep-- > 0)
		return 0; /* continue */

	if (lu_object_is_dying(hdr))
		return 1;

	cfs_hash_get(hs, hnode);
	id->vpi_obj = hdr;
	return 1;
}

static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
					 struct lu_device *dev,
					 struct vvp_pgcache_id *id)
{
	LASSERT(lu_device_is_cl(dev));

	id->vpi_depth &= 0xf;
	id->vpi_obj    = NULL;
	id->vpi_curdep = id->vpi_depth;

	cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
				vvp_pgcache_obj_get, id);
	if (id->vpi_obj != NULL) {
		struct lu_object *lu_obj;

		lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
		if (lu_obj != NULL) {
			lu_object_ref_add(lu_obj, "dump", current);
			return lu2cl(lu_obj);
		}
		lu_object_put(env, lu_object_top(id->vpi_obj));

	} else if (id->vpi_curdep > 0) {
		id->vpi_depth = 0xf;
	}
	return NULL;
}

static loff_t vvp_pgcache_find(const struct lu_env *env,
			       struct lu_device *dev, loff_t pos)
{
	struct cl_object     *clob;
	struct lu_site       *site;
	struct vvp_pgcache_id id;

	site = dev->ld_site;
	vvp_pgcache_id_unpack(pos, &id);

	while (1) {
		if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
			return ~0ULL;
		clob = vvp_pgcache_obj(env, dev, &id);
		if (clob != NULL) {
			struct cl_object_header *hdr;
			int nr;
			struct cl_page *pg;

			/* Got an object. Find its next page. */
			hdr = cl_object_header(clob);

			spin_lock(&hdr->coh_page_guard);
			nr = radix_tree_gang_lookup(&hdr->coh_tree,
						    (void **)&pg,
						    id.vpi_index, 1);
			if (nr > 0) {
				id.vpi_index = pg->cp_index;
				/*
				 * Can't support files over 16TB: the
				 * page index is limited to 32 bits.
				 */
				nr = !(pg->cp_index > 0xffffffff);
			}
			spin_unlock(&hdr->coh_page_guard);

			lu_object_ref_del(&clob->co_lu, "dump", current);
			cl_object_put(env, clob);
			if (nr > 0)
				return vvp_pgcache_id_pack(&id);
		}
		/* to the next object. */
		++id.vpi_depth;
		id.vpi_depth &= 0xf;
		if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
			return ~0ULL;
		id.vpi_index = 0;
	}
}

#define seq_page_flag(seq, page, flag, has_flags) do {		  \
	if (test_bit(PG_##flag, &(page)->flags)) {		  \
		seq_printf(seq, "%s"#flag, has_flags ? "|" : ""); \
		has_flags = 1;					  \
	}							  \
} while (0)
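
/*
 * For example, a vmpage that is locked and dirty (with no other flag bits
 * set) renders as "locked|dirty" between the brackets printed by
 * vvp_pgcache_page_show(): the first matching flag is printed bare and
 * every subsequent one is prefixed with '|'.
 */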

static void vvp_pgcache_page_show(const struct lu_env *env,
				  struct seq_file *seq, struct cl_page *page)
{
	struct ccc_page *cpg;
	struct page	*vmpage;
	int		 has_flags;

	cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
	vmpage = cpg->cpg_page;
	seq_printf(seq, " %5i | %p %p %s %s %s %s | %p %lu/%u(%p) %lu %u [",
		   0 /* gen */,
		   cpg, page,
		   "none",
		   cpg->cpg_write_queued ? "wq" : "- ",
		   cpg->cpg_defer_uptodate ? "du" : "- ",
		   PageWriteback(vmpage) ? "wb" : "-",
		   vmpage, vmpage->mapping->host->i_ino,
		   vmpage->mapping->host->i_generation,
		   vmpage->mapping->host, vmpage->index,
		   page_count(vmpage));
	has_flags = 0;
	seq_page_flag(seq, vmpage, locked, has_flags);
	seq_page_flag(seq, vmpage, error, has_flags);
	seq_page_flag(seq, vmpage, referenced, has_flags);
	seq_page_flag(seq, vmpage, uptodate, has_flags);
	seq_page_flag(seq, vmpage, dirty, has_flags);
	seq_page_flag(seq, vmpage, writeback, has_flags);
	seq_printf(seq, "%s]\n", has_flags ? "" : "-");
}

static int vvp_pgcache_show(struct seq_file *f, void *v)
{
	loff_t pos;
	struct ll_sb_info *sbi;
	struct cl_object *clob;
	struct lu_env *env;
	struct cl_page *page;
	struct cl_object_header *hdr;
	struct vvp_pgcache_id id;
	int refcheck;
	int result;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		pos = *(loff_t *)v;
		vvp_pgcache_id_unpack(pos, &id);
		sbi = f->private;
		clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
		if (clob != NULL) {
			hdr = cl_object_header(clob);

			spin_lock(&hdr->coh_page_guard);
			page = cl_page_lookup(hdr, id.vpi_index);
			spin_unlock(&hdr->coh_page_guard);

			seq_printf(f, "%8x@"DFID": ",
				   id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
			if (page != NULL) {
				vvp_pgcache_page_show(env, f, page);
				cl_page_put(env, page);
			} else {
				seq_puts(f, "missing\n");
			}
			lu_object_ref_del(&clob->co_lu, "dump", current);
			cl_object_put(env, clob);
		} else {
			seq_printf(f, "%llx missing\n", pos);
		}
		cl_env_put(env, &refcheck);
		result = 0;
	} else {
		result = PTR_ERR(env);
	}
	return result;
}

static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
{
	struct ll_sb_info *sbi;
	struct lu_env *env;
	int refcheck;

	sbi = f->private;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		if (sbi->ll_site->ls_obj_hash->hs_cur_bits >
		    64 - PGC_OBJ_SHIFT) {
			pos = ERR_PTR(-EFBIG);
		} else {
			*pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev,
						*pos);
			if (*pos == ~0ULL)
				pos = NULL;
		}
		cl_env_put(env, &refcheck);
	}
	return pos;
}

static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct ll_sb_info *sbi;
	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		sbi = f->private;
		*pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos + 1);
		if (*pos == ~0ULL)
			pos = NULL;
		cl_env_put(env, &refcheck);
	}
	return pos;
}

static void vvp_pgcache_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static const struct seq_operations vvp_pgcache_ops = {
	.start = vvp_pgcache_start,
	.next  = vvp_pgcache_next,
	.stop  = vvp_pgcache_stop,
	.show  = vvp_pgcache_show
};
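
/*
 * seq_read() drives the iterator above: ->start() positions the cursor at
 * *pos (returning NULL at end of stream), ->show() emits one record,
 * ->next() advances *pos, and ->stop() ends the pass. Since the position
 * itself encodes a vvp_pgcache_id, start and next simply probe for the
 * next live page with vvp_pgcache_find().
 */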

static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp)
{
	struct ll_sb_info *sbi = PDE_DATA(inode);
	struct seq_file *seq;
	int result;

	result = seq_open(filp, &vvp_pgcache_ops);
	if (result == 0) {
		seq = filp->private_data;
		seq->private = sbi;
	}
	return result;
}

const struct file_operations vvp_dump_pgcache_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = vvp_dump_pgcache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};