/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions, they provide some generic
 * infrastructure for managing object devices.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include "../../include/linux/libcfs/libcfs.h"

#include <linux/module.h>

/* hash_long() */
#include "../../include/linux/libcfs/libcfs_hash.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_disk.h"
#include "../include/lustre_fid.h"
#include "../include/lu_object.h"
#include "../include/lu_ref.h"
#include <linux/list.h>

static void lu_object_free(const struct lu_env *env, struct lu_object *o);
/**
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *top;
	struct lu_site *site;
	struct lu_object *orig;
	struct cfs_hash_bd bd;
	const struct lu_fid *fid;

	top = o->lo_header;
	site = o->lo_dev->ld_site;
	orig = o;

	/*
	 * till we have full fids-on-OST implemented anonymous objects
	 * are possible in OSP. such an object isn't listed in the site
	 * so we should not remove it from the site.
	 */
	fid = lu_object_fid(o);
	if (fid_is_zero(fid)) {
		LASSERT(top->loh_hash.next == NULL &&
			top->loh_hash.pprev == NULL);
		LASSERT(list_empty(&top->loh_lru));
		if (!atomic_dec_and_test(&top->loh_ref))
			return;
		list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
			if (o->lo_ops->loo_object_release != NULL)
				o->lo_ops->loo_object_release(env, o);
		}
		lu_object_free(env, orig);
		return;
	}

	cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
	bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

	if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
		if (lu_object_is_dying(top)) {
			/*
			 * somebody may be waiting for this, currently only
			 * used for cl_object, see cl_object_put_last().
			 */
			wake_up_all(&bkt->lsb_marche_funebre);
		}
		return;
	}

	LASSERT(bkt->lsb_busy > 0);
	bkt->lsb_busy--;
	/*
	 * When last reference is released, iterate over object
	 * layers, and notify them that object is no longer busy.
	 */
	list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_release != NULL)
			o->lo_ops->loo_object_release(env, o);
	}

	if (!lu_object_is_dying(top)) {
		LASSERT(list_empty(&top->loh_lru));
		list_add_tail(&top->loh_lru, &bkt->lsb_lru);
		cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
		return;
	}

	/*
	 * If object is dying (will not be cached), remove it
	 * from hash table and LRU.
	 *
	 * This is done with hash table and LRU lists locked. As the only
	 * way to acquire first reference to previously unreferenced
	 * object is through hash-table lookup (lu_object_find()),
	 * or LRU scanning (lu_site_purge()), that are done under hash-table
	 * and LRU lock, no race with concurrent object lookup is possible
	 * and we can safely destroy object below.
	 */
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
		cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
	cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
	/*
	 * Object was already removed from hash and lru above, can
	 * kill it.
	 */
	lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
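
/*
 * Usage sketch (added commentary, not part of the original code): every
 * successful lu_object_find() is paired with a lu_object_put() once the
 * caller is done with the object:
 *
 *	obj = lu_object_find(env, dev, fid, NULL);
 *	if (!IS_ERR(obj)) {
 *		... use obj ...
 *		lu_object_put(env, obj);
 *	}
 */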
/**
 * Put object and don't keep it in cache. This is a temporary solution for
 * multi-site objects whose layering is not constant.
 */
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
{
	set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
	lu_object_put(env, o);
}
EXPORT_SYMBOL(lu_object_put_nocache);
/**
 * Kill the object and take it out of the LRU cache.
 * Currently used by client code for layout change.
 */
void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
		struct cfs_hash *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
		struct cfs_hash_bd bd;

		cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
		list_del_init(&top->loh_lru);
		cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
		cfs_hash_bd_unlock(obj_hash, &bd, 1);
	}
}
EXPORT_SYMBOL(lu_object_unhash);
/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
					 struct lu_device *dev,
					 const struct lu_fid *f,
					 const struct lu_object_conf *conf)
{
	struct lu_object *scan;
	struct lu_object *top;
	struct list_head *layers;
	unsigned int init_mask = 0;
	unsigned int init_flag;
	int clean;
	int result;

	/*
	 * Create top-level object slice. This will also create
	 * lu_object_header.
	 */
	top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
	if (top == NULL)
		return ERR_PTR(-ENOMEM);
	if (IS_ERR(top))
		return top;
	/*
	 * This is the only place where object fid is assigned. It's constant
	 * after this point.
	 */
	top->lo_header->loh_fid = *f;
	layers = &top->lo_header->loh_layers;
	do {
		/*
		 * Call ->loo_object_init() repeatedly, until no more new
		 * object slices are created.
		 */
		clean = 1;
		init_flag = 1;
		list_for_each_entry(scan, layers, lo_linkage) {
			if (init_mask & init_flag)
				goto next;
			clean = 0;
			scan->lo_header = top->lo_header;
			result = scan->lo_ops->loo_object_init(env, scan, conf);
			if (result != 0) {
				lu_object_free(env, top);
				return ERR_PTR(result);
			}
			init_mask |= init_flag;
next:
			init_flag <<= 1;
		}
	} while (!clean);

	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_start != NULL) {
			result = scan->lo_ops->loo_object_start(env, scan);
			if (result != 0) {
				lu_object_free(env, top);
				return ERR_PTR(result);
			}
		}
	}

	lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
	return top;
}
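
/*
 * Note on lu_object_alloc() above (added commentary): each slice that has
 * been initialized is tracked by one bit in init_mask, and the do/while
 * re-walks loh_layers until a pass creates no new slices, because every
 * ->loo_object_init() may itself append lower slices to the layer list.
 */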
/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_site *site;
	struct lu_object *scan;
	struct list_head *layers;
	struct list_head splice;

	site   = o->lo_dev->ld_site;
	layers = &o->lo_header->loh_layers;
	bkt    = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);

	/*
	 * First call ->loo_object_delete() method to release all resources.
	 */
	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_delete != NULL)
			scan->lo_ops->loo_object_delete(env, scan);
	}

	/*
	 * Then, splice object layers into stand-alone list, and call
	 * ->loo_object_free() on all layers to free memory. Splice is
	 * necessary, because lu_object_header is freed together with the
	 * top-level slice.
	 */
	INIT_LIST_HEAD(&splice);
	list_splice_init(layers, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Free layers in bottom-to-top order, so that object header
		 * lives as long as possible and ->loo_object_free() methods
		 * can look at its contents.
		 */
		o = container_of0(splice.prev, struct lu_object, lo_linkage);
		list_del_init(&o->lo_linkage);
		LASSERT(o->lo_ops->loo_object_free != NULL);
		o->lo_ops->loo_object_free(env, o);
	}

	if (waitqueue_active(&bkt->lsb_marche_funebre))
		wake_up_all(&bkt->lsb_marche_funebre);
}
/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
	struct lu_object_header *h;
	struct lu_object_header *temp;
	struct lu_site_bkt_data *bkt;
	struct cfs_hash_bd bd;
	struct cfs_hash_bd bd2;
	struct list_head dispose;
	int did_sth;
	int start;
	int count;
	int bnr;
	int i;

	if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
		return 0;

	INIT_LIST_HEAD(&dispose);
	/*
	 * Under LRU list lock, scan LRU list and move unreferenced objects to
	 * the dispose list, removing them from LRU and hash table.
	 */
	start = s->ls_purge_start;
	bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
again:
	did_sth = 0;
	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		if (i < start)
			continue;
		count = bnr;
		cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

		list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
			LASSERT(atomic_read(&h->loh_ref) == 0);

			cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
			LASSERT(bd.bd_bucket == bd2.bd_bucket);

			cfs_hash_bd_del_locked(s->ls_obj_hash,
					       &bd2, &h->loh_hash);
			list_move(&h->loh_lru, &dispose);
			did_sth = 1;
			if (nr != ~0 && --nr == 0)
				break;

			if (count > 0 && --count == 0)
				break;
		}
		cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
		cond_resched();
		/*
		 * Free everything on the dispose list. This is safe against
		 * races due to the reasons described in lu_object_put().
		 */
		while (!list_empty(&dispose)) {
			h = container_of0(dispose.next,
					  struct lu_object_header, loh_lru);
			list_del_init(&h->loh_lru);
			lu_object_free(env, lu_object_top(h));
			lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
		}

		if (nr == 0)
			break;
	}

	if (nr != 0 && did_sth && start != 0) {
		start = 0; /* restart from the first bucket */
		goto again;
	}
	/* race on s->ls_purge_start, but nobody cares */
	s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

	return nr;
}
EXPORT_SYMBOL(lu_site_purge);
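
/*
 * Usage sketch (added commentary): passing ~0 as \a nr requests a full
 * purge, which is exactly how lu_stack_fini() below empties a site before
 * the device stack is torn down:
 *
 *	lu_site_purge(env, site, ~0);
 */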
/*
 * Object printing.
 *
 * Code below has to jump through certain loops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into libcfs_debug_msg() interface that assumes that each message
 * supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until terminating newline
 * character is detected.
 */

enum {
	/**
	 * Maximal line size.
	 *
	 * XXX overflow is not handled correctly.
	 */
	LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
	/**
	 * Temporary buffer.
	 */
	char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
	.lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
		    LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
	.lct_init = lu_global_key_init,
	.lct_fini = lu_global_key_fini
};

/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
		      void *cookie, const char *format, ...)
{
	struct libcfs_debug_msg_data *msgdata = cookie;
	struct lu_cdebug_data *key;
	int used;
	int complete;
	va_list args;

	va_start(args, format);

	key = lu_context_key_get(&env->le_ctx, &lu_global_key);
	LASSERT(key != NULL);

	used = strlen(key->lck_area);
	complete = format[strlen(format) - 1] == '\n';
	/*
	 * Append new chunk to the buffer.
	 */
	vsnprintf(key->lck_area + used,
		  ARRAY_SIZE(key->lck_area) - used, format, args);
	if (complete) {
		if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
			libcfs_debug_msg(msgdata, "%s", key->lck_area);
		key->lck_area[0] = 0;
	}
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
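
/*
 * Usage sketch (added commentary; assumes the LIBCFS_DEBUG_MSG_DATA_DECL
 * helper from libcfs): the printer is normally handed to lu_object_print()
 * with the message data as the opaque cookie:
 *
 *	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_INFO, NULL);
 *	lu_object_print(env, &msgdata, lu_cdebug_printer, o);
 */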
/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
			    lu_printer_t printer,
			    const struct lu_object_header *hdr)
{
	(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
		   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
		   PFID(&hdr->loh_fid),
		   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
		   list_empty((struct list_head *)&hdr->loh_lru) ?
			"" : " lru",
		   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);
/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t printer, const struct lu_object *o)
{
	static const char ruler[] = "........................................";
	struct lu_object_header *top;
	int depth = 4;

	top = o->lo_header;
	lu_object_header_print(env, cookie, printer, top);
	(*printer)(env, cookie, "{\n");

	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		/*
		 * print `.' \a depth times followed by type name and address
		 */
		(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
			   o->lo_dev->ld_type->ldt_name, o);

		if (o->lo_ops->loo_object_print != NULL)
			(*o->lo_ops->loo_object_print)(env, cookie, printer, o);

		(*printer)(env, cookie, "\n");
	}

	(*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_invariant != NULL &&
		    !o->lo_ops->loo_object_invariant(o))
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
				       struct cfs_hash_bd *bd,
				       const struct lu_fid *f,
				       wait_queue_t *waiter,
				       __u64 *version)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *h;
	struct hlist_node *hnode;
	__u64 ver = cfs_hash_bd_version_get(bd);

	if (*version == ver)
		return ERR_PTR(-ENOENT);

	*version = ver;
	bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
	/* cfs_hash_bd_peek_locked is a somewhat "internal" function
	 * of cfs_hash; it doesn't add a refcount on the object. */
	hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
	if (hnode == NULL) {
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
		return ERR_PTR(-ENOENT);
	}

	h = container_of0(hnode, struct lu_object_header, loh_hash);
	if (likely(!lu_object_is_dying(h))) {
		cfs_hash_get(s->ls_obj_hash, hnode);
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
		list_del_init(&h->loh_lru);
		return lu_object_top(h);
	}

	/*
	 * Lookup found an object being destroyed; this object cannot be
	 * returned (to ensure that references to dying objects are eventually
	 * drained), and moreover, the lookup has to wait until the object is
	 * freed.
	 */
	init_waitqueue_entry(waiter, current);
	add_wait_queue(&bkt->lsb_marche_funebre, waiter);
	set_current_state(TASK_UNINTERRUPTIBLE);
	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
	return ERR_PTR(-EAGAIN);
}
/**
 * Search cache for an object with the fid \a f. If such object is found,
 * return it. Otherwise, create new object, insert it into cache and return
 * it. In any case, additional reference is acquired on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
				 struct lu_device *dev, const struct lu_fid *f,
				 const struct lu_object_conf *conf)
{
	return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);
static struct lu_object *lu_object_new(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *o;
	struct cfs_hash *hs;
	struct cfs_hash_bd bd;
	struct lu_site_bkt_data *bkt;

	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	hs = dev->ld_site->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	bkt = cfs_hash_bd_extra_get(hs, &bd);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	bkt->lsb_busy++;
	cfs_hash_bd_unlock(hs, &bd, 1);
	return o;
}
/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
					    struct lu_device *dev,
					    const struct lu_fid *f,
					    const struct lu_object_conf *conf,
					    wait_queue_t *waiter)
{
	struct lu_object *o;
	struct lu_object *shadow;
	struct lu_site *s;
	struct cfs_hash *hs;
	struct cfs_hash_bd bd;
	__u64 version = 0;

	/*
	 * This uses standard index maintenance protocol:
	 *
	 *     - search index under lock, and return object if found;
	 *     - otherwise, unlock index, allocate new object;
	 *     - lock index and search again;
	 *     - if nothing is found (usual case), insert newly created
	 *       object into index;
	 *     - otherwise (race: other thread inserted object), free
	 *       object just allocated.
	 *     - unlock index;
	 *     - return object.
	 *
	 * For "LOC_F_NEW" case, we are sure the object is newly established.
	 * It is unnecessary to perform lookup-alloc-lookup-insert, instead,
	 * just alloc and insert directly.
	 *
	 * If dying object is found during index search, add @waiter to the
	 * site wait-queue and return ERR_PTR(-EAGAIN).
	 */
	if (conf != NULL && conf->loc_flags & LOC_F_NEW)
		return lu_object_new(env, dev, f, conf);

	s  = dev->ld_site;
	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	o = htable_lookup(s, &bd, f, waiter, &version);
	cfs_hash_bd_unlock(hs, &bd, 1);
	if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
		return o;

	/*
	 * Allocate new object. This may result in rather complicated
	 * operations, including fld queries, inode loading, etc.
	 */
	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	LASSERT(lu_fid_eq(lu_object_fid(o), f));

	cfs_hash_bd_lock(hs, &bd, 1);

	shadow = htable_lookup(s, &bd, f, waiter, &version);
	if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) {
		struct lu_site_bkt_data *bkt;

		bkt = cfs_hash_bd_extra_get(hs, &bd);
		cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
		bkt->lsb_busy++;
		cfs_hash_bd_unlock(hs, &bd, 1);
		return o;
	}

	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
	cfs_hash_bd_unlock(hs, &bd, 1);
	lu_object_free(env, o);
	return shadow;
}
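
/*
 * Added commentary: the lookup-alloc-lookup-insert sequence above is the
 * classic optimistic pattern for a lock-protected index. The second
 * htable_lookup() under the bucket lock catches a concurrent inserter; in
 * that case the local allocation is freed and the "shadow" object wins.
 */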
/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
				    struct lu_device *dev,
				    const struct lu_fid *f,
				    const struct lu_object_conf *conf)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object *obj;
	wait_queue_t wait;

	while (1) {
		obj = lu_object_find_try(env, dev, f, conf, &wait);
		if (obj != ERR_PTR(-EAGAIN))
			return obj;
		/*
		 * lu_object_find_try() already added waiter into the
		 * wait queue.
		 */
		schedule();
		bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
		remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
	}
}
EXPORT_SYMBOL(lu_object_find_at);
/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *top;
	struct lu_object *obj;

	top = lu_object_find(env, dev, f, conf);
	if (!IS_ERR(top)) {
		obj = lu_object_locate(top->lo_header, dev->ld_type);
		if (obj == NULL)
			lu_object_put(env, top);
	} else {
		obj = top;
	}
	return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
/**
 * Global list of all device types.
 */
static LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
	int result = 0;

	INIT_LIST_HEAD(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_init)
		result = ldt->ldt_ops->ldto_init(ldt);
	if (result == 0)
		list_add(&ldt->ldt_linkage, &lu_device_types);
	return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
	list_del_init(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_fini)
		ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);

void lu_types_stop(void)
{
	struct lu_device_type *ldt;

	list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
		if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
			ldt->ldt_ops->ldto_stop(ldt);
	}
}
EXPORT_SYMBOL(lu_types_stop);
/**
 * Global list of all sites on this node
 */
static LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
	struct lu_env *lsp_env;
	void *lsp_cookie;
	lu_printer_t lsp_printer;
};

static int
lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		  struct hlist_node *hnode, void *data)
{
	struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (!list_empty(&h->loh_layers)) {
		const struct lu_object *o;

		o = lu_object_top(h);
		lu_object_print(arg->lsp_env, arg->lsp_cookie,
				arg->lsp_printer, o);
	} else {
		lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
				       arg->lsp_printer, h);
	}
	return 0;
}

/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
		   lu_printer_t printer)
{
	struct lu_site_print_arg arg = {
		.lsp_env     = (struct lu_env *)env,
		.lsp_cookie  = cookie,
		.lsp_printer = printer,
	};

	cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);
enum {
	LU_CACHE_PERCENT_MAX	 = 50,
	LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
module_param(lu_cache_percent, int, 0644);
MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");

/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
	unsigned long cache_size;
	int bits;

	/*
	 * Calculate hash table size, assuming that we want reasonable
	 * performance when 20% of total memory is occupied by cache of
	 * lu_objects.
	 *
	 * Size of lu_object is (arbitrary) taken as 1K (together with inode).
	 */
	cache_size = totalram_pages;

#if BITS_PER_LONG == 32
	/* limit hashtable size for lowmem systems to low RAM */
	if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
		cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
#endif

	/* clear off unreasonable cache setting. */
	if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
		CWARN("obdclass: invalid lu_cache_percent: %u, it must be in the range of (0, %u]. Will use default value: %u.\n",
		      lu_cache_percent, LU_CACHE_PERCENT_MAX,
		      LU_CACHE_PERCENT_DEFAULT);

		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
	}
	cache_size = cache_size / 100 * lu_cache_percent *
		(PAGE_CACHE_SIZE / 1024);

	for (bits = 1; (1 << bits) < cache_size; ++bits) {
		;
	}
	return bits;
}
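
/*
 * Worked example (added commentary, illustrative arithmetic only): with
 * 4 GiB of RAM and 4 KiB pages, totalram_pages = 2^20, so the default
 * lu_cache_percent = 20 gives
 *
 *	cache_size = 2^20 / 100 * 20 * (4096 / 1024) ~= 838800
 *
 * and the loop above returns the smallest bits with 2^bits >= cache_size,
 * here bits = 20.
 */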
static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
				const void *key, unsigned mask)
{
	struct lu_fid *fid = (struct lu_fid *)key;
	__u32 hash;

	hash = fid_flatten32(fid);
	hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
	hash = hash_long(hash, hs->hs_bkt_bits);

	/* give me another random factor */
	hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

	hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
	hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

	return hash & mask;
}

static void *lu_obj_hop_object(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (atomic_add_return(1, &h->loh_ref) == 1) {
		struct lu_site_bkt_data *bkt;
		struct cfs_hash_bd bd;

		cfs_hash_bd_get(hs, &h->loh_fid, &bd);
		bkt = cfs_hash_bd_extra_get(hs, &bd);
		bkt->lsb_busy++;
	}
}

static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
	LBUG(); /* we should never call it */
}

cfs_hash_ops_t lu_site_hash_ops = {
	.hs_hash	= lu_obj_hop_hash,
	.hs_key		= lu_obj_hop_key,
	.hs_keycmp	= lu_obj_hop_keycmp,
	.hs_object	= lu_obj_hop_object,
	.hs_get		= lu_obj_hop_get,
	.hs_put_locked	= lu_obj_hop_put_locked,
};
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	if (list_empty(&d->ld_linkage))
		list_add(&d->ld_linkage, &s->ls_ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);

void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	list_del_init(&d->ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);

/**
 * Initialize site \a s, with \a d as the top level device.
 */
#define LU_SITE_BITS_MIN	12
#define LU_SITE_BITS_MAX	24
/**
 * total 256 buckets, we don't want too many buckets because:
 * - consume too much memory
 * - avoid unbalanced LRU list
 */
#define LU_SITE_BKT_BITS	8

int lu_site_init(struct lu_site *s, struct lu_device *top)
{
	struct lu_site_bkt_data *bkt;
	struct cfs_hash_bd bd;
	char name[16];
	int bits;
	int i;

	memset(s, 0, sizeof(*s));
	bits = lu_htable_order();
	snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
	for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
	     bits >= LU_SITE_BITS_MIN; bits--) {
		s->ls_obj_hash = cfs_hash_create(name, bits, bits,
						 bits - LU_SITE_BKT_BITS,
						 sizeof(*bkt), 0, 0,
						 &lu_site_hash_ops,
						 CFS_HASH_SPIN_BKTLOCK |
						 CFS_HASH_NO_ITEMREF |
						 CFS_HASH_DEPTH |
						 CFS_HASH_ASSERT_EMPTY);
		if (s->ls_obj_hash != NULL)
			break;
	}

	if (s->ls_obj_hash == NULL) {
		CERROR("failed to create lu_site hash with bits: %d\n", bits);
		return -ENOMEM;
	}

	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
		INIT_LIST_HEAD(&bkt->lsb_lru);
		init_waitqueue_head(&bkt->lsb_marche_funebre);
	}

	s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
	if (s->ls_stats == NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
		return -ENOMEM;
	}

	lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
			     0, "created", "created");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
			     0, "cache_hit", "cache_hit");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
			     0, "cache_miss", "cache_miss");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
			     0, "cache_race", "cache_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
			     0, "cache_death_race", "cache_death_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
			     0, "lru_purged", "lru_purged");

	INIT_LIST_HEAD(&s->ls_linkage);
	s->ls_top_dev = top;
	top->ld_site = s;
	lu_device_get(top);
	lu_ref_add(&top->ld_reference, "site-top", s);

	INIT_LIST_HEAD(&s->ls_ld_linkage);
	spin_lock_init(&s->ls_ld_lock);

	lu_dev_add_linkage(s, top);

	return 0;
}
EXPORT_SYMBOL(lu_site_init);
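
/*
 * Usage sketch (added commentary): a stack typically pairs these calls as
 *
 *	rc = lu_site_init(s, top_dev);
 *	if (rc == 0)
 *		rc = lu_site_init_finish(s);
 *	...
 *	lu_site_fini(s);
 *
 * where lu_site_init_finish() (below) publishes the site on the global
 * lu_sites list that the cache shrinker walks.
 */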
/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
	mutex_lock(&lu_sites_guard);
	list_del_init(&s->ls_linkage);
	mutex_unlock(&lu_sites_guard);

	if (s->ls_obj_hash != NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
	}

	if (s->ls_top_dev != NULL) {
		s->ls_top_dev->ld_site = NULL;
		lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
		lu_device_put(s->ls_top_dev);
		s->ls_top_dev = NULL;
	}

	if (s->ls_stats != NULL)
		lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);

/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
	int result;

	mutex_lock(&lu_sites_guard);
	result = lu_context_refill(&lu_shrink_env.le_ctx);
	if (result == 0)
		list_add(&s->ls_linkage, &lu_sites);
	mutex_unlock(&lu_sites_guard);
	return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
	atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
	LASSERT(atomic_read(&d->ld_ref) > 0);
	atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
	if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
		t->ldt_ops->ldto_start(t);
	memset(d, 0, sizeof(*d));
	atomic_set(&d->ld_ref, 0);
	d->ld_type = t;
	lu_ref_init(&d->ld_reference);
	INIT_LIST_HEAD(&d->ld_linkage);
	return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
	struct lu_device_type *t;

	t = d->ld_type;
	if (d->ld_obd != NULL) {
		d->ld_obd->obd_lu_dev = NULL;
		d->ld_obd = NULL;
	}

	lu_ref_fini(&d->ld_reference);
	LASSERTF(atomic_read(&d->ld_ref) == 0,
		 "Refcount is %u\n", atomic_read(&d->ld_ref));
	LASSERT(t->ldt_device_nr > 0);
	if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
		t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);
/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o, struct lu_object_header *h,
		   struct lu_device *d)
{
	memset(o, 0, sizeof(*o));
	o->lo_header = h;
	o->lo_dev = d;
	lu_device_get(d);
	lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
	INIT_LIST_HEAD(&o->lo_linkage);

	return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
	struct lu_device *dev = o->lo_dev;

	LASSERT(list_empty(&o->lo_linkage));

	if (dev != NULL) {
		lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
			      "lu_object", o);
		lu_device_put(dev);
		o->lo_dev = NULL;
	}
}
EXPORT_SYMBOL(lu_object_fini);

/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
	list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
	list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);

/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
	memset(h, 0, sizeof(*h));
	atomic_set(&h->loh_ref, 1);
	INIT_HLIST_NODE(&h->loh_hash);
	INIT_LIST_HEAD(&h->loh_lru);
	INIT_LIST_HEAD(&h->loh_layers);
	lu_ref_init(&h->loh_reference);
	return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
	LASSERT(list_empty(&h->loh_layers));
	LASSERT(list_empty(&h->loh_lru));
	LASSERT(hlist_unhashed(&h->loh_hash));
	lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
				   const struct lu_device_type *dtype)
{
	struct lu_object *o;

	list_for_each_entry(o, &h->loh_layers, lo_linkage) {
		if (o->lo_dev->ld_type == dtype)
			return o;
	}
	return NULL;
}
EXPORT_SYMBOL(lu_object_locate);

/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
	struct lu_site *site = top->ld_site;
	struct lu_device *scan;
	struct lu_device *next;

	lu_site_purge(env, site, ~0);
	for (scan = top; scan != NULL; scan = next) {
		next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
		lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
		lu_device_put(scan);
	}

	/* purge again. */
	lu_site_purge(env, site, ~0);

	for (scan = top; scan != NULL; scan = next) {
		const struct lu_device_type *ldt = scan->ld_type;
		struct obd_type *type;

		next = ldt->ldt_ops->ldto_device_free(env, scan);
		type = ldt->ldt_obd_type;
		if (type != NULL) {
			type->typ_refcnt--;
			class_put_type(type);
		}
	}
}
EXPORT_SYMBOL(lu_stack_fini);
enum {
	/**
	 * Maximal number of tld slots.
	 */
	LU_CONTEXT_KEY_NR = 40
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static DEFINE_SPINLOCK(lu_keys_guard);

/**
 * Global counter incremented whenever key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version;

/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
	int result;
	int i;

	LASSERT(key->lct_init != NULL);
	LASSERT(key->lct_fini != NULL);
	LASSERT(key->lct_tags != 0);

	result = -ENFILE;
	spin_lock(&lu_keys_guard);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		if (lu_keys[i] == NULL) {
			key->lct_index = i;
			atomic_set(&key->lct_used, 1);
			lu_keys[i] = key;
			lu_ref_init(&key->lct_reference);
			result = 0;
			++key_set_version;
			break;
		}
	}
	spin_unlock(&lu_keys_guard);
	return result;
}
EXPORT_SYMBOL(lu_context_key_register);
static void key_fini(struct lu_context *ctx, int index)
{
	if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
		struct lu_context_key *key;

		key = lu_keys[index];
		LASSERT(key != NULL);
		LASSERT(key->lct_fini != NULL);
		LASSERT(atomic_read(&key->lct_used) > 1);

		key->lct_fini(ctx, key, ctx->lc_value[index]);
		lu_ref_del(&key->lct_reference, "ctx", ctx);
		atomic_dec(&key->lct_used);

		if ((ctx->lc_tags & LCT_NOREF) == 0) {
#ifdef CONFIG_MODULE_UNLOAD
			LINVRNT(module_refcount(key->lct_owner) > 0);
#endif
			module_put(key->lct_owner);
		}
		ctx->lc_value[index] = NULL;
	}
}

/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
	LASSERT(atomic_read(&key->lct_used) >= 1);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

	lu_context_key_quiesce(key);

	++key_set_version;
	spin_lock(&lu_keys_guard);
	key_fini(&lu_shrink_env.le_ctx, key->lct_index);
	if (lu_keys[key->lct_index]) {
		lu_keys[key->lct_index] = NULL;
		lu_ref_fini(&key->lct_reference);
	}
	spin_unlock(&lu_keys_guard);

	LASSERTF(atomic_read(&key->lct_used) == 1,
		 "key has instances: %d\n",
		 atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
	struct lu_context_key *key = k;
	va_list args;
	int result;

	va_start(args, k);
	do {
		result = lu_context_key_register(key);
		if (result)
			break;
		key = va_arg(args, struct lu_context_key *);
	} while (key != NULL);
	va_end(args);

	if (result != 0) {
		va_start(args, k);
		while (k != key) {
			lu_context_key_degister(k);
			k = va_arg(args, struct lu_context_key *);
		}
		va_end(args);
	}

	return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);

/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_degister(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_revive(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_quiesce(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);

/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
			 const struct lu_context_key *key)
{
	LINVRNT(ctx->lc_state == LCS_ENTERED);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
	LASSERT(lu_keys[key->lct_index] == key);
	return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);
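
/*
 * Usage sketch (added commentary; the "my_*" names are hypothetical): a
 * module declares its per-context data with the helper macros and fetches
 * it the same way lu_cdebug_printer() fetches lu_global_key above:
 *
 *	LU_KEY_INIT_FINI(my, struct my_thread_info);
 *
 *	struct lu_context_key my_thread_key = {
 *		.lct_tags = LCT_MD_THREAD,
 *		.lct_init = my_key_init,
 *		.lct_fini = my_key_fini
 *	};
 *
 *	info = lu_context_key_get(&env->le_ctx, &my_thread_key);
 */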
/**
 * List of remembered contexts. XXX document me.
 */
static LIST_HEAD(lu_context_remembered);

/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
{
	struct lu_context *ctx;

	if (!(key->lct_tags & LCT_QUIESCENT)) {
		/*
		 * XXX layering violation.
		 */
		key->lct_tags |= LCT_QUIESCENT;
		/*
		 * XXX memory barrier has to go here.
		 */
		spin_lock(&lu_keys_guard);
		list_for_each_entry(ctx, &lu_context_remembered,
				    lc_remember)
			key_fini(ctx, key->lct_index);
		spin_unlock(&lu_keys_guard);
		++key_set_version;
	}
}
EXPORT_SYMBOL(lu_context_key_quiesce);

void lu_context_key_revive(struct lu_context_key *key)
{
	key->lct_tags &= ~LCT_QUIESCENT;
	++key_set_version;
}
EXPORT_SYMBOL(lu_context_key_revive);
static void keys_fini(struct lu_context *ctx)
{
	int i;

	if (ctx->lc_value == NULL)
		return;

	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
		key_fini(ctx, i);

	OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof(ctx->lc_value[0]));
	ctx->lc_value = NULL;
}

static int keys_fill(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_value != NULL);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		struct lu_context_key *key;

		key = lu_keys[i];
		if (ctx->lc_value[i] == NULL && key != NULL &&
		    (key->lct_tags & ctx->lc_tags) &&
		    /*
		     * Don't create values for a LCT_QUIESCENT key, as this
		     * will pin module owning a key.
		     */
		    !(key->lct_tags & LCT_QUIESCENT)) {
			void *value;

			LINVRNT(key->lct_init != NULL);
			LINVRNT(key->lct_index == i);

			value = key->lct_init(ctx, key);
			if (unlikely(IS_ERR(value)))
				return PTR_ERR(value);

			if (!(ctx->lc_tags & LCT_NOREF))
				try_module_get(key->lct_owner);
			lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
			atomic_inc(&key->lct_used);
			/*
			 * This is the only place in the code, where an
			 * element of ctx->lc_value[] array is set to non-NULL
			 * value.
			 */
			ctx->lc_value[i] = value;
			if (key->lct_exit != NULL)
				ctx->lc_tags |= LCT_HAS_EXIT;
		}
		ctx->lc_version = key_set_version;
	}
	return 0;
}

static int keys_init(struct lu_context *ctx)
{
	OBD_ALLOC(ctx->lc_value,
		  ARRAY_SIZE(lu_keys) * sizeof(ctx->lc_value[0]));
	if (likely(ctx->lc_value != NULL))
		return keys_fill(ctx);

	return -ENOMEM;
}
/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
	int rc;

	memset(ctx, 0, sizeof(*ctx));
	ctx->lc_state = LCS_INITIALIZED;
	ctx->lc_tags = tags;
	if (tags & LCT_REMEMBER) {
		spin_lock(&lu_keys_guard);
		list_add(&ctx->lc_remember, &lu_context_remembered);
		spin_unlock(&lu_keys_guard);
	} else {
		INIT_LIST_HEAD(&ctx->lc_remember);
	}

	rc = keys_init(ctx);
	if (rc != 0)
		lu_context_fini(ctx);

	return rc;
}
EXPORT_SYMBOL(lu_context_init);
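
/*
 * Added commentary: the intended life cycle of a context is
 *
 *	lu_context_init() -> lu_context_enter() ->
 *	... lu_context_key_get() ... -> lu_context_exit() ->
 *	lu_context_fini()
 *
 * where the enter/exit pair may repeat; key values may only be accessed
 * between enter and exit (state LCS_ENTERED).
 */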
/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_FINALIZED;

	if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
		LASSERT(list_empty(&ctx->lc_remember));
		keys_fini(ctx);

	} else { /* could race with key degister */
		spin_lock(&lu_keys_guard);
		keys_fini(ctx);
		list_del_init(&ctx->lc_remember);
		spin_unlock(&lu_keys_guard);
	}
}
EXPORT_SYMBOL(lu_context_fini);

/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);

/**
 * Called after exiting from \a ctx
 */
void lu_context_exit(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_state == LCS_ENTERED);
	ctx->lc_state = LCS_LEFT;
	if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
		for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
			if (ctx->lc_value[i] != NULL) {
				struct lu_context_key *key;

				key = lu_keys[i];
				LASSERT(key != NULL);
				if (key->lct_exit != NULL)
					key->lct_exit(ctx,
						      key, ctx->lc_value[i]);
			}
		}
	}
}
EXPORT_SYMBOL(lu_context_exit);

/**
 * Allocate for context all missing keys that were registered after context
 * creation. key_set_version is only changed in rare cases when modules
 * are loaded and removed.
 */
int lu_context_refill(struct lu_context *ctx)
{
	return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);
/**
 * lu_ctx_tags/lu_ses_tags will be updated if there are new types of
 * obd being added. Currently, this is only used on the client side,
 * specifically for the echo device client; for other stacks (like ptlrpc
 * threads), contexts are predefined when the lu_device type is registered,
 * during the module probe phase.
 */
__u32 lu_context_tags_default = 0;
__u32 lu_session_tags_default = 0;

void lu_context_tags_update(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_context_tags_default |= tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_update);

void lu_context_tags_clear(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_context_tags_default &= ~tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_clear);

void lu_session_tags_update(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_session_tags_default |= tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_update);

void lu_session_tags_clear(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_session_tags_default &= ~tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_clear);
int lu_env_init(struct lu_env *env, __u32 tags)
{
	int result;

	env->le_ses = NULL;
	result = lu_context_init(&env->le_ctx, tags);
	if (likely(result == 0))
		lu_context_enter(&env->le_ctx);
	return result;
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
	lu_context_exit(&env->le_ctx);
	lu_context_fini(&env->le_ctx);
	env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);

int lu_env_refill(struct lu_env *env)
{
	int result;

	result = lu_context_refill(&env->le_ctx);
	if (result == 0 && env->le_ses != NULL)
		result = lu_context_refill(env->le_ses);
	return result;
}
EXPORT_SYMBOL(lu_env_refill);

/**
 * Currently, this API is only used by the echo client. Because the echo
 * client and the normal Lustre client share the same cl_env cache, the echo
 * client needs to refresh the env context after it gets one from the cache,
 * especially when a normal client and an echo client co-exist on the same
 * node.
 */
int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
			  __u32 stags)
{
	int result;

	if ((env->le_ctx.lc_tags & ctags) != ctags) {
		env->le_ctx.lc_version = 0;
		env->le_ctx.lc_tags |= ctags;
	}

	if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
		env->le_ses->lc_version = 0;
		env->le_ses->lc_tags |= stags;
	}

	result = lu_env_refill(env);

	return result;
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
typedef struct lu_site_stats {
	unsigned	lss_populated;
	unsigned	lss_max_search;
	unsigned	lss_total;
	unsigned	lss_busy;
} lu_site_stats_t;

static void lu_site_stats_get(struct cfs_hash *hs,
			      lu_site_stats_t *stats, int populated)
{
	struct cfs_hash_bd bd;
	int i;

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, 1);
		stats->lss_busy  += bkt->lsb_busy;
		stats->lss_total += cfs_hash_bd_count_get(&bd);
		stats->lss_max_search = max((int)stats->lss_max_search,
					    cfs_hash_bd_depmax_get(&bd));
		if (!populated) {
			cfs_hash_bd_unlock(hs, &bd, 1);
			continue;
		}

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			if (!hlist_empty(hhead))
				stats->lss_populated++;
		}
		cfs_hash_bd_unlock(hs, &bd, 1);
	}
}
/*
 * There exists a potential lock inversion deadlock scenario when using
 * Lustre on top of ZFS. This occurs between one of ZFS's
 * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially,
 * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
 * while thread B will take the ht_lock and sleep on the lu_sites_guard
 * lock. Obviously neither thread will wake and drop their respective hold
 * on their lock.
 *
 * To prevent this from happening we must ensure the lu_sites_guard lock is
 * not taken while down this code path. ZFS reliably does not set the
 * __GFP_FS bit in its code paths, so this can be used to determine if it
 * is safe to take the lu_sites_guard lock.
 *
 * Ideally we should accurately return the remaining number of cached
 * objects without taking the lu_sites_guard lock, but this is not
 * possible in the current implementation.
 */
static unsigned long lu_cache_shrink_count(struct shrinker *sk,
					   struct shrink_control *sc)
{
	lu_site_stats_t stats;
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long cached = 0;

	if (!(sc->gfp_mask & __GFP_FS))
		return 0;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		memset(&stats, 0, sizeof(stats));
		lu_site_stats_get(s->ls_obj_hash, &stats, 0);
		cached += stats.lss_total - stats.lss_busy;
	}
	mutex_unlock(&lu_sites_guard);

	cached = (cached / 100) * sysctl_vfs_cache_pressure;
	CDEBUG(D_INODE, "%ld objects cached\n", cached);
	return cached;
}
static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
					  struct shrink_control *sc)
{
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long remain = sc->nr_to_scan, freed = 0;
	LIST_HEAD(splice);

	if (!(sc->gfp_mask & __GFP_FS))
		/* We must not take the lu_sites_guard lock when
		 * __GFP_FS is *not* set because of the deadlock
		 * possibility detailed above. Additionally,
		 * since we cannot determine the number of
		 * objects in the cache without taking this
		 * lock, we're in a particularly tough spot. As
		 * a result, we'll just lie and say our cache is
		 * empty. This _should_ be ok, as we can't
		 * reclaim objects when __GFP_FS is *not* set
		 * anyway.
		 */
		return SHRINK_STOP;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		freed = lu_site_purge(&lu_shrink_env, s, remain);
		remain -= freed;
		/*
		 * Move just shrunk site to the tail of site list to
		 * assure shrinking fairness.
		 */
		list_move_tail(&s->ls_linkage, &splice);
	}
	list_splice(&splice, lu_sites.prev);
	mutex_unlock(&lu_sites_guard);

	return sc->nr_to_scan - remain;
}
/**
 * Environment to be used in debugger, contains all tags.
 */
struct lu_env lu_debugging_env;

/**
 * Debugging printer function using printk().
 */
int lu_printk_printer(const struct lu_env *env,
		      void *unused, const char *format, ...)
{
	va_list args;

	va_start(args, format);
	vprintk(format, args);
	va_end(args);
	return 0;
}

static struct shrinker lu_site_shrinker = {
	.count_objects	= lu_cache_shrink_count,
	.scan_objects	= lu_cache_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};
/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
	int result;

	CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

	result = lu_ref_global_init();
	if (result != 0)
		return result;

	LU_CONTEXT_KEY_INIT(&lu_global_key);
	result = lu_context_key_register(&lu_global_key);
	if (result != 0)
		return result;

	/*
	 * At this level, we don't know what tags are needed, so allocate them
	 * conservatively. This should not be too bad, because this
	 * environment is global.
	 */
	mutex_lock(&lu_sites_guard);
	result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
	mutex_unlock(&lu_sites_guard);
	if (result != 0)
		return result;

	/*
	 * seeks estimation: 3 seeks to read a record from oi, one to read
	 * inode, one for ea. Unfortunately setting this high value results in
	 * lu_object/inode cache consuming all the memory.
	 */
	register_shrinker(&lu_site_shrinker);

	return result;
}

/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
	unregister_shrinker(&lu_site_shrinker);
	lu_context_key_degister(&lu_global_key);

	/*
	 * Tear shrinker environment down _after_ de-registering
	 * lu_global_key, because the latter has a value in the former.
	 */
	mutex_lock(&lu_sites_guard);
	lu_env_fini(&lu_shrink_env);
	mutex_unlock(&lu_sites_guard);

	lu_ref_global_fini();
}
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
{
#if defined(CONFIG_PROC_FS)
	struct lprocfs_counter ret;

	lprocfs_stats_collect(stats, idx, &ret);
	return (__u32)ret.lc_count;
#else
	return 0;
#endif
}

/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
{
	lu_site_stats_t stats;

	memset(&stats, 0, sizeof(stats));
	lu_site_stats_get(s->ls_obj_hash, &stats, 1);

	return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
			  stats.lss_busy,
			  stats.lss_total,
			  stats.lss_populated,
			  CFS_HASH_NHLIST(s->ls_obj_hash),
			  stats.lss_max_search,
			  ls_stats_read(s->ls_stats, LU_SS_CREATED),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
			  ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
}
EXPORT_SYMBOL(lu_site_stats_print);
/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
{
	int result;
	struct lu_kmem_descr *iter = caches;

	for (result = 0; iter->ckd_cache != NULL; ++iter) {
		*iter->ckd_cache = kmem_cache_create(iter->ckd_name,
						     iter->ckd_size,
						     0, 0, NULL);
		if (*iter->ckd_cache == NULL) {
			result = -ENOMEM;
			/* free all previously allocated caches */
			lu_kmem_fini(caches);
			break;
		}
	}
	return result;
}
EXPORT_SYMBOL(lu_kmem_init);

/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
	for (; caches->ckd_cache != NULL; ++caches) {
		if (*caches->ckd_cache != NULL) {
			kmem_cache_destroy(*caches->ckd_cache);
			*caches->ckd_cache = NULL;
		}
	}
}
EXPORT_SYMBOL(lu_kmem_fini);
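
/*
 * Usage sketch (added commentary; the cache name and type are made up): a
 * module describes its slabs with an array terminated by a NULL ckd_cache:
 *
 *	static struct kmem_cache *foo_kmem;
 *
 *	static struct lu_kmem_descr foo_caches[] = {
 *		{
 *			.ckd_cache = &foo_kmem,
 *			.ckd_name  = "foo_kmem",
 *			.ckd_size  = sizeof(struct foo)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *
 * and then calls lu_kmem_init(foo_caches) on load and
 * lu_kmem_fini(foo_caches) on unload.
 */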
/**
 * Temporary solution to be able to assign fid in ->do_create()
 * till we have fully-functional OST fids
 */
void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
			  const struct lu_fid *fid)
{
	struct lu_site *s = o->lo_dev->ld_site;
	struct lu_fid *old = &o->lo_header->loh_fid;
	struct lu_site_bkt_data *bkt;
	struct lu_object *shadow;
	wait_queue_t waiter;
	struct cfs_hash *hs;
	struct cfs_hash_bd bd;
	__u64 version = 0;

	LASSERT(fid_is_zero(old));

	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
	shadow = htable_lookup(s, &bd, fid, &waiter, &version);
	/* supposed to be unique */
	LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT);
	*old = *fid;
	bkt = cfs_hash_bd_extra_get(hs, &bd);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	bkt->lsb_busy++;
	cfs_hash_bd_unlock(hs, &bd, 1);
}
EXPORT_SYMBOL(lu_object_assign_fid);

/**
 * Allocates an object with 0 (non-assigned) fid.
 * XXX: temporary solution to be able to assign fid in ->do_create()
 *      till we have fully-functional OST fids
 */
struct lu_object *lu_object_anon(const struct lu_env *env,
				 struct lu_device *dev,
				 const struct lu_object_conf *conf)
{
	struct lu_fid fid;
	struct lu_object *o;

	fid_zero(&fid);
	o = lu_object_alloc(env, dev, &fid, conf);

	return o;
}
EXPORT_SYMBOL(lu_object_anon);
struct lu_buf LU_BUF_NULL = {
	.lb_buf = NULL,
	.lb_len = 0
};
EXPORT_SYMBOL(LU_BUF_NULL);

void lu_buf_free(struct lu_buf *buf)
{
	LASSERT(buf);
	if (buf->lb_buf) {
		LASSERT(buf->lb_len > 0);
		OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
		buf->lb_buf = NULL;
		buf->lb_len = 0;
	}
}
EXPORT_SYMBOL(lu_buf_free);

void lu_buf_alloc(struct lu_buf *buf, int size)
{
	LASSERT(buf);
	LASSERT(buf->lb_buf == NULL);
	LASSERT(buf->lb_len == 0);
	OBD_ALLOC_LARGE(buf->lb_buf, size);
	if (likely(buf->lb_buf))
		buf->lb_len = size;
}
EXPORT_SYMBOL(lu_buf_alloc);

void lu_buf_realloc(struct lu_buf *buf, int size)
{
	lu_buf_free(buf);
	lu_buf_alloc(buf, size);
}
EXPORT_SYMBOL(lu_buf_realloc);

struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len)
{
	if (buf->lb_buf == NULL && buf->lb_len == 0)
		lu_buf_alloc(buf, len);

	if ((len > buf->lb_len) && (buf->lb_buf != NULL))
		lu_buf_realloc(buf, len);

	return buf;
}
EXPORT_SYMBOL(lu_buf_check_and_alloc);

/**
 * Increase the size of the \a buf.
 * Preserves old data in the buffer; the old buffer remains unchanged on
 * error.
 * \retval 0 or -ENOMEM
 */
int lu_buf_check_and_grow(struct lu_buf *buf, int len)
{
	char *ptr;

	if (len <= buf->lb_len)
		return 0;

	OBD_ALLOC_LARGE(ptr, len);
	if (ptr == NULL)
		return -ENOMEM;

	/* Free the old buf */
	if (buf->lb_buf != NULL) {
		memcpy(ptr, buf->lb_buf, buf->lb_len);
		OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
	}

	buf->lb_buf = ptr;
	buf->lb_len = len;
	return 0;
}
EXPORT_SYMBOL(lu_buf_check_and_grow);
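
/*
 * Usage sketch (added commentary): a typical lu_buf consumer starts from
 * LU_BUF_NULL and lets the helpers manage the allocation:
 *
 *	struct lu_buf buf = LU_BUF_NULL;
 *	int rc;
 *
 *	lu_buf_check_and_alloc(&buf, 128);
 *	...
 *	rc = lu_buf_check_and_grow(&buf, 4096);
 *	...
 *	lu_buf_free(&buf);
 */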