drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define MAX_STRING_SIZE 128

extern int ldlm_srv_namespace_nr;
extern int ldlm_cli_namespace_nr;
extern struct mutex ldlm_srv_namespace_lock;
extern struct list_head ldlm_srv_namespace_list;
extern struct mutex ldlm_cli_namespace_lock;
extern struct list_head ldlm_cli_active_namespace_list;

static inline int ldlm_namespace_nr_read(enum ldlm_side client)
{
	return client == LDLM_NAMESPACE_SERVER ?
		ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
}

static inline void ldlm_namespace_nr_inc(enum ldlm_side client)
{
	if (client == LDLM_NAMESPACE_SERVER)
		ldlm_srv_namespace_nr++;
	else
		ldlm_cli_namespace_nr++;
}

static inline void ldlm_namespace_nr_dec(enum ldlm_side client)
{
	if (client == LDLM_NAMESPACE_SERVER)
		ldlm_srv_namespace_nr--;
	else
		ldlm_cli_namespace_nr--;
}

static inline struct list_head *ldlm_namespace_list(enum ldlm_side client)
{
	return client == LDLM_NAMESPACE_SERVER ?
		&ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
}

static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client)
{
	return client == LDLM_NAMESPACE_SERVER ?
		&ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
}
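
/*
 * The inline helpers above pick the server-side or client-side namespace
 * counter, list and mutex based on the enum ldlm_side argument.  A minimal
 * usage sketch (hypothetical caller; real call sites live elsewhere, e.g.
 * ldlm_resource.c, and the ns_list_chain member name is assumed here):
 *
 *	struct ldlm_namespace *ns;
 *
 *	mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
 *	list_for_each_entry(ns, ldlm_namespace_list(LDLM_NAMESPACE_CLIENT),
 *			    ns_list_chain)
 *		do_something_with(ns);	// hypothetical helper
 *	mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
 */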

/* ns_bref is the number of resources in this namespace */
static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
{
	return atomic_read(&ns->ns_bref) == 0;
}

void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *,
					  enum ldlm_side);
void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
					    enum ldlm_side);
struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side);

/* ldlm_request.c */
/* LRU cancel flags; they select which unused locks to cancel and how. */
enum {
	LDLM_LRU_FLAG_AGED	= BIT(0), /* Cancel aged locks (non lru resize). */
	LDLM_LRU_FLAG_PASSED	= BIT(1), /* Cancel passed number of locks. */
	LDLM_LRU_FLAG_SHRINK	= BIT(2), /* Cancel locks from shrinker. */
	LDLM_LRU_FLAG_LRUR	= BIT(3), /* Cancel locks from lru resize. */
	LDLM_LRU_FLAG_NO_WAIT	= BIT(4), /* Cancel locks w/o blocking (neither
					   * sending nor waiting for any rpcs)
					   */
	LDLM_LRU_FLAG_LRUR_NO_WAIT = BIT(5), /* LRUR + NO_WAIT */
};
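
/*
 * These flags are combined into the "flags" argument of ldlm_cancel_lru()
 * and ldlm_cancel_lru_local() below to select the cancellation behaviour.
 * Note that LDLM_LRU_FLAG_LRUR_NO_WAIT is a separate bit that combines the
 * semantics of LRUR and NO_WAIT; it is not the bitwise OR of those two flags.
 */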

int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
		    enum ldlm_cancel_flags sync, int flags);
int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
			  struct list_head *cancels, int count, int max,
			  enum ldlm_cancel_flags cancel_flags, int flags);
extern unsigned int ldlm_enqueue_min;
extern unsigned int ldlm_cancel_unused_locks_before_replay;

/* ldlm_resource.c */
int ldlm_resource_putref_locked(struct ldlm_resource *res);

/* ldlm_lock.c */

struct ldlm_cb_set_arg {
	struct ptlrpc_request_set *set;
	int type; /* LDLM_{CP,BL,GL}_CALLBACK */
	atomic_t restart;
	struct list_head *list;
	union ldlm_gl_desc *gl_desc; /* glimpse AST descriptor */
};

enum ldlm_desc_ast_t {
	LDLM_WORK_BL_AST,
	LDLM_WORK_CP_AST,
	LDLM_WORK_REVOKE_AST,
	LDLM_WORK_GL_AST
};

void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
		  enum req_location loc, void *data, int size);
struct ldlm_lock *
ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
		 enum ldlm_type type, enum ldlm_mode mode,
		 const struct ldlm_callback_suite *cbs,
		 void *data, __u32 lvb_len, enum lvb_type lvb_type);
enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
				  void *cookie, __u64 *flags);
void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode);
void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode);
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
		      enum ldlm_desc_ast_t ast_type);
int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
#define ldlm_lock_remove_from_lru(lock) ldlm_lock_remove_from_lru_check(lock, 0)
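/*
 * Sketch of the intended semantics (assumed; not spelled out in this header):
 * ldlm_lock_remove_from_lru_check() removes the lock from the namespace LRU
 * only if the lock's recorded last-use time still matches last_use, so the
 * ldlm_lock_remove_from_lru() wrapper passes 0 to skip that check and remove
 * the lock unconditionally.
 */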
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);

/* ldlm_lockd.c */
int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
			   struct ldlm_lock *lock);
int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
			   struct ldlm_lock_desc *ld,
			   struct list_head *cancels, int count,
			   enum ldlm_cancel_flags cancel_flags);
int ldlm_bl_thread_wakeup(void);

void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
			     struct ldlm_lock_desc *ld, struct ldlm_lock *lock);

extern struct kmem_cache *ldlm_resource_slab;
extern struct kset *ldlm_ns_kset;

/* ldlm_lockd.c & ldlm_lock.c */
extern struct kmem_cache *ldlm_lock_slab;

/* ldlm_extent.c */
void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
void ldlm_extent_unlink_lock(struct ldlm_lock *lock);

/* l_lock.c */
void l_check_ns_lock(struct ldlm_namespace *ns);
void l_check_no_ns_lock(struct ldlm_namespace *ns);

extern struct dentry *ldlm_svc_debugfs_dir;

struct ldlm_state {
	struct ptlrpc_service *ldlm_cb_service;
	struct ptlrpc_service *ldlm_cancel_service;
	struct ptlrpc_client *ldlm_client;
	struct ptlrpc_connection *ldlm_server_conn;
	struct ldlm_bl_pool *ldlm_bl_pool;
};

/* ldlm_pool.c */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl);

/* interval tree, for LDLM_EXTENT. */
extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */
struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l);
struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock);
void ldlm_interval_free(struct ldlm_interval *node);
/* this function must be called with res lock held */
static inline struct ldlm_extent *
ldlm_interval_extent(struct ldlm_interval *node)
{
	struct ldlm_lock *lock;

	LASSERT(!list_empty(&node->li_group));

	lock = list_entry(node->li_group.next, struct ldlm_lock, l_sl_policy);
	return &lock->l_policy_data.l_extent;
}

int ldlm_init(void);
void ldlm_exit(void);

enum ldlm_policy_res {
	LDLM_POLICY_CANCEL_LOCK,
	LDLM_POLICY_KEEP_LOCK,
	LDLM_POLICY_SKIP_LOCK
};
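
/*
 * ldlm_policy_res is the verdict returned by an LRU cancellation policy while
 * ldlm_cancel_lru_local() scans the LRU list: cancel the lock, keep it, or
 * skip it and move on to the next lock.  The exact stop/continue behaviour is
 * defined by the policy callers in ldlm_request.c.
 */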

#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v)
#define LDLM_POOL_SYSFS_SET_u64(a, b) { a = b; }
#define LDLM_POOL_SYSFS_PRINT_atomic(v) sprintf(buf, "%d\n", atomic_read(&v))
#define LDLM_POOL_SYSFS_SET_atomic(a, b) atomic_set(&a, b)

#define LDLM_POOL_SYSFS_READER_SHOW(var, type) \
static ssize_t var##_show(struct kobject *kobj, \
			  struct attribute *attr, \
			  char *buf) \
{ \
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
					    pl_kobj); \
	type tmp; \
 \
	spin_lock(&pl->pl_lock); \
	tmp = pl->pl_##var; \
	spin_unlock(&pl->pl_lock); \
 \
	return LDLM_POOL_SYSFS_PRINT_##type(tmp); \
} \
struct __##var##__dummy_read {; } /* semicolon catcher */

#define LDLM_POOL_SYSFS_WRITER_STORE(var, type) \
static ssize_t var##_store(struct kobject *kobj, \
			   struct attribute *attr, \
			   const char *buffer, \
			   size_t count) \
{ \
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
					    pl_kobj); \
	unsigned long tmp; \
	int rc; \
 \
	rc = kstrtoul(buffer, 10, &tmp); \
	if (rc < 0) { \
		return rc; \
	} \
 \
	spin_lock(&pl->pl_lock); \
	LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \
	spin_unlock(&pl->pl_lock); \
 \
	return count; \
} \
struct __##var##__dummy_write {; } /* semicolon catcher */

#define LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(var, type) \
static ssize_t var##_show(struct kobject *kobj, \
			  struct attribute *attr, \
			  char *buf) \
{ \
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
					    pl_kobj); \
 \
	return LDLM_POOL_SYSFS_PRINT_##type(pl->pl_##var); \
} \
struct __##var##__dummy_read {; } /* semicolon catcher */

#define LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(var, type) \
static ssize_t var##_store(struct kobject *kobj, \
			   struct attribute *attr, \
			   const char *buffer, \
			   size_t count) \
{ \
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
					    pl_kobj); \
	unsigned long tmp; \
	int rc; \
 \
	rc = kstrtoul(buffer, 10, &tmp); \
	if (rc < 0) { \
		return rc; \
	} \
 \
	LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \
 \
	return count; \
} \
struct __##var##__dummy_write {; } /* semicolon catcher */
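
/*
 * Illustrative expansion of the macros above (a sketch; the real attribute
 * definitions live in ldlm_pool.c and may differ in detail).  For a pool
 * member pl_limit of type atomic_t, the pair
 *
 *	LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
 *	LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
 *
 * generates limit_show()/limit_store() callbacks that access pl->pl_limit
 * with atomic_read()/atomic_set(), ready to be wired into sysfs attributes
 * of the ldlm_pool kobject (pl_kobj).
 */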

static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
{
	int ret = 0;

	lock_res_and_lock(lock);
	if ((lock->l_req_mode == lock->l_granted_mode) &&
	    !ldlm_is_cp_reqd(lock))
		ret = 1;
	else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
		ret = 1;
	unlock_res_and_lock(lock);

	return ret;
}
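
/*
 * is_granted_or_cancelled() is the usual wake-up predicate when waiting for
 * an enqueue to complete: it returns true once the lock is granted with no
 * completion AST pending, or once the lock has failed or been cancelled.
 * The waiting itself is done by the callers (e.g. the completion AST path);
 * this header only provides the check.
 */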

typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *,
					    union ldlm_policy_data *);

typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *,
					    union ldlm_wire_policy_data *);

void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
				     union ldlm_policy_data *lpolicy);
void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
				     union ldlm_wire_policy_data *wpolicy);
void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
				     union ldlm_policy_data *lpolicy);
void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
				     union ldlm_wire_policy_data *wpolicy);
void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
				      union ldlm_policy_data *lpolicy);
void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
				      union ldlm_wire_policy_data *wpolicy);
void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
				     union ldlm_policy_data *lpolicy);
void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
				     union ldlm_wire_policy_data *wpolicy);
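
/*
 * The converters above translate between the on-the-wire representation
 * (union ldlm_wire_policy_data) and the in-memory one (union ldlm_policy_data)
 * for each lock type: plain, inodebits, extent and flock.  The two
 * function-pointer typedefs ldlm_policy_wire_to_local_t and
 * ldlm_policy_local_to_wire_t let callers dispatch on the lock's resource
 * type, presumably via per-type tables in ldlm_lock.c.
 */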