/* drivers/staging/lustre/lustre/ldlm/ldlm_internal.h */
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22 /*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2015, Intel Corporation.
27 */
28 /*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 */
32
#define MAX_STRING_SIZE 128

/* Per-side (server/client) namespace bookkeeping; accessed through the
 * ldlm_namespace_* inline helpers below.
 */
extern int ldlm_srv_namespace_nr;		/* count of server-side namespaces */
extern int ldlm_cli_namespace_nr;		/* count of client-side namespaces */
extern struct mutex ldlm_srv_namespace_lock;	/* taken via ldlm_namespace_lock(SERVER) */
extern struct list_head ldlm_srv_namespace_list;
extern struct mutex ldlm_cli_namespace_lock;	/* taken via ldlm_namespace_lock(CLIENT) */
extern struct list_head ldlm_cli_active_namespace_list;
41
42 static inline int ldlm_namespace_nr_read(enum ldlm_side client)
43 {
44 return client == LDLM_NAMESPACE_SERVER ?
45 ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
46 }
47
48 static inline void ldlm_namespace_nr_inc(enum ldlm_side client)
49 {
50 if (client == LDLM_NAMESPACE_SERVER)
51 ldlm_srv_namespace_nr++;
52 else
53 ldlm_cli_namespace_nr++;
54 }
55
56 static inline void ldlm_namespace_nr_dec(enum ldlm_side client)
57 {
58 if (client == LDLM_NAMESPACE_SERVER)
59 ldlm_srv_namespace_nr--;
60 else
61 ldlm_cli_namespace_nr--;
62 }
63
64 static inline struct list_head *ldlm_namespace_list(enum ldlm_side client)
65 {
66 return client == LDLM_NAMESPACE_SERVER ?
67 &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
68 }
69
70 static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client)
71 {
72 return client == LDLM_NAMESPACE_SERVER ?
73 &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
74 }
75
76 /* ns_bref is the number of resources in this namespace */
77 static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
78 {
79 return atomic_read(&ns->ns_bref) == 0;
80 }
81
82 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
83 enum ldlm_side client);
84 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
85 enum ldlm_side client);
86 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client);
87
88 /* ldlm_request.c */
/*
 * LRU cancellation flags, passed in the "flags" argument of
 * ldlm_cancel_lru()/ldlm_cancel_lru_local() to select which locks on
 * the namespace LRU are candidates for cancellation.
 */
enum {
	LDLM_LRU_FLAG_AGED = BIT(0), /* Cancel aged locks (non lru resize). */
	LDLM_LRU_FLAG_PASSED = BIT(1), /* Cancel passed number of locks. */
	LDLM_LRU_FLAG_SHRINK = BIT(2), /* Cancel locks from shrinker. */
	LDLM_LRU_FLAG_LRUR = BIT(3), /* Cancel locks from lru resize. */
	LDLM_LRU_FLAG_NO_WAIT = BIT(4), /* Cancel locks w/o blocking (neither
					 * sending nor waiting for any rpcs)
					 */
	LDLM_LRU_FLAG_LRUR_NO_WAIT = BIT(5), /* LRUR + NO_WAIT */
};
100
101 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
102 enum ldlm_cancel_flags sync, int flags);
103 int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
104 struct list_head *cancels, int count, int max,
105 enum ldlm_cancel_flags cancel_flags, int flags);
106 extern unsigned int ldlm_enqueue_min;
107 extern unsigned int ldlm_cancel_unused_locks_before_replay;
108
109 /* ldlm_resource.c */
110 int ldlm_resource_putref_locked(struct ldlm_resource *res);
111
112 /* ldlm_lock.c */
113
/*
 * Shared argument block for sending a batch of AST callbacks
 * (presumably filled in by the ldlm_run_ast_work() path -- confirm
 * against ldlm_lock.c).
 */
struct ldlm_cb_set_arg {
	struct ptlrpc_request_set *set;	/* RPC set the callback requests join */
	int type; /* LDLM_{CP,BL,GL}_CALLBACK */
	atomic_t restart;	/* NOTE(review): looks like a "restart the batch" flag -- confirm */
	struct list_head *list;	/* locks whose ASTs are being processed */
	union ldlm_gl_desc *gl_desc; /* glimpse AST descriptor */
};
121
/* Kind of AST work handed to ldlm_run_ast_work(). */
enum ldlm_desc_ast_t {
	LDLM_WORK_BL_AST,	/* blocking AST */
	LDLM_WORK_CP_AST,	/* completion AST */
	LDLM_WORK_REVOKE_AST,	/* revoke AST */
	LDLM_WORK_GL_AST	/* glimpse AST */
};
128
129 void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
130 int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
131 enum req_location loc, void *data, int size);
132 struct ldlm_lock *
133 ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *id,
134 enum ldlm_type type, enum ldlm_mode mode,
135 const struct ldlm_callback_suite *cbs,
136 void *data, __u32 lvb_len, enum lvb_type lvb_type);
137 enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
138 struct ldlm_lock **lock, void *cookie,
139 __u64 *flags);
140 void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode);
141 void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
142 enum ldlm_mode mode);
143 void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode);
144 void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
145 enum ldlm_mode mode);
146 int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
147 enum ldlm_desc_ast_t ast_type);
148 int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
149 #define ldlm_lock_remove_from_lru(lock) ldlm_lock_remove_from_lru_check(lock, 0)
150 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
151 void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
152
153 /* ldlm_lockd.c */
154 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
155 struct ldlm_lock *lock);
156 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
157 struct ldlm_lock_desc *ld,
158 struct list_head *cancels, int count,
159 enum ldlm_cancel_flags cancel_flags);
160 int ldlm_bl_thread_wakeup(void);
161
162 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
163 struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
164
165 extern struct kmem_cache *ldlm_resource_slab;
166 extern struct kset *ldlm_ns_kset;
167
168 /* ldlm_lockd.c & ldlm_lock.c */
169 extern struct kmem_cache *ldlm_lock_slab;
170
171 /* ldlm_extent.c */
172 void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
173 void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
174
175 /* l_lock.c */
176 void l_check_ns_lock(struct ldlm_namespace *ns);
177 void l_check_no_ns_lock(struct ldlm_namespace *ns);
178
179 extern struct dentry *ldlm_svc_debugfs_dir;
180
/*
 * Handles to the ptlrpc services/client the LDLM module owns
 * (presumably set up in ldlm_init() and torn down in ldlm_exit() --
 * confirm against ldlm_lockd.c).
 */
struct ldlm_state {
	struct ptlrpc_service *ldlm_cb_service;		/* callback service */
	struct ptlrpc_service *ldlm_cancel_service;	/* cancel service */
	struct ptlrpc_client *ldlm_client;		/* RPC client handle */
	struct ptlrpc_connection *ldlm_server_conn;	/* connection to server */
	struct ldlm_bl_pool *ldlm_bl_pool;		/* blocking-thread pool */
};
188
189 /* ldlm_pool.c */
190 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
191 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
192 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl);
193
194 /* interval tree, for LDLM_EXTENT. */
195 extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */
196 struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l);
197 struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock);
198 void ldlm_interval_free(struct ldlm_interval *node);
/*
 * Return the extent covered by interval-tree node @node, taken from the
 * first lock on the node's li_group list (all locks grouped on one node
 * share the extent, so the first entry suffices).
 * This function must be called with the resource lock held.
 */
static inline struct ldlm_extent *
ldlm_interval_extent(struct ldlm_interval *node)
{
	struct ldlm_lock *lock;

	/* a node with no locks attached would make the list_entry below bogus */
	LASSERT(!list_empty(&node->li_group));

	lock = list_entry(node->li_group.next, struct ldlm_lock, l_sl_policy);
	return &lock->l_policy_data.l_extent;
}
210
211 int ldlm_init(void);
212 void ldlm_exit(void);
213
/*
 * Verdict for a candidate lock, presumably returned by the LRU
 * cancellation policies used with ldlm_cancel_lru*() -- confirm against
 * ldlm_request.c.
 */
enum ldlm_policy_res {
	LDLM_POLICY_CANCEL_LOCK,	/* cancel this lock */
	LDLM_POLICY_KEEP_LOCK,		/* keep this lock */
	LDLM_POLICY_SKIP_LOCK		/* skip this lock */
};
219
/*
 * Type-dispatch helpers for the sysfs show/store generator macros
 * below: LDLM_POOL_SYSFS_PRINT_<type> formats a pool field into "buf"
 * (a name supplied by the generated _show function), and
 * LDLM_POOL_SYSFS_SET_<type> assigns a parsed value to a pool field.
 */
#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v)
#define LDLM_POOL_SYSFS_SET_u64(a, b) { a = b; }
#define LDLM_POOL_SYSFS_PRINT_atomic(v) sprintf(buf, "%d\n", atomic_read(&v))
#define LDLM_POOL_SYSFS_SET_atomic(a, b) atomic_set(&a, b)
226
/*
 * Generate <var>_show(): snapshot pl_<var> under pl_lock and format it
 * with LDLM_POOL_SYSFS_PRINT_<type>.  The trailing dummy struct exists
 * only to swallow the semicolon at the macro use site.
 */
#define LDLM_POOL_SYSFS_READER_SHOW(var, type)				    \
	static ssize_t var##_show(struct kobject *kobj,			    \
				  struct attribute *attr,		    \
				  char *buf)				    \
	{								    \
		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
						    pl_kobj);		    \
		type tmp;						    \
									    \
		spin_lock(&pl->pl_lock);				    \
		tmp = pl->pl_##var;					    \
		spin_unlock(&pl->pl_lock);				    \
									    \
		return LDLM_POOL_SYSFS_PRINT_##type(tmp);		    \
	}								    \
	struct __##var##__dummy_read {; } /* semicolon catcher */
243
/*
 * Generate <var>_store(): parse the user buffer as an unsigned decimal
 * with kstrtoul() and assign the result to pl_<var> under pl_lock via
 * LDLM_POOL_SYSFS_SET_<type>.  Returns the consumed byte count, or the
 * negative errno from kstrtoul() on a parse failure.  The trailing
 * dummy struct swallows the semicolon at the macro use site.
 */
#define LDLM_POOL_SYSFS_WRITER_STORE(var, type)				    \
	static ssize_t var##_store(struct kobject *kobj,		    \
				   struct attribute *attr,		    \
				   const char *buffer,			    \
				   size_t count)			    \
	{								    \
		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
						    pl_kobj);		    \
		unsigned long tmp;					    \
		int rc;							    \
									    \
		rc = kstrtoul(buffer, 10, &tmp);			    \
		if (rc < 0)						    \
			return rc;					    \
									    \
		spin_lock(&pl->pl_lock);				    \
		LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp);		    \
		spin_unlock(&pl->pl_lock);				    \
									    \
		return count;						    \
	}								    \
	struct __##var##__dummy_write {; } /* semicolon catcher */
267
/*
 * Generate <var>_show() for fields that can be read without taking
 * pl_lock; pl_<var> is formatted directly with
 * LDLM_POOL_SYSFS_PRINT_<type>.  The trailing dummy struct swallows the
 * semicolon at the macro use site.
 */
#define LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(var, type)			    \
	static ssize_t var##_show(struct kobject *kobj,			    \
				  struct attribute *attr,		    \
				  char *buf)				    \
	{								    \
		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
						    pl_kobj);		    \
									    \
		return LDLM_POOL_SYSFS_PRINT_##type(pl->pl_##var);	    \
	}								    \
	struct __##var##__dummy_read {; } /* semicolon catcher */
279
/*
 * Generate <var>_store() for fields that can be written without taking
 * pl_lock: parse the user buffer with kstrtoul() and assign the result
 * to pl_<var> via LDLM_POOL_SYSFS_SET_<type>.  Returns the consumed
 * byte count, or the negative errno from kstrtoul() on a parse failure.
 * The trailing dummy struct swallows the semicolon at the macro use
 * site.
 */
#define LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(var, type)			    \
	static ssize_t var##_store(struct kobject *kobj,		    \
				   struct attribute *attr,		    \
				   const char *buffer,			    \
				   size_t count)			    \
	{								    \
		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
						    pl_kobj);		    \
		unsigned long tmp;					    \
		int rc;							    \
									    \
		rc = kstrtoul(buffer, 10, &tmp);			    \
		if (rc < 0)						    \
			return rc;					    \
									    \
		LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp);		    \
									    \
		return count;						    \
	}								    \
	struct __##var##__dummy_write {; } /* semicolon catcher */
301
302 static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
303 {
304 int ret = 0;
305
306 lock_res_and_lock(lock);
307 if ((lock->l_req_mode == lock->l_granted_mode) &&
308 !ldlm_is_cp_reqd(lock))
309 ret = 1;
310 else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
311 ret = 1;
312 unlock_res_and_lock(lock);
313
314 return ret;
315 }
316
317 typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *,
318 union ldlm_policy_data *);
319
320 typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *,
321 union ldlm_wire_policy_data *);
322
323 void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
324 union ldlm_policy_data *lpolicy);
325 void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
326 union ldlm_wire_policy_data *wpolicy);
327 void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
328 union ldlm_policy_data *lpolicy);
329 void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
330 union ldlm_wire_policy_data *wpolicy);
331 void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
332 union ldlm_policy_data *lpolicy);
333 void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
334 union ldlm_wire_policy_data *wpolicy);
335 void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
336 union ldlm_policy_data *lpolicy);
337 void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
338 union ldlm_wire_policy_data *wpolicy);