1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 */
36
37 #ifndef LLITE_INTERNAL_H
38 #define LLITE_INTERNAL_H
39 #include <lustre_debug.h>
40 #include <lustre_ver.h>
41 #include <lustre_disk.h> /* for s2sbi */
42 #include <lustre_eacl.h>
43
44 /* for struct cl_lock_descr and struct cl_io */
45 #include <cl_object.h>
46 #include <lclient.h>
47 #include <lustre_mdc.h>
48 #include <linux/lustre_intent.h>
49 #include <linux/compat.h>
50 #include <linux/posix_acl_xattr.h>
51
52 #ifndef FMODE_EXEC
53 #define FMODE_EXEC 0
54 #endif
55
56 #ifndef VM_FAULT_RETRY
57 #define VM_FAULT_RETRY 0
58 #endif
59
60 /* Kernel 3.1 kills LOOKUP_CONTINUE; LOOKUP_PARENT is equivalent to it.
61 * See kernel commit 49084c3bb2055c401f3493c13edae14d49128ca0. */
62 #ifndef LOOKUP_CONTINUE
63 #define LOOKUP_CONTINUE LOOKUP_PARENT
64 #endif
65
66 /** Only used on client-side for indicating the tail of dir hash/offset. */
67 #define LL_DIR_END_OFF 0x7fffffffffffffffULL
68 #define LL_DIR_END_OFF_32BIT 0x7fffffffUL
69
70 #define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
71 #define LUSTRE_FPRIVATE(file) ((file)->private_data)
72
73 struct ll_dentry_data {
74 struct lookup_intent *lld_it;
75 unsigned int lld_sa_generation;
76 unsigned int lld_invalid:1;
77 struct rcu_head lld_rcu_head;
78 };
79
80 #define ll_d2d(de) ((struct ll_dentry_data*)((de)->d_fsdata))
81
82 #define LLI_INODE_MAGIC 0x111d0de5
83 #define LLI_INODE_DEAD 0xdeadd00d
84
85 /* remote client permission cache */
86 #define REMOTE_PERM_HASHSIZE 16
87
88 struct ll_getname_data {
89 struct dir_context ctx;
90 char *lgd_name; /* points to a buffer with NAME_MAX+1 size */
91 struct lu_fid lgd_fid; /* target fid we are looking for */
92 int lgd_found; /* inode matched? */
93 };
94
95 /* llite setxid/access permission for user on remote client */
96 struct ll_remote_perm {
97 struct hlist_node lrp_list;
98 uid_t lrp_uid;
99 gid_t lrp_gid;
100 uid_t lrp_fsuid;
101 gid_t lrp_fsgid;
102 int lrp_access_perm; /* MAY_READ/WRITE/EXEC; this
103 is the access permission for
104 lrp_fsuid/lrp_fsgid. */
105 };
106
107 enum lli_flags {
108 /* MDS has an authority for the Size-on-MDS attributes. */
109 LLIF_MDS_SIZE_LOCK = (1 << 0),
110 /* Epoch close is postponed. */
111 LLIF_EPOCH_PENDING = (1 << 1),
112 /* DONE WRITING is allowed. */
113 LLIF_DONE_WRITING = (1 << 2),
114 /* Size-on-MDS attributes have changed. An attribute update needs to
115 * be sent to the MDS. */
116 LLIF_SOM_DIRTY = (1 << 3),
117 /* File data is modified. */
118 LLIF_DATA_MODIFIED = (1 << 4),
119 /* File is being restored */
120 LLIF_FILE_RESTORING = (1 << 5),
121 /* Xattr cache is attached to the file */
122 LLIF_XATTR_CACHE = (1 << 6),
123 };
124
125 struct ll_inode_info {
126 __u32 lli_inode_magic;
127 __u32 lli_flags;
128 __u64 lli_ioepoch;
129
130 spinlock_t lli_lock;
131 struct posix_acl *lli_posix_acl;
132
133 struct hlist_head *lli_remote_perms;
134 struct mutex lli_rmtperm_mutex;
135
136 /* identifying fields for both metadata and data stacks. */
137 struct lu_fid lli_fid;
138 /* Parent fid for accessing default stripe data on parent directory
139 * for allocating OST objects after a mknod() and later open-by-FID. */
140 struct lu_fid lli_pfid;
141
142 struct list_head lli_close_list;
143 struct list_head lli_oss_capas;
144 /* open count, currently used by capability only; indicates whether
145 * the capability needs renewal */
146 atomic_t lli_open_count;
147 struct obd_capa *lli_mds_capa;
148 cfs_time_t lli_rmtperm_time;
149
150 /* handle is to be sent to MDS later on done_writing and setattr.
151 * Open handle data are needed for the recovery to reconstruct
152 * the inode state on the MDS. XXX: recovery is not ready yet. */
153 struct obd_client_handle *lli_pending_och;
154
155 /* We need all three because every inode may be opened in different
156 * modes */
157 struct obd_client_handle *lli_mds_read_och;
158 struct obd_client_handle *lli_mds_write_och;
159 struct obd_client_handle *lli_mds_exec_och;
160 __u64 lli_open_fd_read_count;
161 __u64 lli_open_fd_write_count;
162 __u64 lli_open_fd_exec_count;
163 /* Protects access to och pointers and their usage counters */
164 struct mutex lli_och_mutex;
165
166 struct inode lli_vfs_inode;
167
168 /* the most recent timestamps obtained from mds */
169 struct ost_lvb lli_lvb;
170 spinlock_t lli_agl_lock;
171
172 /* Try to keep the d::members and f::members aligned. Before using
173 * these members, make clear whether the inode is a directory or not. */
174 union {
175 /* for directory */
176 struct {
177 /* serialize normal readdir and statahead-readdir. */
178 struct mutex d_readdir_mutex;
179
180 /* metadata statahead */
181 /* since parent and child threads can share the same @file
182 * struct, "opendir_key" is the token used at dir close to
183 * decide, when the parent exits before the child, who should
184 * clean up the dir readahead. */
185 void *d_opendir_key;
186 struct ll_statahead_info *d_sai;
187 /* protect statahead stuff. */
188 spinlock_t d_sa_lock;
189 /* "opendir_pid" is the token when lookup/revalid
190 * -- I am the owner of dir statahead. */
191 pid_t d_opendir_pid;
192 } d;
193
194 #define lli_readdir_mutex u.d.d_readdir_mutex
195 #define lli_opendir_key u.d.d_opendir_key
196 #define lli_sai u.d.d_sai
197 #define lli_sa_lock u.d.d_sa_lock
198 #define lli_opendir_pid u.d.d_opendir_pid
199
200 /* for non-directory */
201 struct {
202 struct mutex f_size_mutex;
203 char *f_symlink_name;
204 __u64 f_maxbytes;
205 /*
206 * struct rw_semaphore {
207 * signed long count; // align d.d_def_acl
208 * spinlock_t wait_lock; // align d.d_sa_lock
209 * struct list_head wait_list;
210 * }
211 */
212 struct rw_semaphore f_trunc_sem;
213 struct mutex f_write_mutex;
214
215 struct rw_semaphore f_glimpse_sem;
216 cfs_time_t f_glimpse_time;
217 struct list_head f_agl_list;
218 __u64 f_agl_index;
219
220 /* for writepage() only to communicate to fsync */
221 int f_async_rc;
222
223 /*
224 * whenever a process tries to read/write the file, the
225 * jobid of the process will be saved here, and it'll
226 * be packed into the write RPC when flushed later.
227 *
228 * So the read/write statistics per jobid will not be
229 * accurate if the file is shared by different jobs.
230 */
231 char f_jobid[JOBSTATS_JOBID_SIZE];
232 } f;
233
234 #define lli_size_mutex u.f.f_size_mutex
235 #define lli_symlink_name u.f.f_symlink_name
236 #define lli_maxbytes u.f.f_maxbytes
237 #define lli_trunc_sem u.f.f_trunc_sem
238 #define lli_write_mutex u.f.f_write_mutex
239 #define lli_glimpse_sem u.f.f_glimpse_sem
240 #define lli_glimpse_time u.f.f_glimpse_time
241 #define lli_agl_list u.f.f_agl_list
242 #define lli_agl_index u.f.f_agl_index
243 #define lli_async_rc u.f.f_async_rc
244 #define lli_jobid u.f.f_jobid
245
246 } u;
247
248 /* XXX: Although the following frequently used members may only apply to
249 * non-directory objects, it is time-wasting to check whether the
250 * object is a directory or not before using them. On the other
251 * hand, since sizeof(f) > sizeof(d) currently, moving those members
252 * into u.f would not reduce the size of "ll_inode_info".
253 * So keep them outside the union.
254 *
255 * In the future, if more members are added only for directories,
256 * some of the following members can be moved into u.f.
257 */
258 bool lli_has_smd;
259 struct cl_object *lli_clob;
260
261 /* mutex to request the layout lock exclusively. */
262 struct mutex lli_layout_mutex;
263 /* Layout version, protected by lli_layout_lock */
264 __u32 lli_layout_gen;
265 spinlock_t lli_layout_lock;
266
267 struct rw_semaphore lli_xattrs_list_rwsem;
268 struct mutex lli_xattrs_enq_lock;
269 struct list_head lli_xattrs;/* ll_xattr_entry->xe_list */
270 };
271
272 static inline __u32 ll_layout_version_get(struct ll_inode_info *lli)
273 {
274 __u32 gen;
275
276 spin_lock(&lli->lli_layout_lock);
277 gen = lli->lli_layout_gen;
278 spin_unlock(&lli->lli_layout_lock);
279
280 return gen;
281 }
282
283 static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen)
284 {
285 spin_lock(&lli->lli_layout_lock);
286 lli->lli_layout_gen = gen;
287 spin_unlock(&lli->lli_layout_lock);
288 }
289
290 int ll_xattr_cache_destroy(struct inode *inode);
291
292 int ll_xattr_cache_get(struct inode *inode,
293 const char *name,
294 char *buffer,
295 size_t size,
296 __u64 valid);
297
298 /*
299 * Locking to guarantee consistency of non-atomic updates to long long i_size,
300 * consistency between file size and KMS.
301 *
302 * Implemented by ->lli_size_mutex and ->lsm_lock, nested in that order.
303 */
304
305 void ll_inode_size_lock(struct inode *inode);
306 void ll_inode_size_unlock(struct inode *inode);
307
308 // FIXME: replace the name of this with LL_I to conform to kernel stuff
309 // static inline struct ll_inode_info *LL_I(struct inode *inode)
310 static inline struct ll_inode_info *ll_i2info(struct inode *inode)
311 {
312 return container_of(inode, struct ll_inode_info, lli_vfs_inode);
313 }
314
315 /* default to about 40 MB of readahead on a given system. That much tied
316 * up in 512 KB readahead requests serviced at 40 ms each is about 1 GB/s. */
317 #define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
318
319 /* default to read-ahead full files smaller than 2MB on the second read */
320 #define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
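/*
 * Worked example for the two defaults above (illustration only, assuming
 * 4 KB pages, i.e. PAGE_CACHE_SHIFT == 12):
 *   SBI_DEFAULT_READAHEAD_MAX       = 40 << 8 = 10240 pages = 40 MB
 *   SBI_DEFAULT_READAHEAD_WHOLE_MAX =  2 << 8 =   512 pages =  2 MB
 */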
321
322 enum ra_stat {
323 RA_STAT_HIT = 0,
324 RA_STAT_MISS,
325 RA_STAT_DISTANT_READPAGE,
326 RA_STAT_MISS_IN_WINDOW,
327 RA_STAT_FAILED_GRAB_PAGE,
328 RA_STAT_FAILED_MATCH,
329 RA_STAT_DISCARDED,
330 RA_STAT_ZERO_LEN,
331 RA_STAT_ZERO_WINDOW,
332 RA_STAT_EOF,
333 RA_STAT_MAX_IN_FLIGHT,
334 RA_STAT_WRONG_GRAB_PAGE,
335 _NR_RA_STAT,
336 };
337
338 struct ll_ra_info {
339 atomic_t ra_cur_pages;
340 unsigned long ra_max_pages;
341 unsigned long ra_max_pages_per_file;
342 unsigned long ra_max_read_ahead_whole_pages;
343 };
344
345 /* ra_io_arg is filled at the beginning of ll_readahead with ras_lock
346 * held; the following ll_read_ahead_pages will then read RA pages
347 * according to this arg. All the items in this structure are
348 * counted by page index.
349 */
350 struct ra_io_arg {
351 unsigned long ria_start; /* start offset of read-ahead*/
352 unsigned long ria_end; /* end offset of read-ahead*/
353 /* If a stride read pattern is detected, ria_stoff is the offset where
354 * the stride read starts. Note: for normal read-ahead, the
355 * value here is meaningless and will not be accessed. */
356 pgoff_t ria_stoff;
357 /* ria_length and ria_pages are the stride length and the number of
358 * pages per stride in stride I/O mode. They are also used to check
359 * whether read-ahead pages are part of a stride I/O read-ahead. */
360 unsigned long ria_length;
361 unsigned long ria_pages;
362 };
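/*
 * Illustration only (numbers made up): for a stride pattern that reads
 * 4 pages and then skips 12, ria_stoff is the index of the first page of
 * the pattern, ria_length = 4 + 12 = 16 and ria_pages = 4; compare the
 * ras_stride_* description in struct ll_readahead_state below.
 */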
363
364 /* LL_HIST_MAX=32 causes an overflow */
365 #define LL_HIST_MAX 28
366 #define LL_HIST_START 12 /* buckets start at 2^12 = 4k */
367 #define LL_PROCESS_HIST_MAX 10
368 struct per_process_info {
369 pid_t pid;
370 struct obd_histogram pp_r_hist;
371 struct obd_histogram pp_w_hist;
372 };
373
374 /* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */
375 struct ll_rw_extents_info {
376 struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
377 };
378
379 #define LL_OFFSET_HIST_MAX 100
380 struct ll_rw_process_info {
381 pid_t rw_pid;
382 int rw_op;
383 loff_t rw_range_start;
384 loff_t rw_range_end;
385 loff_t rw_last_file_pos;
386 loff_t rw_offset;
387 size_t rw_smallest_extent;
388 size_t rw_largest_extent;
389 struct ll_file_data *rw_last_file;
390 };
391
392 enum stats_track_type {
393 STATS_TRACK_ALL = 0, /* track all processes */
394 STATS_TRACK_PID, /* track process with this pid */
395 STATS_TRACK_PPID, /* track processes with this ppid */
396 STATS_TRACK_GID, /* track processes with this gid */
397 STATS_TRACK_LAST,
398 };
399
400 /* flags for sbi->ll_flags */
401 #define LL_SBI_NOLCK 0x01 /* DLM locking disabled (directio-only) */
402 #define LL_SBI_CHECKSUM 0x02 /* checksum each page as it's written */
403 #define LL_SBI_FLOCK 0x04
404 #define LL_SBI_USER_XATTR 0x08 /* support user xattr */
405 #define LL_SBI_ACL 0x10 /* support ACL */
406 #define LL_SBI_RMT_CLIENT 0x40 /* remote client */
407 #define LL_SBI_MDS_CAPA 0x80 /* support mds capa */
408 #define LL_SBI_OSS_CAPA 0x100 /* support oss capa */
409 #define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */
410 #define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */
411 #define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */
412 #define LL_SBI_SOM_PREVIEW 0x1000 /* SOM preview mount option */
413 #define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. */
414 #define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */
415 #define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */
416 #define LL_SBI_VERBOSE 0x10000 /* verbose mount/umount */
417 #define LL_SBI_LAYOUT_LOCK 0x20000 /* layout lock support */
418 #define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
419 #define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
420
421 #define LL_SBI_FLAGS { \
422 "nolck", \
423 "checksum", \
424 "flock", \
425 "xattr", \
426 "acl", \
427 "???", \
428 "rmt_client", \
429 "mds_capa", \
430 "oss_capa", \
431 "flock", \
432 "lru_resize", \
433 "lazy_statfs", \
434 "som", \
435 "32bit_api", \
436 "64bit_hash", \
437 "agl", \
438 "verbose", \
439 "layout", \
440 "user_fid2path",\
441 "xattr", \
442 }
443
444 #define RCE_HASHES 32
445
446 struct rmtacl_ctl_entry {
447 struct list_head rce_list;
448 pid_t rce_key; /* hash key */
449 int rce_ops; /* acl operation type */
450 };
451
452 struct rmtacl_ctl_table {
453 spinlock_t rct_lock;
454 struct list_head rct_entries[RCE_HASHES];
455 };
456
457 #define EE_HASHES 32
458
459 struct eacl_table {
460 spinlock_t et_lock;
461 struct list_head et_entries[EE_HASHES];
462 };
463
464 struct ll_sb_info {
465 struct list_head ll_list;
466 /* this protects pglist and ra_info. It isn't safe to
467 * grab it from interrupt contexts */
468 spinlock_t ll_lock;
469 spinlock_t ll_pp_extent_lock; /* pp_extent entry*/
470 spinlock_t ll_process_lock; /* ll_rw_process_info */
471 struct obd_uuid ll_sb_uuid;
472 struct obd_export *ll_md_exp;
473 struct obd_export *ll_dt_exp;
474 struct proc_dir_entry* ll_proc_root;
475 struct lu_fid ll_root_fid; /* root object fid */
476
477 int ll_flags;
478 unsigned int ll_umounting:1,
479 ll_xattr_cache_enabled:1;
480 struct list_head ll_conn_chain; /* per-conn chain of SBs */
481 struct lustre_client_ocd ll_lco;
482
483 struct list_head ll_orphan_dentry_list; /*please don't ask -p*/
484 struct ll_close_queue *ll_lcq;
485
486 struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
487
488 struct cl_client_cache ll_cache;
489
490 struct lprocfs_stats *ll_ra_stats;
491
492 struct ll_ra_info ll_ra_info;
493 unsigned int ll_namelen;
494 struct file_operations *ll_fop;
495
496 /* =0 - hold lock over whole read/write
497 * >0 - max. chunk to be read/written w/o lock re-acquiring */
498 unsigned long ll_max_rw_chunk;
499 unsigned int ll_md_brw_size; /* used by readdir */
500
501 struct lu_site *ll_site;
502 struct cl_device *ll_cl;
503 /* Statistics */
504 struct ll_rw_extents_info ll_rw_extents_info;
505 int ll_extent_process_count;
506 struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX];
507 unsigned int ll_offset_process_count;
508 struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX];
509 unsigned int ll_rw_offset_entry_count;
510 int ll_stats_track_id;
511 enum stats_track_type ll_stats_track_type;
512 int ll_rw_stats_on;
513
514 /* metadata stat-ahead */
515 unsigned int ll_sa_max; /* max statahead RPCs */
516 atomic_t ll_sa_total; /* statahead thread started
517 * count */
518 atomic_t ll_sa_wrong; /* statahead thread stopped for
519 * low hit ratio */
520 atomic_t ll_agl_total; /* AGL thread started count */
521
522 dev_t ll_sdev_orig; /* save s_dev before assignment for
523 * clustered NFS */
524 struct rmtacl_ctl_table ll_rct;
525 struct eacl_table ll_et;
526 __kernel_fsid_t ll_fsid;
527 };
528
529 #define LL_DEFAULT_MAX_RW_CHUNK (32 * 1024 * 1024)
530
531 struct ll_ra_read {
532 pgoff_t lrr_start;
533 pgoff_t lrr_count;
534 struct task_struct *lrr_reader;
535 struct list_head lrr_linkage;
536 };
537
538 /*
539 * per file-descriptor read-ahead data.
540 */
541 struct ll_readahead_state {
542 spinlock_t ras_lock;
543 /*
544 * index of the last page that read(2) needed and that wasn't in the
545 * cache. Used by ras_update() to detect seeks.
546 *
547 * XXX nikita: if access seeks into cached region, Lustre doesn't see
548 * this.
549 */
550 unsigned long ras_last_readpage;
551 /*
552 * number of pages read after last read-ahead window reset. As window
553 * is reset on each seek, this is effectively a number of consecutive
554 * accesses. Maybe ->ras_accessed_in_window is a better name.
555 *
556 * XXX nikita: window is also reset (by ras_update()) when Lustre
557 * believes that memory pressure evicts read-ahead pages. In that
558 * case, it probably doesn't make sense to expand window to
559 * PTLRPC_MAX_BRW_PAGES on the third access.
560 */
561 unsigned long ras_consecutive_pages;
562 /*
563 * number of read requests after the last read-ahead window reset.
564 * As the window is reset on each seek, this is effectively the number
565 * of consecutive read requests and is used to trigger read-ahead.
566 */
567 unsigned long ras_consecutive_requests;
568 /*
569 * Parameters of current read-ahead window. Handled by
570 * ras_update(). On the initial access to the file or after a seek,
571 * window is reset to 0. After 3 consecutive accesses, window is
572 * expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by
573 * PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
574 */
575 unsigned long ras_window_start, ras_window_len;
576 /*
577 * Where next read-ahead should start at. This lies within read-ahead
578 * window. Read-ahead window is read in pieces rather than at once
579 * because: 1. lustre limits total number of pages under read-ahead by
580 * ->ra_max_pages (see ll_ra_count_get()), 2. client cannot read pages
581 * not covered by DLM lock.
582 */
583 unsigned long ras_next_readahead;
584 /*
585 * Total number of ll_file_read requests issued; reads originating
586 * from mmap are not counted in this total. This value is used to
587 * trigger full-file read-ahead after multiple reads to a small file.
588 */
589 unsigned long ras_requests;
590 /*
591 * Page index with respect to the current request; this value
592 * will not be accurate when dealing with reads issued via mmap.
593 */
594 unsigned long ras_request_index;
595 /*
596 * list of struct ll_ra_read's, one per read(2) call currently in
597 * progress against this file descriptor. Used by read-ahead code,
598 * protected by ->ras_lock.
599 */
600 struct list_head ras_read_beads;
601 /*
602 * The following 3 items are used for detecting the stride I/O
603 * mode.
604 * In stride I/O mode,
605 * ...............|-----data-----|****gap*****|--------|******|....
606 * offset |-stride_pages-|-stride_gap-|
607 * ras_stride_offset = offset;
608 * ras_stride_length = stride_pages + stride_gap;
609 * ras_stride_pages = stride_pages;
610 * Note: all these three items are counted by pages.
611 */
612 unsigned long ras_stride_length;
613 unsigned long ras_stride_pages;
614 pgoff_t ras_stride_offset;
615 /*
616 * number of consecutive stride requests; similar to
617 * ras_consecutive_requests, but used for stride I/O mode.
618 * Note: stride read-ahead is enabled only after more than 2
619 * consecutive stride requests have been detected.
620 */
621 unsigned long ras_consecutive_stride_requests;
622 };
623
624 extern struct kmem_cache *ll_file_data_slab;
625 struct lustre_handle;
626 struct ll_file_data {
627 struct ll_readahead_state fd_ras;
628 struct ccc_grouplock fd_grouplock;
629 __u64 lfd_pos;
630 __u32 fd_flags;
631 fmode_t fd_omode;
632 /* open handle if a lease exists for this file.
633 * lli->lli_och_mutex is borrowed to protect assignment */
634 struct obd_client_handle *fd_lease_och;
635 struct obd_client_handle *fd_och;
636 struct file *fd_file;
637 /* Indicates whether a failure needs to be reported on close.
638 * true: failure is already known, do not report it again.
639 * false: failure unknown, should be reported. */
640 bool fd_write_failed;
641 };
642
643 struct lov_stripe_md;
644
645 extern spinlock_t inode_lock;
646
647 extern struct proc_dir_entry *proc_lustre_fs_root;
648
649 static inline struct inode *ll_info2i(struct ll_inode_info *lli)
650 {
651 return &lli->lli_vfs_inode;
652 }
653
654 __u32 ll_i2suppgid(struct inode *i);
655 void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2);
656
657 static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
658 {
659 #if BITS_PER_LONG == 32
660 return 1;
661 #elif defined(CONFIG_COMPAT)
662 return unlikely(is_compat_task() || (sbi->ll_flags & LL_SBI_32BIT_API));
663 #else
664 return unlikely(sbi->ll_flags & LL_SBI_32BIT_API);
665 #endif
666 }
667
668 void ll_ra_read_in(struct file *f, struct ll_ra_read *rar);
669 void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar);
670 struct ll_ra_read *ll_ra_read_get(struct file *f);
671
672 /* llite/lproc_llite.c */
673 #ifdef LPROCFS
674 int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
675 struct super_block *sb, char *osc, char *mdc);
676 void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi);
677 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count);
678 void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars);
679 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
680 struct ll_file_data *file, loff_t pos,
681 size_t count, int rw);
682 #else
683 static inline int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
684 struct super_block *sb, char *osc, char *mdc){return 0;}
685 static inline void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi) {}
686 static inline
687 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count) {}
688 static inline void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
689 {
690 memset(lvars, 0, sizeof(*lvars));
691 }
692 static inline void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
693 struct ll_file_data *file, loff_t pos,
694 size_t count, int rw) {}
695 #endif
696
697
698 /* llite/dir.c */
699 void ll_release_page(struct page *page, int remove);
700 extern const struct file_operations ll_dir_operations;
701 extern const struct inode_operations ll_dir_inode_operations;
702 struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
703 struct ll_dir_chain *chain);
704 int ll_dir_read(struct inode *inode, struct dir_context *ctx);
705
706 int ll_get_mdt_idx(struct inode *inode);
707 /* llite/namei.c */
708 extern const struct inode_operations ll_special_inode_operations;
709
710 int ll_objects_destroy(struct ptlrpc_request *request,
711 struct inode *dir);
712 struct inode *ll_iget(struct super_block *sb, ino_t hash,
713 struct lustre_md *lic);
714 int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
715 void *data, int flag);
716 struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
717 int ll_rmdir_entry(struct inode *dir, char *name, int namelen);
718
719 /* llite/rw.c */
720 int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
721 int ll_commit_write(struct file *, struct page *, unsigned from, unsigned to);
722 int ll_writepage(struct page *page, struct writeback_control *wbc);
723 int ll_writepages(struct address_space *, struct writeback_control *wbc);
724 int ll_readpage(struct file *file, struct page *page);
725 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
726 int ll_readahead(const struct lu_env *env, struct cl_io *io,
727 struct ll_readahead_state *ras, struct address_space *mapping,
728 struct cl_page_list *queue, int flags);
729
730 #ifndef MS_HAS_NEW_AOPS
731 extern const struct address_space_operations ll_aops;
732 #else
733 extern const struct address_space_operations_ext ll_aops;
734 #endif
735
736 /* llite/file.c */
737 extern struct file_operations ll_file_operations;
738 extern struct file_operations ll_file_operations_flock;
739 extern struct file_operations ll_file_operations_noflock;
740 extern struct inode_operations ll_file_inode_operations;
741 extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
742 ldlm_mode_t l_req_mode);
743 extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
744 struct lustre_handle *lockh, __u64 flags,
745 ldlm_mode_t mode);
746 int ll_file_open(struct inode *inode, struct file *file);
747 int ll_file_release(struct inode *inode, struct file *file);
748 int ll_glimpse_ioctl(struct ll_sb_info *sbi,
749 struct lov_stripe_md *lsm, lstat_t *st);
750 void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch);
751 int ll_release_openhandle(struct dentry *, struct lookup_intent *);
752 int ll_md_real_close(struct inode *inode, fmode_t fmode);
753 void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
754 struct obd_client_handle **och, unsigned long flags);
755 void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data);
756 int ll_som_update(struct inode *inode, struct md_op_data *op_data);
757 int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
758 __u64 ioepoch, int sync);
759 void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
760 struct lustre_handle *fh);
761 int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
762 struct posix_acl *ll_get_acl(struct inode *inode, int type);
763
764 int ll_inode_permission(struct inode *inode, int mask);
765
766 int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
767 int flags, struct lov_user_md *lum,
768 int lum_size);
769 int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
770 struct lov_mds_md **lmm, int *lmm_size,
771 struct ptlrpc_request **request);
772 int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
773 int set_default);
774 int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
775 int *lmm_size, struct ptlrpc_request **request);
776 int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
777 int ll_merge_lvb(const struct lu_env *env, struct inode *inode);
778 int ll_fid2path(struct inode *inode, void *arg);
779 int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock);
780 int ll_hsm_release(struct inode *inode);
781
782 /* llite/dcache.c */
783
784 int ll_d_init(struct dentry *de);
785 extern const struct dentry_operations ll_d_ops;
786 void ll_intent_drop_lock(struct lookup_intent *);
787 void ll_intent_release(struct lookup_intent *);
788 void ll_invalidate_aliases(struct inode *);
789 void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
790 int ll_revalidate_it_finish(struct ptlrpc_request *request,
791 struct lookup_intent *it, struct dentry *de);
792
793 /* llite/llite_lib.c */
794 extern struct super_operations lustre_super_operations;
795
796 void ll_lli_init(struct ll_inode_info *lli);
797 int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
798 void ll_put_super(struct super_block *sb);
799 void ll_kill_super(struct super_block *sb);
800 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
801 void ll_clear_inode(struct inode *inode);
802 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import);
803 int ll_setattr(struct dentry *de, struct iattr *attr);
804 int ll_statfs(struct dentry *de, struct kstatfs *sfs);
805 int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
806 __u64 max_age, __u32 flags);
807 void ll_update_inode(struct inode *inode, struct lustre_md *md);
808 void ll_read_inode2(struct inode *inode, void *opaque);
809 void ll_delete_inode(struct inode *inode);
810 int ll_iocontrol(struct inode *inode, struct file *file,
811 unsigned int cmd, unsigned long arg);
812 int ll_flush_ctx(struct inode *inode);
813 void ll_umount_begin(struct super_block *sb);
814 int ll_remount_fs(struct super_block *sb, int *flags, char *data);
815 int ll_show_options(struct seq_file *seq, struct dentry *dentry);
816 void ll_dirty_page_discard_warn(struct page *page, int ioret);
817 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
818 struct super_block *, struct lookup_intent *);
819 void lustre_dump_dentry(struct dentry *, int recur);
820 int ll_obd_statfs(struct inode *inode, void *arg);
821 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
822 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
823 int ll_get_max_cookiesize(struct ll_sb_info *sbi, int *max_cookiesize);
824 int ll_get_default_cookiesize(struct ll_sb_info *sbi, int *default_cookiesize);
825 int ll_process_config(struct lustre_cfg *lcfg);
826 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
827 struct inode *i1, struct inode *i2,
828 const char *name, int namelen,
829 int mode, __u32 opc, void *data);
830 void ll_finish_md_op_data(struct md_op_data *op_data);
831 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg);
832 char *ll_get_fsname(struct super_block *sb, char *buf, int buflen);
833
834 /* llite/llite_nfs.c */
835 extern struct export_operations lustre_export_operations;
836 __u32 get_uuid2int(const char *name, int len);
837 void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid);
838 struct inode *search_inode_for_lustre(struct super_block *sb,
839 const struct lu_fid *fid);
840
841 /* llite/symlink.c */
842 extern struct inode_operations ll_fast_symlink_inode_operations;
843
844 /* llite/llite_close.c */
845 struct ll_close_queue {
846 spinlock_t lcq_lock;
847 struct list_head lcq_head;
848 wait_queue_head_t lcq_waitq;
849 struct completion lcq_comp;
850 atomic_t lcq_stop;
851 };
852
853 struct ccc_object *cl_inode2ccc(struct inode *inode);
854
855
856 void vvp_write_pending (struct ccc_object *club, struct ccc_page *page);
857 void vvp_write_complete(struct ccc_object *club, struct ccc_page *page);
858
859 /* specific architecture can implement only part of this list */
860 enum vvp_io_subtype {
861 /** normal IO */
862 IO_NORMAL,
863 /** io started from splice_{read|write} */
864 IO_SPLICE
865 };
866
867 /* IO subtypes */
868 struct vvp_io {
869 /** io subtype */
870 enum vvp_io_subtype cui_io_subtype;
871
872 union {
873 struct {
874 struct pipe_inode_info *cui_pipe;
875 unsigned int cui_flags;
876 } splice;
877 struct vvp_fault_io {
878 /**
879 * Inode modification time that is checked across DLM
880 * lock request.
881 */
882 time_t ft_mtime;
883 struct vm_area_struct *ft_vma;
884 /**
885 * locked page returned from vvp_io
886 */
887 struct page *ft_vmpage;
888 struct vm_fault_api {
889 /**
890 * kernel fault info
891 */
892 struct vm_fault *ft_vmf;
893 /**
894 * fault API used bitflags for return code.
895 */
896 unsigned int ft_flags;
897 } fault;
898 } fault;
899 } u;
900 /**
901 * Read-ahead state used by read and page-fault IO contexts.
902 */
903 struct ll_ra_read cui_bead;
904 /**
905 * Set when cui_bead has been initialized.
906 */
907 int cui_ra_window_set;
908 };
909
910 /**
911 * IO arguments for various VFS I/O interfaces.
912 */
913 struct vvp_io_args {
914 /** normal/splice */
915 enum vvp_io_subtype via_io_subtype;
916
917 union {
918 struct {
919 struct kiocb *via_iocb;
920 struct iov_iter *via_iter;
921 } normal;
922 struct {
923 struct pipe_inode_info *via_pipe;
924 unsigned int via_flags;
925 } splice;
926 } u;
927 };
928
929 struct ll_cl_context {
930 void *lcc_cookie;
931 struct cl_io *lcc_io;
932 struct cl_page *lcc_page;
933 struct lu_env *lcc_env;
934 int lcc_refcheck;
935 };
936
937 struct vvp_thread_info {
938 struct iovec vti_local_iov;
939 struct vvp_io_args vti_args;
940 struct ra_io_arg vti_ria;
941 struct kiocb vti_kiocb;
942 struct ll_cl_context vti_io_ctx;
943 };
944
945 static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
946 {
947 extern struct lu_context_key vvp_key;
948 struct vvp_thread_info *info;
949
950 info = lu_context_key_get(&env->le_ctx, &vvp_key);
951 LASSERT(info != NULL);
952 return info;
953 }
954
955 static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env,
956 enum vvp_io_subtype type)
957 {
958 struct vvp_io_args *ret = &vvp_env_info(env)->vti_args;
959
960 ret->via_io_subtype = type;
961
962 return ret;
963 }
964
965 struct vvp_session {
966 struct vvp_io vs_ios;
967 };
968
969 static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
970 {
971 extern struct lu_context_key vvp_session_key;
972 struct vvp_session *ses;
973
974 ses = lu_context_key_get(env->le_ses, &vvp_session_key);
975 LASSERT(ses != NULL);
976 return ses;
977 }
978
979 static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
980 {
981 return &vvp_env_session(env)->vs_ios;
982 }
983
984 int vvp_global_init(void);
985 void vvp_global_fini(void);
986
987 void ll_queue_done_writing(struct inode *inode, unsigned long flags);
988 void ll_close_thread_shutdown(struct ll_close_queue *lcq);
989 int ll_close_thread_start(struct ll_close_queue **lcq_ret);
990
991 /* llite/llite_mmap.c */
992
993 int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
994 int ll_file_mmap(struct file * file, struct vm_area_struct * vma);
995 void policy_from_vma(ldlm_policy_data_t *policy,
996 struct vm_area_struct *vma, unsigned long addr, size_t count);
997 struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
998 size_t count);
999
1000 static inline void ll_invalidate_page(struct page *vmpage)
1001 {
1002 struct address_space *mapping = vmpage->mapping;
1003 loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
1004
1005 LASSERT(PageLocked(vmpage));
1006 if (mapping == NULL)
1007 return;
1008
1009 ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
1010 truncate_complete_page(mapping, vmpage);
1011 }
1012
1013 #define ll_s2sbi(sb) (s2lsi(sb)->lsi_llsbi)
1014
1015 /* don't need an addref as the sb_info should be holding one */
1016 static inline struct obd_export *ll_s2dtexp(struct super_block *sb)
1017 {
1018 return ll_s2sbi(sb)->ll_dt_exp;
1019 }
1020
1021 /* don't need an addref as the sb_info should be holding one */
1022 static inline struct obd_export *ll_s2mdexp(struct super_block *sb)
1023 {
1024 return ll_s2sbi(sb)->ll_md_exp;
1025 }
1026
1027 static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi)
1028 {
1029 struct obd_device *obd = sbi->ll_md_exp->exp_obd;
1030 if (obd == NULL)
1031 LBUG();
1032 return &obd->u.cli;
1033 }
1034
1035 // FIXME: replace the name of this with LL_SB to conform to kernel stuff
1036 static inline struct ll_sb_info *ll_i2sbi(struct inode *inode)
1037 {
1038 return ll_s2sbi(inode->i_sb);
1039 }
1040
1041 static inline struct obd_export *ll_i2dtexp(struct inode *inode)
1042 {
1043 return ll_s2dtexp(inode->i_sb);
1044 }
1045
1046 static inline struct obd_export *ll_i2mdexp(struct inode *inode)
1047 {
1048 return ll_s2mdexp(inode->i_sb);
1049 }
1050
1051 static inline struct lu_fid *ll_inode2fid(struct inode *inode)
1052 {
1053 struct lu_fid *fid;
1054
1055 LASSERT(inode != NULL);
1056 fid = &ll_i2info(inode)->lli_fid;
1057
1058 return fid;
1059 }
1060
1061 static inline __u64 ll_file_maxbytes(struct inode *inode)
1062 {
1063 return ll_i2info(inode)->lli_maxbytes;
1064 }
1065
1066 /* llite/xattr.c */
1067 int ll_setxattr(struct dentry *dentry, const char *name,
1068 const void *value, size_t size, int flags);
1069 ssize_t ll_getxattr(struct dentry *dentry, const char *name,
1070 void *buffer, size_t size);
1071 ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
1072 int ll_removexattr(struct dentry *dentry, const char *name);
1073
1074 /* llite/remote_perm.c */
1075 extern struct kmem_cache *ll_remote_perm_cachep;
1076 extern struct kmem_cache *ll_rmtperm_hash_cachep;
1077
1078 void free_rmtperm_hash(struct hlist_head *hash);
1079 int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
1080 int lustre_check_remote_perm(struct inode *inode, int mask);
1081
1082 /* llite/llite_capa.c */
1083 extern struct timer_list ll_capa_timer;
1084
1085 int ll_capa_thread_start(void);
1086 void ll_capa_thread_stop(void);
1087 void ll_capa_timer_callback(unsigned long unused);
1088
1089 struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa);
1090
1091 void ll_capa_open(struct inode *inode);
1092 void ll_capa_close(struct inode *inode);
1093
1094 struct obd_capa *ll_mdscapa_get(struct inode *inode);
1095 struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc);
1096
1097 void ll_truncate_free_capa(struct obd_capa *ocapa);
1098 void ll_clear_inode_capas(struct inode *inode);
1099 void ll_print_capa_stat(struct ll_sb_info *sbi);
1100
1101 /* llite/llite_cl.c */
1102 extern struct lu_device_type vvp_device_type;
1103
1104 /**
1105 * Common IO arguments for various VFS I/O interfaces.
1106 */
1107 int cl_sb_init(struct super_block *sb);
1108 int cl_sb_fini(struct super_block *sb);
1109 void ll_io_init(struct cl_io *io, const struct file *file, int write);
1110
1111 void ras_update(struct ll_sb_info *sbi, struct inode *inode,
1112 struct ll_readahead_state *ras, unsigned long index,
1113 unsigned hit);
1114 void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
1115 void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which);
1116
1117 /* llite/llite_rmtacl.c */
1118 #ifdef CONFIG_FS_POSIX_ACL
1119 struct eacl_entry {
1120 struct list_head ee_list;
1121 pid_t ee_key; /* hash key */
1122 struct lu_fid ee_fid;
1123 int ee_type; /* ACL type for ACCESS or DEFAULT */
1124 ext_acl_xattr_header *ee_acl;
1125 };
1126
1127 obd_valid rce_ops2valid(int ops);
1128 struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key);
1129 int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops);
1130 int rct_del(struct rmtacl_ctl_table *rct, pid_t key);
1131 void rct_init(struct rmtacl_ctl_table *rct);
1132 void rct_fini(struct rmtacl_ctl_table *rct);
1133
1134 void ee_free(struct eacl_entry *ee);
1135 int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
1136 ext_acl_xattr_header *header);
1137 struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
1138 struct lu_fid *fid, int type);
1139 void et_search_free(struct eacl_table *et, pid_t key);
1140 void et_init(struct eacl_table *et);
1141 void et_fini(struct eacl_table *et);
1142 #else
1143 static inline obd_valid rce_ops2valid(int ops)
1144 {
1145 return 0;
1146 }
1147 #endif
1148
1149 /* statahead.c */
1150
1151 #define LL_SA_RPC_MIN 2
1152 #define LL_SA_RPC_DEF 32
1153 #define LL_SA_RPC_MAX 8192
1154
1155 #define LL_SA_CACHE_BIT 5
1156 #define LL_SA_CACHE_SIZE (1 << LL_SA_CACHE_BIT)
1157 #define LL_SA_CACHE_MASK (LL_SA_CACHE_SIZE - 1)
1158
1159 /* per inode struct, for dir only */
1160 struct ll_statahead_info {
1161 struct inode *sai_inode;
1162 atomic_t sai_refcount; /* when access this struct, hold
1163 * refcount */
1164 unsigned int sai_generation; /* generation for statahead */
1165 unsigned int sai_max; /* max ahead of lookup */
1166 __u64 sai_sent; /* stat requests sent count */
1167 __u64 sai_replied; /* stat requests which received
1168 * reply */
1169 __u64 sai_index; /* index of statahead entry */
1170 __u64 sai_index_wait; /* index of the entry which the
1171 * caller is waiting for */
1172 __u64 sai_hit; /* hit count */
1173 __u64 sai_miss; /* miss count:
1174 * for "ls -al" case, it includes
1175 * hidden dentry miss;
1176 * for "ls -l" case, it does not
1177 * include hidden dentry miss.
1178 * "sai_miss_hidden" is used for
1179 * the latter case.
1180 */
1181 unsigned int sai_consecutive_miss; /* consecutive miss */
1182 unsigned int sai_miss_hidden;/* "ls -al", but first dentry
1183 * is not a hidden one */
1184 unsigned int sai_skip_hidden;/* skipped hidden dentry count */
1185 unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for
1186 * hidden entries */
1187 sai_agl_valid:1;/* AGL is valid for the dir */
1188 wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
1189 struct ptlrpc_thread sai_thread; /* stat-ahead thread */
1190 struct ptlrpc_thread sai_agl_thread; /* AGL thread */
1191 struct list_head sai_entries; /* entry list */
1192 struct list_head sai_entries_received; /* entries returned */
1193 struct list_head sai_entries_stated; /* entries stated */
1194 struct list_head sai_entries_agl; /* AGL entries to be sent */
1195 struct list_head sai_cache[LL_SA_CACHE_SIZE];
1196 spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE];
1197 atomic_t sai_cache_count; /* entry count in cache */
1198 };
1199
1200 int do_statahead_enter(struct inode *dir, struct dentry **dentry,
1201 int only_unplug);
1202 void ll_stop_statahead(struct inode *dir, void *key);
1203
1204 static inline int ll_glimpse_size(struct inode *inode)
1205 {
1206 struct ll_inode_info *lli = ll_i2info(inode);
1207 int rc;
1208
1209 down_read(&lli->lli_glimpse_sem);
1210 rc = cl_glimpse_size(inode);
1211 lli->lli_glimpse_time = cfs_time_current();
1212 up_read(&lli->lli_glimpse_sem);
1213 return rc;
1214 }
1215
1216 static inline void
1217 ll_statahead_mark(struct inode *dir, struct dentry *dentry)
1218 {
1219 struct ll_inode_info *lli = ll_i2info(dir);
1220 struct ll_statahead_info *sai = lli->lli_sai;
1221 struct ll_dentry_data *ldd = ll_d2d(dentry);
1222
1223 /* not the same process, don't mark */
1224 if (lli->lli_opendir_pid != current_pid())
1225 return;
1226
1227 LASSERT(ldd != NULL);
1228 if (sai != NULL)
1229 ldd->lld_sa_generation = sai->sai_generation;
1230 }
1231
1232 static inline int
1233 d_need_statahead(struct inode *dir, struct dentry *dentryp)
1234 {
1235 struct ll_inode_info *lli;
1236 struct ll_dentry_data *ldd;
1237
1238 if (ll_i2sbi(dir)->ll_sa_max == 0)
1239 return -EAGAIN;
1240
1241 lli = ll_i2info(dir);
1242 /* not the same process, don't statahead */
1243 if (lli->lli_opendir_pid != current_pid())
1244 return -EAGAIN;
1245
1246 /* statahead has been stopped */
1247 if (lli->lli_opendir_key == NULL)
1248 return -EAGAIN;
1249
1250 ldd = ll_d2d(dentryp);
1251 /*
1252 * When stating a dentry, the system triggers "revalidate" or "lookup"
1253 * more than once: for "getattr", for "getxattr", and maybe for others.
1254 * Under patchless client mode, the operation intent is not accurate,
1255 * which may misguide the statahead thread. For example:
1256 * The "revalidate" calls for "getattr" and "getxattr" of a dentry may
1257 * carry the same operation intent -- "IT_GETATTR".
1258 * In fact, one dentry should have only one chance to interact with the
1259 * statahead thread, otherwise the statahead windows will be confused.
1260 * The solution is as follows:
1261 * Assign "lld_sa_generation" with "sai_generation" when a dentry is
1262 * "IT_GETATTR"ed for the first time; subsequent "IT_GETATTR" calls
1263 * will bypass interacting with the statahead thread by checking
1264 * "lld_sa_generation == lli_sai->sai_generation".
1265 */
1266 if (ldd && lli->lli_sai &&
1267 ldd->lld_sa_generation == lli->lli_sai->sai_generation)
1268 return -EAGAIN;
1269
1270 return 1;
1271 }
1272
1273 static inline int
1274 ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int only_unplug)
1275 {
1276 int ret;
1277
1278 ret = d_need_statahead(dir, *dentryp);
1279 if (ret <= 0)
1280 return ret;
1281
1282 return do_statahead_enter(dir, dentryp, only_unplug);
1283 }
1284
1285 /* llite ioctl register support routine */
1286 enum llioc_iter {
1287 LLIOC_CONT = 0,
1288 LLIOC_STOP
1289 };
1290
1291 #define LLIOC_MAX_CMD 256
1292
1293 /*
1294 * Rules to write a callback function:
1295 *
1296 * Parameters:
1297 * @magic: Dynamic ioctl call routine will feed this value with the pointer
1298 * returned to ll_iocontrol_register. Callback functions should use this
1299 * data to check for a potential collision of ioctl cmds. If a collision
1300 * is found, the callback function should return LLIOC_CONT.
1301 * @rcp: The result of the ioctl command.
1302 *
1303 * Return values:
1304 * If @magic matches the pointer returned by ll_iocontrol_register, the
1305 * callback should return LLIOC_STOP; return LLIOC_CONT otherwise.
1306 */
1307 typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode,
1308 struct file *file, unsigned int cmd, unsigned long arg,
1309 void *magic, int *rcp);
1310
1311 /* export functions */
1312 /* Register an ioctl block dynamically for a regular file.
1313 *
1314 * @cmd: the array of ioctl command set
1315 * @count: number of commands in the @cmd
1316 * @cb: callback function, it will be called if an ioctl command is found to
1317 * belong to the command list @cmd.
1318 *
1319 * Return value:
1320 * A magic pointer will be returned on success;
1321 * otherwise, NULL will be returned.
1322 */
1323 void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
1324 void ll_iocontrol_unregister(void *magic);
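/*
 * Illustrative sketch only (not part of llite; the "example_*" names and the
 * command number are hypothetical): how a module could hook into the dynamic
 * ioctl interface declared above.
 *
 *	static unsigned int example_cmds[] = { 0xC008 };
 *	static void *example_magic;
 *
 *	static enum llioc_iter example_cb(struct inode *inode, struct file *file,
 *					  unsigned int cmd, unsigned long arg,
 *					  void *magic, int *rcp)
 *	{
 *		if (magic != example_magic)
 *			return LLIOC_CONT;	// not our registration, keep iterating
 *		*rcp = 0;			// handle cmd/arg here, store the result
 *		return LLIOC_STOP;		// command consumed, stop iteration
 *	}
 *
 *	// at module init:
 *	example_magic = ll_iocontrol_register(example_cb,
 *					      ARRAY_SIZE(example_cmds), example_cmds);
 *	// at module exit:
 *	ll_iocontrol_unregister(example_magic);
 */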
1325
1326
1327 /* lclient compat stuff */
1328 #define cl_inode_info ll_inode_info
1329 #define cl_i2info(info) ll_i2info(info)
1330 #define cl_inode_mode(inode) ((inode)->i_mode)
1331 #define cl_i2sbi ll_i2sbi
1332
1333 static inline struct ll_file_data *cl_iattr2fd(struct inode *inode,
1334 const struct iattr *attr)
1335 {
1336 LASSERT(attr->ia_valid & ATTR_FILE);
1337 return LUSTRE_FPRIVATE(attr->ia_file);
1338 }
1339
1340 static inline void cl_isize_lock(struct inode *inode)
1341 {
1342 ll_inode_size_lock(inode);
1343 }
1344
1345 static inline void cl_isize_unlock(struct inode *inode)
1346 {
1347 ll_inode_size_unlock(inode);
1348 }
1349
1350 static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms)
1351 {
1352 LASSERT(mutex_is_locked(&ll_i2info(inode)->lli_size_mutex));
1353 i_size_write(inode, kms);
1354 }
1355
1356 static inline void cl_isize_write(struct inode *inode, loff_t kms)
1357 {
1358 ll_inode_size_lock(inode);
1359 i_size_write(inode, kms);
1360 ll_inode_size_unlock(inode);
1361 }
1362
1363 #define cl_isize_read(inode) i_size_read(inode)
1364
1365 static inline int cl_merge_lvb(const struct lu_env *env, struct inode *inode)
1366 {
1367 return ll_merge_lvb(env, inode);
1368 }
1369
1370 #define cl_inode_atime(inode) LTIME_S((inode)->i_atime)
1371 #define cl_inode_ctime(inode) LTIME_S((inode)->i_ctime)
1372 #define cl_inode_mtime(inode) LTIME_S((inode)->i_mtime)
1373
1374 struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt);
1375
1376 int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
1377 enum cl_fsync_mode mode, int ignore_layout);
1378
1379 /** direct write pages */
1380 struct ll_dio_pages {
1381 /** page array to be written. we don't support
1382 * partial pages except the last one. */
1383 struct page **ldp_pages;
1384 /* offset of each page */
1385 loff_t *ldp_offsets;
1386 /** if ldp_offsets is NULL, the pages are written
1387 * sequentially and this is the file offset of the
1388 * first page. */
1389 loff_t ldp_start_offset;
1390 /** how many bytes are to be written. */
1391 size_t ldp_size;
1392 /** # of pages in the array. */
1393 int ldp_nr;
1394 };
1395
1396 static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
1397 int rc)
1398 {
1399 int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
1400 LPROC_LL_OSC_WRITE;
1401
1402 ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc);
1403 }
1404
1405 extern ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
1406 int rw, struct inode *inode,
1407 struct ll_dio_pages *pv);
1408
1409 static inline int ll_file_nolock(const struct file *file)
1410 {
1411 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1412 struct inode *inode = file->f_dentry->d_inode;
1413
1414 LASSERT(fd != NULL);
1415 return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
1416 (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
1417 }
1418
1419 static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
1420 struct lookup_intent *it, __u64 *bits)
1421 {
1422 if (!it->d.lustre.it_lock_set) {
1423 struct lustre_handle handle;
1424
1425 /* If this inode is a remote object, it will get two
1426 * separate locks in different namespaces: the master MDT,
1427 * where the name entry is, will grant the LOOKUP lock, and
1428 * the remote MDT, where the object is, will grant the
1429 * UPDATE|PERM lock. The inode will be attached to both the
1430 * LOOKUP and the PERM locks, so revoking either lock will
1431 * cause the dcache to be cleared. */
1432 if (it->d.lustre.it_remote_lock_mode) {
1433 handle.cookie = it->d.lustre.it_remote_lock_handle;
1434 CDEBUG(D_DLMTRACE, "setting l_data to inode %p"
1435 "(%lu/%u) for remote lock "LPX64"\n", inode,
1436 inode->i_ino, inode->i_generation,
1437 handle.cookie);
1438 md_set_lock_data(exp, &handle.cookie, inode, NULL);
1439 }
1440
1441 handle.cookie = it->d.lustre.it_lock_handle;
1442
1443 CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)"
1444 " for lock "LPX64"\n", inode, inode->i_ino,
1445 inode->i_generation, handle.cookie);
1446
1447 md_set_lock_data(exp, &handle.cookie, inode,
1448 &it->d.lustre.it_lock_bits);
1449 it->d.lustre.it_lock_set = 1;
1450 }
1451
1452 if (bits != NULL)
1453 *bits = it->d.lustre.it_lock_bits;
1454 }
1455
1456 static inline void ll_lock_dcache(struct inode *inode)
1457 {
1458 spin_lock(&inode->i_lock);
1459 }
1460
1461 static inline void ll_unlock_dcache(struct inode *inode)
1462 {
1463 spin_unlock(&inode->i_lock);
1464 }
1465
1466 static inline int d_lustre_invalid(const struct dentry *dentry)
1467 {
1468 struct ll_dentry_data *lld = ll_d2d(dentry);
1469
1470 return (lld == NULL) || lld->lld_invalid;
1471 }
1472
1473 static inline void __d_lustre_invalidate(struct dentry *dentry)
1474 {
1475 struct ll_dentry_data *lld = ll_d2d(dentry);
1476
1477 if (lld != NULL)
1478 lld->lld_invalid = 1;
1479 }
1480
1481 /*
1482 * Mark dentry INVALID. If the dentry refcount is zero (this is normally the
1483 * case for ll_md_blocking_ast), unhash this dentry and let the dcache reclaim
1484 * it later; otherwise dput() of the last refcount will unhash and kill it.
1485 */
1486 static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
1487 {
1488 CDEBUG(D_DENTRY, "invalidate dentry %.*s (%p) parent %p inode %p "
1489 "refc %d\n", dentry->d_name.len, dentry->d_name.name, dentry,
1490 dentry->d_parent, dentry->d_inode, d_count(dentry));
1491
1492 spin_lock_nested(&dentry->d_lock,
1493 nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
1494 __d_lustre_invalidate(dentry);
1495 if (d_count(dentry) == 0)
1496 __d_drop(dentry);
1497 spin_unlock(&dentry->d_lock);
1498 }
1499
1500 static inline void d_lustre_revalidate(struct dentry *dentry)
1501 {
1502 spin_lock(&dentry->d_lock);
1503 LASSERT(ll_d2d(dentry) != NULL);
1504 ll_d2d(dentry)->lld_invalid = 0;
1505 spin_unlock(&dentry->d_lock);
1506 }
1507
1508 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
1509 /* Compatibility for old (1.8) compiled userspace quota code */
1510 struct if_quotactl_18 {
1511 __u32 qc_cmd;
1512 __u32 qc_type;
1513 __u32 qc_id;
1514 __u32 qc_stat;
1515 struct obd_dqinfo qc_dqinfo;
1516 struct obd_dqblk qc_dqblk;
1517 char obd_type[16];
1518 struct obd_uuid obd_uuid;
1519 };
1520 #define LL_IOC_QUOTACTL_18 _IOWR('f', 162, struct if_quotactl_18 *)
1521 /* End compatibility for old (1.8) compiled userspace quota code */
1522 #else
1523 #warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
1524 #endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0) */
1525
1526 enum {
1527 LL_LAYOUT_GEN_NONE = ((__u32)-2), /* layout lock was cancelled */
1528 LL_LAYOUT_GEN_EMPTY = ((__u32)-1) /* for empty layout */
1529 };
1530
1531 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
1532 int ll_layout_refresh(struct inode *inode, __u32 *gen);
1533 int ll_layout_restore(struct inode *inode);
1534
1535 int ll_xattr_init(void);
1536 void ll_xattr_fini(void);
1537
1538 #endif /* LLITE_INTERNAL_H */