/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#ifndef LLITE_INTERNAL_H
#define LLITE_INTERNAL_H

#include <lustre_debug.h>
#include <lustre_ver.h>
#include <lustre_disk.h>	/* for s2sbi */
#include <lustre_eacl.h>

/* for struct cl_lock_descr and struct cl_io */
#include <cl_object.h>

#include <lustre_mdc.h>
#include <linux/lustre_intent.h>
#include <linux/compat.h>
#include <linux/posix_acl_xattr.h>
#ifndef VM_FAULT_RETRY
#define VM_FAULT_RETRY 0
#endif
/* Kernel 3.1 removed LOOKUP_CONTINUE; LOOKUP_PARENT is equivalent to it.
 * See kernel commit 49084c3bb2055c401f3493c13edae14d49128ca0. */
#ifndef LOOKUP_CONTINUE
#define LOOKUP_CONTINUE LOOKUP_PARENT
#endif
/** Only used on client-side for indicating the tail of dir hash/offset. */
#define LL_DIR_END_OFF		0x7fffffffffffffffULL
#define LL_DIR_END_OFF_32BIT	0x7fffffffUL

#define LL_IT2STR(it)		((it) ? ldlm_it2str((it)->it_op) : "0")
#define LUSTRE_FPRIVATE(file)	((file)->private_data)
struct ll_dentry_data {
	struct lookup_intent	*lld_it;
	unsigned int		lld_sa_generation;
	unsigned int		lld_invalid:1;
	struct rcu_head		lld_rcu_head;
};
#define ll_d2d(de) ((struct ll_dentry_data *)((de)->d_fsdata))

#define LLI_INODE_MAGIC		0x111d0de5
#define LLI_INODE_DEAD		0xdeadd00d

/* remote client permission cache */
#define REMOTE_PERM_HASHSIZE	16
struct ll_getname_data {
	struct dir_context	ctx;
	char			*lgd_name;	/* points to a buffer of NAME_MAX + 1 bytes */
	struct lu_fid		lgd_fid;	/* target fid we are looking for */
	int			lgd_found;	/* inode matched? */
};
/* llite setxid/access permission for user on remote client */
struct ll_remote_perm {
	struct hlist_node	lrp_list;
	uid_t			lrp_fsuid;
	gid_t			lrp_fsgid;
	int			lrp_access_perm; /* MAY_READ/WRITE/EXEC; this
						  * is the access permission
						  * for lrp_fsuid/lrp_fsgid. */
};
enum lli_flags {
	/* MDS has an authority for the Size-on-MDS attributes. */
	LLIF_MDS_SIZE_LOCK	= (1 << 0),
	/* Epoch close is postponed. */
	LLIF_EPOCH_PENDING	= (1 << 1),
	/* DONE WRITING is allowed. */
	LLIF_DONE_WRITING	= (1 << 2),
	/* Size-on-MDS attributes are changed. An attribute update needs to
	 * be sent to the MDS. */
	LLIF_SOM_DIRTY		= (1 << 3),
	/* File data is modified. */
	LLIF_DATA_MODIFIED	= (1 << 4),
	/* File is being restored */
	LLIF_FILE_RESTORING	= (1 << 5),
	/* Xattr cache is attached to the file */
	LLIF_XATTR_CACHE	= (1 << 6),
};
struct ll_inode_info {
	__u32				lli_inode_magic;

	struct posix_acl		*lli_posix_acl;

	struct hlist_head		*lli_remote_perms;
	struct mutex			lli_rmtperm_mutex;
	/* identifying fields for both metadata and data stacks. */
	struct lu_fid			lli_fid;
	/* Parent fid for accessing default stripe data on parent directory
	 * for allocating OST objects after a mknod() and later open-by-FID. */
	struct lu_fid			lli_pfid;

	struct list_head		lli_close_list;
	struct list_head		lli_oss_capas;
	/* open count, currently used by capability only; indicates whether
	 * the capability needs renewal */
	atomic_t			lli_open_count;
	struct obd_capa			*lli_mds_capa;
	cfs_time_t			lli_rmtperm_time;
	/* handle is to be sent to MDS later on done_writing and setattr.
	 * Open handle data are needed for the recovery to reconstruct
	 * the inode state on the MDS. XXX: recovery is not ready yet. */
	struct obd_client_handle	*lli_pending_och;

	/* We need all three because every inode may be opened in different
	 * modes */
	struct obd_client_handle	*lli_mds_read_och;
	struct obd_client_handle	*lli_mds_write_och;
	struct obd_client_handle	*lli_mds_exec_och;
	__u64				lli_open_fd_read_count;
	__u64				lli_open_fd_write_count;
	__u64				lli_open_fd_exec_count;
	/* Protects access to och pointers and their usage counters */
	struct mutex			lli_och_mutex;
	struct inode			lli_vfs_inode;

	/* the most recent timestamps obtained from mds */
	struct ost_lvb			lli_lvb;
	spinlock_t			lli_agl_lock;
	/* Try to keep the d::members and f::members aligned. Before using
	 * these members, check whether the inode is a directory or not. */
	union {
		/* for directory */
		struct {
			/* serialize normal readdir and statahead-readdir. */
			struct mutex			d_readdir_mutex;

			/* metadata statahead */
			/* since parent and child threads can share the same
			 * @file struct, "opendir_key" is the token used at
			 * dir close to handle the case where the parent exits
			 * before the child -- whoever holds the key must
			 * clean up the dir readahead. */
			void				*d_opendir_key;
			struct ll_statahead_info	*d_sai;
			struct posix_acl		*d_def_acl;
			/* protect statahead stuff. */
			spinlock_t			d_sa_lock;
			/* "opendir_pid" is the token used in lookup/revalidate
			 * -- I am the owner of dir statahead. */
			pid_t				d_opendir_pid;
		} d;

#define lli_readdir_mutex	u.d.d_readdir_mutex
#define lli_opendir_key		u.d.d_opendir_key
#define lli_sai			u.d.d_sai
#define lli_sa_lock		u.d.d_sa_lock
#define lli_opendir_pid		u.d.d_opendir_pid
		/* for non-directory */
		struct {
			struct mutex			f_size_mutex;
			char				*f_symlink_name;
			__u64				f_maxbytes;
			/*
			 * struct rw_semaphore {
			 *	signed long	count;     // align d.d_def_acl
			 *	spinlock_t	wait_lock; // align d.d_sa_lock
			 *	struct list_head wait_list;
			 * }
			 */
			struct rw_semaphore		f_trunc_sem;
			struct mutex			f_write_mutex;

			struct rw_semaphore		f_glimpse_sem;
			cfs_time_t			f_glimpse_time;
			struct list_head		f_agl_list;
			__u64				f_agl_index;

			/* for writepage() only to communicate to fsync */
			int				f_async_rc;

			/*
			 * Whenever a process tries to read/write the file,
			 * the jobid of the process will be saved here, and
			 * it will be packed into the write RPC when flushed
			 * later.
			 *
			 * So the read/write statistics for a jobid will not
			 * be accurate if the file is shared by different
			 * jobs.
			 */
			char				f_jobid[JOBSTATS_JOBID_SIZE];
		} f;
#define lli_size_mutex		u.f.f_size_mutex
#define lli_symlink_name	u.f.f_symlink_name
#define lli_maxbytes		u.f.f_maxbytes
#define lli_trunc_sem		u.f.f_trunc_sem
#define lli_write_mutex		u.f.f_write_mutex
#define lli_glimpse_sem		u.f.f_glimpse_sem
#define lli_glimpse_time	u.f.f_glimpse_time
#define lli_agl_list		u.f.f_agl_list
#define lli_agl_index		u.f.f_agl_index
#define lli_async_rc		u.f.f_async_rc
#define lli_jobid		u.f.f_jobid
	} u;
	/* XXX: For the following frequently used members: although some may
	 * only be meaningful for non-directory objects, it is time-wasting
	 * to check whether the object is a directory before using them. On
	 * the other hand, since sizeof(f) > sizeof(d), moving them into u.f
	 * would not reduce the size of "ll_inode_info" anyway, so keep them
	 * outside.
	 *
	 * In the future, if more members are added only for directories,
	 * some of the following members can be moved into u.f.
	 */
	struct cl_object		*lli_clob;

	/* mutex to request the layout lock exclusively. */
	struct mutex			lli_layout_mutex;
	/* Layout version, protected by lli_layout_lock */
	__u32				lli_layout_gen;
	spinlock_t			lli_layout_lock;

	struct rw_semaphore		lli_xattrs_list_rwsem;
	struct mutex			lli_xattrs_enq_lock;
	struct list_head		lli_xattrs; /* ll_xattr_entry->xe_list */
};
static inline __u32 ll_layout_version_get(struct ll_inode_info *lli)
{
	__u32 gen;

	spin_lock(&lli->lli_layout_lock);
	gen = lli->lli_layout_gen;
	spin_unlock(&lli->lli_layout_lock);

	return gen;
}
static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen)
{
	spin_lock(&lli->lli_layout_lock);
	lli->lli_layout_gen = gen;
	spin_unlock(&lli->lli_layout_lock);
}
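
/*
 * Usage sketch (hypothetical caller, not part of this header): the spinlock
 * makes reads and updates of the generation atomic with respect to each
 * other, so a caller can sample it and later detect a concurrent layout
 * change:
 *
 *	__u32 gen = ll_layout_version_get(lli);
 *	... set up IO against the current layout ...
 *	if (ll_layout_version_get(lli) != gen)
 *		... layout changed, restart ...
 */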
int ll_xattr_cache_destroy(struct inode *inode);

int ll_xattr_cache_get(struct inode *inode,
		       const char *name,
		       char *buffer,
		       size_t size,
		       __u64 valid);
/**
 * Locking to guarantee consistency of non-atomic updates to long long i_size,
 * consistency between file size and KMS.
 *
 * Implemented by ->lli_size_mutex and ->lsm_lock, nested in that order.
 */
void ll_inode_size_lock(struct inode *inode);
void ll_inode_size_unlock(struct inode *inode);
// FIXME: replace the name of this with LL_I to conform to kernel stuff
// static inline struct ll_inode_info *LL_I(struct inode *inode)
static inline struct ll_inode_info *ll_i2info(struct inode *inode)
{
	return container_of(inode, struct ll_inode_info, lli_vfs_inode);
}
/* default to about 40 MB of readahead on a given system. That much tied
 * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */
#define SBI_DEFAULT_READAHEAD_MAX	(40UL << (20 - PAGE_CACHE_SHIFT))

/* default to read-ahead full files smaller than 2MB on the second read */
#define SBI_DEFAULT_READAHEAD_WHOLE_MAX	(2UL << (20 - PAGE_CACHE_SHIFT))
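
/*
 * Worked example (illustrative): these macros convert megabytes to pages.
 * With 4 KiB pages, PAGE_CACHE_SHIFT is 12, so SBI_DEFAULT_READAHEAD_MAX is
 * 40UL << (20 - 12) = 40 * 256 = 10240 pages, i.e. 10240 * 4 KiB = 40 MB.
 */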
enum ra_stat {
	RA_STAT_HIT = 0,
	RA_STAT_MISS,
	RA_STAT_DISTANT_READPAGE,
	RA_STAT_MISS_IN_WINDOW,
	RA_STAT_FAILED_GRAB_PAGE,
	RA_STAT_FAILED_MATCH,
	RA_STAT_MAX_IN_FLIGHT,
	RA_STAT_WRONG_GRAB_PAGE,
	_NR_RA_STAT,
};
struct ll_ra_info {
	atomic_t	ra_cur_pages;
	unsigned long	ra_max_pages;
	unsigned long	ra_max_pages_per_file;
	unsigned long	ra_max_read_ahead_whole_pages;
};
/* ra_io_arg is filled at the beginning of ll_readahead with the ras_lock
 * held; the subsequent ll_read_ahead_pages then reads RA pages according
 * to this arg. All items in this structure are counted in units of page
 * index. */
struct ra_io_arg {
	unsigned long ria_start;	/* start offset of read-ahead */
	unsigned long ria_end;		/* end offset of read-ahead */
	/* If a stride read pattern is detected, ria_stoff is where the
	 * stride read starts. Note: for normal read-ahead this value is
	 * meaningless and will not be accessed. */
	pgoff_t ria_stoff;
	/* ria_length and ria_pages are the length and page count of a
	 * stride in stride I/O mode; they are also used to check whether
	 * the read-ahead pages are being read in stride I/O mode. */
	unsigned long ria_length;
	unsigned long ria_pages;
};
/* LL_HIST_MAX=32 causes an overflow */
#define LL_HIST_MAX		28
#define LL_HIST_START		12 /* buckets start at 2^12 = 4k */
#define LL_PROCESS_HIST_MAX	10
struct per_process_info {
	pid_t pid;
	struct obd_histogram pp_r_hist;
	struct obd_histogram pp_w_hist;
};
/* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */
struct ll_rw_extents_info {
	struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
};
#define LL_OFFSET_HIST_MAX 100
struct ll_rw_process_info {
	pid_t			rw_pid;
	int			rw_op;
	loff_t			rw_range_start;
	loff_t			rw_range_end;
	loff_t			rw_last_file_pos;
	loff_t			rw_offset;
	size_t			rw_smallest_extent;
	size_t			rw_largest_extent;
	struct ll_file_data	*rw_last_file;
};
enum stats_track_type {
	STATS_TRACK_ALL = 0,	/* track all processes */
	STATS_TRACK_PID,	/* track process with this pid */
	STATS_TRACK_PPID,	/* track processes with this ppid */
	STATS_TRACK_GID,	/* track processes with this gid */
};
/* flags for sbi->ll_flags */
#define LL_SBI_NOLCK		0x01	/* DLM locking disabled (directio-only) */
#define LL_SBI_CHECKSUM		0x02	/* checksum each page as it's written */
#define LL_SBI_FLOCK		0x04
#define LL_SBI_USER_XATTR	0x08	/* support user xattr */
#define LL_SBI_ACL		0x10	/* support ACL */
#define LL_SBI_RMT_CLIENT	0x40	/* remote client */
#define LL_SBI_MDS_CAPA		0x80	/* support mds capa */
#define LL_SBI_OSS_CAPA		0x100	/* support oss capa */
#define LL_SBI_LOCALFLOCK	0x200	/* local flocks support by kernel */
#define LL_SBI_LRU_RESIZE	0x400	/* lru resize support */
#define LL_SBI_LAZYSTATFS	0x800	/* lazystatfs mount option */
#define LL_SBI_SOM_PREVIEW	0x1000	/* SOM preview mount option */
#define LL_SBI_32BIT_API	0x2000	/* generate 32 bit inodes. */
#define LL_SBI_64BIT_HASH	0x4000	/* support 64-bit dir hash/offset */
#define LL_SBI_AGL_ENABLED	0x8000	/* enable agl */
#define LL_SBI_VERBOSE		0x10000	/* verbose mount/umount */
#define LL_SBI_LAYOUT_LOCK	0x20000	/* layout lock support */
#define LL_SBI_USER_FID2PATH	0x40000	/* allow fid2path by unprivileged users */
#define LL_SBI_XATTR_CACHE	0x80000	/* support for xattr cache */
#define LL_SBI_FLAGS { \
#define RCE_HASHES	32

struct rmtacl_ctl_entry {
	struct list_head	rce_list;
	pid_t			rce_key;	/* hash key */
	int			rce_ops;	/* acl operation type */
};

struct rmtacl_ctl_table {
	spinlock_t		rct_lock;
	struct list_head	rct_entries[RCE_HASHES];
};

#define EE_HASHES	32

struct eacl_table {
	spinlock_t		et_lock;
	struct list_head	et_entries[EE_HASHES];
};
struct ll_sb_info {
	struct list_head	ll_list;
	/* this protects pglist and ra_info. It isn't safe to
	 * grab from interrupt contexts */
	spinlock_t		ll_lock;
	spinlock_t		ll_pp_extent_lock;	/* pp_extent entry */
	spinlock_t		ll_process_lock;	/* ll_rw_process_info */
	struct obd_uuid		ll_sb_uuid;
	struct obd_export	*ll_md_exp;
	struct obd_export	*ll_dt_exp;
	struct proc_dir_entry	*ll_proc_root;
	struct lu_fid		ll_root_fid;	/* root object fid */

	int			ll_flags;
	unsigned int		ll_umounting:1,
				ll_xattr_cache_enabled:1;
	struct list_head	ll_conn_chain;	/* per-conn chain of SBs */
	struct lustre_client_ocd ll_lco;

	struct list_head	ll_orphan_dentry_list;	/* please don't ask -p */
	struct ll_close_queue	*ll_lcq;

	struct lprocfs_stats	*ll_stats;	/* lprocfs stats counter */

	struct cl_client_cache	ll_cache;

	struct lprocfs_stats	*ll_ra_stats;

	struct ll_ra_info	ll_ra_info;
	unsigned int		ll_namelen;
	struct file_operations	*ll_fop;

	/* =0 - hold lock over whole read/write
	 * >0 - max. chunk to be read/written w/o lock re-acquiring */
	unsigned long		ll_max_rw_chunk;
	unsigned int		ll_md_brw_size;	/* used by readdir */

	struct lu_site		*ll_site;
	struct cl_device	*ll_cl;
	/* Statistics */
	struct ll_rw_extents_info ll_rw_extents_info;
	int			ll_extent_process_count;
	struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX];
	unsigned int		ll_offset_process_count;
	struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX];
	unsigned int		ll_rw_offset_entry_count;
	int			ll_stats_track_id;
	enum stats_track_type	ll_stats_track_type;
	int			ll_rw_stats_on;

	/* metadata stat-ahead */
	unsigned int		ll_sa_max;	/* max statahead RPCs */
	atomic_t		ll_sa_total;	/* statahead thread started
						 * count */
	atomic_t		ll_sa_wrong;	/* statahead thread stopped for
						 * low hit ratio */
	atomic_t		ll_agl_total;	/* AGL thread started count */

	dev_t			ll_sdev_orig;	/* save s_dev before assign for
						 * clustered NFS */
	struct rmtacl_ctl_table	ll_rct;
	struct eacl_table	ll_et;
	__kernel_fsid_t		ll_fsid;
};
#define LL_DEFAULT_MAX_RW_CHUNK	(32 * 1024 * 1024)
struct ll_ra_read {
	pgoff_t			lrr_start;
	pgoff_t			lrr_count;
	struct task_struct	*lrr_reader;
	struct list_head	lrr_linkage;
};
/**
 * per file-descriptor read-ahead data.
 */
struct ll_readahead_state {
	spinlock_t	ras_lock;
	/*
	 * index of the last page that read(2) needed and that wasn't in the
	 * cache. Used by ras_update() to detect seeks.
	 *
	 * XXX nikita: if access seeks into cached region, Lustre doesn't see
	 * this.
	 */
	unsigned long	ras_last_readpage;
	/*
	 * number of pages read after last read-ahead window reset. As window
	 * is reset on each seek, this is effectively a number of consecutive
	 * accesses. Maybe ->ras_accessed_in_window is a better name.
	 *
	 * XXX nikita: window is also reset (by ras_update()) when Lustre
	 * believes that memory pressure evicts read-ahead pages. In that
	 * case, it probably doesn't make sense to expand window to
	 * PTLRPC_MAX_BRW_PAGES on the third access.
	 */
	unsigned long	ras_consecutive_pages;
	/*
	 * number of read requests after the last read-ahead window reset.
	 * As the window is reset on each seek, this is effectively the
	 * number of consecutive read requests and is used to trigger
	 * read-ahead.
	 */
	unsigned long	ras_consecutive_requests;
	/*
	 * Parameters of the current read-ahead window. Handled by
	 * ras_update(). On the initial access to the file or after a seek,
	 * the window is reset to 0. After 3 consecutive accesses, the window
	 * is expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, the window is
	 * enlarged by PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
	 */
	unsigned long	ras_window_start, ras_window_len;
	/*
	 * Where the next read-ahead should start. This lies within the
	 * read-ahead window. The read-ahead window is read in pieces rather
	 * than at once because: 1. lustre limits the total number of pages
	 * under read-ahead by ->ra_max_pages (see ll_ra_count_get()),
	 * 2. the client cannot read pages not covered by a DLM lock.
	 */
	unsigned long	ras_next_readahead;
	/*
	 * Total number of ll_file_read requests issued; reads originating
	 * due to mmap are not counted in this total. This value is used to
	 * trigger full file read-ahead after multiple reads to a small file.
	 */
	unsigned long	ras_requests;
	/*
	 * Page index with respect to the current request; this value
	 * will not be accurate when dealing with reads issued via mmap.
	 */
	unsigned long	ras_request_index;
	/*
	 * list of struct ll_ra_read's, one per read(2) call currently in
	 * progress against this file descriptor. Used by read-ahead code,
	 * protected by ->ras_lock.
	 */
	struct list_head ras_read_beads;
	/*
	 * The following 3 items are used for detecting the stride I/O
	 * mode.
	 * In stride I/O mode,
	 * ...............|-----data-----|****gap*****|--------|******|....
	 *    offset      |-stride_pages-|-stride_gap-|
	 * ras_stride_offset = offset;
	 * ras_stride_length = stride_pages + stride_gap;
	 * ras_stride_pages = stride_pages;
	 * Note: all three items are counted in pages.
	 */
	unsigned long	ras_stride_length;
	unsigned long	ras_stride_pages;
	pgoff_t		ras_stride_offset;
	/*
	 * consecutive stride request count; similar to
	 * ras_consecutive_requests, but used for stride I/O mode.
	 * Note: stride read-ahead is enabled only after more than 2
	 * consecutive stride requests have been detected.
	 */
	unsigned long	ras_consecutive_stride_requests;
};
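
/*
 * Worked example (illustrative numbers only): an application that reads
 * 16 pages of data, then seeks forward over 48 pages, and repeats, yields
 * stride_pages = 16 and stride_gap = 48, so ras_stride_length = 64,
 * ras_stride_pages = 16, and ras_stride_offset = the page index where the
 * pattern began. Read-ahead then fetches only the 16-page data segments
 * of each 64-page stride instead of the whole range.
 */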
extern struct kmem_cache *ll_file_data_slab;
struct lustre_handle;
struct ll_file_data {
	struct ll_readahead_state fd_ras;
	struct ccc_grouplock	fd_grouplock;
	__u64			lfd_pos;
	__u32			fd_flags;
	fmode_t			fd_omode;
	/* openhandle if lease exists for this file.
	 * Borrow lli->lli_och_mutex to protect assignment */
	struct obd_client_handle *fd_lease_och;
	struct obd_client_handle *fd_och;
	struct file		*fd_file;
	/* Indicates whether a failure needs to be reported on close.
	 * true: failure is already known, do not report again.
	 * false: unknown failure, should report. */
	bool			fd_write_failed;
};

struct lov_stripe_md;
extern spinlock_t inode_lock;

extern struct proc_dir_entry *proc_lustre_fs_root;
static inline struct inode *ll_info2i(struct ll_inode_info *lli)
{
	return &lli->lli_vfs_inode;
}
__u32 ll_i2suppgid(struct inode *i);
void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2);
static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
{
#if BITS_PER_LONG == 32
	return 1;
#elif defined(CONFIG_COMPAT)
	return unlikely(is_compat_task() || (sbi->ll_flags & LL_SBI_32BIT_API));
#else
	return unlikely(sbi->ll_flags & LL_SBI_32BIT_API);
#endif
}
void ll_ra_read_in(struct file *f, struct ll_ra_read *rar);
void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar);
struct ll_ra_read *ll_ra_read_get(struct file *f);
/* llite/lproc_llite.c */
#ifdef LPROCFS
int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
				struct super_block *sb, char *osc, char *mdc);
void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi);
void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count);
void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars);
void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
		       struct ll_file_data *file, loff_t pos,
		       size_t count, int rw);
#else
static inline int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
			struct super_block *sb, char *osc, char *mdc)
{
	return 0;
}
static inline void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi) {}
static inline
void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count) {}
static inline void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
{
	memset(lvars, 0, sizeof(*lvars));
}
static inline void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
				     struct ll_file_data *file, loff_t pos,
				     size_t count, int rw) {}
#endif
/* llite/dir.c */
void ll_release_page(struct page *page, int remove);
extern const struct file_operations ll_dir_operations;
extern const struct inode_operations ll_dir_inode_operations;
struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
			     struct ll_dir_chain *chain);
int ll_dir_read(struct inode *inode, struct dir_context *ctx);

int ll_get_mdt_idx(struct inode *inode);

/* llite/namei.c */
extern const struct inode_operations ll_special_inode_operations;
int ll_objects_destroy(struct ptlrpc_request *request,
		       struct inode *dir);
struct inode *ll_iget(struct super_block *sb, ino_t hash,
		      struct lustre_md *lic);
int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
		       void *data, int flag);
struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
int ll_rmdir_entry(struct inode *dir, char *name, int namelen);
/* llite/rw.c */
int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
int ll_commit_write(struct file *, struct page *, unsigned from, unsigned to);
int ll_writepage(struct page *page, struct writeback_control *wbc);
int ll_writepages(struct address_space *, struct writeback_control *wbc);
int ll_readpage(struct file *file, struct page *page);
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
int ll_readahead(const struct lu_env *env, struct cl_io *io,
		 struct ll_readahead_state *ras, struct address_space *mapping,
		 struct cl_page_list *queue, int flags);
#ifndef MS_HAS_NEW_AOPS
extern const struct address_space_operations ll_aops;
#else
extern const struct address_space_operations_ext ll_aops;
#endif
/* llite/file.c */
extern struct file_operations ll_file_operations;
extern struct file_operations ll_file_operations_flock;
extern struct file_operations ll_file_operations_noflock;
extern struct inode_operations ll_file_inode_operations;
extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
			   ldlm_mode_t l_req_mode);
extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
				   struct lustre_handle *lockh, __u64 flags,
				   ldlm_mode_t mode);
int ll_file_open(struct inode *inode, struct file *file);
int ll_file_release(struct inode *inode, struct file *file);
int ll_glimpse_ioctl(struct ll_sb_info *sbi,
		     struct lov_stripe_md *lsm, lstat_t *st);
void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch);
int ll_release_openhandle(struct dentry *, struct lookup_intent *);
int ll_md_real_close(struct inode *inode, fmode_t fmode);
void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
		      struct obd_client_handle **och, unsigned long flags);
void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data);
int ll_som_update(struct inode *inode, struct md_op_data *op_data);
int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
		     __u64 ioepoch, int sync);
void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
			  struct lustre_handle *fh);
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
struct posix_acl *ll_get_acl(struct inode *inode, int type);

int ll_inode_permission(struct inode *inode, int mask);

int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
			     int flags, struct lov_user_md *lum,
			     int lum_size);
int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
			     struct lov_mds_md **lmm, int *lmm_size,
			     struct ptlrpc_request **request);
int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
		     int set_default);
int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
		     int *lmm_size, struct ptlrpc_request **request);
int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
int ll_merge_lvb(const struct lu_env *env, struct inode *inode);
int ll_fid2path(struct inode *inode, void *arg);
int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock);
int ll_hsm_release(struct inode *inode);
/* llite/dcache.c */

int ll_d_init(struct dentry *de);
extern const struct dentry_operations ll_d_ops;
void ll_intent_drop_lock(struct lookup_intent *);
void ll_intent_release(struct lookup_intent *);
void ll_invalidate_aliases(struct inode *);
void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
int ll_revalidate_it_finish(struct ptlrpc_request *request,
			    struct lookup_intent *it, struct dentry *de);
/* llite/llite_lib.c */
extern struct super_operations lustre_super_operations;

void ll_lli_init(struct ll_inode_info *lli);
int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
void ll_put_super(struct super_block *sb);
void ll_kill_super(struct super_block *sb);
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
void ll_clear_inode(struct inode *inode);
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import);
int ll_setattr(struct dentry *de, struct iattr *attr);
int ll_statfs(struct dentry *de, struct kstatfs *sfs);
int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
		       __u64 max_age, __u32 flags);
void ll_update_inode(struct inode *inode, struct lustre_md *md);
void ll_read_inode2(struct inode *inode, void *opaque);
void ll_delete_inode(struct inode *inode);
int ll_iocontrol(struct inode *inode, struct file *file,
		 unsigned int cmd, unsigned long arg);
int ll_flush_ctx(struct inode *inode);
void ll_umount_begin(struct super_block *sb);
int ll_remount_fs(struct super_block *sb, int *flags, char *data);
int ll_show_options(struct seq_file *seq, struct dentry *dentry);
void ll_dirty_page_discard_warn(struct page *page, int ioret);
int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
		  struct super_block *, struct lookup_intent *);
void lustre_dump_dentry(struct dentry *, int recur);
int ll_obd_statfs(struct inode *inode, void *arg);
int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
int ll_get_max_cookiesize(struct ll_sb_info *sbi, int *max_cookiesize);
int ll_get_default_cookiesize(struct ll_sb_info *sbi, int *default_cookiesize);
int ll_process_config(struct lustre_cfg *lcfg);
struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
				      struct inode *i1, struct inode *i2,
				      const char *name, int namelen,
				      int mode, __u32 opc, void *data);
void ll_finish_md_op_data(struct md_op_data *op_data);
int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg);
char *ll_get_fsname(struct super_block *sb, char *buf, int buflen);
/* llite/llite_nfs.c */
extern struct export_operations lustre_export_operations;
__u32 get_uuid2int(const char *name, int len);
void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid);
struct inode *search_inode_for_lustre(struct super_block *sb,
				      const struct lu_fid *fid);
/* llite/symlink.c */
extern struct inode_operations ll_fast_symlink_inode_operations;
/* llite/llite_close.c */

struct ll_close_queue {
	spinlock_t		lcq_lock;
	struct list_head	lcq_head;
	wait_queue_head_t	lcq_waitq;
	struct completion	lcq_comp;
	atomic_t		lcq_stop;
};

struct ccc_object *cl_inode2ccc(struct inode *inode);

void vvp_write_pending(struct ccc_object *club, struct ccc_page *page);
void vvp_write_complete(struct ccc_object *club, struct ccc_page *page);
/* specific architecture can implement only part of this list */
enum vvp_io_subtype {
	/** normal IO */
	IO_NORMAL,
	/** io started from splice_{read|write} */
	IO_SPLICE
};

struct vvp_io {
	/** io subtype */
	enum vvp_io_subtype	cui_io_subtype;

	union {
		struct {
			struct pipe_inode_info	*cui_pipe;
			unsigned int		cui_flags;
		} splice;
		struct vvp_fault_io {
			/**
			 * Inode modification time that is checked across DLM
			 * lock request.
			 */
			time_t			ft_mtime;
			struct vm_area_struct	*ft_vma;
			/**
			 * locked page returned from vvp_io
			 */
			struct page		*ft_vmpage;
			struct vm_fault_api {
				/**
				 * kernel fault info
				 */
				struct vm_fault	*ft_vmf;
				/**
				 * fault API used bitflags for return code.
				 */
				unsigned int	ft_flags;
			} fault;
		} fault;
	} u;

	/**
	 * Read-ahead state used by read and page-fault IO contexts.
	 */
	struct ll_ra_read	cui_bead;
	/**
	 * Set when cui_bead has been initialized.
	 */
	int			cui_ra_window_set;
};
/**
 * IO arguments for various VFS I/O interfaces.
 */
struct vvp_io_args {
	/** normal/splice */
	enum vvp_io_subtype	via_io_subtype;

	union {
		struct {
			struct kiocb		*via_iocb;
			struct iov_iter		*via_iter;
		} normal;
		struct {
			struct pipe_inode_info	*via_pipe;
			unsigned int		via_flags;
		} splice;
	} u;
};
struct ll_cl_context {
	void		*lcc_cookie;
	struct cl_io	*lcc_io;
	struct cl_page	*lcc_page;
	struct lu_env	*lcc_env;
	int		lcc_refcheck;
};
struct vvp_thread_info {
	struct iovec		vti_local_iov;
	struct vvp_io_args	vti_args;
	struct ra_io_arg	vti_ria;
	struct kiocb		vti_kiocb;
	struct ll_cl_context	vti_io_ctx;
};
static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
{
	extern struct lu_context_key vvp_key;
	struct vvp_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &vvp_key);
	LASSERT(info != NULL);
	return info;
}
static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env,
					       enum vvp_io_subtype type)
{
	struct vvp_io_args *ret = &vvp_env_info(env)->vti_args;

	ret->via_io_subtype = type;
	return ret;
}
struct vvp_session {
	struct vvp_io vs_ios;
};
static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
{
	extern struct lu_context_key vvp_session_key;
	struct vvp_session *ses;

	ses = lu_context_key_get(env->le_ses, &vvp_session_key);
	LASSERT(ses != NULL);
	return ses;
}
static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
{
	return &vvp_env_session(env)->vs_ios;
}
int vvp_global_init(void);
void vvp_global_fini(void);

void ll_queue_done_writing(struct inode *inode, unsigned long flags);
void ll_close_thread_shutdown(struct ll_close_queue *lcq);
int ll_close_thread_start(struct ll_close_queue **lcq_ret);
/* llite/llite_mmap.c */

int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
int ll_file_mmap(struct file *file, struct vm_area_struct *vma);
void policy_from_vma(ldlm_policy_data_t *policy,
		     struct vm_area_struct *vma, unsigned long addr,
		     size_t count);
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
			       size_t count);
static inline void ll_invalidate_page(struct page *vmpage)
{
	struct address_space *mapping = vmpage->mapping;
	loff_t offset = (loff_t)vmpage->index << PAGE_CACHE_SHIFT;

	LASSERT(PageLocked(vmpage));
	if (mapping == NULL)
		return;

	ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
	truncate_complete_page(mapping, vmpage);
}
#define ll_s2sbi(sb)	(s2lsi(sb)->lsi_llsbi)

/* don't need an addref as the sb_info should be holding one */
static inline struct obd_export *ll_s2dtexp(struct super_block *sb)
{
	return ll_s2sbi(sb)->ll_dt_exp;
}

/* don't need an addref as the sb_info should be holding one */
static inline struct obd_export *ll_s2mdexp(struct super_block *sb)
{
	return ll_s2sbi(sb)->ll_md_exp;
}
static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi)
{
	struct obd_device *obd = sbi->ll_md_exp->exp_obd;

	if (obd == NULL)
		LBUG();
	return &obd->u.cli;
}
// FIXME: replace the name of this with LL_SB to conform to kernel stuff
static inline struct ll_sb_info *ll_i2sbi(struct inode *inode)
{
	return ll_s2sbi(inode->i_sb);
}

static inline struct obd_export *ll_i2dtexp(struct inode *inode)
{
	return ll_s2dtexp(inode->i_sb);
}

static inline struct obd_export *ll_i2mdexp(struct inode *inode)
{
	return ll_s2mdexp(inode->i_sb);
}
static inline struct lu_fid *ll_inode2fid(struct inode *inode)
{
	struct lu_fid *fid;

	LASSERT(inode != NULL);
	fid = &ll_i2info(inode)->lli_fid;

	return fid;
}
static inline __u64 ll_file_maxbytes(struct inode *inode)
{
	return ll_i2info(inode)->lli_maxbytes;
}
/* llite/xattr.c */
int ll_setxattr(struct dentry *dentry, const char *name,
		const void *value, size_t size, int flags);
ssize_t ll_getxattr(struct dentry *dentry, const char *name,
		    void *buffer, size_t size);
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ll_removexattr(struct dentry *dentry, const char *name);
/* llite/remote_perm.c */
extern struct kmem_cache *ll_remote_perm_cachep;
extern struct kmem_cache *ll_rmtperm_hash_cachep;

void free_rmtperm_hash(struct hlist_head *hash);
int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
int lustre_check_remote_perm(struct inode *inode, int mask);
/* llite/llite_capa.c */
extern struct timer_list ll_capa_timer;

int ll_capa_thread_start(void);
void ll_capa_thread_stop(void);
void ll_capa_timer_callback(unsigned long unused);

struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa);

void ll_capa_open(struct inode *inode);
void ll_capa_close(struct inode *inode);

struct obd_capa *ll_mdscapa_get(struct inode *inode);
struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc);

void ll_truncate_free_capa(struct obd_capa *ocapa);
void ll_clear_inode_capas(struct inode *inode);
void ll_print_capa_stat(struct ll_sb_info *sbi);
/* llite/llite_cl.c */
extern struct lu_device_type vvp_device_type;

/**
 * Common IO arguments for various VFS I/O interfaces.
 */
int cl_sb_init(struct super_block *sb);
int cl_sb_fini(struct super_block *sb);
void ll_io_init(struct cl_io *io, const struct file *file, int write);

void ras_update(struct ll_sb_info *sbi, struct inode *inode,
		struct ll_readahead_state *ras, unsigned long index,
		unsigned hit);
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which);
/* llite/llite_rmtacl.c */
#ifdef CONFIG_FS_POSIX_ACL
struct eacl_entry {
	struct list_head	ee_list;
	pid_t			ee_key;	/* hash key */
	struct lu_fid		ee_fid;
	int			ee_type; /* ACL type for ACCESS or DEFAULT */
	ext_acl_xattr_header	*ee_acl;
};

obd_valid rce_ops2valid(int ops);
struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key);
int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops);
int rct_del(struct rmtacl_ctl_table *rct, pid_t key);
void rct_init(struct rmtacl_ctl_table *rct);
void rct_fini(struct rmtacl_ctl_table *rct);

void ee_free(struct eacl_entry *ee);
int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
	   ext_acl_xattr_header *header);
struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
				 struct lu_fid *fid, int type);
void et_search_free(struct eacl_table *et, pid_t key);
void et_init(struct eacl_table *et);
void et_fini(struct eacl_table *et);
#else
static inline obd_valid rce_ops2valid(int ops)
{
	return 0;
}
#endif
/* llite/statahead.c */
#define LL_SA_RPC_MIN		2
#define LL_SA_RPC_DEF		32
#define LL_SA_RPC_MAX		8192

#define LL_SA_CACHE_BIT		5
#define LL_SA_CACHE_SIZE	(1 << LL_SA_CACHE_BIT)
#define LL_SA_CACHE_MASK	(LL_SA_CACHE_SIZE - 1)
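
/*
 * Worked example (sketch): with LL_SA_CACHE_BIT = 5, the statahead entry
 * cache has LL_SA_CACHE_SIZE = 1 << 5 = 32 buckets and LL_SA_CACHE_MASK
 * = 31, so a bucket index is typically computed as
 * "hash & LL_SA_CACHE_MASK"; see the sai_cache[] and sai_cache_lock[]
 * arrays in struct ll_statahead_info below.
 */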
/* per inode struct, for dir only */
struct ll_statahead_info {
	struct inode		*sai_inode;
	atomic_t		sai_refcount;	/* when accessing this struct,
						 * hold a refcount */
	unsigned int		sai_generation;	/* generation for statahead */
	unsigned int		sai_max;	/* max ahead of lookup */
	__u64			sai_sent;	/* stat requests sent count */
	__u64			sai_replied;	/* stat requests which received
						 * a reply */
	__u64			sai_index;	/* index of statahead entry */
	__u64			sai_index_wait;	/* index of the entry the
						 * caller is waiting for */
	__u64			sai_hit;	/* hit count */
	__u64			sai_miss;	/* miss count:
						 * for the "ls -al" case, it
						 * includes hidden dentry
						 * misses;
						 * for the "ls -l" case, it
						 * does not include hidden
						 * dentry misses.
						 * "sai_miss_hidden" is used
						 * for the latter case. */
	unsigned int		sai_consecutive_miss; /* consecutive miss */
	unsigned int		sai_miss_hidden;/* "ls -al", but first dentry
						 * is not a hidden one */
	unsigned int		sai_skip_hidden;/* skipped hidden dentry count */
	unsigned int		sai_ls_all:1,	/* "ls -al", do stat-ahead for
						 * hidden entries */
				sai_agl_valid:1;/* AGL is valid for the dir */
	wait_queue_head_t	sai_waitq;	/* stat-ahead wait queue */
	struct ptlrpc_thread	sai_thread;	/* stat-ahead thread */
	struct ptlrpc_thread	sai_agl_thread;	/* AGL thread */
	struct list_head	sai_entries;	/* entry list */
	struct list_head	sai_entries_received;	/* entries returned */
	struct list_head	sai_entries_stated;	/* entries stated */
	struct list_head	sai_entries_agl; /* AGL entries to be sent */
	struct list_head	sai_cache[LL_SA_CACHE_SIZE];
	spinlock_t		sai_cache_lock[LL_SA_CACHE_SIZE];
	atomic_t		sai_cache_count; /* entry count in cache */
};
int do_statahead_enter(struct inode *dir, struct dentry **dentry,
		       int only_unplug);
void ll_stop_statahead(struct inode *dir, void *key);
static inline int ll_glimpse_size(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	int rc;

	down_read(&lli->lli_glimpse_sem);
	rc = cl_glimpse_size(inode);
	lli->lli_glimpse_time = cfs_time_current();
	up_read(&lli->lli_glimpse_sem);
	return rc;
}
static inline void
ll_statahead_mark(struct inode *dir, struct dentry *dentry)
{
	struct ll_inode_info     *lli = ll_i2info(dir);
	struct ll_statahead_info *sai = lli->lli_sai;
	struct ll_dentry_data    *ldd = ll_d2d(dentry);

	/* not the same process, don't mark */
	if (lli->lli_opendir_pid != current_pid())
		return;

	LASSERT(ldd != NULL);
	if (sai != NULL)
		ldd->lld_sa_generation = sai->sai_generation;
}
static inline int
d_need_statahead(struct inode *dir, struct dentry *dentryp)
{
	struct ll_inode_info  *lli;
	struct ll_dentry_data *ldd;

	if (ll_i2sbi(dir)->ll_sa_max == 0)
		return -EAGAIN;

	lli = ll_i2info(dir);
	/* not the same process, don't statahead */
	if (lli->lli_opendir_pid != current_pid())
		return -EAGAIN;

	/* statahead has been stopped */
	if (lli->lli_opendir_key == NULL)
		return -EAGAIN;

	ldd = ll_d2d(dentryp);
	/*
	 * When stat()ing a dentry, the system triggers "revalidate" or
	 * "lookup" more than once: for "getattr", for "getxattr", and maybe
	 * for others. Under patchless client mode, the operation intent is
	 * not accurate, which may misguide the statahead thread. For
	 * example: the "revalidate" calls for "getattr" and "getxattr" of a
	 * dentry may carry the same operation intent -- "IT_GETATTR". In
	 * fact, one dentry should have only one chance to interact with the
	 * statahead thread, otherwise the statahead window will be confused.
	 * The solution is as follows: assign "lld_sa_generation" from
	 * "sai_generation" when a dentry triggers "IT_GETATTR" for the first
	 * time, so that subsequent "IT_GETATTR" requests bypass interaction
	 * with the statahead thread by checking
	 * "lld_sa_generation == lli_sai->sai_generation".
	 */
	if (ldd && lli->lli_sai &&
	    ldd->lld_sa_generation == lli->lli_sai->sai_generation)
		return -EAGAIN;

	return 1;
}
static inline int
ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int only_unplug)
{
	int ret;

	ret = d_need_statahead(dir, *dentryp);
	if (ret <= 0)
		return ret;

	return do_statahead_enter(dir, dentryp, only_unplug);
}
/* llite ioctl register support routine */
enum llioc_iter {
	LLIOC_CONT = 0,
	LLIOC_STOP
};

#define LLIOC_MAX_CMD	256
/*
 * Rules for writing a callback function:
 *
 * Parameters:
 *  @magic: The dynamic ioctl call routine will feed this value with the
 *      pointer returned by ll_iocontrol_register. Callback functions should
 *      use this data to check for a potential collision of ioctl cmds. If a
 *      collision is found, the callback function should return LLIOC_CONT.
 *  @rcp: The result of the ioctl command.
 *
 * Return values:
 *      If @magic matches the pointer returned by ll_iocontrol_register, the
 *      callback should return LLIOC_STOP; return LLIOC_CONT otherwise.
 */
typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode,
		struct file *file, unsigned int cmd, unsigned long arg,
		void *magic, int *rcp);
/* export functions */
/* Register an ioctl block dynamically for a regular file.
 *
 * @cmd: the array of ioctl command set
 * @count: number of commands in the @cmd
 * @cb: callback function, it will be called if an ioctl command is found to
 *      belong to the command list @cmd.
 *
 * Return value:
 *      A magic pointer will be returned on success;
 *      otherwise, NULL will be returned.
 */
void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
void ll_iocontrol_unregister(void *magic);
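
/*
 * Example usage (sketch only -- "my_magic", "my_cmds", "my_cb" and
 * "MY_IOC_CMD" are hypothetical names, not part of this header):
 *
 *	static void *my_magic;
 *
 *	static enum llioc_iter my_cb(struct inode *inode, struct file *file,
 *				     unsigned int cmd, unsigned long arg,
 *				     void *magic, int *rcp)
 *	{
 *		if (magic != my_magic)
 *			return LLIOC_CONT;	// cmd collision, pass it on
 *
 *		*rcp = 0;			// handle cmd, set the result
 *		return LLIOC_STOP;
 *	}
 *
 *	static unsigned int my_cmds[] = { MY_IOC_CMD };
 *
 *	// at init:  my_magic = ll_iocontrol_register(my_cb, 1, my_cmds);
 *	// at exit:  ll_iocontrol_unregister(my_magic);
 */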
/* lclient compat stuff */
#define cl_inode_info		ll_inode_info
#define cl_i2info(info)		ll_i2info(info)
#define cl_inode_mode(inode)	((inode)->i_mode)
#define cl_i2sbi		ll_i2sbi

static inline struct ll_file_data *cl_iattr2fd(struct inode *inode,
					       const struct iattr *attr)
{
	LASSERT(attr->ia_valid & ATTR_FILE);
	return LUSTRE_FPRIVATE(attr->ia_file);
}
static inline void cl_isize_lock(struct inode *inode)
{
	ll_inode_size_lock(inode);
}

static inline void cl_isize_unlock(struct inode *inode)
{
	ll_inode_size_unlock(inode);
}

static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms)
{
	LASSERT(mutex_is_locked(&ll_i2info(inode)->lli_size_mutex));
	i_size_write(inode, kms);
}

static inline void cl_isize_write(struct inode *inode, loff_t kms)
{
	ll_inode_size_lock(inode);
	i_size_write(inode, kms);
	ll_inode_size_unlock(inode);
}

#define cl_isize_read(inode)	i_size_read(inode)
static inline int cl_merge_lvb(const struct lu_env *env, struct inode *inode)
{
	return ll_merge_lvb(env, inode);
}

#define cl_inode_atime(inode)	LTIME_S((inode)->i_atime)
#define cl_inode_ctime(inode)	LTIME_S((inode)->i_ctime)
#define cl_inode_mtime(inode)	LTIME_S((inode)->i_mtime)
struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt);

int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
		       enum cl_fsync_mode mode, int ignore_layout);
/** direct write pages */
struct ll_dio_pages {
	/** page array to be written. we don't support
	 * partial pages except the last one. */
	struct page **ldp_pages;
	/* offset of each page */
	loff_t *ldp_offsets;
	/** if ldp_offsets is NULL, it means sequential
	 * pages are to be written, and this is the file
	 * offset of the first page. */
	loff_t ldp_start_offset;
	/** how many bytes are to be written. */
	size_t ldp_size;
	/** # of pages in the array. */
	int ldp_nr;
};
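
/*
 * Example (sketch only; the ldp_size/ldp_nr member types shown above are
 * reconstructed assumptions): describing a sequential direct write of "nr"
 * whole pages starting at file offset "pos":
 *
 *	struct ll_dio_pages pv = {
 *		.ldp_pages        = pages,	// struct page *pages[nr]
 *		.ldp_offsets      = NULL,	// NULL => sequential run
 *		.ldp_start_offset = pos,
 *		.ldp_nr           = nr,
 *		.ldp_size         = nr << PAGE_CACHE_SHIFT,
 *	};
 *
 *	// then, e.g.: ll_direct_rw_pages(env, io, WRITE, inode, &pv);
 */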
static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
				  int rc)
{
	int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
				      LPROC_LL_OSC_WRITE;

	ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc);
}
extern ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
				  int rw, struct inode *inode,
				  struct ll_dio_pages *pv);
static inline int ll_file_nolock(const struct file *file)
{
	struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
	struct inode *inode = file->f_dentry->d_inode;

	LASSERT(fd != NULL);
	return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
		(ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
}
static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
				    struct lookup_intent *it, __u64 *bits)
{
	if (!it->d.lustre.it_lock_set) {
		struct lustre_handle handle;

		/* If this inode is a remote object, it will get two
		 * separate locks in different namespaces: the master MDT,
		 * where the name entry is, will grant a LOOKUP lock, and
		 * the remote MDT, where the object is, will grant an
		 * UPDATE|PERM lock. The inode will be attached to both
		 * LOOKUP and PERM locks, so revoking either lock will
		 * cause the dcache to be cleared. */
		if (it->d.lustre.it_remote_lock_mode) {
			handle.cookie = it->d.lustre.it_remote_lock_handle;
			CDEBUG(D_DLMTRACE, "setting l_data to inode %p"
			       "(%lu/%u) for remote lock "LPX64"\n", inode,
			       inode->i_ino, inode->i_generation,
			       handle.cookie);
			md_set_lock_data(exp, &handle.cookie, inode, NULL);
		}

		handle.cookie = it->d.lustre.it_lock_handle;

		CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)"
		       " for lock "LPX64"\n", inode, inode->i_ino,
		       inode->i_generation, handle.cookie);

		md_set_lock_data(exp, &handle.cookie, inode,
				 &it->d.lustre.it_lock_bits);
		it->d.lustre.it_lock_set = 1;
	}

	if (bits != NULL)
		*bits = it->d.lustre.it_lock_bits;
}
static inline void ll_lock_dcache(struct inode *inode)
{
	spin_lock(&inode->i_lock);
}

static inline void ll_unlock_dcache(struct inode *inode)
{
	spin_unlock(&inode->i_lock);
}
static inline int d_lustre_invalid(const struct dentry *dentry)
{
	struct ll_dentry_data *lld = ll_d2d(dentry);

	return (lld == NULL) || lld->lld_invalid;
}

static inline void __d_lustre_invalidate(struct dentry *dentry)
{
	struct ll_dentry_data *lld = ll_d2d(dentry);

	if (lld != NULL)
		lld->lld_invalid = 1;
}
/*
 * Mark dentry INVALID. If the dentry refcount is zero (this is normally the
 * case for ll_md_blocking_ast), unhash this dentry and let the dcache reclaim
 * it later; otherwise, dput() of the last refcount will unhash this dentry
 * and kill it.
 */
static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
{
	CDEBUG(D_DENTRY, "invalidate dentry %.*s (%p) parent %p inode %p "
	       "refc %d\n", dentry->d_name.len, dentry->d_name.name, dentry,
	       dentry->d_parent, dentry->d_inode, d_count(dentry));

	spin_lock_nested(&dentry->d_lock,
			 nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
	__d_lustre_invalidate(dentry);
	if (d_count(dentry) == 0)
		__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
static inline void d_lustre_revalidate(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	LASSERT(ll_d2d(dentry) != NULL);
	ll_d2d(dentry)->lld_invalid = 0;
	spin_unlock(&dentry->d_lock);
}
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
/* Compatibility for old (1.8) compiled userspace quota code */
struct if_quotactl_18 {
	__u32			qc_cmd;
	__u32			qc_type;
	__u32			qc_id;
	__u32			qc_stat;
	struct obd_dqinfo	qc_dqinfo;
	struct obd_dqblk	qc_dqblk;
	char			obd_type[16];
	struct obd_uuid		obd_uuid;
};
#define LL_IOC_QUOTACTL_18	_IOWR('f', 162, struct if_quotactl_18 *)
/* End compatibility for old (1.8) compiled userspace quota code */
#else
#warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
#endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0) */
enum {
	LL_LAYOUT_GEN_NONE  = ((__u32)-2),	/* layout lock was cancelled */
	LL_LAYOUT_GEN_EMPTY = ((__u32)-1)	/* for empty layout */
};
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
int ll_layout_refresh(struct inode *inode, __u32 *gen);
int ll_layout_restore(struct inode *inode);

int ll_xattr_init(void);
void ll_xattr_fini(void);

#endif /* LLITE_INTERNAL_H */