4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/llite/llite_lib.c
34 * Lustre Light Super operations
37 #define DEBUG_SUBSYSTEM S_LLITE
39 #include <linux/module.h>
40 #include <linux/statfs.h>
41 #include <linux/types.h>
44 #include "../include/lustre/lustre_ioctl.h"
45 #include "../include/lustre_lite.h"
46 #include "../include/lustre_ha.h"
47 #include "../include/lustre_dlm.h"
48 #include "../include/lprocfs_status.h"
49 #include "../include/lustre_disk.h"
50 #include "../include/lustre_param.h"
51 #include "../include/lustre_log.h"
52 #include "../include/cl_object.h"
53 #include "../include/obd_cksum.h"
54 #include "llite_internal.h"
/*
 * File-scope globals for the llite (Lustre Light client) layer.
 * NOTE(review): this chunk is a damaged extraction — statements are split
 * across lines and surrounding file content is missing.
 */
/* slab cache — presumably for per-open-file ll_file_data objects; its
 * allocation/use is not visible in this chunk, confirm against upstream. */
56 struct kmem_cache
*ll_file_data_slab
;
/* debugfs root for llite; passed to ldebugfs_register_mountpoint() below */
57 struct dentry
*llite_root
;
/* sysfs kset for llite objects — no use visible in this chunk */
58 struct kset
*llite_kset
;
/*
 * log2(n): for a power-of-two n, the index of its single set bit,
 * computed as ffz(~(n)) (first zero bit of the complement).  Used below
 * to derive sb->s_blocksize_bits from the power-of-two os_bsize.
 * NOTE(review): intentionally shadows the C library log2() in this
 * kernel context.
 */
61 #define log2(n) ffz(~(n))
64 static struct ll_sb_info
*ll_init_sbi(struct super_block
*sb
)
66 struct ll_sb_info
*sbi
= NULL
;
68 unsigned long lru_page_max
;
73 sbi
= kzalloc(sizeof(*sbi
), GFP_NOFS
);
77 spin_lock_init(&sbi
->ll_lock
);
78 mutex_init(&sbi
->ll_lco
.lco_lock
);
79 spin_lock_init(&sbi
->ll_pp_extent_lock
);
80 spin_lock_init(&sbi
->ll_process_lock
);
81 sbi
->ll_rw_stats_on
= 0;
84 pages
= si
.totalram
- si
.totalhigh
;
85 lru_page_max
= pages
/ 2;
87 sbi
->ll_cache
= cl_cache_init(lru_page_max
);
93 sbi
->ll_ra_info
.ra_max_pages_per_file
= min(pages
/ 32,
94 SBI_DEFAULT_READAHEAD_MAX
);
95 sbi
->ll_ra_info
.ra_max_pages
= sbi
->ll_ra_info
.ra_max_pages_per_file
;
96 sbi
->ll_ra_info
.ra_max_read_ahead_whole_pages
=
97 SBI_DEFAULT_READAHEAD_WHOLE_MAX
;
99 ll_generate_random_uuid(uuid
);
100 class_uuid_unparse(uuid
, &sbi
->ll_sb_uuid
);
101 CDEBUG(D_CONFIG
, "generated uuid: %s\n", sbi
->ll_sb_uuid
.uuid
);
103 sbi
->ll_flags
|= LL_SBI_VERBOSE
;
104 sbi
->ll_flags
|= LL_SBI_CHECKSUM
;
106 sbi
->ll_flags
|= LL_SBI_LRU_RESIZE
;
108 for (i
= 0; i
<= LL_PROCESS_HIST_MAX
; i
++) {
109 spin_lock_init(&sbi
->ll_rw_extents_info
.pp_extents
[i
].
111 spin_lock_init(&sbi
->ll_rw_extents_info
.pp_extents
[i
].
115 /* metadata statahead is enabled by default */
116 sbi
->ll_sa_max
= LL_SA_RPC_DEF
;
117 atomic_set(&sbi
->ll_sa_total
, 0);
118 atomic_set(&sbi
->ll_sa_wrong
, 0);
119 atomic_set(&sbi
->ll_agl_total
, 0);
120 sbi
->ll_flags
|= LL_SBI_AGL_ENABLED
;
/*
 * ll_free_sbi(): tear down the ll_sb_info attached to superblock @sb.
 * NOTE(review): braces, any NULL guards and the final free of @sbi are
 * missing from this extraction — only the cache release survives.
 */
127 static void ll_free_sbi(struct super_block
*sb
)
129 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
/* drop this mount's reference on the client cache created by
 * cl_cache_init() in ll_init_sbi() */
132 cl_cache_decref(sbi
->ll_cache
);
133 sbi
->ll_cache
= NULL
;
139 static int client_common_fill_super(struct super_block
*sb
, char *md
, char *dt
,
140 struct vfsmount
*mnt
)
142 struct inode
*root
= NULL
;
143 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
144 struct obd_device
*obd
;
145 struct obd_statfs
*osfs
= NULL
;
146 struct ptlrpc_request
*request
= NULL
;
147 struct obd_connect_data
*data
= NULL
;
148 struct obd_uuid
*uuid
;
149 struct md_op_data
*op_data
;
150 struct lustre_md lmd
;
152 int size
, err
, checksum
;
154 obd
= class_name2obd(md
);
156 CERROR("MD %s: not setup or attached\n", md
);
160 data
= kzalloc(sizeof(*data
), GFP_NOFS
);
164 osfs
= kzalloc(sizeof(*osfs
), GFP_NOFS
);
170 /* indicate the features supported by this client */
171 data
->ocd_connect_flags
= OBD_CONNECT_IBITS
| OBD_CONNECT_NODEVOH
|
172 OBD_CONNECT_ATTRFID
|
173 OBD_CONNECT_VERSION
| OBD_CONNECT_BRW_SIZE
|
174 OBD_CONNECT_CANCELSET
| OBD_CONNECT_FID
|
175 OBD_CONNECT_AT
| OBD_CONNECT_LOV_V3
|
176 OBD_CONNECT_VBR
| OBD_CONNECT_FULL20
|
177 OBD_CONNECT_64BITHASH
|
178 OBD_CONNECT_EINPROGRESS
|
179 OBD_CONNECT_JOBSTATS
| OBD_CONNECT_LVB_TYPE
|
180 OBD_CONNECT_LAYOUTLOCK
|
181 OBD_CONNECT_PINGLESS
|
182 OBD_CONNECT_MAX_EASIZE
|
183 OBD_CONNECT_FLOCK_DEAD
|
184 OBD_CONNECT_DISP_STRIPE
;
186 if (sbi
->ll_flags
& LL_SBI_SOM_PREVIEW
)
187 data
->ocd_connect_flags
|= OBD_CONNECT_SOM
;
189 if (sbi
->ll_flags
& LL_SBI_LRU_RESIZE
)
190 data
->ocd_connect_flags
|= OBD_CONNECT_LRU_RESIZE
;
191 #ifdef CONFIG_FS_POSIX_ACL
192 data
->ocd_connect_flags
|= OBD_CONNECT_ACL
| OBD_CONNECT_UMASK
;
195 if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT
))
196 /* flag mdc connection as lightweight, only used for test
197 * purpose, use with care
199 data
->ocd_connect_flags
|= OBD_CONNECT_LIGHTWEIGHT
;
201 data
->ocd_ibits_known
= MDS_INODELOCK_FULL
;
202 data
->ocd_version
= LUSTRE_VERSION_CODE
;
204 if (sb
->s_flags
& MS_RDONLY
)
205 data
->ocd_connect_flags
|= OBD_CONNECT_RDONLY
;
206 if (sbi
->ll_flags
& LL_SBI_USER_XATTR
)
207 data
->ocd_connect_flags
|= OBD_CONNECT_XATTR
;
209 if (sbi
->ll_flags
& LL_SBI_FLOCK
)
210 sbi
->ll_fop
= &ll_file_operations_flock
;
211 else if (sbi
->ll_flags
& LL_SBI_LOCALFLOCK
)
212 sbi
->ll_fop
= &ll_file_operations
;
214 sbi
->ll_fop
= &ll_file_operations_noflock
;
217 data
->ocd_connect_flags
|= OBD_CONNECT_REAL
;
219 data
->ocd_brw_size
= MD_MAX_BRW_SIZE
;
221 err
= obd_connect(NULL
, &sbi
->ll_md_exp
, obd
, &sbi
->ll_sb_uuid
,
224 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
228 CERROR("cannot connect to %s: rc = %d\n", md
, err
);
232 sbi
->ll_md_exp
->exp_connect_data
= *data
;
234 err
= obd_fid_init(sbi
->ll_md_exp
->exp_obd
, sbi
->ll_md_exp
,
235 LUSTRE_SEQ_METADATA
);
237 CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
238 sbi
->ll_md_exp
->exp_obd
->obd_name
, err
);
242 /* For mount, we only need fs info from MDT0, and also in DNE, it
243 * can make sure the client can be mounted as long as MDT0 is
246 err
= obd_statfs(NULL
, sbi
->ll_md_exp
, osfs
,
247 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS
),
248 OBD_STATFS_FOR_MDT0
);
252 /* This needs to be after statfs to ensure connect has finished.
253 * Note that "data" does NOT contain the valid connect reply.
254 * If connecting to a 1.8 server there will be no LMV device, so
255 * we can access the MDC export directly and exp_connect_flags will
256 * be non-zero, but if accessing an upgraded 2.1 server it will
257 * have the correct flags filled in.
258 * XXX: fill in the LMV exp_connect_flags from MDC(s).
260 valid
= exp_connect_flags(sbi
->ll_md_exp
) & CLIENT_CONNECT_MDT_REQD
;
261 if (exp_connect_flags(sbi
->ll_md_exp
) != 0 &&
262 valid
!= CLIENT_CONNECT_MDT_REQD
) {
265 buf
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
270 obd_connect_flags2str(buf
, PAGE_SIZE
,
271 valid
^ CLIENT_CONNECT_MDT_REQD
, ",");
272 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
273 sbi
->ll_md_exp
->exp_obd
->obd_name
, buf
);
279 size
= sizeof(*data
);
280 err
= obd_get_info(NULL
, sbi
->ll_md_exp
, sizeof(KEY_CONN_DATA
),
281 KEY_CONN_DATA
, &size
, data
, NULL
);
283 CERROR("%s: Get connect data failed: rc = %d\n",
284 sbi
->ll_md_exp
->exp_obd
->obd_name
, err
);
288 LASSERT(osfs
->os_bsize
);
289 sb
->s_blocksize
= osfs
->os_bsize
;
290 sb
->s_blocksize_bits
= log2(osfs
->os_bsize
);
291 sb
->s_magic
= LL_SUPER_MAGIC
;
292 sb
->s_maxbytes
= MAX_LFS_FILESIZE
;
293 sbi
->ll_namelen
= osfs
->os_namelen
;
295 if ((sbi
->ll_flags
& LL_SBI_USER_XATTR
) &&
296 !(data
->ocd_connect_flags
& OBD_CONNECT_XATTR
)) {
297 LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
298 sbi
->ll_flags
&= ~LL_SBI_USER_XATTR
;
301 if (data
->ocd_connect_flags
& OBD_CONNECT_ACL
) {
302 sb
->s_flags
|= MS_POSIXACL
;
303 sbi
->ll_flags
|= LL_SBI_ACL
;
305 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
306 sb
->s_flags
&= ~MS_POSIXACL
;
307 sbi
->ll_flags
&= ~LL_SBI_ACL
;
310 if (data
->ocd_connect_flags
& OBD_CONNECT_64BITHASH
)
311 sbi
->ll_flags
|= LL_SBI_64BIT_HASH
;
313 if (data
->ocd_connect_flags
& OBD_CONNECT_BRW_SIZE
)
314 sbi
->ll_md_brw_size
= data
->ocd_brw_size
;
316 sbi
->ll_md_brw_size
= PAGE_SIZE
;
318 if (data
->ocd_connect_flags
& OBD_CONNECT_LAYOUTLOCK
)
319 sbi
->ll_flags
|= LL_SBI_LAYOUT_LOCK
;
321 if (data
->ocd_ibits_known
& MDS_INODELOCK_XATTR
) {
322 if (!(data
->ocd_connect_flags
& OBD_CONNECT_MAX_EASIZE
)) {
324 "%s: disabling xattr cache due to unknown maximum xattr size.\n",
327 sbi
->ll_flags
|= LL_SBI_XATTR_CACHE
;
328 sbi
->ll_xattr_cache_enabled
= 1;
332 obd
= class_name2obd(dt
);
334 CERROR("DT %s: not setup or attached\n", dt
);
339 data
->ocd_connect_flags
= OBD_CONNECT_GRANT
| OBD_CONNECT_VERSION
|
340 OBD_CONNECT_REQPORTAL
| OBD_CONNECT_BRW_SIZE
|
341 OBD_CONNECT_CANCELSET
| OBD_CONNECT_FID
|
342 OBD_CONNECT_SRVLOCK
| OBD_CONNECT_TRUNCLOCK
|
343 OBD_CONNECT_AT
| OBD_CONNECT_OSS_CAPA
|
344 OBD_CONNECT_VBR
| OBD_CONNECT_FULL20
|
345 OBD_CONNECT_64BITHASH
| OBD_CONNECT_MAXBYTES
|
346 OBD_CONNECT_EINPROGRESS
|
347 OBD_CONNECT_JOBSTATS
| OBD_CONNECT_LVB_TYPE
|
348 OBD_CONNECT_LAYOUTLOCK
| OBD_CONNECT_PINGLESS
;
350 if (sbi
->ll_flags
& LL_SBI_SOM_PREVIEW
)
351 data
->ocd_connect_flags
|= OBD_CONNECT_SOM
;
353 if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM
)) {
354 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
355 * disabled by default, because it can still be enabled on the
356 * fly via /sys. As a consequence, we still need to come to an
357 * agreement on the supported algorithms at connect time
359 data
->ocd_connect_flags
|= OBD_CONNECT_CKSUM
;
361 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY
))
362 data
->ocd_cksum_types
= OBD_CKSUM_ADLER
;
364 data
->ocd_cksum_types
= cksum_types_supported_client();
367 data
->ocd_connect_flags
|= OBD_CONNECT_LRU_RESIZE
;
369 CDEBUG(D_RPCTRACE
, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
370 data
->ocd_connect_flags
,
371 data
->ocd_version
, data
->ocd_grant
);
373 obd
->obd_upcall
.onu_owner
= &sbi
->ll_lco
;
374 obd
->obd_upcall
.onu_upcall
= cl_ocd_update
;
376 data
->ocd_brw_size
= DT_MAX_BRW_SIZE
;
378 err
= obd_connect(NULL
, &sbi
->ll_dt_exp
, obd
, &sbi
->ll_sb_uuid
, data
,
381 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
385 CERROR("%s: Cannot connect to %s: rc = %d\n",
386 sbi
->ll_dt_exp
->exp_obd
->obd_name
, dt
, err
);
390 sbi
->ll_dt_exp
->exp_connect_data
= *data
;
392 err
= obd_fid_init(sbi
->ll_dt_exp
->exp_obd
, sbi
->ll_dt_exp
,
393 LUSTRE_SEQ_METADATA
);
395 CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
396 sbi
->ll_dt_exp
->exp_obd
->obd_name
, err
);
400 mutex_lock(&sbi
->ll_lco
.lco_lock
);
401 sbi
->ll_lco
.lco_flags
= data
->ocd_connect_flags
;
402 sbi
->ll_lco
.lco_md_exp
= sbi
->ll_md_exp
;
403 sbi
->ll_lco
.lco_dt_exp
= sbi
->ll_dt_exp
;
404 mutex_unlock(&sbi
->ll_lco
.lco_lock
);
406 fid_zero(&sbi
->ll_root_fid
);
407 err
= md_getstatus(sbi
->ll_md_exp
, &sbi
->ll_root_fid
);
409 CERROR("cannot mds_connect: rc = %d\n", err
);
412 if (!fid_is_sane(&sbi
->ll_root_fid
)) {
413 CERROR("%s: Invalid root fid "DFID
" during mount\n",
414 sbi
->ll_md_exp
->exp_obd
->obd_name
,
415 PFID(&sbi
->ll_root_fid
));
419 CDEBUG(D_SUPER
, "rootfid "DFID
"\n", PFID(&sbi
->ll_root_fid
));
421 sb
->s_op
= &lustre_super_operations
;
422 sb
->s_xattr
= ll_xattr_handlers
;
423 #if THREAD_SIZE >= 8192 /*b=17630*/
424 sb
->s_export_op
= &lustre_export_operations
;
428 * XXX: move this to after cbd setup?
430 valid
= OBD_MD_FLGETATTR
| OBD_MD_FLBLOCKS
| OBD_MD_FLMODEASIZE
;
431 if (sbi
->ll_flags
& LL_SBI_ACL
)
432 valid
|= OBD_MD_FLACL
;
434 op_data
= kzalloc(sizeof(*op_data
), GFP_NOFS
);
440 op_data
->op_fid1
= sbi
->ll_root_fid
;
441 op_data
->op_mode
= 0;
442 op_data
->op_valid
= valid
;
444 err
= md_getattr(sbi
->ll_md_exp
, op_data
, &request
);
447 CERROR("%s: md_getattr failed for root: rc = %d\n",
448 sbi
->ll_md_exp
->exp_obd
->obd_name
, err
);
452 err
= md_get_lustre_md(sbi
->ll_md_exp
, request
, sbi
->ll_dt_exp
,
453 sbi
->ll_md_exp
, &lmd
);
455 CERROR("failed to understand root inode md: rc = %d\n", err
);
456 ptlrpc_req_finished(request
);
460 LASSERT(fid_is_sane(&sbi
->ll_root_fid
));
461 root
= ll_iget(sb
, cl_fid_build_ino(&sbi
->ll_root_fid
,
462 sbi
->ll_flags
& LL_SBI_32BIT_API
),
464 md_free_lustre_md(sbi
->ll_md_exp
, &lmd
);
465 ptlrpc_req_finished(request
);
469 obd_free_memmd(sbi
->ll_dt_exp
, &lmd
.lsm
);
470 #ifdef CONFIG_FS_POSIX_ACL
472 posix_acl_release(lmd
.posix_acl
);
473 lmd
.posix_acl
= NULL
;
477 CERROR("lustre_lite: bad iget4 for root\n");
481 err
= ll_close_thread_start(&sbi
->ll_lcq
);
483 CERROR("cannot start close thread: rc %d\n", err
);
487 checksum
= sbi
->ll_flags
& LL_SBI_CHECKSUM
;
488 err
= obd_set_info_async(NULL
, sbi
->ll_dt_exp
, sizeof(KEY_CHECKSUM
),
489 KEY_CHECKSUM
, sizeof(checksum
), &checksum
,
493 err
= obd_set_info_async(NULL
, sbi
->ll_dt_exp
, sizeof(KEY_CACHE_SET
),
494 KEY_CACHE_SET
, sizeof(*sbi
->ll_cache
),
495 sbi
->ll_cache
, NULL
);
497 sb
->s_root
= d_make_root(root
);
499 CERROR("%s: can't make root dentry\n",
500 ll_get_fsname(sb
, NULL
, 0));
505 sbi
->ll_sdev_orig
= sb
->s_dev
;
507 /* We set sb->s_dev equal on all lustre clients in order to support
508 * NFS export clustering. NFSD requires that the FSID be the same
511 /* s_dev is also used in lt_compare() to compare two fs, but that is
512 * only a node-local comparison.
514 uuid
= obd_get_uuid(sbi
->ll_md_exp
);
516 sb
->s_dev
= get_uuid2int(uuid
->uuid
, strlen(uuid
->uuid
));
517 get_uuid2fsid(uuid
->uuid
, strlen(uuid
->uuid
), &sbi
->ll_fsid
);
524 err
= ldebugfs_register_mountpoint(llite_root
, sb
, dt
, md
);
526 CERROR("%s: could not register mount in debugfs: "
527 "rc = %d\n", ll_get_fsname(sb
, NULL
, 0), err
);
536 obd_fid_fini(sbi
->ll_dt_exp
->exp_obd
);
538 obd_disconnect(sbi
->ll_dt_exp
);
539 sbi
->ll_dt_exp
= NULL
;
541 obd_fid_fini(sbi
->ll_md_exp
->exp_obd
);
543 obd_disconnect(sbi
->ll_md_exp
);
544 sbi
->ll_md_exp
= NULL
;
/*
 * ll_get_max_mdsize(): store in *lmmsize the maximum metadata (EA) size.
 * Seeds *lmmsize from the data export's on-disk MD size, then queries the
 * MD export for KEY_MAX_EASIZE.
 * NOTE(review): the declarations of rc/size, the error test and the
 * return statement are missing from this extraction.
 */
551 int ll_get_max_mdsize(struct ll_sb_info
*sbi
, int *lmmsize
)
555 *lmmsize
= obd_size_diskmd(sbi
->ll_dt_exp
, NULL
);
557 rc
= obd_get_info(NULL
, sbi
->ll_md_exp
, sizeof(KEY_MAX_EASIZE
),
558 KEY_MAX_EASIZE
, &size
, lmmsize
, NULL
);
560 CERROR("Get max mdsize error rc %d\n", rc
);
/*
 * ll_get_default_mdsize(): store in *lmmsize the server default EA size,
 * queried from the MD export via KEY_DEFAULT_EASIZE.
 * NOTE(review): the declarations of rc/size, the error test and the
 * return statement are missing from this extraction.
 */
565 int ll_get_default_mdsize(struct ll_sb_info
*sbi
, int *lmmsize
)
570 rc
= obd_get_info(NULL
, sbi
->ll_md_exp
, sizeof(KEY_DEFAULT_EASIZE
),
571 KEY_DEFAULT_EASIZE
, &size
, lmmsize
, NULL
);
573 CERROR("Get default mdsize error rc %d\n", rc
);
/*
 * client_common_put_super(): undo client_common_fill_super() for @sb —
 * shut down the close thread, then tear down the data (DT) and metadata
 * (MD) connections symmetrically: obd_fid_fini(), obd_disconnect(), and
 * NULL the export pointer.  The debugfs mountpoint entry is removed
 * between the two teardowns.
 * NOTE(review): braces and any intervening lines are missing from this
 * extraction.
 */
578 static void client_common_put_super(struct super_block
*sb
)
580 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
582 ll_close_thread_shutdown(sbi
->ll_lcq
);
/* data (OST-side) export teardown */
586 obd_fid_fini(sbi
->ll_dt_exp
->exp_obd
);
587 obd_disconnect(sbi
->ll_dt_exp
);
588 sbi
->ll_dt_exp
= NULL
;
590 ldebugfs_unregister_mountpoint(sbi
);
/* metadata (MDT-side) export teardown */
592 obd_fid_fini(sbi
->ll_md_exp
->exp_obd
);
593 obd_disconnect(sbi
->ll_md_exp
);
594 sbi
->ll_md_exp
= NULL
;
/*
 * ll_kill_super(): unmount-time hook for @sb; bails out unless the
 * superblock is still active.
 * NOTE(review): the assignment of @sbi and the early-return body are
 * missing from this extraction.
 */
597 void ll_kill_super(struct super_block
*sb
)
599 struct ll_sb_info
*sbi
;
602 if (!(sb
->s_flags
& MS_ACTIVE
))
/*
 * Restore the original s_dev before put_super: mount overrides
 * sb->s_dev for clustered NFS export (see the get_uuid2int() call in
 * client_common_fill_super), and newer kernels cache s_dev, so the
 * override must be undone here rather than in put_super.
 */
611 sb
->s_dev
= sbi
->ll_sdev_orig
;
612 sbi
->ll_umounting
= 1;
/*
 * ll_set_opt(): test whether mount-option string @data begins with @opt.
 * Presumably returns flag @fl on a prefix match and 0 otherwise, to be
 * ORed into sbi->ll_flags by ll_options() — the return statements are
 * missing from this extraction, confirm against upstream.
 */
616 static inline int ll_set_opt(const char *opt
, char *data
, int fl
)
618 if (strncmp(opt
, data
, strlen(opt
)) != 0)
624 /* non-client-specific mount options are parsed in lmd_parse */
625 static int ll_options(char *options
, int *flags
)
628 char *s1
= options
, *s2
;
633 CDEBUG(D_CONFIG
, "Parsing opts %s\n", options
);
636 CDEBUG(D_SUPER
, "next opt=%s\n", s1
);
637 tmp
= ll_set_opt("nolock", s1
, LL_SBI_NOLCK
);
642 tmp
= ll_set_opt("flock", s1
, LL_SBI_FLOCK
);
647 tmp
= ll_set_opt("localflock", s1
, LL_SBI_LOCALFLOCK
);
652 tmp
= ll_set_opt("noflock", s1
, LL_SBI_FLOCK
|LL_SBI_LOCALFLOCK
);
657 tmp
= ll_set_opt("user_xattr", s1
, LL_SBI_USER_XATTR
);
662 tmp
= ll_set_opt("nouser_xattr", s1
, LL_SBI_USER_XATTR
);
667 tmp
= ll_set_opt("user_fid2path", s1
, LL_SBI_USER_FID2PATH
);
672 tmp
= ll_set_opt("nouser_fid2path", s1
, LL_SBI_USER_FID2PATH
);
678 tmp
= ll_set_opt("checksum", s1
, LL_SBI_CHECKSUM
);
683 tmp
= ll_set_opt("nochecksum", s1
, LL_SBI_CHECKSUM
);
688 tmp
= ll_set_opt("lruresize", s1
, LL_SBI_LRU_RESIZE
);
693 tmp
= ll_set_opt("nolruresize", s1
, LL_SBI_LRU_RESIZE
);
698 tmp
= ll_set_opt("lazystatfs", s1
, LL_SBI_LAZYSTATFS
);
703 tmp
= ll_set_opt("nolazystatfs", s1
, LL_SBI_LAZYSTATFS
);
708 tmp
= ll_set_opt("som_preview", s1
, LL_SBI_SOM_PREVIEW
);
713 tmp
= ll_set_opt("32bitapi", s1
, LL_SBI_32BIT_API
);
718 tmp
= ll_set_opt("verbose", s1
, LL_SBI_VERBOSE
);
723 tmp
= ll_set_opt("noverbose", s1
, LL_SBI_VERBOSE
);
728 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
734 s2
= strchr(s1
, ',');
742 void ll_lli_init(struct ll_inode_info
*lli
)
744 lli
->lli_inode_magic
= LLI_INODE_MAGIC
;
746 lli
->lli_ioepoch
= 0;
747 lli
->lli_maxbytes
= MAX_LFS_FILESIZE
;
748 spin_lock_init(&lli
->lli_lock
);
749 lli
->lli_posix_acl
= NULL
;
750 /* Do not set lli_fid, it has been initialized already. */
751 fid_zero(&lli
->lli_pfid
);
752 INIT_LIST_HEAD(&lli
->lli_close_list
);
753 lli
->lli_pending_och
= NULL
;
754 lli
->lli_mds_read_och
= NULL
;
755 lli
->lli_mds_write_och
= NULL
;
756 lli
->lli_mds_exec_och
= NULL
;
757 lli
->lli_open_fd_read_count
= 0;
758 lli
->lli_open_fd_write_count
= 0;
759 lli
->lli_open_fd_exec_count
= 0;
760 mutex_init(&lli
->lli_och_mutex
);
761 spin_lock_init(&lli
->lli_agl_lock
);
762 lli
->lli_has_smd
= false;
763 spin_lock_init(&lli
->lli_layout_lock
);
764 ll_layout_version_set(lli
, LL_LAYOUT_GEN_NONE
);
765 lli
->lli_clob
= NULL
;
767 init_rwsem(&lli
->lli_xattrs_list_rwsem
);
768 mutex_init(&lli
->lli_xattrs_enq_lock
);
770 LASSERT(lli
->lli_vfs_inode
.i_mode
!= 0);
771 if (S_ISDIR(lli
->lli_vfs_inode
.i_mode
)) {
772 mutex_init(&lli
->lli_readdir_mutex
);
773 lli
->lli_opendir_key
= NULL
;
775 spin_lock_init(&lli
->lli_sa_lock
);
776 lli
->lli_opendir_pid
= 0;
778 mutex_init(&lli
->lli_size_mutex
);
779 lli
->lli_symlink_name
= NULL
;
780 init_rwsem(&lli
->lli_trunc_sem
);
781 mutex_init(&lli
->lli_write_mutex
);
782 init_rwsem(&lli
->lli_glimpse_sem
);
783 lli
->lli_glimpse_time
= 0;
784 INIT_LIST_HEAD(&lli
->lli_agl_list
);
785 lli
->lli_agl_index
= 0;
786 lli
->lli_async_rc
= 0;
788 mutex_init(&lli
->lli_layout_mutex
);
/*
 * ll_bdi_register(): register @bdi under a unique "lustre-%d" name,
 * numbered from a function-local atomic counter so that multiple
 * concurrent mounts do not collide.  Returns bdi_register()'s result.
 */
791 static inline int ll_bdi_register(struct backing_dev_info
*bdi
)
793 static atomic_t ll_bdi_num
= ATOMIC_INIT(0);
795 bdi
->name
= "lustre";
796 return bdi_register(bdi
, NULL
, "lustre-%d",
797 atomic_inc_return(&ll_bdi_num
));
800 int ll_fill_super(struct super_block
*sb
, struct vfsmount
*mnt
)
802 struct lustre_profile
*lprof
= NULL
;
803 struct lustre_sb_info
*lsi
= s2lsi(sb
);
804 struct ll_sb_info
*sbi
;
805 char *dt
= NULL
, *md
= NULL
;
806 char *profilenm
= get_profile_name(sb
);
807 struct config_llog_instance
*cfg
;
810 CDEBUG(D_VFSTRACE
, "VFS Op: sb %p\n", sb
);
812 cfg
= kzalloc(sizeof(*cfg
), GFP_NOFS
);
816 try_module_get(THIS_MODULE
);
818 /* client additional sb info */
819 sbi
= ll_init_sbi(sb
);
820 lsi
->lsi_llsbi
= sbi
;
822 module_put(THIS_MODULE
);
827 err
= ll_options(lsi
->lsi_lmd
->lmd_opts
, &sbi
->ll_flags
);
831 err
= bdi_init(&lsi
->lsi_bdi
);
834 lsi
->lsi_flags
|= LSI_BDI_INITIALIZED
;
835 lsi
->lsi_bdi
.capabilities
= 0;
836 err
= ll_bdi_register(&lsi
->lsi_bdi
);
840 sb
->s_bdi
= &lsi
->lsi_bdi
;
841 /* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
842 sb
->s_d_op
= &ll_d_ops
;
844 /* Generate a string unique to this super, in case some joker tries
845 * to mount the same fs at two mount points.
846 * Use the address of the super itself.
848 cfg
->cfg_instance
= sb
;
849 cfg
->cfg_uuid
= lsi
->lsi_llsbi
->ll_sb_uuid
;
850 cfg
->cfg_callback
= class_config_llog_handler
;
851 /* set up client obds */
852 err
= lustre_process_log(sb
, profilenm
, cfg
);
856 /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
857 lprof
= class_get_profile(profilenm
);
859 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
864 CDEBUG(D_CONFIG
, "Found profile %s: mdc=%s osc=%s\n", profilenm
,
865 lprof
->lp_md
, lprof
->lp_dt
);
867 dt
= kasprintf(GFP_NOFS
, "%s-%p", lprof
->lp_dt
, cfg
->cfg_instance
);
873 md
= kasprintf(GFP_NOFS
, "%s-%p", lprof
->lp_md
, cfg
->cfg_instance
);
879 /* connections, registrations, sb setup */
880 err
= client_common_fill_super(sb
, md
, dt
, mnt
);
887 else if (sbi
->ll_flags
& LL_SBI_VERBOSE
)
888 LCONSOLE_WARN("Mounted %s\n", profilenm
);
892 } /* ll_fill_super */
894 void ll_put_super(struct super_block
*sb
)
896 struct config_llog_instance cfg
, params_cfg
;
897 struct obd_device
*obd
;
898 struct lustre_sb_info
*lsi
= s2lsi(sb
);
899 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
900 char *profilenm
= get_profile_name(sb
);
901 int ccc_count
, next
, force
= 1, rc
= 0;
903 CDEBUG(D_VFSTRACE
, "VFS Op: sb %p - %s\n", sb
, profilenm
);
905 cfg
.cfg_instance
= sb
;
906 lustre_end_log(sb
, profilenm
, &cfg
);
908 params_cfg
.cfg_instance
= sb
;
909 lustre_end_log(sb
, PARAMS_FILENAME
, ¶ms_cfg
);
911 if (sbi
->ll_md_exp
) {
912 obd
= class_exp2obd(sbi
->ll_md_exp
);
914 force
= obd
->obd_force
;
917 /* Wait for unstable pages to be committed to stable storage */
919 struct l_wait_info lwi
= LWI_INTR(LWI_ON_SIGNAL_NOOP
, NULL
);
921 rc
= l_wait_event(sbi
->ll_cache
->ccc_unstable_waitq
,
922 !atomic_read(&sbi
->ll_cache
->ccc_unstable_nr
),
926 ccc_count
= atomic_read(&sbi
->ll_cache
->ccc_unstable_nr
);
927 if (!force
&& rc
!= -EINTR
)
928 LASSERTF(!ccc_count
, "count: %i\n", ccc_count
);
930 /* We need to set force before the lov_disconnect in
931 * lustre_common_put_super, since l_d cleans up osc's as well.
935 while ((obd
= class_devices_in_group(&sbi
->ll_sb_uuid
,
937 obd
->obd_force
= force
;
942 /* Only if client_common_fill_super succeeded */
943 client_common_put_super(sb
);
947 while ((obd
= class_devices_in_group(&sbi
->ll_sb_uuid
, &next
)))
948 class_manual_cleanup(obd
);
950 if (sbi
->ll_flags
& LL_SBI_VERBOSE
)
951 LCONSOLE_WARN("Unmounted %s\n", profilenm
? profilenm
: "");
954 class_del_profile(profilenm
);
956 if (lsi
->lsi_flags
& LSI_BDI_INITIALIZED
) {
957 bdi_destroy(&lsi
->lsi_bdi
);
958 lsi
->lsi_flags
&= ~LSI_BDI_INITIALIZED
;
962 lsi
->lsi_llsbi
= NULL
;
964 lustre_common_put_super(sb
);
966 cl_env_cache_purge(~0);
968 module_put(THIS_MODULE
);
969 } /* client_put_super */
/*
 * ll_inode_from_resource_lock(): return the VFS inode cached in @lock's
 * resource (lr_lvb_inode), taking a new igrab() reference when the
 * inode's lli magic is still LLI_INODE_MAGIC; a bogus magic is logged
 * instead.  Returns NULL when no lvb inode is attached.
 * NOTE(review): the else between the igrab and the warning path, the
 * closing braces and the return statement are missing from this
 * extraction.
 */
971 struct inode
*ll_inode_from_resource_lock(struct ldlm_lock
*lock
)
973 struct inode
*inode
= NULL
;
975 /* NOTE: we depend on atomic igrab() -bzzz */
976 lock_res_and_lock(lock
);
977 if (lock
->l_resource
->lr_lvb_inode
) {
978 struct ll_inode_info
*lli
;
980 lli
= ll_i2info(lock
->l_resource
->lr_lvb_inode
);
/* only trust the cached inode if its lli magic is still live */
981 if (lli
->lli_inode_magic
== LLI_INODE_MAGIC
) {
982 inode
= igrab(lock
->l_resource
->lr_lvb_inode
);
/* bogus magic: report it (quietly if the inode is being freed) */
984 inode
= lock
->l_resource
->lr_lvb_inode
;
985 LDLM_DEBUG_LIMIT(inode
->i_state
& I_FREEING
? D_INFO
:
986 D_WARNING
, lock
, "lr_lvb_inode %p is bogus: magic %08x",
987 lock
->l_resource
->lr_lvb_inode
,
988 lli
->lli_inode_magic
);
992 unlock_res_and_lock(lock
);
/*
 * ll_dir_clear_lsm_md(): free and clear the striped-directory metadata
 * (lli_lsm_md) of directory @inode; asserts the inode is a directory.
 * NOTE(review): braces and the function tail are missing from this
 * extraction.
 */
996 static void ll_dir_clear_lsm_md(struct inode
*inode
)
998 struct ll_inode_info
*lli
= ll_i2info(inode
);
1000 LASSERT(S_ISDIR(inode
->i_mode
));
1002 if (lli
->lli_lsm_md
) {
1003 lmv_free_memmd(lli
->lli_lsm_md
);
1004 lli
->lli_lsm_md
= NULL
;
1008 static struct inode
*ll_iget_anon_dir(struct super_block
*sb
,
1009 const struct lu_fid
*fid
,
1010 struct lustre_md
*md
)
1012 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
1013 struct mdt_body
*body
= md
->body
;
1014 struct inode
*inode
;
1017 ino
= cl_fid_build_ino(fid
, sbi
->ll_flags
& LL_SBI_32BIT_API
);
1018 inode
= iget_locked(sb
, ino
);
1020 CERROR("%s: failed get simple inode "DFID
": rc = -ENOENT\n",
1021 ll_get_fsname(sb
, NULL
, 0), PFID(fid
));
1022 return ERR_PTR(-ENOENT
);
1025 if (inode
->i_state
& I_NEW
) {
1026 struct ll_inode_info
*lli
= ll_i2info(inode
);
1027 struct lmv_stripe_md
*lsm
= md
->lmv
;
1029 inode
->i_mode
= (inode
->i_mode
& ~S_IFMT
) |
1030 (body
->mode
& S_IFMT
);
1031 LASSERTF(S_ISDIR(inode
->i_mode
), "Not slave inode "DFID
"\n",
1034 LTIME_S(inode
->i_mtime
) = 0;
1035 LTIME_S(inode
->i_atime
) = 0;
1036 LTIME_S(inode
->i_ctime
) = 0;
1039 inode
->i_op
= &ll_dir_inode_operations
;
1040 inode
->i_fop
= &ll_dir_operations
;
1041 lli
->lli_fid
= *fid
;
1045 /* master object FID */
1046 lli
->lli_pfid
= body
->fid1
;
1047 CDEBUG(D_INODE
, "lli %p slave "DFID
" master "DFID
"\n",
1048 lli
, PFID(fid
), PFID(&lli
->lli_pfid
));
1049 unlock_new_inode(inode
);
1055 static int ll_init_lsm_md(struct inode
*inode
, struct lustre_md
*md
)
1057 struct lmv_stripe_md
*lsm
= md
->lmv
;
1063 * XXX sigh, this lsm_root initialization should be in
1064 * LMV layer, but it needs ll_iget right now, so we
1065 * put this here right now.
1067 for (i
= 0; i
< lsm
->lsm_md_stripe_count
; i
++) {
1068 fid
= &lsm
->lsm_md_oinfo
[i
].lmo_fid
;
1069 LASSERT(!lsm
->lsm_md_oinfo
[i
].lmo_root
);
1070 /* Unfortunately ll_iget will call ll_update_inode,
1071 * where the initialization of slave inode is slightly
1072 * different, so it reset lsm_md to NULL to avoid
1073 * initializing lsm for slave inode.
1075 /* For migrating inode, master stripe and master object will
1076 * be the same, so we only need to assign this inode
1078 if (lsm
->lsm_md_hash_type
& LMV_HASH_FLAG_MIGRATION
&& !i
)
1079 lsm
->lsm_md_oinfo
[i
].lmo_root
= inode
;
1081 lsm
->lsm_md_oinfo
[i
].lmo_root
=
1082 ll_iget_anon_dir(inode
->i_sb
, fid
, md
);
1083 if (IS_ERR(lsm
->lsm_md_oinfo
[i
].lmo_root
)) {
1084 int rc
= PTR_ERR(lsm
->lsm_md_oinfo
[i
].lmo_root
);
1086 lsm
->lsm_md_oinfo
[i
].lmo_root
= NULL
;
1092 * Here is where the lsm is being initialized(fill lmo_info) after
1093 * client retrieve MD stripe information from MDT.
1095 return md_update_lsm_md(ll_i2mdexp(inode
), lsm
, md
->body
,
1096 ll_md_blocking_ast
);
/*
 * lli_lsm_md_eq(): field-by-field equality of two LMV stripe-metadata
 * descriptors — magic, stripe count, master MDT index, hash type,
 * layout version and pool name must all match.
 */
1099 static inline int lli_lsm_md_eq(const struct lmv_stripe_md
*lsm_md1
,
1100 const struct lmv_stripe_md
*lsm_md2
)
1102 return lsm_md1
->lsm_md_magic
== lsm_md2
->lsm_md_magic
&&
1103 lsm_md1
->lsm_md_stripe_count
== lsm_md2
->lsm_md_stripe_count
&&
1104 lsm_md1
->lsm_md_master_mdt_index
==
1105 lsm_md2
->lsm_md_master_mdt_index
&&
1106 lsm_md1
->lsm_md_hash_type
== lsm_md2
->lsm_md_hash_type
&&
1107 lsm_md1
->lsm_md_layout_version
==
1108 lsm_md2
->lsm_md_layout_version
&&
1109 !strcmp(lsm_md1
->lsm_md_pool_name
,
1110 lsm_md2
->lsm_md_pool_name
);
1113 static int ll_update_lsm_md(struct inode
*inode
, struct lustre_md
*md
)
1115 struct ll_inode_info
*lli
= ll_i2info(inode
);
1116 struct lmv_stripe_md
*lsm
= md
->lmv
;
1119 LASSERT(S_ISDIR(inode
->i_mode
));
1120 CDEBUG(D_INODE
, "update lsm %p of "DFID
"\n", lli
->lli_lsm_md
,
1121 PFID(ll_inode2fid(inode
)));
1123 /* no striped information from request. */
1125 if (!lli
->lli_lsm_md
) {
1127 } else if (lli
->lli_lsm_md
->lsm_md_hash_type
&
1128 LMV_HASH_FLAG_MIGRATION
) {
1130 * migration is done, the temporary MIGRATE layout has
1133 CDEBUG(D_INODE
, DFID
" finish migration.\n",
1134 PFID(ll_inode2fid(inode
)));
1135 lmv_free_memmd(lli
->lli_lsm_md
);
1136 lli
->lli_lsm_md
= NULL
;
1140 * The lustre_md from req does not include stripeEA,
1147 /* set the directory layout */
1148 if (!lli
->lli_lsm_md
) {
1149 rc
= ll_init_lsm_md(inode
, md
);
1153 lli
->lli_lsm_md
= lsm
;
1155 * set lsm_md to NULL, so the following free lustre_md
1156 * will not free this lsm
1159 CDEBUG(D_INODE
, "Set lsm %p magic %x to "DFID
"\n", lsm
,
1160 lsm
->lsm_md_magic
, PFID(ll_inode2fid(inode
)));
1164 /* Compare the old and new stripe information */
1165 if (!lsm_md_eq(lli
->lli_lsm_md
, lsm
)) {
1166 struct lmv_stripe_md
*old_lsm
= lli
->lli_lsm_md
;
1169 CERROR("%s: inode "DFID
"(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
1170 ll_get_fsname(inode
->i_sb
, NULL
, 0), PFID(&lli
->lli_fid
),
1171 inode
, lsm
, old_lsm
,
1172 lsm
->lsm_md_magic
, old_lsm
->lsm_md_magic
,
1173 lsm
->lsm_md_stripe_count
,
1174 old_lsm
->lsm_md_stripe_count
,
1175 lsm
->lsm_md_master_mdt_index
,
1176 old_lsm
->lsm_md_master_mdt_index
,
1177 lsm
->lsm_md_hash_type
, old_lsm
->lsm_md_hash_type
,
1178 lsm
->lsm_md_layout_version
,
1179 old_lsm
->lsm_md_layout_version
,
1180 lsm
->lsm_md_pool_name
,
1181 old_lsm
->lsm_md_pool_name
);
1183 for (idx
= 0; idx
< old_lsm
->lsm_md_stripe_count
; idx
++) {
1184 CERROR("%s: sub FIDs in old lsm idx %d, old: "DFID
"\n",
1185 ll_get_fsname(inode
->i_sb
, NULL
, 0), idx
,
1186 PFID(&old_lsm
->lsm_md_oinfo
[idx
].lmo_fid
));
1189 for (idx
= 0; idx
< lsm
->lsm_md_stripe_count
; idx
++) {
1190 CERROR("%s: sub FIDs in new lsm idx %d, new: "DFID
"\n",
1191 ll_get_fsname(inode
->i_sb
, NULL
, 0), idx
,
1192 PFID(&lsm
->lsm_md_oinfo
[idx
].lmo_fid
));
1201 void ll_clear_inode(struct inode
*inode
)
1203 struct ll_inode_info
*lli
= ll_i2info(inode
);
1204 struct ll_sb_info
*sbi
= ll_i2sbi(inode
);
1206 CDEBUG(D_VFSTRACE
, "VFS Op:inode="DFID
"(%p)\n",
1207 PFID(ll_inode2fid(inode
)), inode
);
1209 if (S_ISDIR(inode
->i_mode
)) {
1210 /* these should have been cleared in ll_file_release */
1211 LASSERT(!lli
->lli_opendir_key
);
1212 LASSERT(!lli
->lli_sai
);
1213 LASSERT(lli
->lli_opendir_pid
== 0);
1216 spin_lock(&lli
->lli_lock
);
1217 ll_i2info(inode
)->lli_flags
&= ~LLIF_MDS_SIZE_LOCK
;
1218 spin_unlock(&lli
->lli_lock
);
1219 md_null_inode(sbi
->ll_md_exp
, ll_inode2fid(inode
));
1221 LASSERT(!lli
->lli_open_fd_write_count
);
1222 LASSERT(!lli
->lli_open_fd_read_count
);
1223 LASSERT(!lli
->lli_open_fd_exec_count
);
1225 if (lli
->lli_mds_write_och
)
1226 ll_md_real_close(inode
, FMODE_WRITE
);
1227 if (lli
->lli_mds_exec_och
)
1228 ll_md_real_close(inode
, FMODE_EXEC
);
1229 if (lli
->lli_mds_read_och
)
1230 ll_md_real_close(inode
, FMODE_READ
);
1232 if (S_ISLNK(inode
->i_mode
)) {
1233 kfree(lli
->lli_symlink_name
);
1234 lli
->lli_symlink_name
= NULL
;
1237 ll_xattr_cache_destroy(inode
);
1239 #ifdef CONFIG_FS_POSIX_ACL
1240 if (lli
->lli_posix_acl
) {
1241 LASSERT(atomic_read(&lli
->lli_posix_acl
->a_refcount
) == 1);
1242 posix_acl_release(lli
->lli_posix_acl
);
1243 lli
->lli_posix_acl
= NULL
;
1246 lli
->lli_inode_magic
= LLI_INODE_DEAD
;
1248 if (S_ISDIR(inode
->i_mode
))
1249 ll_dir_clear_lsm_md(inode
);
1250 if (S_ISREG(inode
->i_mode
) && !is_bad_inode(inode
))
1251 LASSERT(list_empty(&lli
->lli_agl_list
));
1254 * XXX This has to be done before lsm is freed below, because
1255 * cl_object still uses inode lsm.
1257 cl_inode_fini(inode
);
1258 lli
->lli_has_smd
= false;
1261 #define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
1263 static int ll_md_setattr(struct dentry
*dentry
, struct md_op_data
*op_data
,
1264 struct md_open_data
**mod
)
1266 struct lustre_md md
;
1267 struct inode
*inode
= d_inode(dentry
);
1268 struct ll_sb_info
*sbi
= ll_i2sbi(inode
);
1269 struct ptlrpc_request
*request
= NULL
;
1272 op_data
= ll_prep_md_op_data(op_data
, inode
, NULL
, NULL
, 0, 0,
1273 LUSTRE_OPC_ANY
, NULL
);
1274 if (IS_ERR(op_data
))
1275 return PTR_ERR(op_data
);
1277 rc
= md_setattr(sbi
->ll_md_exp
, op_data
, NULL
, 0, NULL
, 0,
1280 ptlrpc_req_finished(request
);
1281 if (rc
== -ENOENT
) {
1283 /* Unlinked special device node? Or just a race?
1284 * Pretend we did everything.
1286 if (!S_ISREG(inode
->i_mode
) &&
1287 !S_ISDIR(inode
->i_mode
)) {
1288 ia_valid
= op_data
->op_attr
.ia_valid
;
1289 op_data
->op_attr
.ia_valid
&= ~TIMES_SET_FLAGS
;
1290 rc
= simple_setattr(dentry
, &op_data
->op_attr
);
1291 op_data
->op_attr
.ia_valid
= ia_valid
;
1293 } else if (rc
!= -EPERM
&& rc
!= -EACCES
&& rc
!= -ETXTBSY
) {
1294 CERROR("md_setattr fails: rc = %d\n", rc
);
1299 rc
= md_get_lustre_md(sbi
->ll_md_exp
, request
, sbi
->ll_dt_exp
,
1300 sbi
->ll_md_exp
, &md
);
1302 ptlrpc_req_finished(request
);
1306 ia_valid
= op_data
->op_attr
.ia_valid
;
1307 /* inode size will be in cl_setattr_ost, can't do it now since dirty
1308 * cache is not cleared yet.
1310 op_data
->op_attr
.ia_valid
&= ~(TIMES_SET_FLAGS
| ATTR_SIZE
);
1311 rc
= simple_setattr(dentry
, &op_data
->op_attr
);
1312 op_data
->op_attr
.ia_valid
= ia_valid
;
1314 /* Extract epoch data if obtained. */
1315 op_data
->op_handle
= md
.body
->handle
;
1316 op_data
->op_ioepoch
= md
.body
->ioepoch
;
1318 rc
= ll_update_inode(inode
, &md
);
1319 ptlrpc_req_finished(request
);
1324 /* Close IO epoch and send Size-on-MDS attribute update. */
1325 static int ll_setattr_done_writing(struct inode
*inode
,
1326 struct md_op_data
*op_data
,
1327 struct md_open_data
*mod
)
1329 struct ll_inode_info
*lli
= ll_i2info(inode
);
1332 if (!S_ISREG(inode
->i_mode
))
1335 CDEBUG(D_INODE
, "Epoch %llu closed on "DFID
" for truncate\n",
1336 op_data
->op_ioepoch
, PFID(&lli
->lli_fid
));
1338 op_data
->op_flags
= MF_EPOCH_CLOSE
;
1339 ll_done_writing_attr(inode
, op_data
);
1340 ll_pack_inode2opdata(inode
, op_data
, NULL
);
1342 rc
= md_done_writing(ll_i2sbi(inode
)->ll_md_exp
, op_data
, mod
);
1344 /* MDS has instructed us to obtain Size-on-MDS attribute
1345 * from OSTs and send setattr to back to MDS.
1347 rc
= ll_som_update(inode
, op_data
);
1349 CERROR("%s: inode "DFID
" mdc truncate failed: rc = %d\n",
1350 ll_i2sbi(inode
)->ll_md_exp
->exp_obd
->obd_name
,
1351 PFID(ll_inode2fid(inode
)), rc
);
1356 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1357 * object(s) determine the file size and mtime. Otherwise, the MDS will
1358 * keep these values until such a time that objects are allocated for it.
1359 * We do the MDS operations first, as it is checking permissions for us.
1360 * We don't to the MDS RPC if there is nothing that we want to store there,
1361 * otherwise there is no harm in updating mtime/atime on the MDS if we are
1362 * going to do an RPC anyways.
1364 * If we are doing a truncate, we will send the mtime and ctime updates
1365 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1366 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1369 * In case of HSMimport, we only set attr on MDS.
1371 int ll_setattr_raw(struct dentry
*dentry
, struct iattr
*attr
, bool hsm_import
)
1373 struct inode
*inode
= d_inode(dentry
);
1374 struct ll_inode_info
*lli
= ll_i2info(inode
);
1375 struct md_op_data
*op_data
= NULL
;
1376 struct md_open_data
*mod
= NULL
;
1377 bool file_is_released
= false;
1378 int rc
= 0, rc1
= 0;
1380 CDEBUG(D_VFSTRACE
, "%s: setattr inode "DFID
"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
1381 ll_get_fsname(inode
->i_sb
, NULL
, 0), PFID(&lli
->lli_fid
), inode
,
1382 i_size_read(inode
), attr
->ia_size
, attr
->ia_valid
, hsm_import
);
1384 if (attr
->ia_valid
& ATTR_SIZE
) {
1385 /* Check new size against VFS/VM file size limit and rlimit */
1386 rc
= inode_newsize_ok(inode
, attr
->ia_size
);
1390 /* The maximum Lustre file size is variable, based on the
1391 * OST maximum object size and number of stripes. This
1392 * needs another check in addition to the VFS check above.
1394 if (attr
->ia_size
> ll_file_maxbytes(inode
)) {
1395 CDEBUG(D_INODE
, "file "DFID
" too large %llu > %llu\n",
1396 PFID(&lli
->lli_fid
), attr
->ia_size
,
1397 ll_file_maxbytes(inode
));
1401 attr
->ia_valid
|= ATTR_MTIME
| ATTR_CTIME
;
1404 /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1405 if (attr
->ia_valid
& TIMES_SET_FLAGS
) {
1406 if ((!uid_eq(current_fsuid(), inode
->i_uid
)) &&
1407 !capable(CFS_CAP_FOWNER
))
1411 /* We mark all of the fields "set" so MDS/OST does not re-set them */
1412 if (attr
->ia_valid
& ATTR_CTIME
) {
1413 attr
->ia_ctime
= CURRENT_TIME
;
1414 attr
->ia_valid
|= ATTR_CTIME_SET
;
1416 if (!(attr
->ia_valid
& ATTR_ATIME_SET
) &&
1417 (attr
->ia_valid
& ATTR_ATIME
)) {
1418 attr
->ia_atime
= CURRENT_TIME
;
1419 attr
->ia_valid
|= ATTR_ATIME_SET
;
1421 if (!(attr
->ia_valid
& ATTR_MTIME_SET
) &&
1422 (attr
->ia_valid
& ATTR_MTIME
)) {
1423 attr
->ia_mtime
= CURRENT_TIME
;
1424 attr
->ia_valid
|= ATTR_MTIME_SET
;
1427 if (attr
->ia_valid
& (ATTR_MTIME
| ATTR_CTIME
))
1428 CDEBUG(D_INODE
, "setting mtime %lu, ctime %lu, now = %llu\n",
1429 LTIME_S(attr
->ia_mtime
), LTIME_S(attr
->ia_ctime
),
1430 (s64
)ktime_get_real_seconds());
1432 /* We always do an MDS RPC, even if we're only changing the size;
1433 * only the MDS knows whether truncate() should fail with -ETXTBUSY
1436 op_data
= kzalloc(sizeof(*op_data
), GFP_NOFS
);
1440 if (!S_ISDIR(inode
->i_mode
))
1441 inode_unlock(inode
);
1443 /* truncate on a released file must failed with -ENODATA,
1444 * so size must not be set on MDS for released file
1445 * but other attributes must be set
1447 if (S_ISREG(inode
->i_mode
)) {
1448 struct lov_stripe_md
*lsm
;
1451 ll_layout_refresh(inode
, &gen
);
1452 lsm
= ccc_inode_lsm_get(inode
);
1453 if (lsm
&& lsm
->lsm_pattern
& LOV_PATTERN_F_RELEASED
)
1454 file_is_released
= true;
1455 ccc_inode_lsm_put(inode
, lsm
);
1457 if (!hsm_import
&& attr
->ia_valid
& ATTR_SIZE
) {
1458 if (file_is_released
) {
1459 rc
= ll_layout_restore(inode
, 0, attr
->ia_size
);
1463 file_is_released
= false;
1464 ll_layout_refresh(inode
, &gen
);
1468 * If we are changing file size, file content is
1469 * modified, flag it.
1471 attr
->ia_valid
|= MDS_OPEN_OWNEROVERRIDE
;
1472 spin_lock(&lli
->lli_lock
);
1473 lli
->lli_flags
|= LLIF_DATA_MODIFIED
;
1474 spin_unlock(&lli
->lli_lock
);
1475 op_data
->op_bias
|= MDS_DATA_MODIFIED
;
1479 memcpy(&op_data
->op_attr
, attr
, sizeof(*attr
));
1481 /* Open epoch for truncate. */
1482 if (exp_connect_som(ll_i2mdexp(inode
)) && !hsm_import
&&
1483 (attr
->ia_valid
& (ATTR_SIZE
| ATTR_MTIME
| ATTR_MTIME_SET
)))
1484 op_data
->op_flags
= MF_EPOCH_OPEN
;
1486 rc
= ll_md_setattr(dentry
, op_data
, &mod
);
1490 /* RPC to MDT is sent, cancel data modification flag */
1491 if (op_data
->op_bias
& MDS_DATA_MODIFIED
) {
1492 spin_lock(&lli
->lli_lock
);
1493 lli
->lli_flags
&= ~LLIF_DATA_MODIFIED
;
1494 spin_unlock(&lli
->lli_lock
);
1497 ll_ioepoch_open(lli
, op_data
->op_ioepoch
);
1498 if (!S_ISREG(inode
->i_mode
) || file_is_released
) {
1503 if (attr
->ia_valid
& (ATTR_SIZE
|
1504 ATTR_ATIME
| ATTR_ATIME_SET
|
1505 ATTR_MTIME
| ATTR_MTIME_SET
)) {
1506 /* For truncate and utimes sending attributes to OSTs, setting
1507 * mtime/atime to the past will be performed under PW [0:EOF]
1508 * extent lock (new_size:EOF for truncate). It may seem
1509 * excessive to send mtime/atime updates to OSTs when not
1510 * setting times to past, but it is necessary due to possible
1511 * time de-synchronization between MDT inode and OST objects
1513 if (attr
->ia_valid
& ATTR_SIZE
)
1514 down_write(&lli
->lli_trunc_sem
);
1515 rc
= cl_setattr_ost(inode
, attr
);
1516 if (attr
->ia_valid
& ATTR_SIZE
)
1517 up_write(&lli
->lli_trunc_sem
);
1520 if (op_data
->op_ioepoch
) {
1521 rc1
= ll_setattr_done_writing(inode
, op_data
, mod
);
1525 ll_finish_md_op_data(op_data
);
1527 if (!S_ISDIR(inode
->i_mode
)) {
1529 if ((attr
->ia_valid
& ATTR_SIZE
) && !hsm_import
)
1530 inode_dio_wait(inode
);
1533 ll_stats_ops_tally(ll_i2sbi(inode
), (attr
->ia_valid
& ATTR_SIZE
) ?
1534 LPROC_LL_TRUNC
: LPROC_LL_SETATTR
, 1);
1539 int ll_setattr(struct dentry
*de
, struct iattr
*attr
)
1541 int mode
= d_inode(de
)->i_mode
;
1543 if ((attr
->ia_valid
& (ATTR_CTIME
|ATTR_SIZE
|ATTR_MODE
)) ==
1544 (ATTR_CTIME
|ATTR_SIZE
|ATTR_MODE
))
1545 attr
->ia_valid
|= MDS_OPEN_OWNEROVERRIDE
;
1547 if (((attr
->ia_valid
& (ATTR_MODE
|ATTR_FORCE
|ATTR_SIZE
)) ==
1548 (ATTR_SIZE
|ATTR_MODE
)) &&
1549 (((mode
& S_ISUID
) && !(attr
->ia_mode
& S_ISUID
)) ||
1550 (((mode
& (S_ISGID
|S_IXGRP
)) == (S_ISGID
|S_IXGRP
)) &&
1551 !(attr
->ia_mode
& S_ISGID
))))
1552 attr
->ia_valid
|= ATTR_FORCE
;
1554 if ((attr
->ia_valid
& ATTR_MODE
) &&
1556 !(attr
->ia_mode
& S_ISUID
) &&
1557 !(attr
->ia_valid
& ATTR_KILL_SUID
))
1558 attr
->ia_valid
|= ATTR_KILL_SUID
;
1560 if ((attr
->ia_valid
& ATTR_MODE
) &&
1561 ((mode
& (S_ISGID
|S_IXGRP
)) == (S_ISGID
|S_IXGRP
)) &&
1562 !(attr
->ia_mode
& S_ISGID
) &&
1563 !(attr
->ia_valid
& ATTR_KILL_SGID
))
1564 attr
->ia_valid
|= ATTR_KILL_SGID
;
1566 return ll_setattr_raw(de
, attr
, false);
1569 int ll_statfs_internal(struct super_block
*sb
, struct obd_statfs
*osfs
,
1570 __u64 max_age
, __u32 flags
)
1572 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
1573 struct obd_statfs obd_osfs
;
1576 rc
= obd_statfs(NULL
, sbi
->ll_md_exp
, osfs
, max_age
, flags
);
1578 CERROR("md_statfs fails: rc = %d\n", rc
);
1582 osfs
->os_type
= sb
->s_magic
;
1584 CDEBUG(D_SUPER
, "MDC blocks %llu/%llu objects %llu/%llu\n",
1585 osfs
->os_bavail
, osfs
->os_blocks
, osfs
->os_ffree
,
1588 if (sbi
->ll_flags
& LL_SBI_LAZYSTATFS
)
1589 flags
|= OBD_STATFS_NODELAY
;
1591 rc
= obd_statfs_rqset(sbi
->ll_dt_exp
, &obd_osfs
, max_age
, flags
);
1593 CERROR("obd_statfs fails: rc = %d\n", rc
);
1597 CDEBUG(D_SUPER
, "OSC blocks %llu/%llu objects %llu/%llu\n",
1598 obd_osfs
.os_bavail
, obd_osfs
.os_blocks
, obd_osfs
.os_ffree
,
1601 osfs
->os_bsize
= obd_osfs
.os_bsize
;
1602 osfs
->os_blocks
= obd_osfs
.os_blocks
;
1603 osfs
->os_bfree
= obd_osfs
.os_bfree
;
1604 osfs
->os_bavail
= obd_osfs
.os_bavail
;
1606 /* If we don't have as many objects free on the OST as inodes
1607 * on the MDS, we reduce the total number of inodes to
1608 * compensate, so that the "inodes in use" number is correct.
1610 if (obd_osfs
.os_ffree
< osfs
->os_ffree
) {
1611 osfs
->os_files
= (osfs
->os_files
- osfs
->os_ffree
) +
1613 osfs
->os_ffree
= obd_osfs
.os_ffree
;
1619 int ll_statfs(struct dentry
*de
, struct kstatfs
*sfs
)
1621 struct super_block
*sb
= de
->d_sb
;
1622 struct obd_statfs osfs
;
1625 CDEBUG(D_VFSTRACE
, "VFS Op: at %llu jiffies\n", get_jiffies_64());
1626 ll_stats_ops_tally(ll_s2sbi(sb
), LPROC_LL_STAFS
, 1);
1628 /* Some amount of caching on the client is allowed */
1629 rc
= ll_statfs_internal(sb
, &osfs
,
1630 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS
),
1635 statfs_unpack(sfs
, &osfs
);
1637 /* We need to downshift for all 32-bit kernels, because we can't
1638 * tell if the kernel is being called via sys_statfs64() or not.
1639 * Stop before overflowing f_bsize - in which case it is better
1640 * to just risk EOVERFLOW if caller is using old sys_statfs().
1642 if (sizeof(long) < 8) {
1643 while (osfs
.os_blocks
> ~0UL && sfs
->f_bsize
< 0x40000000) {
1646 osfs
.os_blocks
>>= 1;
1647 osfs
.os_bfree
>>= 1;
1648 osfs
.os_bavail
>>= 1;
1652 sfs
->f_blocks
= osfs
.os_blocks
;
1653 sfs
->f_bfree
= osfs
.os_bfree
;
1654 sfs
->f_bavail
= osfs
.os_bavail
;
1655 sfs
->f_fsid
= ll_s2sbi(sb
)->ll_fsid
;
1659 void ll_inode_size_lock(struct inode
*inode
)
1661 struct ll_inode_info
*lli
;
1663 LASSERT(!S_ISDIR(inode
->i_mode
));
1665 lli
= ll_i2info(inode
);
1666 mutex_lock(&lli
->lli_size_mutex
);
1669 void ll_inode_size_unlock(struct inode
*inode
)
1671 struct ll_inode_info
*lli
;
1673 lli
= ll_i2info(inode
);
1674 mutex_unlock(&lli
->lli_size_mutex
);
1677 int ll_update_inode(struct inode
*inode
, struct lustre_md
*md
)
1679 struct ll_inode_info
*lli
= ll_i2info(inode
);
1680 struct mdt_body
*body
= md
->body
;
1681 struct lov_stripe_md
*lsm
= md
->lsm
;
1682 struct ll_sb_info
*sbi
= ll_i2sbi(inode
);
1684 LASSERT((lsm
!= NULL
) == ((body
->valid
& OBD_MD_FLEASIZE
) != 0));
1686 if (!lli
->lli_has_smd
&&
1687 !(sbi
->ll_flags
& LL_SBI_LAYOUT_LOCK
))
1688 cl_file_inode_init(inode
, md
);
1690 lli
->lli_maxbytes
= lsm
->lsm_maxbytes
;
1691 if (lli
->lli_maxbytes
> MAX_LFS_FILESIZE
)
1692 lli
->lli_maxbytes
= MAX_LFS_FILESIZE
;
1695 if (S_ISDIR(inode
->i_mode
)) {
1698 rc
= ll_update_lsm_md(inode
, md
);
1703 #ifdef CONFIG_FS_POSIX_ACL
1704 if (body
->valid
& OBD_MD_FLACL
) {
1705 spin_lock(&lli
->lli_lock
);
1706 if (lli
->lli_posix_acl
)
1707 posix_acl_release(lli
->lli_posix_acl
);
1708 lli
->lli_posix_acl
= md
->posix_acl
;
1709 spin_unlock(&lli
->lli_lock
);
1712 inode
->i_ino
= cl_fid_build_ino(&body
->fid1
,
1713 sbi
->ll_flags
& LL_SBI_32BIT_API
);
1714 inode
->i_generation
= cl_fid_build_gen(&body
->fid1
);
1716 if (body
->valid
& OBD_MD_FLATIME
) {
1717 if (body
->atime
> LTIME_S(inode
->i_atime
))
1718 LTIME_S(inode
->i_atime
) = body
->atime
;
1719 lli
->lli_atime
= body
->atime
;
1721 if (body
->valid
& OBD_MD_FLMTIME
) {
1722 if (body
->mtime
> LTIME_S(inode
->i_mtime
)) {
1723 CDEBUG(D_INODE
, "setting ino %lu mtime from %lu to %llu\n",
1724 inode
->i_ino
, LTIME_S(inode
->i_mtime
),
1726 LTIME_S(inode
->i_mtime
) = body
->mtime
;
1728 lli
->lli_mtime
= body
->mtime
;
1730 if (body
->valid
& OBD_MD_FLCTIME
) {
1731 if (body
->ctime
> LTIME_S(inode
->i_ctime
))
1732 LTIME_S(inode
->i_ctime
) = body
->ctime
;
1733 lli
->lli_ctime
= body
->ctime
;
1735 if (body
->valid
& OBD_MD_FLMODE
)
1736 inode
->i_mode
= (inode
->i_mode
& S_IFMT
)|(body
->mode
& ~S_IFMT
);
1737 if (body
->valid
& OBD_MD_FLTYPE
)
1738 inode
->i_mode
= (inode
->i_mode
& ~S_IFMT
)|(body
->mode
& S_IFMT
);
1739 LASSERT(inode
->i_mode
!= 0);
1740 if (S_ISREG(inode
->i_mode
))
1741 inode
->i_blkbits
= min(PTLRPC_MAX_BRW_BITS
+ 1,
1742 LL_MAX_BLKSIZE_BITS
);
1744 inode
->i_blkbits
= inode
->i_sb
->s_blocksize_bits
;
1745 if (body
->valid
& OBD_MD_FLUID
)
1746 inode
->i_uid
= make_kuid(&init_user_ns
, body
->uid
);
1747 if (body
->valid
& OBD_MD_FLGID
)
1748 inode
->i_gid
= make_kgid(&init_user_ns
, body
->gid
);
1749 if (body
->valid
& OBD_MD_FLFLAGS
)
1750 inode
->i_flags
= ll_ext_to_inode_flags(body
->flags
);
1751 if (body
->valid
& OBD_MD_FLNLINK
)
1752 set_nlink(inode
, body
->nlink
);
1753 if (body
->valid
& OBD_MD_FLRDEV
)
1754 inode
->i_rdev
= old_decode_dev(body
->rdev
);
1756 if (body
->valid
& OBD_MD_FLID
) {
1757 /* FID shouldn't be changed! */
1758 if (fid_is_sane(&lli
->lli_fid
)) {
1759 LASSERTF(lu_fid_eq(&lli
->lli_fid
, &body
->fid1
),
1760 "Trying to change FID "DFID
" to the "DFID
", inode "DFID
"(%p)\n",
1761 PFID(&lli
->lli_fid
), PFID(&body
->fid1
),
1762 PFID(ll_inode2fid(inode
)), inode
);
1764 lli
->lli_fid
= body
->fid1
;
1768 LASSERT(fid_seq(&lli
->lli_fid
) != 0);
1770 if (body
->valid
& OBD_MD_FLSIZE
) {
1771 if (exp_connect_som(ll_i2mdexp(inode
)) &&
1772 S_ISREG(inode
->i_mode
)) {
1773 struct lustre_handle lockh
;
1774 enum ldlm_mode mode
;
1776 /* As it is possible a blocking ast has been processed
1777 * by this time, we need to check there is an UPDATE
1778 * lock on the client and set LLIF_MDS_SIZE_LOCK holding
1781 mode
= ll_take_md_lock(inode
, MDS_INODELOCK_UPDATE
,
1782 &lockh
, LDLM_FL_CBPENDING
,
1786 if (lli
->lli_flags
& (LLIF_DONE_WRITING
|
1787 LLIF_EPOCH_PENDING
|
1789 CERROR("%s: inode "DFID
" flags %u still has size authority! do not trust the size got from MDS\n",
1790 sbi
->ll_md_exp
->exp_obd
->obd_name
,
1791 PFID(ll_inode2fid(inode
)),
1794 /* Use old size assignment to avoid
1795 * deadlock bz14138 & bz14326
1797 i_size_write(inode
, body
->size
);
1798 spin_lock(&lli
->lli_lock
);
1799 lli
->lli_flags
|= LLIF_MDS_SIZE_LOCK
;
1800 spin_unlock(&lli
->lli_lock
);
1802 ldlm_lock_decref(&lockh
, mode
);
1805 /* Use old size assignment to avoid
1806 * deadlock bz14138 & bz14326
1808 i_size_write(inode
, body
->size
);
1810 CDEBUG(D_VFSTRACE
, "inode=%lu, updating i_size %llu\n",
1811 inode
->i_ino
, (unsigned long long)body
->size
);
1814 if (body
->valid
& OBD_MD_FLBLOCKS
)
1815 inode
->i_blocks
= body
->blocks
;
1818 if (body
->valid
& OBD_MD_TSTATE
) {
1819 if (body
->t_state
& MS_RESTORE
)
1820 lli
->lli_flags
|= LLIF_FILE_RESTORING
;
1826 int ll_read_inode2(struct inode
*inode
, void *opaque
)
1828 struct lustre_md
*md
= opaque
;
1829 struct ll_inode_info
*lli
= ll_i2info(inode
);
1832 CDEBUG(D_VFSTRACE
, "VFS Op:inode="DFID
"(%p)\n",
1833 PFID(&lli
->lli_fid
), inode
);
1835 LASSERT(!lli
->lli_has_smd
);
1837 /* Core attributes from the MDS first. This is a new inode, and
1838 * the VFS doesn't zero times in the core inode so we have to do
1839 * it ourselves. They will be overwritten by either MDS or OST
1840 * attributes - we just need to make sure they aren't newer.
1842 LTIME_S(inode
->i_mtime
) = 0;
1843 LTIME_S(inode
->i_atime
) = 0;
1844 LTIME_S(inode
->i_ctime
) = 0;
1846 rc
= ll_update_inode(inode
, md
);
1850 /* OIDEBUG(inode); */
1852 if (S_ISREG(inode
->i_mode
)) {
1853 struct ll_sb_info
*sbi
= ll_i2sbi(inode
);
1855 inode
->i_op
= &ll_file_inode_operations
;
1856 inode
->i_fop
= sbi
->ll_fop
;
1857 inode
->i_mapping
->a_ops
= (struct address_space_operations
*)&ll_aops
;
1858 } else if (S_ISDIR(inode
->i_mode
)) {
1859 inode
->i_op
= &ll_dir_inode_operations
;
1860 inode
->i_fop
= &ll_dir_operations
;
1861 } else if (S_ISLNK(inode
->i_mode
)) {
1862 inode
->i_op
= &ll_fast_symlink_inode_operations
;
1864 inode
->i_op
= &ll_special_inode_operations
;
1866 init_special_inode(inode
, inode
->i_mode
,
1873 void ll_delete_inode(struct inode
*inode
)
1875 struct ll_inode_info
*lli
= ll_i2info(inode
);
1877 if (S_ISREG(inode
->i_mode
) && lli
->lli_clob
)
1878 /* discard all dirty pages before truncating them, required by
1879 * osc_extent implementation at LU-1030.
1881 cl_sync_file_range(inode
, 0, OBD_OBJECT_EOF
,
1882 CL_FSYNC_DISCARD
, 1);
1884 truncate_inode_pages_final(&inode
->i_data
);
1886 /* Workaround for LU-118 */
1887 if (inode
->i_data
.nrpages
) {
1888 spin_lock_irq(&inode
->i_data
.tree_lock
);
1889 spin_unlock_irq(&inode
->i_data
.tree_lock
);
1890 LASSERTF(inode
->i_data
.nrpages
== 0,
1891 "inode="DFID
"(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
1892 PFID(ll_inode2fid(inode
)), inode
,
1893 inode
->i_data
.nrpages
);
1895 /* Workaround end */
1897 ll_clear_inode(inode
);
1901 int ll_iocontrol(struct inode
*inode
, struct file
*file
,
1902 unsigned int cmd
, unsigned long arg
)
1904 struct ll_sb_info
*sbi
= ll_i2sbi(inode
);
1905 struct ptlrpc_request
*req
= NULL
;
1909 case FSFILT_IOC_GETFLAGS
: {
1910 struct mdt_body
*body
;
1911 struct md_op_data
*op_data
;
1913 op_data
= ll_prep_md_op_data(NULL
, inode
, NULL
, NULL
,
1914 0, 0, LUSTRE_OPC_ANY
,
1916 if (IS_ERR(op_data
))
1917 return PTR_ERR(op_data
);
1919 op_data
->op_valid
= OBD_MD_FLFLAGS
;
1920 rc
= md_getattr(sbi
->ll_md_exp
, op_data
, &req
);
1921 ll_finish_md_op_data(op_data
);
1923 CERROR("%s: failure inode "DFID
": rc = %d\n",
1924 sbi
->ll_md_exp
->exp_obd
->obd_name
,
1925 PFID(ll_inode2fid(inode
)), rc
);
1929 body
= req_capsule_server_get(&req
->rq_pill
, &RMF_MDT_BODY
);
1931 flags
= body
->flags
;
1933 ptlrpc_req_finished(req
);
1935 return put_user(flags
, (int __user
*)arg
);
1937 case FSFILT_IOC_SETFLAGS
: {
1938 struct lov_stripe_md
*lsm
;
1939 struct obd_info oinfo
= { };
1940 struct md_op_data
*op_data
;
1942 if (get_user(flags
, (int __user
*)arg
))
1945 op_data
= ll_prep_md_op_data(NULL
, inode
, NULL
, NULL
, 0, 0,
1946 LUSTRE_OPC_ANY
, NULL
);
1947 if (IS_ERR(op_data
))
1948 return PTR_ERR(op_data
);
1950 op_data
->op_attr_flags
= flags
;
1951 op_data
->op_attr
.ia_valid
|= ATTR_ATTR_FLAG
;
1952 rc
= md_setattr(sbi
->ll_md_exp
, op_data
,
1953 NULL
, 0, NULL
, 0, &req
, NULL
);
1954 ll_finish_md_op_data(op_data
);
1955 ptlrpc_req_finished(req
);
1959 inode
->i_flags
= ll_ext_to_inode_flags(flags
);
1961 lsm
= ccc_inode_lsm_get(inode
);
1962 if (!lsm_has_objects(lsm
)) {
1963 ccc_inode_lsm_put(inode
, lsm
);
1967 oinfo
.oi_oa
= kmem_cache_zalloc(obdo_cachep
, GFP_NOFS
);
1969 ccc_inode_lsm_put(inode
, lsm
);
1973 oinfo
.oi_oa
->o_oi
= lsm
->lsm_oi
;
1974 oinfo
.oi_oa
->o_flags
= flags
;
1975 oinfo
.oi_oa
->o_valid
= OBD_MD_FLID
| OBD_MD_FLFLAGS
|
1977 obdo_set_parent_fid(oinfo
.oi_oa
, &ll_i2info(inode
)->lli_fid
);
1978 rc
= obd_setattr_rqset(sbi
->ll_dt_exp
, &oinfo
, NULL
);
1979 kmem_cache_free(obdo_cachep
, oinfo
.oi_oa
);
1980 ccc_inode_lsm_put(inode
, lsm
);
1982 if (rc
&& rc
!= -EPERM
&& rc
!= -EACCES
)
1983 CERROR("osc_setattr_async fails: rc = %d\n", rc
);
1994 int ll_flush_ctx(struct inode
*inode
)
1996 struct ll_sb_info
*sbi
= ll_i2sbi(inode
);
1998 CDEBUG(D_SEC
, "flush context for user %d\n",
1999 from_kuid(&init_user_ns
, current_uid()));
2001 obd_set_info_async(NULL
, sbi
->ll_md_exp
,
2002 sizeof(KEY_FLUSH_CTX
), KEY_FLUSH_CTX
,
2004 obd_set_info_async(NULL
, sbi
->ll_dt_exp
,
2005 sizeof(KEY_FLUSH_CTX
), KEY_FLUSH_CTX
,
2010 /* umount -f client means force down, don't save state */
2011 void ll_umount_begin(struct super_block
*sb
)
2013 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
2014 struct obd_device
*obd
;
2015 struct obd_ioctl_data
*ioc_data
;
2017 CDEBUG(D_VFSTRACE
, "VFS Op: superblock %p count %d active %d\n", sb
,
2018 sb
->s_count
, atomic_read(&sb
->s_active
));
2020 obd
= class_exp2obd(sbi
->ll_md_exp
);
2022 CERROR("Invalid MDC connection handle %#llx\n",
2023 sbi
->ll_md_exp
->exp_handle
.h_cookie
);
2028 obd
= class_exp2obd(sbi
->ll_dt_exp
);
2030 CERROR("Invalid LOV connection handle %#llx\n",
2031 sbi
->ll_dt_exp
->exp_handle
.h_cookie
);
2036 ioc_data
= kzalloc(sizeof(*ioc_data
), GFP_NOFS
);
2038 obd_iocontrol(IOC_OSC_SET_ACTIVE
, sbi
->ll_md_exp
,
2039 sizeof(*ioc_data
), ioc_data
, NULL
);
2041 obd_iocontrol(IOC_OSC_SET_ACTIVE
, sbi
->ll_dt_exp
,
2042 sizeof(*ioc_data
), ioc_data
, NULL
);
2047 /* Really, we'd like to wait until there are no requests outstanding,
2048 * and then continue. For now, we just invalidate the requests,
2049 * schedule() and sleep one second if needed, and hope.
2054 int ll_remount_fs(struct super_block
*sb
, int *flags
, char *data
)
2056 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
2057 char *profilenm
= get_profile_name(sb
);
2061 if ((*flags
& MS_RDONLY
) != (sb
->s_flags
& MS_RDONLY
)) {
2062 read_only
= *flags
& MS_RDONLY
;
2063 err
= obd_set_info_async(NULL
, sbi
->ll_md_exp
,
2064 sizeof(KEY_READ_ONLY
),
2065 KEY_READ_ONLY
, sizeof(read_only
),
2068 LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2069 profilenm
, read_only
?
2070 "read-only" : "read-write", err
);
2075 sb
->s_flags
|= MS_RDONLY
;
2077 sb
->s_flags
&= ~MS_RDONLY
;
2079 if (sbi
->ll_flags
& LL_SBI_VERBOSE
)
2080 LCONSOLE_WARN("Remounted %s %s\n", profilenm
,
2081 read_only
? "read-only" : "read-write");
2087 * Cleanup the open handle that is cached on MDT-side.
2089 * For open case, the client side open handling thread may hit error
2090 * after the MDT grant the open. Under such case, the client should
2091 * send close RPC to the MDT as cleanup; otherwise, the open handle
2092 * on the MDT will be leaked there until the client umount or evicted.
2094 * In further, if someone unlinked the file, because the open handle
2095 * holds the reference on such file/object, then it will block the
2096 * subsequent threads that want to locate such object via FID.
2098 * \param[in] sb super block for this file-system
2099 * \param[in] open_req pointer to the original open request
2101 void ll_open_cleanup(struct super_block
*sb
, struct ptlrpc_request
*open_req
)
2103 struct mdt_body
*body
;
2104 struct md_op_data
*op_data
;
2105 struct ptlrpc_request
*close_req
= NULL
;
2106 struct obd_export
*exp
= ll_s2sbi(sb
)->ll_md_exp
;
2108 body
= req_capsule_server_get(&open_req
->rq_pill
, &RMF_MDT_BODY
);
2109 op_data
= kzalloc(sizeof(*op_data
), GFP_NOFS
);
2113 op_data
->op_fid1
= body
->fid1
;
2114 op_data
->op_ioepoch
= body
->ioepoch
;
2115 op_data
->op_handle
= body
->handle
;
2116 op_data
->op_mod_time
= get_seconds();
2117 md_close(exp
, op_data
, NULL
, &close_req
);
2118 ptlrpc_req_finished(close_req
);
2119 ll_finish_md_op_data(op_data
);
2122 int ll_prep_inode(struct inode
**inode
, struct ptlrpc_request
*req
,
2123 struct super_block
*sb
, struct lookup_intent
*it
)
2125 struct ll_sb_info
*sbi
= NULL
;
2126 struct lustre_md md
= { NULL
};
2129 LASSERT(*inode
|| sb
);
2130 sbi
= sb
? ll_s2sbi(sb
) : ll_i2sbi(*inode
);
2131 rc
= md_get_lustre_md(sbi
->ll_md_exp
, req
, sbi
->ll_dt_exp
,
2132 sbi
->ll_md_exp
, &md
);
2137 rc
= ll_update_inode(*inode
, &md
);
2144 * At this point server returns to client's same fid as client
2145 * generated for creating. So using ->fid1 is okay here.
2147 if (!fid_is_sane(&md
.body
->fid1
)) {
2148 CERROR("%s: Fid is insane " DFID
"\n",
2149 ll_get_fsname(sb
, NULL
, 0),
2150 PFID(&md
.body
->fid1
));
2155 *inode
= ll_iget(sb
, cl_fid_build_ino(&md
.body
->fid1
,
2156 sbi
->ll_flags
& LL_SBI_32BIT_API
),
2158 if (IS_ERR(*inode
)) {
2159 #ifdef CONFIG_FS_POSIX_ACL
2161 posix_acl_release(md
.posix_acl
);
2162 md
.posix_acl
= NULL
;
2166 CERROR("new_inode -fatal: rc %d\n", rc
);
2171 /* Handling piggyback layout lock.
2172 * Layout lock can be piggybacked by getattr and open request.
2173 * The lsm can be applied to inode only if it comes with a layout lock
2174 * otherwise correct layout may be overwritten, for example:
2175 * 1. proc1: mdt returns a lsm but not granting layout
2176 * 2. layout was changed by another client
2177 * 3. proc2: refresh layout and layout lock granted
2178 * 4. proc1: to apply a stale layout
2180 if (it
&& it
->it_lock_mode
!= 0) {
2181 struct lustre_handle lockh
;
2182 struct ldlm_lock
*lock
;
2184 lockh
.cookie
= it
->it_lock_handle
;
2185 lock
= ldlm_handle2lock(&lockh
);
2187 if (ldlm_has_layout(lock
)) {
2188 struct cl_object_conf conf
;
2190 memset(&conf
, 0, sizeof(conf
));
2191 conf
.coc_opc
= OBJECT_CONF_SET
;
2192 conf
.coc_inode
= *inode
;
2193 conf
.coc_lock
= lock
;
2194 conf
.u
.coc_md
= &md
;
2195 (void)ll_layout_conf(*inode
, &conf
);
2197 LDLM_LOCK_PUT(lock
);
2202 obd_free_memmd(sbi
->ll_dt_exp
, &md
.lsm
);
2203 md_free_lustre_md(sbi
->ll_md_exp
, &md
);
2206 if (rc
!= 0 && it
&& it
->it_op
& IT_OPEN
)
2207 ll_open_cleanup(sb
? sb
: (*inode
)->i_sb
, req
);
2212 int ll_obd_statfs(struct inode
*inode
, void __user
*arg
)
2214 struct ll_sb_info
*sbi
= NULL
;
2215 struct obd_export
*exp
;
2217 struct obd_ioctl_data
*data
= NULL
;
2226 sbi
= ll_i2sbi(inode
);
2232 rc
= obd_ioctl_getdata(&buf
, &len
, arg
);
2237 if (!data
->ioc_inlbuf1
|| !data
->ioc_inlbuf2
||
2238 !data
->ioc_pbuf1
|| !data
->ioc_pbuf2
) {
2243 if (data
->ioc_inllen1
!= sizeof(__u32
) ||
2244 data
->ioc_inllen2
!= sizeof(__u32
) ||
2245 data
->ioc_plen1
!= sizeof(struct obd_statfs
) ||
2246 data
->ioc_plen2
!= sizeof(struct obd_uuid
)) {
2251 memcpy(&type
, data
->ioc_inlbuf1
, sizeof(__u32
));
2252 if (type
& LL_STATFS_LMV
) {
2253 exp
= sbi
->ll_md_exp
;
2254 } else if (type
& LL_STATFS_LOV
) {
2255 exp
= sbi
->ll_dt_exp
;
2261 rc
= obd_iocontrol(IOC_OBD_STATFS
, exp
, len
, buf
, NULL
);
2266 obd_ioctl_freedata(buf
, len
);
2270 int ll_process_config(struct lustre_cfg
*lcfg
)
2274 struct lprocfs_static_vars lvars
;
2278 lprocfs_llite_init_vars(&lvars
);
2280 /* The instance name contains the sb: lustre-client-aacfe000 */
2281 ptr
= strrchr(lustre_cfg_string(lcfg
, 0), '-');
2282 if (!ptr
|| !*(++ptr
))
2284 rc
= kstrtoul(ptr
, 16, &x
);
2288 /* This better be a real Lustre superblock! */
2289 LASSERT(s2lsi((struct super_block
*)sb
)->lsi_lmd
->lmd_magic
== LMD_MAGIC
);
2291 /* Note we have not called client_common_fill_super yet, so
2292 * proc fns must be able to handle that!
2294 rc
= class_process_proc_param(PARAM_LLITE
, lvars
.obd_vars
,
2301 /* this function prepares md_op_data hint for passing ot down to MD stack. */
2302 struct md_op_data
*ll_prep_md_op_data(struct md_op_data
*op_data
,
2303 struct inode
*i1
, struct inode
*i2
,
2304 const char *name
, int namelen
,
2305 int mode
, __u32 opc
, void *data
)
2307 if (namelen
> ll_i2sbi(i1
)->ll_namelen
)
2308 return ERR_PTR(-ENAMETOOLONG
);
2311 op_data
= kzalloc(sizeof(*op_data
), GFP_NOFS
);
2314 return ERR_PTR(-ENOMEM
);
2316 ll_i2gids(op_data
->op_suppgids
, i1
, i2
);
2317 op_data
->op_fid1
= *ll_inode2fid(i1
);
2318 if (S_ISDIR(i1
->i_mode
))
2319 op_data
->op_mea1
= ll_i2info(i1
)->lli_lsm_md
;
2322 op_data
->op_fid2
= *ll_inode2fid(i2
);
2323 if (S_ISDIR(i2
->i_mode
))
2324 op_data
->op_mea2
= ll_i2info(i2
)->lli_lsm_md
;
2326 fid_zero(&op_data
->op_fid2
);
2329 if (ll_i2sbi(i1
)->ll_flags
& LL_SBI_64BIT_HASH
)
2330 op_data
->op_cli_flags
|= CLI_HASH64
;
2332 if (ll_need_32bit_api(ll_i2sbi(i1
)))
2333 op_data
->op_cli_flags
|= CLI_API32
;
2335 op_data
->op_name
= name
;
2336 op_data
->op_namelen
= namelen
;
2337 op_data
->op_mode
= mode
;
2338 op_data
->op_mod_time
= ktime_get_real_seconds();
2339 op_data
->op_fsuid
= from_kuid(&init_user_ns
, current_fsuid());
2340 op_data
->op_fsgid
= from_kgid(&init_user_ns
, current_fsgid());
2341 op_data
->op_cap
= cfs_curproc_cap_pack();
2342 op_data
->op_bias
= 0;
2343 op_data
->op_cli_flags
= 0;
2344 if ((opc
== LUSTRE_OPC_CREATE
) && name
&&
2345 filename_is_volatile(name
, namelen
, NULL
))
2346 op_data
->op_bias
|= MDS_CREATE_VOLATILE
;
2347 op_data
->op_mds
= 0;
2348 op_data
->op_data
= data
;
2350 /* If the file is being opened after mknod() (normally due to NFS)
2351 * try to use the default stripe data from parent directory for
2352 * allocating OST objects. Try to pass the parent FID to MDS.
2354 if (opc
== LUSTRE_OPC_CREATE
&& i1
== i2
&& S_ISREG(i2
->i_mode
) &&
2355 !ll_i2info(i2
)->lli_has_smd
) {
2356 struct ll_inode_info
*lli
= ll_i2info(i2
);
2358 spin_lock(&lli
->lli_lock
);
2359 if (likely(!lli
->lli_has_smd
&& !fid_is_zero(&lli
->lli_pfid
)))
2360 op_data
->op_fid1
= lli
->lli_pfid
;
2361 spin_unlock(&lli
->lli_lock
);
2364 /* When called by ll_setattr_raw, file is i1. */
2365 if (ll_i2info(i1
)->lli_flags
& LLIF_DATA_MODIFIED
)
2366 op_data
->op_bias
|= MDS_DATA_MODIFIED
;
/*
 * Release an op_data obtained from ll_prep_md_op_data().
 * NOTE(review): the function body is not visible in this extract --
 * presumably it just frees @op_data; confirm against the full source.
 */
2371 void ll_finish_md_op_data(struct md_op_data
*op_data
)
2376 int ll_show_options(struct seq_file
*seq
, struct dentry
*dentry
)
2378 struct ll_sb_info
*sbi
;
2380 LASSERT(seq
&& dentry
);
2381 sbi
= ll_s2sbi(dentry
->d_sb
);
2383 if (sbi
->ll_flags
& LL_SBI_NOLCK
)
2384 seq_puts(seq
, ",nolock");
2386 if (sbi
->ll_flags
& LL_SBI_FLOCK
)
2387 seq_puts(seq
, ",flock");
2389 if (sbi
->ll_flags
& LL_SBI_LOCALFLOCK
)
2390 seq_puts(seq
, ",localflock");
2392 if (sbi
->ll_flags
& LL_SBI_USER_XATTR
)
2393 seq_puts(seq
, ",user_xattr");
2395 if (sbi
->ll_flags
& LL_SBI_LAZYSTATFS
)
2396 seq_puts(seq
, ",lazystatfs");
2398 if (sbi
->ll_flags
& LL_SBI_USER_FID2PATH
)
2399 seq_puts(seq
, ",user_fid2path");
2405 * Get obd name by cmd, and copy out to user space
2407 int ll_get_obd_name(struct inode
*inode
, unsigned int cmd
, unsigned long arg
)
2409 struct ll_sb_info
*sbi
= ll_i2sbi(inode
);
2410 struct obd_device
*obd
;
2412 if (cmd
== OBD_IOC_GETDTNAME
)
2413 obd
= class_exp2obd(sbi
->ll_dt_exp
);
2414 else if (cmd
== OBD_IOC_GETMDNAME
)
2415 obd
= class_exp2obd(sbi
->ll_md_exp
);
2422 if (copy_to_user((void __user
*)arg
, obd
->obd_name
,
2423 strlen(obd
->obd_name
) + 1))
2430 * Get lustre file system name by \a sbi. If \a buf is provided(non-NULL), the
2431 * fsname will be returned in this buffer; otherwise, a static buffer will be
2432 * used to store the fsname and returned to caller.
2434 char *ll_get_fsname(struct super_block
*sb
, char *buf
, int buflen
)
2436 static char fsname_static
[MTI_NAME_MAXLEN
];
2437 struct lustre_sb_info
*lsi
= s2lsi(sb
);
2442 /* this means the caller wants to use static buffer
2443 * and it doesn't care about race. Usually this is
2444 * in error reporting path
2446 buf
= fsname_static
;
2447 buflen
= sizeof(fsname_static
);
2450 len
= strlen(lsi
->lsi_lmd
->lmd_profile
);
2451 ptr
= strrchr(lsi
->lsi_lmd
->lmd_profile
, '-');
2452 if (ptr
&& (strcmp(ptr
, "-client") == 0))
2455 if (unlikely(len
>= buflen
))
2457 strncpy(buf
, lsi
->lsi_lmd
->lmd_profile
, len
);
2463 void ll_dirty_page_discard_warn(struct page
*page
, int ioret
)
2465 char *buf
, *path
= NULL
;
2466 struct dentry
*dentry
= NULL
;
2467 struct vvp_object
*obj
= cl_inode2vvp(page
->mapping
->host
);
2469 /* this can be called inside spin lock so use GFP_ATOMIC. */
2470 buf
= (char *)__get_free_page(GFP_ATOMIC
);
2472 dentry
= d_find_alias(page
->mapping
->host
);
2474 path
= dentry_path_raw(dentry
, buf
, PAGE_SIZE
);
2478 "%s: dirty page discard: %s/fid: " DFID
"/%s may get corrupted (rc %d)\n",
2479 ll_get_fsname(page
->mapping
->host
->i_sb
, NULL
, 0),
2480 s2lsi(page
->mapping
->host
->i_sb
)->lsi_lmd
->lmd_dev
,
2481 PFID(&obj
->vob_header
.coh_lu
.loh_fid
),
2482 (path
&& !IS_ERR(path
)) ? path
: "", ioret
);
2488 free_page((unsigned long)buf
);