]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/staging/lustre/lustre/llite/llite_lib.c
staging: lustre: lmv: separate master object with master stripe
[mirror_ubuntu-bionic-kernel.git] / drivers / staging / lustre / lustre / llite / llite_lib.c
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22 /*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2015, Intel Corporation.
27 */
28 /*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 *
32 * lustre/llite/llite_lib.c
33 *
34 * Lustre Light Super operations
35 */
36
37 #define DEBUG_SUBSYSTEM S_LLITE
38
39 #include <linux/module.h>
40 #include <linux/statfs.h>
41 #include <linux/types.h>
42 #include <linux/mm.h>
43
44 #include "../include/lustre/lustre_ioctl.h"
45 #include "../include/lustre_lite.h"
46 #include "../include/lustre_ha.h"
47 #include "../include/lustre_dlm.h"
48 #include "../include/lprocfs_status.h"
49 #include "../include/lustre_disk.h"
50 #include "../include/lustre_param.h"
51 #include "../include/lustre_log.h"
52 #include "../include/cl_object.h"
53 #include "../include/obd_cksum.h"
54 #include "llite_internal.h"
55
/* Slab cache for per-open-file ll_file_data objects. */
struct kmem_cache *ll_file_data_slab;
/* Root "llite" directory in debugfs; mountpoints register beneath it. */
struct dentry *llite_root;
/* sysfs kset that per-mount llite kobjects are added to. */
struct kset *llite_kset;

#ifndef log2
/* Integer log2: ffz(~n) is the index of the lowest set bit of n.
 * Only meaningful when n is a power of two (used on os_bsize below). */
#define log2(n) ffz(~(n))
#endif
63
/*
 * Allocate and initialize the per-mount ll_sb_info for @sb.
 *
 * Sets up the sbi locks, the client page cache (sized to half of low
 * memory), default readahead limits, a random client UUID, the default
 * mount flags (verbose, checksum, LRU resize, AGL) and the statahead
 * defaults.
 *
 * Returns the new ll_sb_info, or NULL on allocation failure.
 */
static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
{
	struct ll_sb_info *sbi = NULL;
	unsigned long pages;
	unsigned long lru_page_max;
	struct sysinfo si;
	class_uuid_t uuid;
	int i;

	sbi = kzalloc(sizeof(*sbi), GFP_NOFS);
	if (!sbi)
		return NULL;

	spin_lock_init(&sbi->ll_lock);
	mutex_init(&sbi->ll_lco.lco_lock);
	spin_lock_init(&sbi->ll_pp_extent_lock);
	spin_lock_init(&sbi->ll_process_lock);
	sbi->ll_rw_stats_on = 0;

	/* Cap the client LRU cache at half of low memory. */
	si_meminfo(&si);
	pages = si.totalram - si.totalhigh;
	lru_page_max = pages / 2;

	sbi->ll_cache = cl_cache_init(lru_page_max);
	if (!sbi->ll_cache) {
		kfree(sbi);
		return NULL;
	}

	/* Per-file readahead: 1/32 of memory, bounded by the compile-time
	 * default; the per-mount total starts at the same value. */
	sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
						    SBI_DEFAULT_READAHEAD_MAX);
	sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
	sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
		SBI_DEFAULT_READAHEAD_WHOLE_MAX;

	/* Identify this client instance to the servers. */
	ll_generate_random_uuid(uuid);
	class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
	CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);

	sbi->ll_flags |= LL_SBI_VERBOSE;
	sbi->ll_flags |= LL_SBI_CHECKSUM;

	sbi->ll_flags |= LL_SBI_LRU_RESIZE;

	/* One read and one write extent histogram lock per process slot. */
	for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
		spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
			       pp_r_hist.oh_lock);
		spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
			       pp_w_hist.oh_lock);
	}

	/* metadata statahead is enabled by default */
	sbi->ll_sa_max = LL_SA_RPC_DEF;
	atomic_set(&sbi->ll_sa_total, 0);
	atomic_set(&sbi->ll_sa_wrong, 0);
	atomic_set(&sbi->ll_agl_total, 0);
	sbi->ll_flags |= LL_SBI_AGL_ENABLED;

	sbi->ll_sb = sb;

	return sbi;
}
126
127 static void ll_free_sbi(struct super_block *sb)
128 {
129 struct ll_sb_info *sbi = ll_s2sbi(sb);
130
131 if (sbi->ll_cache) {
132 cl_cache_decref(sbi->ll_cache);
133 sbi->ll_cache = NULL;
134 }
135
136 kfree(sbi);
137 }
138
/**
 * client_common_fill_super() - connect a client mount to its servers and
 * finish superblock setup.
 *
 * @sb:  superblock being filled
 * @md:  name of the metadata (MDC/LMV) obd device to connect through
 * @dt:  name of the data (OSC/LOV) obd device to connect through
 * @mnt: vfsmount of this mount (unused in this function)
 *
 * Negotiates connect flags and connects the MD export, validates the
 * server's feature set, then connects the DT export, fetches the root
 * FID and its attributes, builds the root inode/dentry, starts the
 * close thread and registers the mount in debugfs.  All partially
 * acquired state is unwound through the goto ladder at the end.
 *
 * Returns 0 on success or a negative errno.
 */
static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
				    struct vfsmount *mnt)
{
	struct inode *root = NULL;
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_device *obd;
	struct obd_statfs *osfs = NULL;
	struct ptlrpc_request *request = NULL;
	struct obd_connect_data *data = NULL;
	struct obd_uuid *uuid;
	struct md_op_data *op_data;
	struct lustre_md lmd;
	u64 valid;
	int size, err, checksum;

	obd = class_name2obd(md);
	if (!obd) {
		CERROR("MD %s: not setup or attached\n", md);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;

	osfs = kzalloc(sizeof(*osfs), GFP_NOFS);
	if (!osfs) {
		kfree(data);
		return -ENOMEM;
	}

	/* indicate the features supported by this client */
	data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
				  OBD_CONNECT_ATTRFID |
				  OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
				  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
				  OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
				  OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
				  OBD_CONNECT_64BITHASH |
				  OBD_CONNECT_EINPROGRESS |
				  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
				  OBD_CONNECT_LAYOUTLOCK |
				  OBD_CONNECT_PINGLESS |
				  OBD_CONNECT_MAX_EASIZE |
				  OBD_CONNECT_FLOCK_DEAD |
				  OBD_CONNECT_DISP_STRIPE;

	if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
		data->ocd_connect_flags |= OBD_CONNECT_SOM;

	if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
		data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
#ifdef CONFIG_FS_POSIX_ACL
	data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
#endif

	if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
		/* flag mdc connection as lightweight, only used for test
		 * purpose, use with care
		 */
		data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;

	data->ocd_ibits_known = MDS_INODELOCK_FULL;
	data->ocd_version = LUSTRE_VERSION_CODE;

	if (sb->s_flags & MS_RDONLY)
		data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
	if (sbi->ll_flags & LL_SBI_USER_XATTR)
		data->ocd_connect_flags |= OBD_CONNECT_XATTR;

	/* Pick the file_operations matching the requested flock mode. */
	if (sbi->ll_flags & LL_SBI_FLOCK)
		sbi->ll_fop = &ll_file_operations_flock;
	else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
		sbi->ll_fop = &ll_file_operations;
	else
		sbi->ll_fop = &ll_file_operations_noflock;

	/* real client */
	data->ocd_connect_flags |= OBD_CONNECT_REAL;

	data->ocd_brw_size = MD_MAX_BRW_SIZE;

	err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
			  data, NULL);
	if (err == -EBUSY) {
		LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
				   md);
		goto out;
	} else if (err) {
		CERROR("cannot connect to %s: rc = %d\n", md, err);
		goto out;
	}

	sbi->ll_md_exp->exp_connect_data = *data;

	err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
			   LUSTRE_SEQ_METADATA);
	if (err) {
		CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_md;
	}

	/* For mount, we only need fs info from MDT0, and also in DNE, it
	 * can make sure the client can be mounted as long as MDT0 is
	 * available
	 */
	err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
			 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
			 OBD_STATFS_FOR_MDT0);
	if (err)
		goto out_md_fid;

	/* This needs to be after statfs to ensure connect has finished.
	 * Note that "data" does NOT contain the valid connect reply.
	 * If connecting to a 1.8 server there will be no LMV device, so
	 * we can access the MDC export directly and exp_connect_flags will
	 * be non-zero, but if accessing an upgraded 2.1 server it will
	 * have the correct flags filled in.
	 * XXX: fill in the LMV exp_connect_flags from MDC(s).
	 */
	valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
	if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
	    valid != CLIENT_CONNECT_MDT_REQD) {
		char *buf;

		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf) {
			err = -ENOMEM;
			goto out_md_fid;
		}
		obd_connect_flags2str(buf, PAGE_SIZE,
				      valid ^ CLIENT_CONNECT_MDT_REQD, ",");
		LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
				   sbi->ll_md_exp->exp_obd->obd_name, buf);
		kfree(buf);
		err = -EPROTO;
		goto out_md_fid;
	}

	/* Fetch the real connect reply negotiated with the MDT. */
	size = sizeof(*data);
	err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
			   KEY_CONN_DATA, &size, data, NULL);
	if (err) {
		CERROR("%s: Get connect data failed: rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_md_fid;
	}

	LASSERT(osfs->os_bsize);
	sb->s_blocksize = osfs->os_bsize;
	sb->s_blocksize_bits = log2(osfs->os_bsize);
	sb->s_magic = LL_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sbi->ll_namelen = osfs->os_namelen;

	if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
	    !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
		LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
		sbi->ll_flags &= ~LL_SBI_USER_XATTR;
	}

	if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
		sb->s_flags |= MS_POSIXACL;
		sbi->ll_flags |= LL_SBI_ACL;
	} else {
		LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
		sb->s_flags &= ~MS_POSIXACL;
		sbi->ll_flags &= ~LL_SBI_ACL;
	}

	if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
		sbi->ll_flags |= LL_SBI_64BIT_HASH;

	if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
		sbi->ll_md_brw_size = data->ocd_brw_size;
	else
		sbi->ll_md_brw_size = PAGE_SIZE;

	if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
		sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;

	if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
		if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
			LCONSOLE_INFO(
				"%s: disabling xattr cache due to unknown maximum xattr size.\n",
				dt);
		} else {
			sbi->ll_flags |= LL_SBI_XATTR_CACHE;
			sbi->ll_xattr_cache_enabled = 1;
		}
	}

	/* Now connect the data (OSC/LOV) device. */
	obd = class_name2obd(dt);
	if (!obd) {
		CERROR("DT %s: not setup or attached\n", dt);
		err = -ENODEV;
		goto out_md_fid;
	}

	data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
				  OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
				  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
				  OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
				  OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
				  OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
				  OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
				  OBD_CONNECT_EINPROGRESS |
				  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
				  OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;

	if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
		data->ocd_connect_flags |= OBD_CONNECT_SOM;

	if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
		/* OBD_CONNECT_CKSUM should always be set, even if checksums are
		 * disabled by default, because it can still be enabled on the
		 * fly via /sys. As a consequence, we still need to come to an
		 * agreement on the supported algorithms at connect time
		 */
		data->ocd_connect_flags |= OBD_CONNECT_CKSUM;

		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
			data->ocd_cksum_types = OBD_CKSUM_ADLER;
		else
			data->ocd_cksum_types = cksum_types_supported_client();
	}

	data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;

	CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
	       data->ocd_connect_flags,
	       data->ocd_version, data->ocd_grant);

	obd->obd_upcall.onu_owner = &sbi->ll_lco;
	obd->obd_upcall.onu_upcall = cl_ocd_update;

	data->ocd_brw_size = DT_MAX_BRW_SIZE;

	err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
			  NULL);
	if (err == -EBUSY) {
		LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
				   dt);
		goto out_md;
	} else if (err) {
		CERROR("%s: Cannot connect to %s: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
		goto out_md;
	}

	sbi->ll_dt_exp->exp_connect_data = *data;

	err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
			   LUSTRE_SEQ_METADATA);
	if (err) {
		CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, err);
		goto out_dt;
	}

	/* Publish both exports to the cl_object layer. */
	mutex_lock(&sbi->ll_lco.lco_lock);
	sbi->ll_lco.lco_flags = data->ocd_connect_flags;
	sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
	sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
	mutex_unlock(&sbi->ll_lco.lco_lock);

	fid_zero(&sbi->ll_root_fid);
	err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid);
	if (err) {
		CERROR("cannot mds_connect: rc = %d\n", err);
		goto out_lock_cn_cb;
	}
	if (!fid_is_sane(&sbi->ll_root_fid)) {
		CERROR("%s: Invalid root fid "DFID" during mount\n",
		       sbi->ll_md_exp->exp_obd->obd_name,
		       PFID(&sbi->ll_root_fid));
		err = -EINVAL;
		goto out_lock_cn_cb;
	}
	CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));

	sb->s_op = &lustre_super_operations;
	sb->s_xattr = ll_xattr_handlers;
#if THREAD_SIZE >= 8192 /*b=17630*/
	sb->s_export_op = &lustre_export_operations;
#endif

	/* make root inode
	 * XXX: move this to after cbd setup?
	 */
	valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
	if (sbi->ll_flags & LL_SBI_ACL)
		valid |= OBD_MD_FLACL;

	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data) {
		err = -ENOMEM;
		goto out_lock_cn_cb;
	}

	op_data->op_fid1 = sbi->ll_root_fid;
	op_data->op_mode = 0;
	op_data->op_valid = valid;

	err = md_getattr(sbi->ll_md_exp, op_data, &request);
	kfree(op_data);
	if (err) {
		CERROR("%s: md_getattr failed for root: rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_lock_cn_cb;
	}

	err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
			       sbi->ll_md_exp, &lmd);
	if (err) {
		CERROR("failed to understand root inode md: rc = %d\n", err);
		ptlrpc_req_finished(request);
		goto out_lock_cn_cb;
	}

	LASSERT(fid_is_sane(&sbi->ll_root_fid));
	root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
					    sbi->ll_flags & LL_SBI_32BIT_API),
		       &lmd);
	md_free_lustre_md(sbi->ll_md_exp, &lmd);
	ptlrpc_req_finished(request);

	if (IS_ERR(root)) {
		/* ll_iget failed; drop whatever md_get_lustre_md attached. */
		if (lmd.lsm)
			obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
#ifdef CONFIG_FS_POSIX_ACL
		if (lmd.posix_acl) {
			posix_acl_release(lmd.posix_acl);
			lmd.posix_acl = NULL;
		}
#endif
		err = -EBADF;
		CERROR("lustre_lite: bad iget4 for root\n");
		goto out_root;
	}

	err = ll_close_thread_start(&sbi->ll_lcq);
	if (err) {
		CERROR("cannot start close thread: rc %d\n", err);
		goto out_root;
	}

	/* Push checksum and cache-set settings to the data export;
	 * failures here are not treated as fatal. */
	checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
				 KEY_CHECKSUM, sizeof(checksum), &checksum,
				 NULL);
	cl_sb_init(sb);

	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
				 KEY_CACHE_SET, sizeof(*sbi->ll_cache),
				 sbi->ll_cache, NULL);

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		/* d_make_root consumed (iput) root on failure. */
		CERROR("%s: can't make root dentry\n",
		       ll_get_fsname(sb, NULL, 0));
		err = -ENOMEM;
		goto out_lock_cn_cb;
	}

	sbi->ll_sdev_orig = sb->s_dev;

	/* We set sb->s_dev equal on all lustre clients in order to support
	 * NFS export clustering. NFSD requires that the FSID be the same
	 * on all clients.
	 */
	/* s_dev is also used in lt_compare() to compare two fs, but that is
	 * only a node-local comparison.
	 */
	uuid = obd_get_uuid(sbi->ll_md_exp);
	if (uuid) {
		sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
		get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
	}

	kfree(data);
	kfree(osfs);

	if (llite_root) {
		err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
		if (err < 0) {
			/* debugfs registration failure is non-fatal. */
			CERROR("%s: could not register mount in debugfs: "
			       "rc = %d\n", ll_get_fsname(sb, NULL, 0), err);
			err = 0;
		}
	}

	return err;
out_root:
	iput(root);
out_lock_cn_cb:
	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
out_dt:
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;
out_md_fid:
	obd_fid_fini(sbi->ll_md_exp->exp_obd);
out_md:
	obd_disconnect(sbi->ll_md_exp);
	sbi->ll_md_exp = NULL;
out:
	kfree(data);
	kfree(osfs);
	return err;
}
550
551 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
552 {
553 int size, rc;
554
555 *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
556 size = sizeof(int);
557 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
558 KEY_MAX_EASIZE, &size, lmmsize, NULL);
559 if (rc)
560 CERROR("Get max mdsize error rc %d\n", rc);
561
562 return rc;
563 }
564
565 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
566 {
567 int size, rc;
568
569 size = sizeof(int);
570 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
571 KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
572 if (rc)
573 CERROR("Get default mdsize error rc %d\n", rc);
574
575 return rc;
576 }
577
/*
 * Undo client_common_fill_super(): stop the close thread, tear down
 * cl_object state, then disconnect the data export and finally the
 * metadata export (FID client finalized before each disconnect).
 * debugfs entries are removed while the MD export is still valid.
 */
static void client_common_put_super(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);

	ll_close_thread_shutdown(sbi->ll_lcq);

	cl_sb_fini(sb);

	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;

	ldebugfs_unregister_mountpoint(sbi);

	obd_fid_fini(sbi->ll_md_exp->exp_obd);
	obd_disconnect(sbi->ll_md_exp);
	sbi->ll_md_exp = NULL;
}
596
597 void ll_kill_super(struct super_block *sb)
598 {
599 struct ll_sb_info *sbi;
600
601 /* not init sb ?*/
602 if (!(sb->s_flags & MS_ACTIVE))
603 return;
604
605 sbi = ll_s2sbi(sb);
606 /* we need to restore s_dev from changed for clustered NFS before
607 * put_super because new kernels have cached s_dev and change sb->s_dev
608 * in put_super not affected real removing devices
609 */
610 if (sbi) {
611 sb->s_dev = sbi->ll_sdev_orig;
612 sbi->ll_umounting = 1;
613 }
614 }
615
/*
 * Return @fl if the option string @data begins with the option name
 * @opt (prefix match), otherwise 0.
 */
static inline int ll_set_opt(const char *opt, char *data, int fl)
{
	return strncmp(opt, data, strlen(opt)) == 0 ? fl : 0;
}
623
624 /* non-client-specific mount options are parsed in lmd_parse */
625 static int ll_options(char *options, int *flags)
626 {
627 int tmp;
628 char *s1 = options, *s2;
629
630 if (!options)
631 return 0;
632
633 CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
634
635 while (*s1) {
636 CDEBUG(D_SUPER, "next opt=%s\n", s1);
637 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
638 if (tmp) {
639 *flags |= tmp;
640 goto next;
641 }
642 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
643 if (tmp) {
644 *flags |= tmp;
645 goto next;
646 }
647 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
648 if (tmp) {
649 *flags |= tmp;
650 goto next;
651 }
652 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
653 if (tmp) {
654 *flags &= ~tmp;
655 goto next;
656 }
657 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
658 if (tmp) {
659 *flags |= tmp;
660 goto next;
661 }
662 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
663 if (tmp) {
664 *flags &= ~tmp;
665 goto next;
666 }
667 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
668 if (tmp) {
669 *flags |= tmp;
670 goto next;
671 }
672 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
673 if (tmp) {
674 *flags &= ~tmp;
675 goto next;
676 }
677
678 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
679 if (tmp) {
680 *flags |= tmp;
681 goto next;
682 }
683 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
684 if (tmp) {
685 *flags &= ~tmp;
686 goto next;
687 }
688 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
689 if (tmp) {
690 *flags |= tmp;
691 goto next;
692 }
693 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
694 if (tmp) {
695 *flags &= ~tmp;
696 goto next;
697 }
698 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
699 if (tmp) {
700 *flags |= tmp;
701 goto next;
702 }
703 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
704 if (tmp) {
705 *flags &= ~tmp;
706 goto next;
707 }
708 tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
709 if (tmp) {
710 *flags |= tmp;
711 goto next;
712 }
713 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
714 if (tmp) {
715 *flags |= tmp;
716 goto next;
717 }
718 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
719 if (tmp) {
720 *flags |= tmp;
721 goto next;
722 }
723 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
724 if (tmp) {
725 *flags &= ~tmp;
726 goto next;
727 }
728 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
729 s1);
730 return -EINVAL;
731
732 next:
733 /* Find next opt */
734 s2 = strchr(s1, ',');
735 if (!s2)
736 break;
737 s1 = s2 + 1;
738 }
739 return 0;
740 }
741
/*
 * Initialize a freshly-created ll_inode_info.
 *
 * Sets magic/flags/limits, the locking primitives and open-handle
 * bookkeeping shared by all inode types, then the type-specific parts:
 * statahead state for directories, size/IO state for everything else.
 * lli_vfs_inode.i_mode must already be set; lli_fid is intentionally
 * left alone (initialized by the caller).
 */
void ll_lli_init(struct ll_inode_info *lli)
{
	lli->lli_inode_magic = LLI_INODE_MAGIC;
	lli->lli_flags = 0;
	lli->lli_ioepoch = 0;
	lli->lli_maxbytes = MAX_LFS_FILESIZE;
	spin_lock_init(&lli->lli_lock);
	lli->lli_posix_acl = NULL;
	/* Do not set lli_fid, it has been initialized already. */
	fid_zero(&lli->lli_pfid);
	INIT_LIST_HEAD(&lli->lli_close_list);
	lli->lli_pending_och = NULL;
	lli->lli_mds_read_och = NULL;
	lli->lli_mds_write_och = NULL;
	lli->lli_mds_exec_och = NULL;
	lli->lli_open_fd_read_count = 0;
	lli->lli_open_fd_write_count = 0;
	lli->lli_open_fd_exec_count = 0;
	mutex_init(&lli->lli_och_mutex);
	spin_lock_init(&lli->lli_agl_lock);
	lli->lli_has_smd = false;
	spin_lock_init(&lli->lli_layout_lock);
	ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
	lli->lli_clob = NULL;

	init_rwsem(&lli->lli_xattrs_list_rwsem);
	mutex_init(&lli->lli_xattrs_enq_lock);

	/* i_mode decides which half of the union-like tail gets set up. */
	LASSERT(lli->lli_vfs_inode.i_mode != 0);
	if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
		/* Directory: readdir and statahead state. */
		mutex_init(&lli->lli_readdir_mutex);
		lli->lli_opendir_key = NULL;
		lli->lli_sai = NULL;
		spin_lock_init(&lli->lli_sa_lock);
		lli->lli_opendir_pid = 0;
	} else {
		/* Regular file / symlink: size, truncate and glimpse state. */
		mutex_init(&lli->lli_size_mutex);
		lli->lli_symlink_name = NULL;
		init_rwsem(&lli->lli_trunc_sem);
		mutex_init(&lli->lli_write_mutex);
		init_rwsem(&lli->lli_glimpse_sem);
		lli->lli_glimpse_time = 0;
		INIT_LIST_HEAD(&lli->lli_agl_list);
		lli->lli_agl_index = 0;
		lli->lli_async_rc = 0;
	}
	mutex_init(&lli->lli_layout_mutex);
}
790
791 static inline int ll_bdi_register(struct backing_dev_info *bdi)
792 {
793 static atomic_t ll_bdi_num = ATOMIC_INIT(0);
794
795 bdi->name = "lustre";
796 return bdi_register(bdi, NULL, "lustre-%d",
797 atomic_inc_return(&ll_bdi_num));
798 }
799
/*
 * Entry point for mounting a lustre client superblock.
 *
 * Allocates the config-log instance and the per-mount sbi, parses the
 * client mount options, initializes and registers the BDI, processes
 * the configuration log to set up the client obds, resolves the mount
 * profile into MD/DT device names, and hands off to
 * client_common_fill_super().  On any failure ll_put_super() unwinds
 * whatever was set up.
 *
 * Returns 0 on success or a negative errno.
 */
int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
{
	struct lustre_profile *lprof = NULL;
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct ll_sb_info *sbi;
	char *dt = NULL, *md = NULL;
	char *profilenm = get_profile_name(sb);
	struct config_llog_instance *cfg;
	int err;

	CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);

	cfg = kzalloc(sizeof(*cfg), GFP_NOFS);
	if (!cfg)
		return -ENOMEM;

	try_module_get(THIS_MODULE);

	/* client additional sb info */
	sbi = ll_init_sbi(sb);
	/* lsi_llsbi is set even on failure (to NULL) before the check. */
	lsi->lsi_llsbi = sbi;
	if (!sbi) {
		module_put(THIS_MODULE);
		kfree(cfg);
		return -ENOMEM;
	}

	err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
	if (err)
		goto out_free;

	err = bdi_init(&lsi->lsi_bdi);
	if (err)
		goto out_free;
	lsi->lsi_flags |= LSI_BDI_INITIALIZED;
	lsi->lsi_bdi.capabilities = 0;
	err = ll_bdi_register(&lsi->lsi_bdi);
	if (err)
		goto out_free;

	sb->s_bdi = &lsi->lsi_bdi;
	/* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
	sb->s_d_op = &ll_d_ops;

	/* Generate a string unique to this super, in case some joker tries
	 * to mount the same fs at two mount points.
	 * Use the address of the super itself.
	 */
	cfg->cfg_instance = sb;
	cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
	cfg->cfg_callback = class_config_llog_handler;
	/* set up client obds */
	err = lustre_process_log(sb, profilenm, cfg);
	if (err < 0)
		goto out_free;

	/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
	lprof = class_get_profile(profilenm);
	if (!lprof) {
		LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
				   profilenm);
		err = -EINVAL;
		goto out_free;
	}
	CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
	       lprof->lp_md, lprof->lp_dt);

	/* Instance-qualified device names, e.g. "<name>-<sb address>". */
	dt = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
	if (!dt) {
		err = -ENOMEM;
		goto out_free;
	}

	md = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_md, cfg->cfg_instance);
	if (!md) {
		err = -ENOMEM;
		goto out_free;
	}

	/* connections, registrations, sb setup */
	err = client_common_fill_super(sb, md, dt, mnt);

out_free:
	kfree(md);
	kfree(dt);
	if (err)
		ll_put_super(sb);
	else if (sbi->ll_flags & LL_SBI_VERBOSE)
		LCONSOLE_WARN("Mounted %s\n", profilenm);

	kfree(cfg);
	return err;
} /* ll_fill_super */
893
/*
 * Tear down a client superblock (also called by ll_fill_super on its
 * failure path, so every step must tolerate partial setup).
 *
 * Ends the config logs, optionally waits for unstable pages to commit
 * (only when not forcing), propagates obd_force to all obds in this
 * mount's uuid group, runs the common teardown when the close thread
 * exists (i.e. client_common_fill_super succeeded), cleans up the
 * remaining obd devices, and frees the sbi.
 */
void ll_put_super(struct super_block *sb)
{
	struct config_llog_instance cfg, params_cfg;
	struct obd_device *obd;
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	char *profilenm = get_profile_name(sb);
	/* force defaults to 1 so a mount without an MD export is forced. */
	int ccc_count, next, force = 1, rc = 0;

	CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);

	cfg.cfg_instance = sb;
	lustre_end_log(sb, profilenm, &cfg);

	params_cfg.cfg_instance = sb;
	lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);

	if (sbi->ll_md_exp) {
		obd = class_exp2obd(sbi->ll_md_exp);
		if (obd)
			force = obd->obd_force;
	}

	/* Wait for unstable pages to be committed to stable storage */
	if (!force) {
		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);

		rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
				  !atomic_read(&sbi->ll_cache->ccc_unstable_nr),
				  &lwi);
	}

	ccc_count = atomic_read(&sbi->ll_cache->ccc_unstable_nr);
	/* rc == -EINTR means the wait above was interrupted by a signal. */
	if (!force && rc != -EINTR)
		LASSERTF(!ccc_count, "count: %i\n", ccc_count);

	/* We need to set force before the lov_disconnect in
	 * lustre_common_put_super, since l_d cleans up osc's as well.
	 */
	if (force) {
		next = 0;
		while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
						     &next)) != NULL) {
			obd->obd_force = force;
		}
	}

	if (sbi->ll_lcq) {
		/* Only if client_common_fill_super succeeded */
		client_common_put_super(sb);
	}

	next = 0;
	while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
		class_manual_cleanup(obd);

	if (sbi->ll_flags & LL_SBI_VERBOSE)
		LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");

	if (profilenm)
		class_del_profile(profilenm);

	if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
		bdi_destroy(&lsi->lsi_bdi);
		lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
	}

	ll_free_sbi(sb);
	lsi->lsi_llsbi = NULL;

	lustre_common_put_super(sb);

	cl_env_cache_purge(~0);

	module_put(THIS_MODULE);
} /* client_put_super */
970
/*
 * Return a referenced inode attached to @lock's resource, or NULL.
 *
 * Takes the resource lock, validates the lvb inode's magic, and grabs a
 * reference via igrab() so the caller owns one.  A bogus magic is logged
 * (at D_INFO if the inode is already being freed, D_WARNING otherwise)
 * and NULL is returned.
 */
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
{
	struct inode *inode = NULL;

	/* NOTE: we depend on atomic igrab() -bzzz */
	lock_res_and_lock(lock);
	if (lock->l_resource->lr_lvb_inode) {
		struct ll_inode_info *lli;

		lli = ll_i2info(lock->l_resource->lr_lvb_inode);
		if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
			inode = igrab(lock->l_resource->lr_lvb_inode);
		} else {
			/* The bogus inode is only borrowed to choose a debug
			 * level from i_state; it is never returned. */
			inode = lock->l_resource->lr_lvb_inode;
			LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
					 D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
					 lock->l_resource->lr_lvb_inode,
					 lli->lli_inode_magic);
			inode = NULL;
		}
	}
	unlock_res_and_lock(lock);
	return inode;
}
995
996 static void ll_dir_clear_lsm_md(struct inode *inode)
997 {
998 struct ll_inode_info *lli = ll_i2info(inode);
999
1000 LASSERT(S_ISDIR(inode->i_mode));
1001
1002 if (lli->lli_lsm_md) {
1003 lmv_free_memmd(lli->lli_lsm_md);
1004 lli->lli_lsm_md = NULL;
1005 }
1006 }
1007
/*
 * Get (or create) the inode for one slave stripe of a striped directory.
 *
 * @sb:  superblock of this mount
 * @fid: FID of the slave stripe object
 * @md:  lustre_md carrying the mdt_body and stripe information
 *
 * Builds an inode number from @fid, looks it up with iget_locked(), and
 * for a newly created inode fills in a minimal directory inode: mode
 * from the mdt_body, zeroed times, directory ops, the slave FID, and the
 * master object's FID in lli_pfid.
 *
 * Returns the inode or an ERR_PTR.
 */
static struct inode *ll_iget_anon_dir(struct super_block *sb,
				      const struct lu_fid *fid,
				      struct lustre_md *md)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct mdt_body *body = md->body;
	struct inode *inode;
	ino_t ino;

	ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
	inode = iget_locked(sb, ino);
	if (!inode) {
		CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
		       ll_get_fsname(sb, NULL, 0), PFID(fid));
		return ERR_PTR(-ENOENT);
	}

	if (inode->i_state & I_NEW) {
		struct ll_inode_info *lli = ll_i2info(inode);
		struct lmv_stripe_md *lsm = md->lmv;

		/* Keep the type bits from the server, rest of i_mode local. */
		inode->i_mode = (inode->i_mode & ~S_IFMT) |
				(body->mode & S_IFMT);
		LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
			 PFID(fid));

		LTIME_S(inode->i_mtime) = 0;
		LTIME_S(inode->i_atime) = 0;
		LTIME_S(inode->i_ctime) = 0;
		inode->i_rdev = 0;

		inode->i_op = &ll_dir_inode_operations;
		inode->i_fop = &ll_dir_operations;
		lli->lli_fid = *fid;
		ll_lli_init(lli);

		LASSERT(lsm);
		/* master object FID */
		lli->lli_pfid = body->fid1;
		CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
		       lli, PFID(fid), PFID(&lli->lli_pfid));
		unlock_new_inode(inode);
	}

	return inode;
}
1054
/*
 * Initialize the stripe metadata of a striped directory @inode from @md.
 *
 * Instantiates one lmo_root inode per stripe (for a migrating directory
 * stripe 0 is the master inode itself), then asks the LMV layer to fill
 * in the remaining lsm state.  Returns 0 on success or a negative errno;
 * on stripe-inode failure the offending slot is cleared before return.
 */
static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
{
	struct lmv_stripe_md *lsm = md->lmv;
	struct lu_fid *fid;
	int i;

	LASSERT(lsm);
	/*
	 * XXX sigh, this lsm_root initialization should be in
	 * LMV layer, but it needs ll_iget right now, so we
	 * put this here right now.
	 */
	for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
		fid = &lsm->lsm_md_oinfo[i].lmo_fid;
		LASSERT(!lsm->lsm_md_oinfo[i].lmo_root);
		/* Unfortunately ll_iget will call ll_update_inode,
		 * where the initialization of slave inode is slightly
		 * different, so it reset lsm_md to NULL to avoid
		 * initializing lsm for slave inode.
		 */
		/* For migrating inode, master stripe and master object will
		 * be same, so we only need assign this inode
		 */
		if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && !i)
			lsm->lsm_md_oinfo[i].lmo_root = inode;
		else
			lsm->lsm_md_oinfo[i].lmo_root =
				ll_iget_anon_dir(inode->i_sb, fid, md);
		if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
			int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);

			/* Don't leave an ERR_PTR behind for later iput. */
			lsm->lsm_md_oinfo[i].lmo_root = NULL;
			return rc;
		}
	}

	/*
	 * Here is where the lsm is being initialized(fill lmo_info) after
	 * client retrieve MD stripe information from MDT.
	 */
	return md_update_lsm_md(ll_i2mdexp(inode), lsm, md->body,
				ll_md_blocking_ast);
}
1098
1099 static inline int lli_lsm_md_eq(const struct lmv_stripe_md *lsm_md1,
1100 const struct lmv_stripe_md *lsm_md2)
1101 {
1102 return lsm_md1->lsm_md_magic == lsm_md2->lsm_md_magic &&
1103 lsm_md1->lsm_md_stripe_count == lsm_md2->lsm_md_stripe_count &&
1104 lsm_md1->lsm_md_master_mdt_index ==
1105 lsm_md2->lsm_md_master_mdt_index &&
1106 lsm_md1->lsm_md_hash_type == lsm_md2->lsm_md_hash_type &&
1107 lsm_md1->lsm_md_layout_version ==
1108 lsm_md2->lsm_md_layout_version &&
1109 !strcmp(lsm_md1->lsm_md_pool_name,
1110 lsm_md2->lsm_md_pool_name);
1111 }
1112
1113 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1114 {
1115 struct ll_inode_info *lli = ll_i2info(inode);
1116 struct lmv_stripe_md *lsm = md->lmv;
1117 int rc;
1118
1119 LASSERT(S_ISDIR(inode->i_mode));
1120 CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1121 PFID(ll_inode2fid(inode)));
1122
1123 /* no striped information from request. */
1124 if (!lsm) {
1125 if (!lli->lli_lsm_md) {
1126 return 0;
1127 } else if (lli->lli_lsm_md->lsm_md_hash_type &
1128 LMV_HASH_FLAG_MIGRATION) {
1129 /*
1130 * migration is done, the temporay MIGRATE layout has
1131 * been removed
1132 */
1133 CDEBUG(D_INODE, DFID" finish migration.\n",
1134 PFID(ll_inode2fid(inode)));
1135 lmv_free_memmd(lli->lli_lsm_md);
1136 lli->lli_lsm_md = NULL;
1137 return 0;
1138 } else {
1139 /*
1140 * The lustre_md from req does not include stripeEA,
1141 * see ll_md_setattr
1142 */
1143 return 0;
1144 }
1145 }
1146
1147 /* set the directory layout */
1148 if (!lli->lli_lsm_md) {
1149 rc = ll_init_lsm_md(inode, md);
1150 if (rc)
1151 return rc;
1152
1153 lli->lli_lsm_md = lsm;
1154 /*
1155 * set lsm_md to NULL, so the following free lustre_md
1156 * will not free this lsm
1157 */
1158 md->lmv = NULL;
1159 CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm,
1160 lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
1161 return 0;
1162 }
1163
1164 /* Compare the old and new stripe information */
1165 if (!lsm_md_eq(lli->lli_lsm_md, lsm)) {
1166 struct lmv_stripe_md *old_lsm = lli->lli_lsm_md;
1167 int idx;
1168
1169 CERROR("%s: inode "DFID"(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
1170 ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid),
1171 inode, lsm, old_lsm,
1172 lsm->lsm_md_magic, old_lsm->lsm_md_magic,
1173 lsm->lsm_md_stripe_count,
1174 old_lsm->lsm_md_stripe_count,
1175 lsm->lsm_md_master_mdt_index,
1176 old_lsm->lsm_md_master_mdt_index,
1177 lsm->lsm_md_hash_type, old_lsm->lsm_md_hash_type,
1178 lsm->lsm_md_layout_version,
1179 old_lsm->lsm_md_layout_version,
1180 lsm->lsm_md_pool_name,
1181 old_lsm->lsm_md_pool_name);
1182
1183 for (idx = 0; idx < old_lsm->lsm_md_stripe_count; idx++) {
1184 CERROR("%s: sub FIDs in old lsm idx %d, old: "DFID"\n",
1185 ll_get_fsname(inode->i_sb, NULL, 0), idx,
1186 PFID(&old_lsm->lsm_md_oinfo[idx].lmo_fid));
1187 }
1188
1189 for (idx = 0; idx < lsm->lsm_md_stripe_count; idx++) {
1190 CERROR("%s: sub FIDs in new lsm idx %d, new: "DFID"\n",
1191 ll_get_fsname(inode->i_sb, NULL, 0), idx,
1192 PFID(&lsm->lsm_md_oinfo[idx].lmo_fid));
1193 }
1194
1195 return -EIO;
1196 }
1197
1198 return 0;
1199 }
1200
/*
 * Per-inode teardown called from ll_delete_inode() during eviction.
 * Releases MDS open handles, symlink buffers, xattr cache, ACLs and the
 * cl_object before the inode is dropped.  Statement order matters: the
 * cl_object must be finalized while the inode's lsm is still valid.
 */
void ll_clear_inode(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
	       PFID(ll_inode2fid(inode)), inode);

	if (S_ISDIR(inode->i_mode)) {
		/* these should have been cleared in ll_file_release */
		LASSERT(!lli->lli_opendir_key);
		LASSERT(!lli->lli_sai);
		LASSERT(lli->lli_opendir_pid == 0);
	}

	/* drop any Size-on-MDS lock hint under lli_lock */
	spin_lock(&lli->lli_lock);
	ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
	spin_unlock(&lli->lli_lock);
	md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));

	/* no file descriptors may still reference this inode */
	LASSERT(!lli->lli_open_fd_write_count);
	LASSERT(!lli->lli_open_fd_read_count);
	LASSERT(!lli->lli_open_fd_exec_count);

	/* close any cached MDS open handles, one per open mode */
	if (lli->lli_mds_write_och)
		ll_md_real_close(inode, FMODE_WRITE);
	if (lli->lli_mds_exec_och)
		ll_md_real_close(inode, FMODE_EXEC);
	if (lli->lli_mds_read_och)
		ll_md_real_close(inode, FMODE_READ);

	if (S_ISLNK(inode->i_mode)) {
		kfree(lli->lli_symlink_name);
		lli->lli_symlink_name = NULL;
	}

	ll_xattr_cache_destroy(inode);

#ifdef CONFIG_FS_POSIX_ACL
	if (lli->lli_posix_acl) {
		/* we must hold the last reference at this point */
		LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
		posix_acl_release(lli->lli_posix_acl);
		lli->lli_posix_acl = NULL;
	}
#endif
	lli->lli_inode_magic = LLI_INODE_DEAD;

	if (S_ISDIR(inode->i_mode))
		ll_dir_clear_lsm_md(inode);
	if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
		LASSERT(list_empty(&lli->lli_agl_list));

	/*
	 * XXX This has to be done before lsm is freed below, because
	 * cl_object still uses inode lsm.
	 */
	cl_inode_fini(inode);
	lli->lli_has_smd = false;
}
1260
/* attr->ia_valid bits that request explicit timestamp setting */
#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)

/*
 * Send a setattr RPC to the MDS and mirror the resulting attributes into
 * the local inode.  On -ENOENT the object raced away: the nlink count is
 * cleared and, for special files, the local attrs are still applied so
 * the caller sees success.  Returns 0 or a negative errno.
 */
static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
			 struct md_open_data **mod)
{
	struct lustre_md md;
	struct inode *inode = d_inode(dentry);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *request = NULL;
	int rc, ia_valid;

	op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
			&request, mod);
	if (rc) {
		ptlrpc_req_finished(request);
		if (rc == -ENOENT) {
			clear_nlink(inode);
			/* Unlinked special device node? Or just a race?
			 * Pretend we did everything.
			 */
			if (!S_ISREG(inode->i_mode) &&
			    !S_ISDIR(inode->i_mode)) {
				/* save/restore ia_valid around the local-only
				 * setattr so the caller's view is unchanged
				 */
				ia_valid = op_data->op_attr.ia_valid;
				op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
				rc = simple_setattr(dentry, &op_data->op_attr);
				op_data->op_attr.ia_valid = ia_valid;
			}
		} else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
			CERROR("md_setattr fails: rc = %d\n", rc);
		}
		return rc;
	}

	rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
			      sbi->ll_md_exp, &md);
	if (rc) {
		ptlrpc_req_finished(request);
		return rc;
	}

	ia_valid = op_data->op_attr.ia_valid;
	/* inode size will be in cl_setattr_ost, can't do it now since dirty
	 * cache is not cleared yet.
	 */
	op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
	rc = simple_setattr(dentry, &op_data->op_attr);
	op_data->op_attr.ia_valid = ia_valid;

	/* Extract epoch data if obtained. */
	op_data->op_handle = md.body->handle;
	op_data->op_ioepoch = md.body->ioepoch;

	rc = ll_update_inode(inode, &md);
	ptlrpc_req_finished(request);

	return rc;
}
1323
1324 /* Close IO epoch and send Size-on-MDS attribute update. */
1325 static int ll_setattr_done_writing(struct inode *inode,
1326 struct md_op_data *op_data,
1327 struct md_open_data *mod)
1328 {
1329 struct ll_inode_info *lli = ll_i2info(inode);
1330 int rc = 0;
1331
1332 if (!S_ISREG(inode->i_mode))
1333 return 0;
1334
1335 CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n",
1336 op_data->op_ioepoch, PFID(&lli->lli_fid));
1337
1338 op_data->op_flags = MF_EPOCH_CLOSE;
1339 ll_done_writing_attr(inode, op_data);
1340 ll_pack_inode2opdata(inode, op_data, NULL);
1341
1342 rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1343 if (rc == -EAGAIN)
1344 /* MDS has instructed us to obtain Size-on-MDS attribute
1345 * from OSTs and send setattr to back to MDS.
1346 */
1347 rc = ll_som_update(inode, op_data);
1348 else if (rc) {
1349 CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
1350 ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
1351 PFID(ll_inode2fid(inode)), rc);
1352 }
1353 return rc;
1354 }
1355
1356 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1357 * object(s) determine the file size and mtime. Otherwise, the MDS will
1358 * keep these values until such a time that objects are allocated for it.
1359 * We do the MDS operations first, as it is checking permissions for us.
1360 * We don't to the MDS RPC if there is nothing that we want to store there,
1361 * otherwise there is no harm in updating mtime/atime on the MDS if we are
1362 * going to do an RPC anyways.
1363 *
1364 * If we are doing a truncate, we will send the mtime and ctime updates
1365 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1366 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1367 * at the same time.
1368 *
1369 * In case of HSMimport, we only set attr on MDS.
1370 */
/*
 * Core setattr path: validates the request, sends the MDS setattr RPC,
 * then (for regular files with OST objects) pushes size/time updates to
 * the OSTs.  See the block comment above for the full size/mtime
 * ownership rules.  Returns 0 or a negative errno.
 *
 * Locking: the caller holds i_mutex for non-directories; it is dropped
 * across the RPCs and retaken before returning (see the paired
 * inode_unlock()/inode_lock() below).
 */
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
{
	struct inode *inode = d_inode(dentry);
	struct ll_inode_info *lli = ll_i2info(inode);
	struct md_op_data *op_data = NULL;
	struct md_open_data *mod = NULL;
	bool file_is_released = false;
	int rc = 0, rc1 = 0;

	CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
	       ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
	       i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);

	if (attr->ia_valid & ATTR_SIZE) {
		/* Check new size against VFS/VM file size limit and rlimit */
		rc = inode_newsize_ok(inode, attr->ia_size);
		if (rc)
			return rc;

		/* The maximum Lustre file size is variable, based on the
		 * OST maximum object size and number of stripes. This
		 * needs another check in addition to the VFS check above.
		 */
		if (attr->ia_size > ll_file_maxbytes(inode)) {
			CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
			       PFID(&lli->lli_fid), attr->ia_size,
			       ll_file_maxbytes(inode));
			return -EFBIG;
		}

		/* truncation implies time updates */
		attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
	}

	/* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
	if (attr->ia_valid & TIMES_SET_FLAGS) {
		if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
		    !capable(CFS_CAP_FOWNER))
			return -EPERM;
	}

	/* We mark all of the fields "set" so MDS/OST does not re-set them */
	if (attr->ia_valid & ATTR_CTIME) {
		attr->ia_ctime = CURRENT_TIME;
		attr->ia_valid |= ATTR_CTIME_SET;
	}
	if (!(attr->ia_valid & ATTR_ATIME_SET) &&
	    (attr->ia_valid & ATTR_ATIME)) {
		attr->ia_atime = CURRENT_TIME;
		attr->ia_valid |= ATTR_ATIME_SET;
	}
	if (!(attr->ia_valid & ATTR_MTIME_SET) &&
	    (attr->ia_valid & ATTR_MTIME)) {
		attr->ia_mtime = CURRENT_TIME;
		attr->ia_valid |= ATTR_MTIME_SET;
	}

	if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
		CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %llu\n",
		       LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
		       (s64)ktime_get_real_seconds());

	/* We always do an MDS RPC, even if we're only changing the size;
	 * only the MDS knows whether truncate() should fail with -ETXTBUSY
	 */

	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data)
		return -ENOMEM;

	/* drop i_mutex across the RPCs; retaken at the end of the function */
	if (!S_ISDIR(inode->i_mode))
		inode_unlock(inode);

	/* truncate on a released file must failed with -ENODATA,
	 * so size must not be set on MDS for released file
	 * but other attributes must be set
	 */
	if (S_ISREG(inode->i_mode)) {
		struct lov_stripe_md *lsm;
		__u32 gen;

		ll_layout_refresh(inode, &gen);
		lsm = ccc_inode_lsm_get(inode);
		if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
			file_is_released = true;
		ccc_inode_lsm_put(inode, lsm);

		if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
			if (file_is_released) {
				/* HSM-released file being truncated: restore
				 * the layout first
				 */
				rc = ll_layout_restore(inode, 0, attr->ia_size);
				if (rc < 0)
					goto out;

				file_is_released = false;
				ll_layout_refresh(inode, &gen);
			}

			/*
			 * If we are changing file size, file content is
			 * modified, flag it.
			 */
			attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
			spin_lock(&lli->lli_lock);
			lli->lli_flags |= LLIF_DATA_MODIFIED;
			spin_unlock(&lli->lli_lock);
			op_data->op_bias |= MDS_DATA_MODIFIED;
		}
	}

	memcpy(&op_data->op_attr, attr, sizeof(*attr));

	/* Open epoch for truncate. */
	if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import &&
	    (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
		op_data->op_flags = MF_EPOCH_OPEN;

	rc = ll_md_setattr(dentry, op_data, &mod);
	if (rc)
		goto out;

	/* RPC to MDT is sent, cancel data modification flag */
	if (op_data->op_bias & MDS_DATA_MODIFIED) {
		spin_lock(&lli->lli_lock);
		lli->lli_flags &= ~LLIF_DATA_MODIFIED;
		spin_unlock(&lli->lli_lock);
	}

	ll_ioepoch_open(lli, op_data->op_ioepoch);
	if (!S_ISREG(inode->i_mode) || file_is_released) {
		rc = 0;
		goto out;
	}

	if (attr->ia_valid & (ATTR_SIZE |
			      ATTR_ATIME | ATTR_ATIME_SET |
			      ATTR_MTIME | ATTR_MTIME_SET)) {
		/* For truncate and utimes sending attributes to OSTs, setting
		 * mtime/atime to the past will be performed under PW [0:EOF]
		 * extent lock (new_size:EOF for truncate). It may seem
		 * excessive to send mtime/atime updates to OSTs when not
		 * setting times to past, but it is necessary due to possible
		 * time de-synchronization between MDT inode and OST objects
		 */
		if (attr->ia_valid & ATTR_SIZE)
			down_write(&lli->lli_trunc_sem);
		rc = cl_setattr_ost(inode, attr);
		if (attr->ia_valid & ATTR_SIZE)
			up_write(&lli->lli_trunc_sem);
	}
out:
	/* close the IO epoch if one was opened; preserve the first error */
	if (op_data->op_ioepoch) {
		rc1 = ll_setattr_done_writing(inode, op_data, mod);
		if (!rc)
			rc = rc1;
	}
	ll_finish_md_op_data(op_data);

	if (!S_ISDIR(inode->i_mode)) {
		/* retake i_mutex dropped above */
		inode_lock(inode);
		if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
			inode_dio_wait(inode);
	}

	ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
			   LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);

	return rc;
}
1538
1539 int ll_setattr(struct dentry *de, struct iattr *attr)
1540 {
1541 int mode = d_inode(de)->i_mode;
1542
1543 if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
1544 (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
1545 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1546
1547 if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
1548 (ATTR_SIZE|ATTR_MODE)) &&
1549 (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
1550 (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1551 !(attr->ia_mode & S_ISGID))))
1552 attr->ia_valid |= ATTR_FORCE;
1553
1554 if ((attr->ia_valid & ATTR_MODE) &&
1555 (mode & S_ISUID) &&
1556 !(attr->ia_mode & S_ISUID) &&
1557 !(attr->ia_valid & ATTR_KILL_SUID))
1558 attr->ia_valid |= ATTR_KILL_SUID;
1559
1560 if ((attr->ia_valid & ATTR_MODE) &&
1561 ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1562 !(attr->ia_mode & S_ISGID) &&
1563 !(attr->ia_valid & ATTR_KILL_SGID))
1564 attr->ia_valid |= ATTR_KILL_SGID;
1565
1566 return ll_setattr_raw(de, attr, false);
1567 }
1568
/*
 * Gather filesystem statistics: query the MDS for inode counts, then the
 * OSTs (via the data export) for block counts, and merge the two into
 * *osfs.  max_age allows cached statfs results; flags may gain
 * OBD_STATFS_NODELAY when lazystatfs is enabled.  Returns 0 or a
 * negative errno.
 */
int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
		       __u64 max_age, __u32 flags)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_statfs obd_osfs;
	int rc;

	/* metadata side first: file/inode counts come from the MDS */
	rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
	if (rc) {
		CERROR("md_statfs fails: rc = %d\n", rc);
		return rc;
	}

	osfs->os_type = sb->s_magic;

	CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
	       osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,
	       osfs->os_files);

	if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
		flags |= OBD_STATFS_NODELAY;

	/* data side: block counts come from the OSTs */
	rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
	if (rc) {
		CERROR("obd_statfs fails: rc = %d\n", rc);
		return rc;
	}

	CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
	       obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
	       obd_osfs.os_files);

	osfs->os_bsize = obd_osfs.os_bsize;
	osfs->os_blocks = obd_osfs.os_blocks;
	osfs->os_bfree = obd_osfs.os_bfree;
	osfs->os_bavail = obd_osfs.os_bavail;

	/* If we don't have as many objects free on the OST as inodes
	 * on the MDS, we reduce the total number of inodes to
	 * compensate, so that the "inodes in use" number is correct.
	 */
	if (obd_osfs.os_ffree < osfs->os_ffree) {
		osfs->os_files = (osfs->os_files - osfs->os_ffree) +
				 obd_osfs.os_ffree;
		osfs->os_ffree = obd_osfs.os_ffree;
	}

	return rc;
}
1618
/*
 * VFS ->statfs entry point.  Fetches (possibly cached) cluster stats via
 * ll_statfs_internal() and converts them into the kernel's kstatfs
 * format, scaling the block size up on 32-bit kernels so the block
 * counts fit in a long.  Returns 0 or a negative errno.
 */
int ll_statfs(struct dentry *de, struct kstatfs *sfs)
{
	struct super_block *sb = de->d_sb;
	struct obd_statfs osfs;
	int rc;

	CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64());
	ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);

	/* Some amount of caching on the client is allowed */
	rc = ll_statfs_internal(sb, &osfs,
				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
				0);
	if (rc)
		return rc;

	statfs_unpack(sfs, &osfs);

	/* We need to downshift for all 32-bit kernels, because we can't
	 * tell if the kernel is being called via sys_statfs64() or not.
	 * Stop before overflowing f_bsize - in which case it is better
	 * to just risk EOVERFLOW if caller is using old sys_statfs().
	 */
	if (sizeof(long) < 8) {
		/* double f_bsize and halve the block counts until
		 * os_blocks fits in an unsigned long (cap at 1 GiB bsize)
		 */
		while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
			sfs->f_bsize <<= 1;

			osfs.os_blocks >>= 1;
			osfs.os_bfree >>= 1;
			osfs.os_bavail >>= 1;
		}
	}

	sfs->f_blocks = osfs.os_blocks;
	sfs->f_bfree = osfs.os_bfree;
	sfs->f_bavail = osfs.os_bavail;
	sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
	return 0;
}
1658
1659 void ll_inode_size_lock(struct inode *inode)
1660 {
1661 struct ll_inode_info *lli;
1662
1663 LASSERT(!S_ISDIR(inode->i_mode));
1664
1665 lli = ll_i2info(inode);
1666 mutex_lock(&lli->lli_size_mutex);
1667 }
1668
1669 void ll_inode_size_unlock(struct inode *inode)
1670 {
1671 struct ll_inode_info *lli;
1672
1673 lli = ll_i2info(inode);
1674 mutex_unlock(&lli->lli_size_mutex);
1675 }
1676
/*
 * Merge attributes from a lustre_md reply (mdt_body plus optional file
 * and directory stripe metadata) into the VFS inode and the Lustre
 * ll_inode_info.  Each attribute is applied only when the corresponding
 * OBD_MD_* bit is set in body->valid.  Returns 0 or a negative errno
 * (only directory lsm update can fail).
 */
int ll_update_inode(struct inode *inode, struct lustre_md *md)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct mdt_body *body = md->body;
	struct lov_stripe_md *lsm = md->lsm;
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	/* a file stripe md must be accompanied by the EA-size flag */
	LASSERT((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
	if (lsm) {
		if (!lli->lli_has_smd &&
		    !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
			cl_file_inode_init(inode, md);

		/* clamp the per-file maximum to what the VFS supports */
		lli->lli_maxbytes = lsm->lsm_maxbytes;
		if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
			lli->lli_maxbytes = MAX_LFS_FILESIZE;
	}

	if (S_ISDIR(inode->i_mode)) {
		int rc;

		rc = ll_update_lsm_md(inode, md);
		if (rc)
			return rc;
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (body->valid & OBD_MD_FLACL) {
		/* swap in the new ACL under lli_lock */
		spin_lock(&lli->lli_lock);
		if (lli->lli_posix_acl)
			posix_acl_release(lli->lli_posix_acl);
		lli->lli_posix_acl = md->posix_acl;
		spin_unlock(&lli->lli_lock);
	}
#endif
	inode->i_ino = cl_fid_build_ino(&body->fid1,
					sbi->ll_flags & LL_SBI_32BIT_API);
	inode->i_generation = cl_fid_build_gen(&body->fid1);

	/* timestamps only move forward on the VFS inode; the raw server
	 * value is always remembered in lli
	 */
	if (body->valid & OBD_MD_FLATIME) {
		if (body->atime > LTIME_S(inode->i_atime))
			LTIME_S(inode->i_atime) = body->atime;
		lli->lli_atime = body->atime;
	}
	if (body->valid & OBD_MD_FLMTIME) {
		if (body->mtime > LTIME_S(inode->i_mtime)) {
			CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
			       inode->i_ino, LTIME_S(inode->i_mtime),
			       body->mtime);
			LTIME_S(inode->i_mtime) = body->mtime;
		}
		lli->lli_mtime = body->mtime;
	}
	if (body->valid & OBD_MD_FLCTIME) {
		if (body->ctime > LTIME_S(inode->i_ctime))
			LTIME_S(inode->i_ctime) = body->ctime;
		lli->lli_ctime = body->ctime;
	}
	/* mode bits and file type are updated independently */
	if (body->valid & OBD_MD_FLMODE)
		inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
	if (body->valid & OBD_MD_FLTYPE)
		inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
	LASSERT(inode->i_mode != 0);
	if (S_ISREG(inode->i_mode))
		inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
				       LL_MAX_BLKSIZE_BITS);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;
	if (body->valid & OBD_MD_FLUID)
		inode->i_uid = make_kuid(&init_user_ns, body->uid);
	if (body->valid & OBD_MD_FLGID)
		inode->i_gid = make_kgid(&init_user_ns, body->gid);
	if (body->valid & OBD_MD_FLFLAGS)
		inode->i_flags = ll_ext_to_inode_flags(body->flags);
	if (body->valid & OBD_MD_FLNLINK)
		set_nlink(inode, body->nlink);
	if (body->valid & OBD_MD_FLRDEV)
		inode->i_rdev = old_decode_dev(body->rdev);

	if (body->valid & OBD_MD_FLID) {
		/* FID shouldn't be changed! */
		if (fid_is_sane(&lli->lli_fid)) {
			LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
				 "Trying to change FID "DFID" to the "DFID", inode "DFID"(%p)\n",
				 PFID(&lli->lli_fid), PFID(&body->fid1),
				 PFID(ll_inode2fid(inode)), inode);
		} else {
			lli->lli_fid = body->fid1;
		}
	}

	LASSERT(fid_seq(&lli->lli_fid) != 0);

	if (body->valid & OBD_MD_FLSIZE) {
		if (exp_connect_som(ll_i2mdexp(inode)) &&
		    S_ISREG(inode->i_mode)) {
			struct lustre_handle lockh;
			enum ldlm_mode mode;

			/* As it is possible a blocking ast has been processed
			 * by this time, we need to check there is an UPDATE
			 * lock on the client and set LLIF_MDS_SIZE_LOCK holding
			 * it.
			 */
			mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
					       &lockh, LDLM_FL_CBPENDING,
					       LCK_CR | LCK_CW |
					       LCK_PR | LCK_PW);
			if (mode) {
				if (lli->lli_flags & (LLIF_DONE_WRITING |
						      LLIF_EPOCH_PENDING |
						      LLIF_SOM_DIRTY)) {
					CERROR("%s: inode "DFID" flags %u still has size authority! do not trust the size got from MDS\n",
					       sbi->ll_md_exp->exp_obd->obd_name,
					       PFID(ll_inode2fid(inode)),
					       lli->lli_flags);
				} else {
					/* Use old size assignment to avoid
					 * deadlock bz14138 & bz14326
					 */
					i_size_write(inode, body->size);
					spin_lock(&lli->lli_lock);
					lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
					spin_unlock(&lli->lli_lock);
				}
				ldlm_lock_decref(&lockh, mode);
			}
		} else {
			/* Use old size assignment to avoid
			 * deadlock bz14138 & bz14326
			 */
			i_size_write(inode, body->size);

			CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
			       inode->i_ino, (unsigned long long)body->size);
		}

		if (body->valid & OBD_MD_FLBLOCKS)
			inode->i_blocks = body->blocks;
	}

	if (body->valid & OBD_MD_TSTATE) {
		/* flag files whose HSM restore is still in progress */
		if (body->t_state & MS_RESTORE)
			lli->lli_flags |= LLIF_FILE_RESTORING;
	}

	return 0;
}
1825
/*
 * iget5_locked() callback that populates a freshly allocated inode from
 * a lustre_md (passed as the opaque cookie) and installs the
 * type-appropriate inode/file operations.  Returns 0 or a negative
 * errno from ll_update_inode().
 */
int ll_read_inode2(struct inode *inode, void *opaque)
{
	struct lustre_md *md = opaque;
	struct ll_inode_info *lli = ll_i2info(inode);
	int rc;

	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
	       PFID(&lli->lli_fid), inode);

	LASSERT(!lli->lli_has_smd);

	/* Core attributes from the MDS first. This is a new inode, and
	 * the VFS doesn't zero times in the core inode so we have to do
	 * it ourselves. They will be overwritten by either MDS or OST
	 * attributes - we just need to make sure they aren't newer.
	 */
	LTIME_S(inode->i_mtime) = 0;
	LTIME_S(inode->i_atime) = 0;
	LTIME_S(inode->i_ctime) = 0;
	inode->i_rdev = 0;
	rc = ll_update_inode(inode, md);
	if (rc)
		return rc;

	/* OIDEBUG(inode); */

	/* install operations matching the file type determined above */
	if (S_ISREG(inode->i_mode)) {
		struct ll_sb_info *sbi = ll_i2sbi(inode);

		inode->i_op = &ll_file_inode_operations;
		inode->i_fop = sbi->ll_fop;
		inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ll_dir_inode_operations;
		inode->i_fop = &ll_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &ll_fast_symlink_inode_operations;
	} else {
		inode->i_op = &ll_special_inode_operations;

		/* device nodes, FIFOs, sockets */
		init_special_inode(inode, inode->i_mode,
				   inode->i_rdev);
	}

	return 0;
}
1872
/*
 * VFS ->evict_inode hook: flush/discard cached pages, then tear down
 * the Lustre-side state via ll_clear_inode() before clear_inode().
 */
void ll_delete_inode(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);

	if (S_ISREG(inode->i_mode) && lli->lli_clob)
		/* discard all dirty pages before truncating them, required by
		 * osc_extent implementation at LU-1030.
		 */
		cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
				   CL_FSYNC_DISCARD, 1);

	truncate_inode_pages_final(&inode->i_data);

	/* Workaround for LU-118 */
	if (inode->i_data.nrpages) {
		/* lock/unlock drains concurrent radix-tree users before the
		 * page-count assertion below
		 */
		spin_lock_irq(&inode->i_data.tree_lock);
		spin_unlock_irq(&inode->i_data.tree_lock);
		LASSERTF(inode->i_data.nrpages == 0,
			 "inode="DFID"(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
			 PFID(ll_inode2fid(inode)), inode,
			 inode->i_data.nrpages);
	}
	/* Workaround end */

	ll_clear_inode(inode);
	clear_inode(inode);
}
1900
/*
 * Handle the FSFILT get/set-flags ioctls: GETFLAGS fetches the flags
 * from the MDS and copies them to user space; SETFLAGS pushes the new
 * flags to the MDS and, if the file has OST objects, to the OSTs as
 * well.  Returns 0, a negative errno, or -ENOSYS for unknown commands.
 */
int ll_iocontrol(struct inode *inode, struct file *file,
		 unsigned int cmd, unsigned long arg)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	int rc, flags = 0;

	switch (cmd) {
	case FSFILT_IOC_GETFLAGS: {
		struct mdt_body *body;
		struct md_op_data *op_data;

		op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
					     0, 0, LUSTRE_OPC_ANY,
					     NULL);
		if (IS_ERR(op_data))
			return PTR_ERR(op_data);

		/* only the flags attribute is requested */
		op_data->op_valid = OBD_MD_FLFLAGS;
		rc = md_getattr(sbi->ll_md_exp, op_data, &req);
		ll_finish_md_op_data(op_data);
		if (rc) {
			CERROR("%s: failure inode "DFID": rc = %d\n",
			       sbi->ll_md_exp->exp_obd->obd_name,
			       PFID(ll_inode2fid(inode)), rc);
			return -abs(rc);
		}

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);

		flags = body->flags;

		ptlrpc_req_finished(req);

		return put_user(flags, (int __user *)arg);
	}
	case FSFILT_IOC_SETFLAGS: {
		struct lov_stripe_md *lsm;
		struct obd_info oinfo = { };
		struct md_op_data *op_data;

		if (get_user(flags, (int __user *)arg))
			return -EFAULT;

		/* first update the flags on the MDS */
		op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
					     LUSTRE_OPC_ANY, NULL);
		if (IS_ERR(op_data))
			return PTR_ERR(op_data);

		op_data->op_attr_flags = flags;
		op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
		rc = md_setattr(sbi->ll_md_exp, op_data,
				NULL, 0, NULL, 0, &req, NULL);
		ll_finish_md_op_data(op_data);
		ptlrpc_req_finished(req);
		if (rc)
			return rc;

		inode->i_flags = ll_ext_to_inode_flags(flags);

		/* no OST objects -> nothing more to update */
		lsm = ccc_inode_lsm_get(inode);
		if (!lsm_has_objects(lsm)) {
			ccc_inode_lsm_put(inode, lsm);
			return 0;
		}

		oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
		if (!oinfo.oi_oa) {
			ccc_inode_lsm_put(inode, lsm);
			return -ENOMEM;
		}
		oinfo.oi_md = lsm;
		oinfo.oi_oa->o_oi = lsm->lsm_oi;
		oinfo.oi_oa->o_flags = flags;
		oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
				       OBD_MD_FLGROUP;
		obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
		/* then propagate the flags to the OSTs */
		rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
		kmem_cache_free(obdo_cachep, oinfo.oi_oa);
		ccc_inode_lsm_put(inode, lsm);

		if (rc && rc != -EPERM && rc != -EACCES)
			CERROR("osc_setattr_async fails: rc = %d\n", rc);

		return rc;
	}
	default:
		return -ENOSYS;
	}

	return 0;
}
1993
1994 int ll_flush_ctx(struct inode *inode)
1995 {
1996 struct ll_sb_info *sbi = ll_i2sbi(inode);
1997
1998 CDEBUG(D_SEC, "flush context for user %d\n",
1999 from_kuid(&init_user_ns, current_uid()));
2000
2001 obd_set_info_async(NULL, sbi->ll_md_exp,
2002 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2003 0, NULL, NULL);
2004 obd_set_info_async(NULL, sbi->ll_dt_exp,
2005 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2006 0, NULL, NULL);
2007 return 0;
2008 }
2009
2010 /* umount -f client means force down, don't save state */
2011 void ll_umount_begin(struct super_block *sb)
2012 {
2013 struct ll_sb_info *sbi = ll_s2sbi(sb);
2014 struct obd_device *obd;
2015 struct obd_ioctl_data *ioc_data;
2016
2017 CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2018 sb->s_count, atomic_read(&sb->s_active));
2019
2020 obd = class_exp2obd(sbi->ll_md_exp);
2021 if (!obd) {
2022 CERROR("Invalid MDC connection handle %#llx\n",
2023 sbi->ll_md_exp->exp_handle.h_cookie);
2024 return;
2025 }
2026 obd->obd_force = 1;
2027
2028 obd = class_exp2obd(sbi->ll_dt_exp);
2029 if (!obd) {
2030 CERROR("Invalid LOV connection handle %#llx\n",
2031 sbi->ll_dt_exp->exp_handle.h_cookie);
2032 return;
2033 }
2034 obd->obd_force = 1;
2035
2036 ioc_data = kzalloc(sizeof(*ioc_data), GFP_NOFS);
2037 if (ioc_data) {
2038 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2039 sizeof(*ioc_data), ioc_data, NULL);
2040
2041 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2042 sizeof(*ioc_data), ioc_data, NULL);
2043
2044 kfree(ioc_data);
2045 }
2046
2047 /* Really, we'd like to wait until there are no requests outstanding,
2048 * and then continue. For now, we just invalidate the requests,
2049 * schedule() and sleep one second if needed, and hope.
2050 */
2051 schedule();
2052 }
2053
2054 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2055 {
2056 struct ll_sb_info *sbi = ll_s2sbi(sb);
2057 char *profilenm = get_profile_name(sb);
2058 int err;
2059 __u32 read_only;
2060
2061 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
2062 read_only = *flags & MS_RDONLY;
2063 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2064 sizeof(KEY_READ_ONLY),
2065 KEY_READ_ONLY, sizeof(read_only),
2066 &read_only, NULL);
2067 if (err) {
2068 LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2069 profilenm, read_only ?
2070 "read-only" : "read-write", err);
2071 return err;
2072 }
2073
2074 if (read_only)
2075 sb->s_flags |= MS_RDONLY;
2076 else
2077 sb->s_flags &= ~MS_RDONLY;
2078
2079 if (sbi->ll_flags & LL_SBI_VERBOSE)
2080 LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2081 read_only ? "read-only" : "read-write");
2082 }
2083 return 0;
2084 }
2085
2086 /**
2087 * Cleanup the open handle that is cached on MDT-side.
2088 *
2089 * For open case, the client side open handling thread may hit error
2090 * after the MDT grant the open. Under such case, the client should
2091 * send close RPC to the MDT as cleanup; otherwise, the open handle
2092 * on the MDT will be leaked there until the client umount or evicted.
2093 *
2094 * In further, if someone unlinked the file, because the open handle
2095 * holds the reference on such file/object, then it will block the
2096 * subsequent threads that want to locate such object via FID.
2097 *
2098 * \param[in] sb super block for this file-system
2099 * \param[in] open_req pointer to the original open request
2100 */
2101 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2102 {
2103 struct mdt_body *body;
2104 struct md_op_data *op_data;
2105 struct ptlrpc_request *close_req = NULL;
2106 struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp;
2107
2108 body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
2109 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
2110 if (!op_data)
2111 return;
2112
2113 op_data->op_fid1 = body->fid1;
2114 op_data->op_ioepoch = body->ioepoch;
2115 op_data->op_handle = body->handle;
2116 op_data->op_mod_time = get_seconds();
2117 md_close(exp, op_data, NULL, &close_req);
2118 ptlrpc_req_finished(close_req);
2119 ll_finish_md_op_data(op_data);
2120 }
2121
/**
 * Instantiate or refresh an inode from an MD reply.
 *
 * If \a *inode is set, update it with the attributes unpacked from
 * \a req; otherwise create a new inode on \a sb from the FID the server
 * returned.  A layout lock piggybacked on \a it (getattr/open replies)
 * is applied to the inode so a stale layout cannot overwrite a fresh one.
 *
 * \param[in,out] inode	existing inode to update, or out-pointer for the
 *			newly created one; at least one of \a *inode / \a sb
 *			must be set
 * \param[in] req	request whose reply carries the lustre_md
 * \param[in] sb	super block, may be NULL when \a *inode is given
 * \param[in] it	lookup intent; may carry a layout lock, and decides
 *			whether a failed open must be cleaned up on the MDT
 *
 * \retval 0 on success, negative errno on failure
 */
int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
		  struct super_block *sb, struct lookup_intent *it)
{
	struct ll_sb_info *sbi = NULL;
	struct lustre_md md = { NULL };
	int rc;

	LASSERT(*inode || sb);
	sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
	/* Unpack body/lsm/ACL from the reply into md; on success md owns
	 * references that are dropped at the "out" label below.
	 */
	rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
			      sbi->ll_md_exp, &md);
	if (rc)
		goto cleanup;

	if (*inode) {
		rc = ll_update_inode(*inode, &md);
		if (rc)
			goto out;
	} else {
		LASSERT(sb);

		/*
		 * At this point server returns to client's same fid as client
		 * generated for creating. So using ->fid1 is okay here.
		 */
		if (!fid_is_sane(&md.body->fid1)) {
			CERROR("%s: Fid is insane " DFID "\n",
			       ll_get_fsname(sb, NULL, 0),
			       PFID(&md.body->fid1));
			rc = -EINVAL;
			goto out;
		}

		*inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
					 sbi->ll_flags & LL_SBI_32BIT_API),
				 &md);
		if (IS_ERR(*inode)) {
#ifdef CONFIG_FS_POSIX_ACL
			/* ll_iget() failed, so nobody took ownership of the
			 * ACL held in md; release it here to avoid a leak.
			 */
			if (md.posix_acl) {
				posix_acl_release(md.posix_acl);
				md.posix_acl = NULL;
			}
#endif
			rc = -ENOMEM;
			CERROR("new_inode -fatal: rc %d\n", rc);
			goto out;
		}
	}

	/* Handling piggyback layout lock.
	 * Layout lock can be piggybacked by getattr and open request.
	 * The lsm can be applied to inode only if it comes with a layout lock
	 * otherwise correct layout may be overwritten, for example:
	 * 1. proc1: mdt returns a lsm but not granting layout
	 * 2. layout was changed by another client
	 * 3. proc2: refresh layout and layout lock granted
	 * 4. proc1: to apply a stale layout
	 */
	if (it && it->it_lock_mode != 0) {
		struct lustre_handle lockh;
		struct ldlm_lock *lock;

		lockh.cookie = it->it_lock_handle;
		lock = ldlm_handle2lock(&lockh);
		LASSERT(lock);
		if (ldlm_has_layout(lock)) {
			struct cl_object_conf conf;

			memset(&conf, 0, sizeof(conf));
			conf.coc_opc = OBJECT_CONF_SET;
			conf.coc_inode = *inode;
			conf.coc_lock = lock;
			conf.u.coc_md = &md;
			/* Result deliberately ignored: a failed layout
			 * configuration is recovered on the next access.
			 */
			(void)ll_layout_conf(*inode, &conf);
		}
		LDLM_LOCK_PUT(lock);
	}

out:
	/* Drop references md_get_lustre_md() took on behalf of md. */
	if (md.lsm)
		obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
	md_free_lustre_md(sbi->ll_md_exp, &md);

cleanup:
	/* A failed open leaves an open handle cached on the MDT; send the
	 * close RPC here so the handle is not leaked (see ll_open_cleanup).
	 */
	if (rc != 0 && it && it->it_op & IT_OPEN)
		ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);

	return rc;
}
2211
2212 int ll_obd_statfs(struct inode *inode, void __user *arg)
2213 {
2214 struct ll_sb_info *sbi = NULL;
2215 struct obd_export *exp;
2216 char *buf = NULL;
2217 struct obd_ioctl_data *data = NULL;
2218 __u32 type;
2219 int len = 0, rc;
2220
2221 if (!inode) {
2222 rc = -EINVAL;
2223 goto out_statfs;
2224 }
2225
2226 sbi = ll_i2sbi(inode);
2227 if (!sbi) {
2228 rc = -EINVAL;
2229 goto out_statfs;
2230 }
2231
2232 rc = obd_ioctl_getdata(&buf, &len, arg);
2233 if (rc)
2234 goto out_statfs;
2235
2236 data = (void *)buf;
2237 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2238 !data->ioc_pbuf1 || !data->ioc_pbuf2) {
2239 rc = -EINVAL;
2240 goto out_statfs;
2241 }
2242
2243 if (data->ioc_inllen1 != sizeof(__u32) ||
2244 data->ioc_inllen2 != sizeof(__u32) ||
2245 data->ioc_plen1 != sizeof(struct obd_statfs) ||
2246 data->ioc_plen2 != sizeof(struct obd_uuid)) {
2247 rc = -EINVAL;
2248 goto out_statfs;
2249 }
2250
2251 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2252 if (type & LL_STATFS_LMV) {
2253 exp = sbi->ll_md_exp;
2254 } else if (type & LL_STATFS_LOV) {
2255 exp = sbi->ll_dt_exp;
2256 } else {
2257 rc = -ENODEV;
2258 goto out_statfs;
2259 }
2260
2261 rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2262 if (rc)
2263 goto out_statfs;
2264 out_statfs:
2265 if (buf)
2266 obd_ioctl_freedata(buf, len);
2267 return rc;
2268 }
2269
2270 int ll_process_config(struct lustre_cfg *lcfg)
2271 {
2272 char *ptr;
2273 void *sb;
2274 struct lprocfs_static_vars lvars;
2275 unsigned long x;
2276 int rc = 0;
2277
2278 lprocfs_llite_init_vars(&lvars);
2279
2280 /* The instance name contains the sb: lustre-client-aacfe000 */
2281 ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2282 if (!ptr || !*(++ptr))
2283 return -EINVAL;
2284 rc = kstrtoul(ptr, 16, &x);
2285 if (rc != 0)
2286 return -EINVAL;
2287 sb = (void *)x;
2288 /* This better be a real Lustre superblock! */
2289 LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2290
2291 /* Note we have not called client_common_fill_super yet, so
2292 * proc fns must be able to handle that!
2293 */
2294 rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2295 lcfg, sb);
2296 if (rc > 0)
2297 rc = 0;
2298 return rc;
2299 }
2300
2301 /* this function prepares md_op_data hint for passing ot down to MD stack. */
2302 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2303 struct inode *i1, struct inode *i2,
2304 const char *name, int namelen,
2305 int mode, __u32 opc, void *data)
2306 {
2307 if (namelen > ll_i2sbi(i1)->ll_namelen)
2308 return ERR_PTR(-ENAMETOOLONG);
2309
2310 if (!op_data)
2311 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
2312
2313 if (!op_data)
2314 return ERR_PTR(-ENOMEM);
2315
2316 ll_i2gids(op_data->op_suppgids, i1, i2);
2317 op_data->op_fid1 = *ll_inode2fid(i1);
2318 if (S_ISDIR(i1->i_mode))
2319 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
2320
2321 if (i2) {
2322 op_data->op_fid2 = *ll_inode2fid(i2);
2323 if (S_ISDIR(i2->i_mode))
2324 op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
2325 } else {
2326 fid_zero(&op_data->op_fid2);
2327 }
2328
2329 if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
2330 op_data->op_cli_flags |= CLI_HASH64;
2331
2332 if (ll_need_32bit_api(ll_i2sbi(i1)))
2333 op_data->op_cli_flags |= CLI_API32;
2334
2335 op_data->op_name = name;
2336 op_data->op_namelen = namelen;
2337 op_data->op_mode = mode;
2338 op_data->op_mod_time = ktime_get_real_seconds();
2339 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2340 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2341 op_data->op_cap = cfs_curproc_cap_pack();
2342 op_data->op_bias = 0;
2343 op_data->op_cli_flags = 0;
2344 if ((opc == LUSTRE_OPC_CREATE) && name &&
2345 filename_is_volatile(name, namelen, NULL))
2346 op_data->op_bias |= MDS_CREATE_VOLATILE;
2347 op_data->op_mds = 0;
2348 op_data->op_data = data;
2349
2350 /* If the file is being opened after mknod() (normally due to NFS)
2351 * try to use the default stripe data from parent directory for
2352 * allocating OST objects. Try to pass the parent FID to MDS.
2353 */
2354 if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
2355 !ll_i2info(i2)->lli_has_smd) {
2356 struct ll_inode_info *lli = ll_i2info(i2);
2357
2358 spin_lock(&lli->lli_lock);
2359 if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
2360 op_data->op_fid1 = lli->lli_pfid;
2361 spin_unlock(&lli->lli_lock);
2362 }
2363
2364 /* When called by ll_setattr_raw, file is i1. */
2365 if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED)
2366 op_data->op_bias |= MDS_DATA_MODIFIED;
2367
2368 return op_data;
2369 }
2370
/* Release an op_data obtained from ll_prep_md_op_data() (kfree(NULL) is
 * a no-op, so ERR_PTR-free failure paths need no extra check by callers
 * that never got a valid pointer).
 */
void ll_finish_md_op_data(struct md_op_data *op_data)
{
	kfree(op_data);
}
2375
2376 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2377 {
2378 struct ll_sb_info *sbi;
2379
2380 LASSERT(seq && dentry);
2381 sbi = ll_s2sbi(dentry->d_sb);
2382
2383 if (sbi->ll_flags & LL_SBI_NOLCK)
2384 seq_puts(seq, ",nolock");
2385
2386 if (sbi->ll_flags & LL_SBI_FLOCK)
2387 seq_puts(seq, ",flock");
2388
2389 if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2390 seq_puts(seq, ",localflock");
2391
2392 if (sbi->ll_flags & LL_SBI_USER_XATTR)
2393 seq_puts(seq, ",user_xattr");
2394
2395 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2396 seq_puts(seq, ",lazystatfs");
2397
2398 if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
2399 seq_puts(seq, ",user_fid2path");
2400
2401 return 0;
2402 }
2403
2404 /**
2405 * Get obd name by cmd, and copy out to user space
2406 */
2407 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
2408 {
2409 struct ll_sb_info *sbi = ll_i2sbi(inode);
2410 struct obd_device *obd;
2411
2412 if (cmd == OBD_IOC_GETDTNAME)
2413 obd = class_exp2obd(sbi->ll_dt_exp);
2414 else if (cmd == OBD_IOC_GETMDNAME)
2415 obd = class_exp2obd(sbi->ll_md_exp);
2416 else
2417 return -EINVAL;
2418
2419 if (!obd)
2420 return -ENOENT;
2421
2422 if (copy_to_user((void __user *)arg, obd->obd_name,
2423 strlen(obd->obd_name) + 1))
2424 return -EFAULT;
2425
2426 return 0;
2427 }
2428
2429 /**
2430 * Get lustre file system name by \a sbi. If \a buf is provided(non-NULL), the
2431 * fsname will be returned in this buffer; otherwise, a static buffer will be
2432 * used to store the fsname and returned to caller.
2433 */
2434 char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
2435 {
2436 static char fsname_static[MTI_NAME_MAXLEN];
2437 struct lustre_sb_info *lsi = s2lsi(sb);
2438 char *ptr;
2439 int len;
2440
2441 if (!buf) {
2442 /* this means the caller wants to use static buffer
2443 * and it doesn't care about race. Usually this is
2444 * in error reporting path
2445 */
2446 buf = fsname_static;
2447 buflen = sizeof(fsname_static);
2448 }
2449
2450 len = strlen(lsi->lsi_lmd->lmd_profile);
2451 ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
2452 if (ptr && (strcmp(ptr, "-client") == 0))
2453 len -= 7;
2454
2455 if (unlikely(len >= buflen))
2456 len = buflen - 1;
2457 strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
2458 buf[len] = '\0';
2459
2460 return buf;
2461 }
2462
/**
 * Log a D_WARNING message that a dirty page is being discarded.
 *
 * Best-effort: tries to resolve a dentry path for a friendlier message,
 * but still logs (with fsname, device and FID) if no alias or no buffer
 * is available.
 *
 * \param[in] page	the dirty page being discarded
 * \param[in] ioret	the I/O result code that led to the discard
 */
void ll_dirty_page_discard_warn(struct page *page, int ioret)
{
	char *buf, *path = NULL;
	struct dentry *dentry = NULL;
	struct vvp_object *obj = cl_inode2vvp(page->mapping->host);

	/* this can be called inside spin lock so use GFP_ATOMIC. */
	buf = (char *)__get_free_page(GFP_ATOMIC);
	if (buf) {
		dentry = d_find_alias(page->mapping->host);
		if (dentry)
			path = dentry_path_raw(dentry, buf, PAGE_SIZE);
	}

	/* path may be NULL or an ERR_PTR; fall back to an empty string. */
	CDEBUG(D_WARNING,
	       "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
	       ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
	       s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
	       PFID(&obj->vob_header.coh_lu.loh_fid),
	       (path && !IS_ERR(path)) ? path : "", ioret);

	/* Drop the reference taken by d_find_alias(), if any. */
	if (dentry)
		dput(dentry);

	if (buf)
		free_page((unsigned long)buf);
}