/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/llite/llite_lib.c
 *
 * Lustre Light Super operations
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/types.h>
#include <linux/mm.h>

#include "../include/lustre_lite.h"
#include "../include/lustre_ha.h"
#include "../include/lustre_dlm.h"
#include "../include/lprocfs_status.h"
#include "../include/lustre_disk.h"
#include "../include/lustre_param.h"
#include "../include/lustre_log.h"
#include "../include/cl_object.h"
#include "../include/obd_cksum.h"
#include "llite_internal.h"

struct kmem_cache *ll_file_data_slab;
struct dentry *llite_root;
struct kset *llite_kset;

#ifndef log2
#define log2(n) ffz(~(n))
#endif

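/*
 * Allocate and set up the ll_sb_info for a client mount: locks, LRU page
 * cache limits derived from total memory, default readahead and statahead
 * settings, and a freshly generated superblock UUID.
 */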
static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
{
	struct ll_sb_info *sbi = NULL;
	unsigned long pages;
	unsigned long lru_page_max;
	struct sysinfo si;
	class_uuid_t uuid;
	int i;

	sbi = kzalloc(sizeof(*sbi), GFP_NOFS);
	if (!sbi)
		return NULL;

	spin_lock_init(&sbi->ll_lock);
	mutex_init(&sbi->ll_lco.lco_lock);
	spin_lock_init(&sbi->ll_pp_extent_lock);
	spin_lock_init(&sbi->ll_process_lock);
	sbi->ll_rw_stats_on = 0;

	si_meminfo(&si);
	pages = si.totalram - si.totalhigh;
	if (pages >> (20 - PAGE_CACHE_SHIFT) < 512)
		lru_page_max = pages / 2;
	else
		lru_page_max = (pages / 4) * 3;

	/* initialize lru data */
	atomic_set(&sbi->ll_cache.ccc_users, 0);
	sbi->ll_cache.ccc_lru_max = lru_page_max;
	atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
	spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
	INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);

	sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
						    SBI_DEFAULT_READAHEAD_MAX);
	sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
	sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
					SBI_DEFAULT_READAHEAD_WHOLE_MAX;
	INIT_LIST_HEAD(&sbi->ll_conn_chain);
	INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);

	ll_generate_random_uuid(uuid);
	class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
	CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);

	sbi->ll_flags |= LL_SBI_VERBOSE;
	sbi->ll_flags |= LL_SBI_CHECKSUM;

	sbi->ll_flags |= LL_SBI_LRU_RESIZE;

	for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
		spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
			       pp_r_hist.oh_lock);
		spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
			       pp_w_hist.oh_lock);
	}

	/* metadata statahead is enabled by default */
	sbi->ll_sa_max = LL_SA_RPC_DEF;
	atomic_set(&sbi->ll_sa_total, 0);
	atomic_set(&sbi->ll_sa_wrong, 0);
	atomic_set(&sbi->ll_agl_total, 0);
	sbi->ll_flags |= LL_SBI_AGL_ENABLED;

	sbi->ll_sb = sb;

	return sbi;
}

static void ll_free_sbi(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);

	kfree(sbi);
}

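/*
 * Connect this mount to its metadata (md) and data (dt) targets: negotiate
 * connect flags, fetch the root FID, build the root inode and dentry, and
 * tear everything back down through the out_* labels on failure.
 */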
static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
				    struct vfsmount *mnt)
{
	struct inode *root = NULL;
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_device *obd;
	struct obd_statfs *osfs = NULL;
	struct ptlrpc_request *request = NULL;
	struct obd_connect_data *data = NULL;
	struct obd_uuid *uuid;
	struct md_op_data *op_data;
	struct lustre_md lmd;
	u64 valid;
	int size, err, checksum;

	obd = class_name2obd(md);
	if (!obd) {
		CERROR("MD %s: not setup or attached\n", md);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;

	osfs = kzalloc(sizeof(*osfs), GFP_NOFS);
	if (!osfs) {
		kfree(data);
		return -ENOMEM;
	}

	if (llite_root) {
		err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
		if (err < 0)
			CERROR("could not register mount in <debugfs>/lustre/llite\n");
	}

	/* indicate the features supported by this client */
	data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
				  OBD_CONNECT_ATTRFID |
				  OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
				  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
				  OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
				  OBD_CONNECT_RMT_CLIENT | OBD_CONNECT_VBR |
				  OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
				  OBD_CONNECT_EINPROGRESS |
				  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
				  OBD_CONNECT_LAYOUTLOCK |
				  OBD_CONNECT_PINGLESS |
				  OBD_CONNECT_MAX_EASIZE |
				  OBD_CONNECT_FLOCK_DEAD |
				  OBD_CONNECT_DISP_STRIPE;

	if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
		data->ocd_connect_flags |= OBD_CONNECT_SOM;

	if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
		data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
#ifdef CONFIG_FS_POSIX_ACL
	data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
#endif

	if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
		/* flag mdc connection as lightweight, only used for test
		 * purpose, use with care */
		data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;

	data->ocd_ibits_known = MDS_INODELOCK_FULL;
	data->ocd_version = LUSTRE_VERSION_CODE;

	if (sb->s_flags & MS_RDONLY)
		data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
	if (sbi->ll_flags & LL_SBI_USER_XATTR)
		data->ocd_connect_flags |= OBD_CONNECT_XATTR;

	if (sbi->ll_flags & LL_SBI_FLOCK)
		sbi->ll_fop = &ll_file_operations_flock;
	else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
		sbi->ll_fop = &ll_file_operations;
	else
		sbi->ll_fop = &ll_file_operations_noflock;

	/* real client */
	data->ocd_connect_flags |= OBD_CONNECT_REAL;
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
		data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;

	data->ocd_brw_size = MD_MAX_BRW_SIZE;

	err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
			  data, NULL);
	if (err == -EBUSY) {
		LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
				   md);
		goto out;
	} else if (err) {
		CERROR("cannot connect to %s: rc = %d\n", md, err);
		goto out;
	}

	sbi->ll_md_exp->exp_connect_data = *data;

	err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
			   LUSTRE_SEQ_METADATA);
	if (err) {
		CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_md;
	}

	/* For mount, we only need fs info from MDT0, and also in DNE, it
	 * can make sure the client can be mounted as long as MDT0 is
	 * available */
	err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
			 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
			 OBD_STATFS_FOR_MDT0);
	if (err)
		goto out_md_fid;

	/* This needs to be after statfs to ensure connect has finished.
	 * Note that "data" does NOT contain the valid connect reply.
	 * If connecting to a 1.8 server there will be no LMV device, so
	 * we can access the MDC export directly and exp_connect_flags will
	 * be non-zero, but if accessing an upgraded 2.1 server it will
	 * have the correct flags filled in.
	 * XXX: fill in the LMV exp_connect_flags from MDC(s). */
	valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
	if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
	    valid != CLIENT_CONNECT_MDT_REQD) {
		char *buf;

		buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!buf) {
			err = -ENOMEM;
			goto out_md_fid;
		}
		obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
				      valid ^ CLIENT_CONNECT_MDT_REQD, ",");
		LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
				   sbi->ll_md_exp->exp_obd->obd_name, buf);
		kfree(buf);
		err = -EPROTO;
		goto out_md_fid;
	}

	size = sizeof(*data);
	err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
			   KEY_CONN_DATA, &size, data, NULL);
	if (err) {
		CERROR("%s: Get connect data failed: rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_md_fid;
	}

	LASSERT(osfs->os_bsize);
	sb->s_blocksize = osfs->os_bsize;
	sb->s_blocksize_bits = log2(osfs->os_bsize);
	sb->s_magic = LL_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sbi->ll_namelen = osfs->os_namelen;

	if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
	    !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
		LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
		sbi->ll_flags &= ~LL_SBI_USER_XATTR;
	}

	if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
#ifdef MS_POSIXACL
		sb->s_flags |= MS_POSIXACL;
#endif
		sbi->ll_flags |= LL_SBI_ACL;
	} else {
		LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
#ifdef MS_POSIXACL
		sb->s_flags &= ~MS_POSIXACL;
#endif
		sbi->ll_flags &= ~LL_SBI_ACL;
	}

	if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
		if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
			sbi->ll_flags |= LL_SBI_RMT_CLIENT;
			LCONSOLE_INFO("client is set as remote by default.\n");
		}
	} else {
		if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
			sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
			LCONSOLE_INFO("client claims to be remote, but server rejected, forced to be local.\n");
		}
	}

	if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
		sbi->ll_flags |= LL_SBI_64BIT_HASH;

	if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
		sbi->ll_md_brw_size = data->ocd_brw_size;
	else
		sbi->ll_md_brw_size = PAGE_CACHE_SIZE;

	if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
		LCONSOLE_INFO("Layout lock feature supported.\n");
		sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
	}

	if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
		if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
			LCONSOLE_INFO(
				"%s: disabling xattr cache due to unknown maximum xattr size.\n",
				dt);
		} else {
			sbi->ll_flags |= LL_SBI_XATTR_CACHE;
			sbi->ll_xattr_cache_enabled = 1;
		}
	}

	obd = class_name2obd(dt);
	if (!obd) {
		CERROR("DT %s: not setup or attached\n", dt);
		err = -ENODEV;
		goto out_md_fid;
	}

	data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
				  OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
				  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
				  OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK |
				  OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
				  OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR |
				  OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
				  OBD_CONNECT_MAXBYTES |
				  OBD_CONNECT_EINPROGRESS |
				  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
				  OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;

	if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
		data->ocd_connect_flags |= OBD_CONNECT_SOM;

	if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
		/* OBD_CONNECT_CKSUM should always be set, even if checksums are
		 * disabled by default, because it can still be enabled on the
		 * fly via /sys. As a consequence, we still need to come to an
		 * agreement on the supported algorithms at connect time */
		data->ocd_connect_flags |= OBD_CONNECT_CKSUM;

		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
			data->ocd_cksum_types = OBD_CKSUM_ADLER;
		else
			data->ocd_cksum_types = cksum_types_supported_client();
	}

	data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
		data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;

	CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
	       data->ocd_connect_flags,
	       data->ocd_version, data->ocd_grant);

	obd->obd_upcall.onu_owner = &sbi->ll_lco;
	obd->obd_upcall.onu_upcall = cl_ocd_update;

	data->ocd_brw_size = DT_MAX_BRW_SIZE;

	err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
			  NULL);
	if (err == -EBUSY) {
		LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
				   dt);
		goto out_md;
	} else if (err) {
		CERROR("%s: Cannot connect to %s: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
		goto out_md;
	}

	sbi->ll_dt_exp->exp_connect_data = *data;

	err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
			   LUSTRE_SEQ_METADATA);
	if (err) {
		CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, err);
		goto out_dt;
	}

	mutex_lock(&sbi->ll_lco.lco_lock);
	sbi->ll_lco.lco_flags = data->ocd_connect_flags;
	sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
	sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
	mutex_unlock(&sbi->ll_lco.lco_lock);

	fid_zero(&sbi->ll_root_fid);
	err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid);
	if (err) {
		CERROR("cannot mds_connect: rc = %d\n", err);
		goto out_lock_cn_cb;
	}
	if (!fid_is_sane(&sbi->ll_root_fid)) {
		CERROR("%s: Invalid root fid "DFID" during mount\n",
		       sbi->ll_md_exp->exp_obd->obd_name,
		       PFID(&sbi->ll_root_fid));
		err = -EINVAL;
		goto out_lock_cn_cb;
	}
	CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));

	sb->s_op = &lustre_super_operations;
#if THREAD_SIZE >= 8192 /*b=17630*/
	sb->s_export_op = &lustre_export_operations;
#endif

	/* make root inode
	 * XXX: move this to after cbd setup? */
	valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS;
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
		valid |= OBD_MD_FLRMTPERM;
	else if (sbi->ll_flags & LL_SBI_ACL)
		valid |= OBD_MD_FLACL;

	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data) {
		err = -ENOMEM;
		goto out_lock_cn_cb;
	}

	op_data->op_fid1 = sbi->ll_root_fid;
	op_data->op_mode = 0;
	op_data->op_valid = valid;

	err = md_getattr(sbi->ll_md_exp, op_data, &request);
	kfree(op_data);
	if (err) {
		CERROR("%s: md_getattr failed for root: rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_lock_cn_cb;
	}

	err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
			       sbi->ll_md_exp, &lmd);
	if (err) {
		CERROR("failed to understand root inode md: rc = %d\n", err);
		ptlrpc_req_finished(request);
		goto out_lock_cn_cb;
	}

	LASSERT(fid_is_sane(&sbi->ll_root_fid));
	root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
					    sbi->ll_flags & LL_SBI_32BIT_API),
		       &lmd);
	md_free_lustre_md(sbi->ll_md_exp, &lmd);
	ptlrpc_req_finished(request);

	if (IS_ERR_OR_NULL(root)) {
		if (lmd.lsm)
			obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
#ifdef CONFIG_FS_POSIX_ACL
		if (lmd.posix_acl) {
			posix_acl_release(lmd.posix_acl);
			lmd.posix_acl = NULL;
		}
#endif
		err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
		root = NULL;
		CERROR("lustre_lite: bad iget4 for root\n");
		goto out_root;
	}

	err = ll_close_thread_start(&sbi->ll_lcq);
	if (err) {
		CERROR("cannot start close thread: rc %d\n", err);
		goto out_root;
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
		rct_init(&sbi->ll_rct);
		et_init(&sbi->ll_et);
	}
#endif

	checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
				 KEY_CHECKSUM, sizeof(checksum), &checksum,
				 NULL);
	cl_sb_init(sb);

	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
				 KEY_CACHE_SET, sizeof(sbi->ll_cache),
				 &sbi->ll_cache, NULL);

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		CERROR("%s: can't make root dentry\n",
		       ll_get_fsname(sb, NULL, 0));
		err = -ENOMEM;
		goto out_lock_cn_cb;
	}

	sbi->ll_sdev_orig = sb->s_dev;

	/* We set sb->s_dev equal on all lustre clients in order to support
	 * NFS export clustering. NFSD requires that the FSID be the same
	 * on all clients. */
	/* s_dev is also used in lt_compare() to compare two fs, but that is
	 * only a node-local comparison. */
	uuid = obd_get_uuid(sbi->ll_md_exp);
	if (uuid) {
		sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
		get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
	}

	kfree(data);
	kfree(osfs);

	return err;
out_root:
	iput(root);
out_lock_cn_cb:
	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
out_dt:
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;
	/* Make sure all OScs are gone, since cl_cache is accessing sbi. */
	obd_zombie_barrier();
out_md_fid:
	obd_fid_fini(sbi->ll_md_exp->exp_obd);
out_md:
	obd_disconnect(sbi->ll_md_exp);
	sbi->ll_md_exp = NULL;
out:
	kfree(data);
	kfree(osfs);
	ldebugfs_unregister_mountpoint(sbi);
	return err;
}

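/*
 * Ask the metadata export for the maximum EA (striping metadata) size so
 * callers can size their buffers; ll_get_default_mdsize() below returns
 * the default EA size instead.
 */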
int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
	int size, rc;

	*lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
	size = sizeof(int);
	rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
			  KEY_MAX_EASIZE, &size, lmmsize, NULL);
	if (rc)
		CERROR("Get max mdsize error rc %d\n", rc);

	return rc;
}

int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
	int size, rc;

	size = sizeof(int);
	rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
			  KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
	if (rc)
		CERROR("Get default mdsize error rc %d\n", rc);

	return rc;
}

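/*
 * Undo client_common_fill_super(): stop the close thread, release the
 * cl_object layer state, and disconnect the data and metadata exports.
 */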
static void client_common_put_super(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);

#ifdef CONFIG_FS_POSIX_ACL
	if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
		et_fini(&sbi->ll_et);
		rct_fini(&sbi->ll_rct);
	}
#endif

	ll_close_thread_shutdown(sbi->ll_lcq);

	cl_sb_fini(sb);

	list_del(&sbi->ll_conn_chain);

	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;
	/* wait till all OSCs are gone, since cl_cache is accessing sbi.
	 * see LU-2543. */
	obd_zombie_barrier();

	ldebugfs_unregister_mountpoint(sbi);

	obd_fid_fini(sbi->ll_md_exp->exp_obd);
	obd_disconnect(sbi->ll_md_exp);
	sbi->ll_md_exp = NULL;
}

void ll_kill_super(struct super_block *sb)
{
	struct ll_sb_info *sbi;

	/* not an initialized sb? */
	if (!(sb->s_flags & MS_ACTIVE))
		return;

	sbi = ll_s2sbi(sb);
	/* we need to restore the s_dev we changed for clustered NFS before
	 * put_super, because new kernels cache s_dev and changing sb->s_dev
	 * in put_super does not affect removing the real devices */
	if (sbi) {
		sb->s_dev = sbi->ll_sdev_orig;
		sbi->ll_umounting = 1;
	}
}

static inline int ll_set_opt(const char *opt, char *data, int fl)
{
	if (strncmp(opt, data, strlen(opt)) != 0)
		return 0;
	else
		return fl;
}

/* non-client-specific mount options are parsed in lmd_parse */
static int ll_options(char *options, int *flags)
{
	int tmp;
	char *s1 = options, *s2;

	if (!options)
		return 0;

	CDEBUG(D_CONFIG, "Parsing opts %s\n", options);

	while (*s1) {
		CDEBUG(D_SUPER, "next opt=%s\n", s1);
		tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}

		tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
				   s1);
		return -EINVAL;

next:
		/* Find next opt */
		s2 = strchr(s1, ',');
		if (!s2)
			break;
		s1 = s2 + 1;
	}
	return 0;
}

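/*
 * Initialize a freshly allocated ll_inode_info to a sane empty state;
 * directory and non-directory specific fields are set up separately based
 * on the inode mode.
 */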
void ll_lli_init(struct ll_inode_info *lli)
{
	lli->lli_inode_magic = LLI_INODE_MAGIC;
	lli->lli_flags = 0;
	lli->lli_ioepoch = 0;
	lli->lli_maxbytes = MAX_LFS_FILESIZE;
	spin_lock_init(&lli->lli_lock);
	lli->lli_posix_acl = NULL;
	lli->lli_remote_perms = NULL;
	mutex_init(&lli->lli_rmtperm_mutex);
	/* Do not set lli_fid, it has been initialized already. */
	fid_zero(&lli->lli_pfid);
	INIT_LIST_HEAD(&lli->lli_close_list);
	atomic_set(&lli->lli_open_count, 0);
	lli->lli_rmtperm_time = 0;
	lli->lli_pending_och = NULL;
	lli->lli_mds_read_och = NULL;
	lli->lli_mds_write_och = NULL;
	lli->lli_mds_exec_och = NULL;
	lli->lli_open_fd_read_count = 0;
	lli->lli_open_fd_write_count = 0;
	lli->lli_open_fd_exec_count = 0;
	mutex_init(&lli->lli_och_mutex);
	spin_lock_init(&lli->lli_agl_lock);
	lli->lli_has_smd = false;
	spin_lock_init(&lli->lli_layout_lock);
	ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
	lli->lli_clob = NULL;

	init_rwsem(&lli->lli_xattrs_list_rwsem);
	mutex_init(&lli->lli_xattrs_enq_lock);

	LASSERT(lli->lli_vfs_inode.i_mode != 0);
	if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
		mutex_init(&lli->lli_readdir_mutex);
		lli->lli_opendir_key = NULL;
		lli->lli_sai = NULL;
		spin_lock_init(&lli->lli_sa_lock);
		lli->lli_opendir_pid = 0;
	} else {
		mutex_init(&lli->lli_size_mutex);
		lli->lli_symlink_name = NULL;
		init_rwsem(&lli->lli_trunc_sem);
		mutex_init(&lli->lli_write_mutex);
		init_rwsem(&lli->lli_glimpse_sem);
		lli->lli_glimpse_time = 0;
		INIT_LIST_HEAD(&lli->lli_agl_list);
		lli->lli_agl_index = 0;
		lli->lli_async_rc = 0;
	}
	mutex_init(&lli->lli_layout_mutex);
}

static inline int ll_bdi_register(struct backing_dev_info *bdi)
{
	static atomic_t ll_bdi_num = ATOMIC_INIT(0);

	bdi->name = "lustre";
	return bdi_register(bdi, NULL, "lustre-%d",
			    atomic_inc_return(&ll_bdi_num));
}

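/*
 * Mount entry point for the llite layer: parse client mount options,
 * register the backing device, process the MGS configuration log for the
 * chosen profile and then call client_common_fill_super() with the
 * resulting mdc/osc device names.
 */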
int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
{
	struct lustre_profile *lprof = NULL;
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct ll_sb_info *sbi;
	char *dt = NULL, *md = NULL;
	char *profilenm = get_profile_name(sb);
	struct config_llog_instance *cfg;
	int err;

	CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);

	cfg = kzalloc(sizeof(*cfg), GFP_NOFS);
	if (!cfg)
		return -ENOMEM;

	try_module_get(THIS_MODULE);

	/* client additional sb info */
	lsi->lsi_llsbi = sbi = ll_init_sbi(sb);
	if (!sbi) {
		module_put(THIS_MODULE);
		kfree(cfg);
		return -ENOMEM;
	}

	err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
	if (err)
		goto out_free;

	err = bdi_init(&lsi->lsi_bdi);
	if (err)
		goto out_free;
	lsi->lsi_flags |= LSI_BDI_INITIALIZED;
	lsi->lsi_bdi.capabilities = 0;
	err = ll_bdi_register(&lsi->lsi_bdi);
	if (err)
		goto out_free;

	sb->s_bdi = &lsi->lsi_bdi;
	/* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
	sb->s_d_op = &ll_d_ops;

	/* Generate a string unique to this super, in case some joker tries
	   to mount the same fs at two mount points.
	   Use the address of the super itself.*/
	cfg->cfg_instance = sb;
	cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
	cfg->cfg_callback = class_config_llog_handler;
	/* set up client obds */
	err = lustre_process_log(sb, profilenm, cfg);
	if (err < 0) {
		CERROR("Unable to process log: %d\n", err);
		goto out_free;
	}

	/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
	lprof = class_get_profile(profilenm);
	if (!lprof) {
		LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
				   profilenm);
		err = -EINVAL;
		goto out_free;
	}
	CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
	       lprof->lp_md, lprof->lp_dt);

	dt = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
	if (!dt) {
		err = -ENOMEM;
		goto out_free;
	}

	md = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_md, cfg->cfg_instance);
	if (!md) {
		err = -ENOMEM;
		goto out_free;
	}

	/* connections, registrations, sb setup */
	err = client_common_fill_super(sb, md, dt, mnt);

out_free:
	kfree(md);
	kfree(dt);
	if (err)
		ll_put_super(sb);
	else if (sbi->ll_flags & LL_SBI_VERBOSE)
		LCONSOLE_WARN("Mounted %s\n", profilenm);

	kfree(cfg);
	return err;
} /* ll_fill_super */

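/*
 * Unmount path: end the configuration logs, force-clean the obd devices
 * belonging to this superblock, release the sbi and let the common Lustre
 * code finish tearing the superblock down.
 */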
void ll_put_super(struct super_block *sb)
{
	struct config_llog_instance cfg, params_cfg;
	struct obd_device *obd;
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	char *profilenm = get_profile_name(sb);
	int next, force = 1;

	CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);

	cfg.cfg_instance = sb;
	lustre_end_log(sb, profilenm, &cfg);

	params_cfg.cfg_instance = sb;
	lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);

	if (sbi->ll_md_exp) {
		obd = class_exp2obd(sbi->ll_md_exp);
		if (obd)
			force = obd->obd_force;
	}

	/* We need to set force before the lov_disconnect in
	   lustre_common_put_super, since l_d cleans up osc's as well. */
	if (force) {
		next = 0;
		while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
						     &next)) != NULL) {
			obd->obd_force = force;
		}
	}

	if (sbi->ll_lcq) {
		/* Only if client_common_fill_super succeeded */
		client_common_put_super(sb);
	}

	next = 0;
	while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
		class_manual_cleanup(obd);

	if (sbi->ll_flags & LL_SBI_VERBOSE)
		LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");

	if (profilenm)
		class_del_profile(profilenm);

	if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
		bdi_destroy(&lsi->lsi_bdi);
		lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
	}

	ll_free_sbi(sb);
	lsi->lsi_llsbi = NULL;

	lustre_common_put_super(sb);

	module_put(THIS_MODULE);
} /* client_put_super */

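/*
 * Return an igrab()ed inode backing the lock's resource, or NULL if the
 * cached lr_lvb_inode is not set or looks bogus (bad magic).
 */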
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
{
	struct inode *inode = NULL;

	/* NOTE: we depend on atomic igrab() -bzzz */
	lock_res_and_lock(lock);
	if (lock->l_resource->lr_lvb_inode) {
		struct ll_inode_info *lli;

		lli = ll_i2info(lock->l_resource->lr_lvb_inode);
		if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
			inode = igrab(lock->l_resource->lr_lvb_inode);
		} else {
			inode = lock->l_resource->lr_lvb_inode;
			LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
					 D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
					 lock->l_resource->lr_lvb_inode,
					 lli->lli_inode_magic);
			inode = NULL;
		}
	}
	unlock_res_and_lock(lock);
	return inode;
}

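/*
 * Release everything attached to an inode that is being evicted: cached
 * MDS open handles, the symlink name, the xattr cache, ACLs or remote
 * permissions, and the cl_object layer state.
 */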
void ll_clear_inode(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
	       inode->i_generation, inode);

	if (S_ISDIR(inode->i_mode)) {
		/* these should have been cleared in ll_file_release */
		LASSERT(!lli->lli_opendir_key);
		LASSERT(!lli->lli_sai);
		LASSERT(lli->lli_opendir_pid == 0);
	}

	spin_lock(&lli->lli_lock);
	ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
	spin_unlock(&lli->lli_lock);
	md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));

	LASSERT(!lli->lli_open_fd_write_count);
	LASSERT(!lli->lli_open_fd_read_count);
	LASSERT(!lli->lli_open_fd_exec_count);

	if (lli->lli_mds_write_och)
		ll_md_real_close(inode, FMODE_WRITE);
	if (lli->lli_mds_exec_och)
		ll_md_real_close(inode, FMODE_EXEC);
	if (lli->lli_mds_read_och)
		ll_md_real_close(inode, FMODE_READ);

	if (S_ISLNK(inode->i_mode)) {
		kfree(lli->lli_symlink_name);
		lli->lli_symlink_name = NULL;
	}

	ll_xattr_cache_destroy(inode);

	if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
		LASSERT(!lli->lli_posix_acl);
		if (lli->lli_remote_perms) {
			free_rmtperm_hash(lli->lli_remote_perms);
			lli->lli_remote_perms = NULL;
		}
	}
#ifdef CONFIG_FS_POSIX_ACL
	else if (lli->lli_posix_acl) {
		LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
		LASSERT(!lli->lli_remote_perms);
		posix_acl_release(lli->lli_posix_acl);
		lli->lli_posix_acl = NULL;
	}
#endif
	lli->lli_inode_magic = LLI_INODE_DEAD;

	if (!S_ISDIR(inode->i_mode))
		LASSERT(list_empty(&lli->lli_agl_list));

	/*
	 * XXX This has to be done before lsm is freed below, because
	 * cl_object still uses inode lsm.
	 */
	cl_inode_fini(inode);
	lli->lli_has_smd = false;
}

#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)

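/*
 * Send the setattr RPC to the MDS and apply the reply to the local inode.
 * The size update is deliberately skipped here; it is applied later via
 * cl_setattr_ost() once the dirty cache has been dealt with.
 */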
static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
			 struct md_open_data **mod)
{
	struct lustre_md md;
	struct inode *inode = d_inode(dentry);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *request = NULL;
	int rc, ia_valid;

	op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
			&request, mod);
	if (rc) {
		ptlrpc_req_finished(request);
		if (rc == -ENOENT) {
			clear_nlink(inode);
			/* Unlinked special device node? Or just a race?
			 * Pretend we've done everything. */
			if (!S_ISREG(inode->i_mode) &&
			    !S_ISDIR(inode->i_mode)) {
				ia_valid = op_data->op_attr.ia_valid;
				op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
				rc = simple_setattr(dentry, &op_data->op_attr);
				op_data->op_attr.ia_valid = ia_valid;
			}
		} else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
			CERROR("md_setattr fails: rc = %d\n", rc);
		}
		return rc;
	}

	rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
			      sbi->ll_md_exp, &md);
	if (rc) {
		ptlrpc_req_finished(request);
		return rc;
	}

	ia_valid = op_data->op_attr.ia_valid;
	/* inode size will be set in cl_setattr_ost, can't do it now since
	 * dirty cache is not cleared yet. */
	op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
	rc = simple_setattr(dentry, &op_data->op_attr);
	op_data->op_attr.ia_valid = ia_valid;

	/* Extract epoch data if obtained. */
	op_data->op_handle = md.body->handle;
	op_data->op_ioepoch = md.body->ioepoch;

	ll_update_inode(inode, &md);
	ptlrpc_req_finished(request);

	return rc;
}

/* Close IO epoch and send Size-on-MDS attribute update. */
static int ll_setattr_done_writing(struct inode *inode,
				   struct md_op_data *op_data,
				   struct md_open_data *mod)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	int rc = 0;

	if (!S_ISREG(inode->i_mode))
		return 0;

	CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n",
	       op_data->op_ioepoch, PFID(&lli->lli_fid));

	op_data->op_flags = MF_EPOCH_CLOSE;
	ll_done_writing_attr(inode, op_data);
	ll_pack_inode2opdata(inode, op_data, NULL);

	rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
	if (rc == -EAGAIN)
		/* MDS has instructed us to obtain Size-on-MDS attribute
		 * from OSTs and send setattr back to MDS. */
		rc = ll_som_update(inode, op_data);
	else if (rc)
		CERROR("inode %lu mdc truncate failed: rc = %d\n",
		       inode->i_ino, rc);
	return rc;
}

/* If this inode has objects allocated to it (lsm != NULL), then the OST
 * object(s) determine the file size and mtime. Otherwise, the MDS will
 * keep these values until such a time that objects are allocated for it.
 * We do the MDS operations first, as it is checking permissions for us.
 * We don't do the MDS RPC if there is nothing that we want to store there,
 * otherwise there is no harm in updating mtime/atime on the MDS if we are
 * going to do an RPC anyways.
 *
 * If we are doing a truncate, we will send the mtime and ctime updates
 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
 * at the same time.
 *
 * In case of HSM import, we only set attr on MDS.
 */
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
{
	struct inode *inode = d_inode(dentry);
	struct ll_inode_info *lli = ll_i2info(inode);
	struct md_op_data *op_data = NULL;
	struct md_open_data *mod = NULL;
	bool file_is_released = false;
	int rc = 0, rc1 = 0;

	CDEBUG(D_VFSTRACE,
	       "%s: setattr inode %p/fid:"DFID
	       " from %llu to %llu, valid %x, hsm_import %d\n",
	       ll_get_fsname(inode->i_sb, NULL, 0), inode,
	       PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
	       attr->ia_valid, hsm_import);

	if (attr->ia_valid & ATTR_SIZE) {
		/* Check new size against VFS/VM file size limit and rlimit */
		rc = inode_newsize_ok(inode, attr->ia_size);
		if (rc)
			return rc;

		/* The maximum Lustre file size is variable, based on the
		 * OST maximum object size and number of stripes. This
		 * needs another check in addition to the VFS check above. */
		if (attr->ia_size > ll_file_maxbytes(inode)) {
			CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
			       PFID(&lli->lli_fid), attr->ia_size,
			       ll_file_maxbytes(inode));
			return -EFBIG;
		}

		attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
	}

	/* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
	if (attr->ia_valid & TIMES_SET_FLAGS) {
		if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
		    !capable(CFS_CAP_FOWNER))
			return -EPERM;
	}

	/* We mark all of the fields "set" so MDS/OST does not re-set them */
	if (attr->ia_valid & ATTR_CTIME) {
		attr->ia_ctime = CURRENT_TIME;
		attr->ia_valid |= ATTR_CTIME_SET;
	}
	if (!(attr->ia_valid & ATTR_ATIME_SET) &&
	    (attr->ia_valid & ATTR_ATIME)) {
		attr->ia_atime = CURRENT_TIME;
		attr->ia_valid |= ATTR_ATIME_SET;
	}
	if (!(attr->ia_valid & ATTR_MTIME_SET) &&
	    (attr->ia_valid & ATTR_MTIME)) {
		attr->ia_mtime = CURRENT_TIME;
		attr->ia_valid |= ATTR_MTIME_SET;
	}

	if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
		CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %llu\n",
		       LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
		       (s64)ktime_get_real_seconds());

	/* If we are changing file size, file content is modified, flag it. */
	if (attr->ia_valid & ATTR_SIZE) {
		attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
		spin_lock(&lli->lli_lock);
		lli->lli_flags |= LLIF_DATA_MODIFIED;
		spin_unlock(&lli->lli_lock);
	}

	/* We always do an MDS RPC, even if we're only changing the size;
	 * only the MDS knows whether truncate() should fail with -ETXTBUSY */

	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data)
		return -ENOMEM;

	if (!S_ISDIR(inode->i_mode))
		inode_unlock(inode);

	memcpy(&op_data->op_attr, attr, sizeof(*attr));

	/* Open epoch for truncate. */
	if (exp_connect_som(ll_i2mdexp(inode)) &&
	    (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
		op_data->op_flags = MF_EPOCH_OPEN;

	/* truncate on a released file must fail with -ENODATA,
	 * so size must not be set on MDS for released file
	 * but other attributes must be set
	 */
	if (S_ISREG(inode->i_mode)) {
		struct lov_stripe_md *lsm;
		__u32 gen;

		ll_layout_refresh(inode, &gen);
		lsm = ccc_inode_lsm_get(inode);
		if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
			file_is_released = true;
		ccc_inode_lsm_put(inode, lsm);
	}

	/* if not in HSM import mode, clear size attr for released file
	 * we clear the attribute send to MDT in op_data, not the original
	 * received from caller in attr which is used later to
	 * decide return code */
	if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import)
		op_data->op_attr.ia_valid &= ~ATTR_SIZE;

	rc = ll_md_setattr(dentry, op_data, &mod);
	if (rc)
		goto out;

	/* truncate failed (only when non HSM import), others succeed */
	if (file_is_released) {
		if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
			rc = -ENODATA;
		else
			rc = 0;
		goto out;
	}

	/* RPC to MDT is sent, cancel data modification flag */
	if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
		spin_lock(&lli->lli_lock);
		lli->lli_flags &= ~LLIF_DATA_MODIFIED;
		spin_unlock(&lli->lli_lock);
	}

	ll_ioepoch_open(lli, op_data->op_ioepoch);
	if (!S_ISREG(inode->i_mode)) {
		rc = 0;
		goto out;
	}

	if (attr->ia_valid & (ATTR_SIZE |
			      ATTR_ATIME | ATTR_ATIME_SET |
			      ATTR_MTIME | ATTR_MTIME_SET)) {
		/* For truncate and utimes sending attributes to OSTs, setting
		 * mtime/atime to the past will be performed under PW [0:EOF]
		 * extent lock (new_size:EOF for truncate). It may seem
		 * excessive to send mtime/atime updates to OSTs when not
		 * setting times to past, but it is necessary due to possible
		 * time de-synchronization between MDT inode and OST objects */
		if (attr->ia_valid & ATTR_SIZE)
			down_write(&lli->lli_trunc_sem);
		rc = cl_setattr_ost(inode, attr);
		if (attr->ia_valid & ATTR_SIZE)
			up_write(&lli->lli_trunc_sem);
	}
out:
	if (op_data->op_ioepoch) {
		rc1 = ll_setattr_done_writing(inode, op_data, mod);
		if (!rc)
			rc = rc1;
	}
	ll_finish_md_op_data(op_data);

	if (!S_ISDIR(inode->i_mode)) {
		inode_lock(inode);
		if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
			inode_dio_wait(inode);
	}

	ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
			   LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);

	return rc;
}

int ll_setattr(struct dentry *de, struct iattr *attr)
{
	int mode = d_inode(de)->i_mode;

	if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
	    (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
		attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;

	if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
	     (ATTR_SIZE|ATTR_MODE)) &&
	    (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
	     (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
	      !(attr->ia_mode & S_ISGID))))
		attr->ia_valid |= ATTR_FORCE;

	if ((attr->ia_valid & ATTR_MODE) &&
	    (mode & S_ISUID) &&
	    !(attr->ia_mode & S_ISUID) &&
	    !(attr->ia_valid & ATTR_KILL_SUID))
		attr->ia_valid |= ATTR_KILL_SUID;

	if ((attr->ia_valid & ATTR_MODE) &&
	    ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
	    !(attr->ia_mode & S_ISGID) &&
	    !(attr->ia_valid & ATTR_KILL_SGID))
		attr->ia_valid |= ATTR_KILL_SGID;

	return ll_setattr_raw(de, attr, false);
}

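/*
 * Gather filesystem statistics from both the MDS (inode counts) and the
 * OSTs (block counts) and merge them into a single obd_statfs result.
 */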
int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
		       __u64 max_age, __u32 flags)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_statfs obd_osfs;
	int rc;

	rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
	if (rc) {
		CERROR("md_statfs fails: rc = %d\n", rc);
		return rc;
	}

	osfs->os_type = sb->s_magic;

	CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
	       osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,
	       osfs->os_files);

	if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
		flags |= OBD_STATFS_NODELAY;

	rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
	if (rc) {
		CERROR("obd_statfs fails: rc = %d\n", rc);
		return rc;
	}

	CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
	       obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
	       obd_osfs.os_files);

	osfs->os_bsize = obd_osfs.os_bsize;
	osfs->os_blocks = obd_osfs.os_blocks;
	osfs->os_bfree = obd_osfs.os_bfree;
	osfs->os_bavail = obd_osfs.os_bavail;

	/* If we don't have as many objects free on the OST as inodes
	 * on the MDS, we reduce the total number of inodes to
	 * compensate, so that the "inodes in use" number is correct.
	 */
	if (obd_osfs.os_ffree < osfs->os_ffree) {
		osfs->os_files = (osfs->os_files - osfs->os_ffree) +
				 obd_osfs.os_ffree;
		osfs->os_ffree = obd_osfs.os_ffree;
	}

	return rc;
}

int ll_statfs(struct dentry *de, struct kstatfs *sfs)
{
	struct super_block *sb = de->d_sb;
	struct obd_statfs osfs;
	int rc;

	CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64());
	ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);

	/* Some amount of caching on the client is allowed */
	rc = ll_statfs_internal(sb, &osfs,
				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
				0);
	if (rc)
		return rc;

	statfs_unpack(sfs, &osfs);

	/* We need to downshift for all 32-bit kernels, because we can't
	 * tell if the kernel is being called via sys_statfs64() or not.
	 * Stop before overflowing f_bsize - in which case it is better
	 * to just risk EOVERFLOW if caller is using old sys_statfs(). */
	if (sizeof(long) < 8) {
		while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
			sfs->f_bsize <<= 1;

			osfs.os_blocks >>= 1;
			osfs.os_bfree >>= 1;
			osfs.os_bavail >>= 1;
		}
	}

	sfs->f_blocks = osfs.os_blocks;
	sfs->f_bfree = osfs.os_bfree;
	sfs->f_bavail = osfs.os_bavail;
	sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
	return 0;
}

void ll_inode_size_lock(struct inode *inode)
{
	struct ll_inode_info *lli;

	LASSERT(!S_ISDIR(inode->i_mode));

	lli = ll_i2info(inode);
	mutex_lock(&lli->lli_size_mutex);
}

void ll_inode_size_unlock(struct inode *inode)
{
	struct ll_inode_info *lli;

	lli = ll_i2info(inode);
	mutex_unlock(&lli->lli_size_mutex);
}

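/*
 * Refresh the VFS inode from the attributes carried in a lustre_md reply:
 * striping/layout, ACLs, timestamps, ownership, mode, nlink and, when the
 * MDS has size authority, the file size.
 */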
void ll_update_inode(struct inode *inode, struct lustre_md *md)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct mdt_body *body = md->body;
	struct lov_stripe_md *lsm = md->lsm;
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	LASSERT((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
	if (lsm) {
		if (!lli->lli_has_smd &&
		    !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
			cl_file_inode_init(inode, md);

		lli->lli_maxbytes = lsm->lsm_maxbytes;
		if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
			lli->lli_maxbytes = MAX_LFS_FILESIZE;
	}

	if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
		if (body->valid & OBD_MD_FLRMTPERM)
			ll_update_remote_perm(inode, md->remote_perm);
	}
#ifdef CONFIG_FS_POSIX_ACL
	else if (body->valid & OBD_MD_FLACL) {
		spin_lock(&lli->lli_lock);
		if (lli->lli_posix_acl)
			posix_acl_release(lli->lli_posix_acl);
		lli->lli_posix_acl = md->posix_acl;
		spin_unlock(&lli->lli_lock);
	}
#endif
	inode->i_ino = cl_fid_build_ino(&body->fid1,
					sbi->ll_flags & LL_SBI_32BIT_API);
	inode->i_generation = cl_fid_build_gen(&body->fid1);

	if (body->valid & OBD_MD_FLATIME) {
		if (body->atime > LTIME_S(inode->i_atime))
			LTIME_S(inode->i_atime) = body->atime;
		lli->lli_lvb.lvb_atime = body->atime;
	}
	if (body->valid & OBD_MD_FLMTIME) {
		if (body->mtime > LTIME_S(inode->i_mtime)) {
			CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
			       inode->i_ino, LTIME_S(inode->i_mtime),
			       body->mtime);
			LTIME_S(inode->i_mtime) = body->mtime;
		}
		lli->lli_lvb.lvb_mtime = body->mtime;
	}
	if (body->valid & OBD_MD_FLCTIME) {
		if (body->ctime > LTIME_S(inode->i_ctime))
			LTIME_S(inode->i_ctime) = body->ctime;
		lli->lli_lvb.lvb_ctime = body->ctime;
	}
	if (body->valid & OBD_MD_FLMODE)
		inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
	if (body->valid & OBD_MD_FLTYPE)
		inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
	LASSERT(inode->i_mode != 0);
	if (S_ISREG(inode->i_mode))
		inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
				       LL_MAX_BLKSIZE_BITS);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;
	if (body->valid & OBD_MD_FLUID)
		inode->i_uid = make_kuid(&init_user_ns, body->uid);
	if (body->valid & OBD_MD_FLGID)
		inode->i_gid = make_kgid(&init_user_ns, body->gid);
	if (body->valid & OBD_MD_FLFLAGS)
		inode->i_flags = ll_ext_to_inode_flags(body->flags);
	if (body->valid & OBD_MD_FLNLINK)
		set_nlink(inode, body->nlink);
	if (body->valid & OBD_MD_FLRDEV)
		inode->i_rdev = old_decode_dev(body->rdev);

	if (body->valid & OBD_MD_FLID) {
		/* FID shouldn't be changed! */
		if (fid_is_sane(&lli->lli_fid)) {
			LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
				 "Trying to change FID "DFID
				 " to the "DFID", inode %lu/%u(%p)\n",
				 PFID(&lli->lli_fid), PFID(&body->fid1),
				 inode->i_ino, inode->i_generation, inode);
		} else
			lli->lli_fid = body->fid1;
	}

	LASSERT(fid_seq(&lli->lli_fid) != 0);

	if (body->valid & OBD_MD_FLSIZE) {
		if (exp_connect_som(ll_i2mdexp(inode)) &&
		    S_ISREG(inode->i_mode)) {
			struct lustre_handle lockh;
			ldlm_mode_t mode;

			/* As it is possible a blocking ast has been processed
			 * by this time, we need to check there is an UPDATE
			 * lock on the client and set LLIF_MDS_SIZE_LOCK while
			 * holding it. */
			mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
					       &lockh, LDLM_FL_CBPENDING,
					       LCK_CR | LCK_CW |
					       LCK_PR | LCK_PW);
			if (mode) {
				if (lli->lli_flags & (LLIF_DONE_WRITING |
						      LLIF_EPOCH_PENDING |
						      LLIF_SOM_DIRTY)) {
					CERROR("ino %lu flags %u still has size authority! do not trust the size got from MDS\n",
					       inode->i_ino, lli->lli_flags);
				} else {
					/* Use old size assignment to avoid
					 * deadlock bz14138 & bz14326 */
					i_size_write(inode, body->size);
					spin_lock(&lli->lli_lock);
					lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
					spin_unlock(&lli->lli_lock);
				}
				ldlm_lock_decref(&lockh, mode);
			}
		} else {
			/* Use old size assignment to avoid
			 * deadlock bz14138 & bz14326 */
			i_size_write(inode, body->size);

			CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
			       inode->i_ino, (unsigned long long)body->size);
		}

		if (body->valid & OBD_MD_FLBLOCKS)
			inode->i_blocks = body->blocks;
	}

	if (body->valid & OBD_MD_TSTATE) {
		if (body->t_state & MS_RESTORE)
			lli->lli_flags |= LLIF_FILE_RESTORING;
	}
}

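/*
 * Initialize a brand new inode from the lustre_md attributes and install
 * the inode/file operations matching its type.
 */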
void ll_read_inode2(struct inode *inode, void *opaque)
{
	struct lustre_md *md = opaque;
	struct ll_inode_info *lli = ll_i2info(inode);

	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
	       PFID(&lli->lli_fid), inode);

	LASSERT(!lli->lli_has_smd);

	/* Core attributes from the MDS first. This is a new inode, and
	 * the VFS doesn't zero times in the core inode so we have to do
	 * it ourselves. They will be overwritten by either MDS or OST
	 * attributes - we just need to make sure they aren't newer. */
	LTIME_S(inode->i_mtime) = 0;
	LTIME_S(inode->i_atime) = 0;
	LTIME_S(inode->i_ctime) = 0;
	inode->i_rdev = 0;
	ll_update_inode(inode, md);

	/* OIDEBUG(inode); */

	if (S_ISREG(inode->i_mode)) {
		struct ll_sb_info *sbi = ll_i2sbi(inode);

		inode->i_op = &ll_file_inode_operations;
		inode->i_fop = sbi->ll_fop;
		inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ll_dir_inode_operations;
		inode->i_fop = &ll_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &ll_fast_symlink_inode_operations;
	} else {
		inode->i_op = &ll_special_inode_operations;

		init_special_inode(inode, inode->i_mode,
				   inode->i_rdev);
	}
}

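/*
 * Final inode teardown: discard any dirty pages, truncate the page cache
 * and clear the llite-private state.
 */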
1687void ll_delete_inode(struct inode *inode)
1688{
1689 struct cl_inode_info *lli = cl_i2info(inode);
d7e09d03 1690
6e16818b 1691 if (S_ISREG(inode->i_mode) && lli->lli_clob)
1692 /* discard all dirty pages before truncating them, required by
1693 * osc_extent implementation at LU-1030. */
1694 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
1695 CL_FSYNC_DISCARD, 1);
d7e09d03 1696
91b0abe3 1697 truncate_inode_pages_final(&inode->i_data);
1698
1699 /* Workaround for LU-118 */
1700 if (inode->i_data.nrpages) {
1701 spin_lock_irq(&inode->i_data.tree_lock);
1702 spin_unlock_irq(&inode->i_data.tree_lock);
d7e09d03 1703 LASSERTF(inode->i_data.nrpages == 0,
2d00bd17 1704 "inode=%lu/%u(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
1705 inode->i_ino, inode->i_generation, inode,
1706 inode->i_data.nrpages);
1707 }
1708 /* Workaround end */
1709
1710 ll_clear_inode(inode);
1711 clear_inode(inode);
1712}
1713
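/*
 * Handle the FSFILT_IOC_GETFLAGS/SETFLAGS ioctls: GETFLAGS fetches the
 * inode flags from the MDS; SETFLAGS pushes the new flags to the MDS
 * and, if the file has OST objects, to the OSTs as well.
 */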
1714int ll_iocontrol(struct inode *inode, struct file *file,
1715 unsigned int cmd, unsigned long arg)
1716{
1717 struct ll_sb_info *sbi = ll_i2sbi(inode);
1718 struct ptlrpc_request *req = NULL;
1719 int rc, flags = 0;
d7e09d03 1720
a58a38ac 1721 switch (cmd) {
1722 case FSFILT_IOC_GETFLAGS: {
1723 struct mdt_body *body;
1724 struct md_op_data *op_data;
1725
1726 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
1727 0, 0, LUSTRE_OPC_ANY,
1728 NULL);
1729 if (IS_ERR(op_data))
0a3bdb00 1730 return PTR_ERR(op_data);
1731
1732 op_data->op_valid = OBD_MD_FLFLAGS;
1733 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
1734 ll_finish_md_op_data(op_data);
1735 if (rc) {
1736 CERROR("failure %d inode %lu\n", rc, inode->i_ino);
0a3bdb00 1737 return -abs(rc);
1738 }
1739
1740 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1741
1742 flags = body->flags;
1743
1744 ptlrpc_req_finished(req);
1745
7ac5db21 1746 return put_user(flags, (int __user *)arg);
1747 }
1748 case FSFILT_IOC_SETFLAGS: {
1749 struct lov_stripe_md *lsm;
45efd655 1750 struct obd_info oinfo = { };
1751 struct md_op_data *op_data;
1752
7ac5db21 1753 if (get_user(flags, (int __user *)arg))
0a3bdb00 1754 return -EFAULT;
1755
1756 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1757 LUSTRE_OPC_ANY, NULL);
1758 if (IS_ERR(op_data))
0a3bdb00 1759 return PTR_ERR(op_data);
1760
1761 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
1762 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
1763 rc = md_setattr(sbi->ll_md_exp, op_data,
1764 NULL, 0, NULL, 0, &req, NULL);
1765 ll_finish_md_op_data(op_data);
1766 ptlrpc_req_finished(req);
1767 if (rc)
0a3bdb00 1768 return rc;
1769
1770 inode->i_flags = ll_ext_to_inode_flags(flags);
1771
1772 lsm = ccc_inode_lsm_get(inode);
1773 if (!lsm_has_objects(lsm)) {
1774 ccc_inode_lsm_put(inode, lsm);
0a3bdb00 1775 return 0;
5dd16419 1776 }
d7e09d03 1777
1778 oinfo.oi_oa = kmem_cache_alloc(obdo_cachep,
1779 GFP_NOFS | __GFP_ZERO);
1780 if (!oinfo.oi_oa) {
1781 ccc_inode_lsm_put(inode, lsm);
0a3bdb00 1782 return -ENOMEM;
1783 }
1784 oinfo.oi_md = lsm;
1785 oinfo.oi_oa->o_oi = lsm->lsm_oi;
1786 oinfo.oi_oa->o_flags = flags;
1787 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
1788 OBD_MD_FLGROUP;
1789 obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
1790 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
2ba262fb 1791 kmem_cache_free(obdo_cachep, oinfo.oi_oa);
1792 ccc_inode_lsm_put(inode, lsm);
1793
1794 if (rc && rc != -EPERM && rc != -EACCES)
1795 CERROR("osc_setattr_async fails: rc = %d\n", rc);
1796
0a3bdb00 1797 return rc;
1798 }
1799 default:
0a3bdb00 1800 return -ENOSYS;
1801 }
1802
0a3bdb00 1803 return 0;
1804}
1805
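/* Ask both the MD and DT exports to flush the security context of the
 * calling user. */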
1806int ll_flush_ctx(struct inode *inode)
1807{
1808 struct ll_sb_info *sbi = ll_i2sbi(inode);
1809
1810 CDEBUG(D_SEC, "flush context for user %d\n",
1811 from_kuid(&init_user_ns, current_uid()));
1812
1813 obd_set_info_async(NULL, sbi->ll_md_exp,
1814 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1815 0, NULL, NULL);
1816 obd_set_info_async(NULL, sbi->ll_dt_exp,
1817 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1818 0, NULL, NULL);
1819 return 0;
1820}
1821
1822/* umount -f client means force down, don't save state */
1823void ll_umount_begin(struct super_block *sb)
1824{
1825 struct ll_sb_info *sbi = ll_s2sbi(sb);
1826 struct obd_device *obd;
1827 struct obd_ioctl_data *ioc_data;
1828
1829 CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
1830 sb->s_count, atomic_read(&sb->s_active));
1831
1832 obd = class_exp2obd(sbi->ll_md_exp);
6e16818b 1833 if (!obd) {
55f5a824 1834 CERROR("Invalid MDC connection handle %#llx\n",
d7e09d03 1835 sbi->ll_md_exp->exp_handle.h_cookie);
1836 return;
1837 }
1838 obd->obd_force = 1;
1839
1840 obd = class_exp2obd(sbi->ll_dt_exp);
6e16818b 1841 if (!obd) {
55f5a824 1842 CERROR("Invalid LOV connection handle %#llx\n",
d7e09d03 1843 sbi->ll_dt_exp->exp_handle.h_cookie);
1844 return;
1845 }
1846 obd->obd_force = 1;
1847
496a51bd 1848 ioc_data = kzalloc(sizeof(*ioc_data), GFP_NOFS);
1849 if (ioc_data) {
1850 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
ec83e611 1851 sizeof(*ioc_data), ioc_data, NULL);
1852
1853 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
ec83e611 1854 sizeof(*ioc_data), ioc_data, NULL);
d7e09d03 1855
97903a26 1856 kfree(ioc_data);
1857 }
1858
1859 /* Really, we'd like to wait until there are no requests outstanding,
1860 * and then continue. For now, we just invalidate the requests,
1861 * schedule() and sleep one second if needed, and hope.
1862 */
1863 schedule();
1864}
1865
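/*
 * Handle a read-only/read-write remount: push the new state to the MDS
 * via KEY_READ_ONLY and update the superblock flags to match.
 */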
1866int ll_remount_fs(struct super_block *sb, int *flags, char *data)
1867{
1868 struct ll_sb_info *sbi = ll_s2sbi(sb);
1869 char *profilenm = get_profile_name(sb);
1870 int err;
1871 __u32 read_only;
1872
1873 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
1874 read_only = *flags & MS_RDONLY;
1875 err = obd_set_info_async(NULL, sbi->ll_md_exp,
1876 sizeof(KEY_READ_ONLY),
1877 KEY_READ_ONLY, sizeof(read_only),
1878 &read_only, NULL);
1879 if (err) {
1880 LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
1881 profilenm, read_only ?
1882 "read-only" : "read-write", err);
1883 return err;
1884 }
1885
1886 if (read_only)
1887 sb->s_flags |= MS_RDONLY;
1888 else
1889 sb->s_flags &= ~MS_RDONLY;
1890
1891 if (sbi->ll_flags & LL_SBI_VERBOSE)
1892 LCONSOLE_WARN("Remounted %s %s\n", profilenm,
1893 read_only ? "read-only" : "read-write");
1894 }
1895 return 0;
1896}
1897
1898/**
1899 * Cleanup the open handle that is cached on MDT-side.
1900 *
1901 * In the open case, the client-side open handling thread may hit an
1902 * error after the MDT has granted the open. In that case, the client
1903 * should send a close RPC to the MDT as cleanup; otherwise, the open
1904 * handle is leaked on the MDT until the client unmounts or is evicted.
1905 *
1906 * Furthermore, if someone unlinks the file, the open handle holds a
1907 * reference on the file/object and will block subsequent threads that
1908 * want to locate the object via FID.
1909 *
1910 * \param[in] sb super block for this file-system
1911 * \param[in] open_req pointer to the original open request
1912 */
1913void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
1914{
1915 struct mdt_body *body;
1916 struct md_op_data *op_data;
1917 struct ptlrpc_request *close_req = NULL;
1918 struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp;
1919
1920 body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
af13af52 1921 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
1922 if (!op_data) {
1923 CWARN("%s: cannot allocate op_data to release open handle for "
1924 DFID "\n",
1925 ll_get_fsname(sb, NULL, 0), PFID(&body->fid1));
1926
1927 return;
1928 }
1929
1930 op_data->op_fid1 = body->fid1;
1931 op_data->op_ioepoch = body->ioepoch;
1932 op_data->op_handle = body->handle;
1933 op_data->op_mod_time = get_seconds();
1934 md_close(exp, op_data, NULL, &close_req);
1935 ptlrpc_req_finished(close_req);
1936 ll_finish_md_op_data(op_data);
1937}
1938
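/*
 * Unpack the lustre_md from a server reply and either update an existing
 * inode (when *inode is non-NULL) or instantiate a new one from the
 * returned FID.  A layout piggybacked on a granted layout lock is applied
 * here as well; on error for an open request the cached open handle on
 * the MDT is cleaned up via ll_open_cleanup().
 */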
1939int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
1940 struct super_block *sb, struct lookup_intent *it)
1941{
1942 struct ll_sb_info *sbi = NULL;
24af3e16 1943 struct lustre_md md = { NULL };
d7e09d03 1944 int rc;
1945
1946 LASSERT(*inode || sb);
1947 sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
1948 rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
1949 sbi->ll_md_exp, &md);
1950 if (rc)
44ecac68 1951 goto cleanup;
1952
1953 if (*inode) {
1954 ll_update_inode(*inode, &md);
1955 } else {
6e16818b 1956 LASSERT(sb);
1957
1958 /*
1959		 * At this point the server returns to the client the same
1960		 * FID the client generated for the create, so using ->fid1
		 * is okay here.
1961 */
1962 LASSERT(fid_is_sane(&md.body->fid1));
1963
1964 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
c1e2699d 1965 sbi->ll_flags & LL_SBI_32BIT_API),
d7e09d03 1966 &md);
6e16818b 1967 if (IS_ERR_OR_NULL(*inode)) {
1968#ifdef CONFIG_FS_POSIX_ACL
1969 if (md.posix_acl) {
1970 posix_acl_release(md.posix_acl);
1971 md.posix_acl = NULL;
1972 }
1973#endif
1974 rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
1975 *inode = NULL;
1976 CERROR("new_inode -fatal: rc %d\n", rc);
34e1f2bb 1977 goto out;
1978 }
1979 }
1980
1981	/* Handle a piggybacked layout lock.
1982	 * A layout lock can be piggybacked on getattr and open requests.
1983	 * The lsm can be applied to the inode only if it comes with a layout
1984	 * lock; otherwise the correct layout may be overwritten, for example:
1985	 * 1. proc1: the MDT returns an lsm but does not grant the layout lock
1986	 * 2. the layout is changed by another client
1987	 * 3. proc2: refreshes the layout; the layout lock is granted
1988	 * 4. proc1: applies the now-stale layout */
6e16818b 1989 if (it && it->d.lustre.it_lock_mode != 0) {
1990 struct lustre_handle lockh;
1991 struct ldlm_lock *lock;
1992
1993 lockh.cookie = it->d.lustre.it_lock_handle;
1994 lock = ldlm_handle2lock(&lockh);
6e16818b 1995 LASSERT(lock);
1996 if (ldlm_has_layout(lock)) {
1997 struct cl_object_conf conf;
1998
1999 memset(&conf, 0, sizeof(conf));
2000 conf.coc_opc = OBJECT_CONF_SET;
2001 conf.coc_inode = *inode;
2002 conf.coc_lock = lock;
2003 conf.u.coc_md = &md;
2004 (void)ll_layout_conf(*inode, &conf);
2005 }
2006 LDLM_LOCK_PUT(lock);
2007 }
2008
2009out:
6e16818b 2010 if (md.lsm)
2011 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2012 md_free_lustre_md(sbi->ll_md_exp, &md);
2013
2014cleanup:
2015 if (rc != 0 && it && it->it_op & IT_OPEN)
2016 ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);
2017
0a3bdb00 2018 return rc;
2019}
2020
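/*
 * IOC_OBD_STATFS helper: validate the user-supplied obd_ioctl_data and
 * forward the statfs request to the MD (LL_STATFS_LMV) or DT
 * (LL_STATFS_LOV) export.
 */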
4c6243ec 2021int ll_obd_statfs(struct inode *inode, void __user *arg)
2022{
2023 struct ll_sb_info *sbi = NULL;
2024 struct obd_export *exp;
2025 char *buf = NULL;
2026 struct obd_ioctl_data *data = NULL;
2027 __u32 type;
2028 int len = 0, rc;
2029
2030 if (!inode) {
2031 rc = -EINVAL;
2032 goto out_statfs;
2033 }
2034
2035 sbi = ll_i2sbi(inode);
2036 if (!sbi) {
2037 rc = -EINVAL;
2038 goto out_statfs;
2039 }
2040
2041 rc = obd_ioctl_getdata(&buf, &len, arg);
2042 if (rc)
34e1f2bb 2043 goto out_statfs;
d7e09d03 2044
bdbb0512 2045 data = (void *)buf;
d7e09d03 2046 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2047 !data->ioc_pbuf1 || !data->ioc_pbuf2) {
2048 rc = -EINVAL;
2049 goto out_statfs;
2050 }
2051
2052 if (data->ioc_inllen1 != sizeof(__u32) ||
2053 data->ioc_inllen2 != sizeof(__u32) ||
2054 data->ioc_plen1 != sizeof(struct obd_statfs) ||
2055 data->ioc_plen2 != sizeof(struct obd_uuid)) {
2056 rc = -EINVAL;
2057 goto out_statfs;
2058 }
2059
2060 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2061 if (type & LL_STATFS_LMV)
2062 exp = sbi->ll_md_exp;
2063 else if (type & LL_STATFS_LOV)
2064 exp = sbi->ll_dt_exp;
2065 else {
2066 rc = -ENODEV;
2067 goto out_statfs;
2068 }
d7e09d03 2069
44164fc9 2070 rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
d7e09d03 2071 if (rc)
34e1f2bb 2072 goto out_statfs;
2073out_statfs:
2074 if (buf)
2075 obd_ioctl_freedata(buf, len);
2076 return rc;
2077}
2078
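/*
 * Apply a PARAM_LLITE configuration record: recover the superblock
 * pointer encoded in the instance name and hand the parameter to the
 * llite proc variable handlers.
 */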
2079int ll_process_config(struct lustre_cfg *lcfg)
2080{
2081 char *ptr;
2082 void *sb;
2083 struct lprocfs_static_vars lvars;
2084 unsigned long x;
2085 int rc = 0;
2086
2087 lprocfs_llite_init_vars(&lvars);
2088
2089 /* The instance name contains the sb: lustre-client-aacfe000 */
2090 ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2091 if (!ptr || !*(++ptr))
2092 return -EINVAL;
692f2b6c 2093 rc = kstrtoul(ptr, 16, &x);
2094 if (rc != 0)
2095 return -EINVAL;
2096 sb = (void *)x;
2097 /* This better be a real Lustre superblock! */
2098 LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2099
2100 /* Note we have not called client_common_fill_super yet, so
2101 proc fns must be able to handle that! */
2102 rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2103 lcfg, sb);
2104 if (rc > 0)
2105 rc = 0;
fbe7c6c7 2106 return rc;
2107}
2108
2109 /* This function prepares the md_op_data hint for passing it down to the MD stack. */
aff9d8e8 2110struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2111 struct inode *i1, struct inode *i2,
2112 const char *name, int namelen,
2113 int mode, __u32 opc, void *data)
2114{
2115 if (namelen > ll_i2sbi(i1)->ll_namelen)
2116 return ERR_PTR(-ENAMETOOLONG);
2117
6e16818b 2118 if (!op_data)
496a51bd 2119 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
d7e09d03 2120
6e16818b 2121 if (!op_data)
2122 return ERR_PTR(-ENOMEM);
2123
2124 ll_i2gids(op_data->op_suppgids, i1, i2);
2125 op_data->op_fid1 = *ll_inode2fid(i1);
d7e09d03 2126
ef2e0f55 2127 if (i2)
d7e09d03 2128 op_data->op_fid2 = *ll_inode2fid(i2);
ef2e0f55 2129 else
d7e09d03 2130 fid_zero(&op_data->op_fid2);
2131
2132 op_data->op_name = name;
2133 op_data->op_namelen = namelen;
2134 op_data->op_mode = mode;
14e3f92a 2135 op_data->op_mod_time = ktime_get_real_seconds();
2136 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2137 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2138 op_data->op_cap = cfs_curproc_cap_pack();
2139 op_data->op_bias = 0;
2140 op_data->op_cli_flags = 0;
2141 if ((opc == LUSTRE_OPC_CREATE) && name &&
2142 filename_is_volatile(name, namelen, NULL))
2143 op_data->op_bias |= MDS_CREATE_VOLATILE;
2144 op_data->op_opc = opc;
2145 op_data->op_mds = 0;
2146 op_data->op_data = data;
2147
2148	/* If the file is being opened after mknod() (normally due to NFS),
2149	 * try to use the default stripe data from the parent directory for
2150	 * allocating OST objects, and try to pass the parent FID to the MDS. */
2151 if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
2152 !ll_i2info(i2)->lli_has_smd) {
2153 struct ll_inode_info *lli = ll_i2info(i2);
2154
2155 spin_lock(&lli->lli_lock);
2156 if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
2157 op_data->op_fid1 = lli->lli_pfid;
2158 spin_unlock(&lli->lli_lock);
2159 }
2160
2161 /* When called by ll_setattr_raw, file is i1. */
1f6eaf83 2162 if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED)
2163 op_data->op_bias |= MDS_DATA_MODIFIED;
2164
2165 return op_data;
2166}
2167
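/*
 * Release an md_op_data previously set up by ll_prep_md_op_data().
 * Typical usage, mirroring the FSFILT_IOC_GETFLAGS path in ll_iocontrol()
 * above:
 *
 *	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *				     LUSTRE_OPC_ANY, NULL);
 *	if (IS_ERR(op_data))
 *		return PTR_ERR(op_data);
 *	rc = md_getattr(sbi->ll_md_exp, op_data, &req);
 *	ll_finish_md_op_data(op_data);
 */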
2168void ll_finish_md_op_data(struct md_op_data *op_data)
2169{
97903a26 2170 kfree(op_data);
2171}
2172
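/* Emit the llite-specific mount options currently set in ll_flags
 * (nolock, flock, localflock, user_xattr, lazystatfs, user_fid2path). */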
2173int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2174{
2175 struct ll_sb_info *sbi;
2176
6e16818b 2177 LASSERT(seq && dentry);
2178 sbi = ll_s2sbi(dentry->d_sb);
2179
2180 if (sbi->ll_flags & LL_SBI_NOLCK)
2181 seq_puts(seq, ",nolock");
2182
2183 if (sbi->ll_flags & LL_SBI_FLOCK)
2184 seq_puts(seq, ",flock");
2185
2186 if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2187 seq_puts(seq, ",localflock");
2188
2189 if (sbi->ll_flags & LL_SBI_USER_XATTR)
2190 seq_puts(seq, ",user_xattr");
2191
2192 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2193 seq_puts(seq, ",lazystatfs");
2194
2195 if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
2196 seq_puts(seq, ",user_fid2path");
2197
0a3bdb00 2198 return 0;
2199}
2200
2201/**
2202 * Get obd name by cmd, and copy out to user space
2203 */
2204int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
2205{
2206 struct ll_sb_info *sbi = ll_i2sbi(inode);
2207 struct obd_device *obd;
2208
2209 if (cmd == OBD_IOC_GETDTNAME)
2210 obd = class_exp2obd(sbi->ll_dt_exp);
2211 else if (cmd == OBD_IOC_GETMDNAME)
2212 obd = class_exp2obd(sbi->ll_md_exp);
2213 else
0a3bdb00 2214 return -EINVAL;
2215
2216 if (!obd)
0a3bdb00 2217 return -ENOENT;
d7e09d03 2218
2219 if (copy_to_user((void __user *)arg, obd->obd_name,
2220 strlen(obd->obd_name) + 1))
0a3bdb00 2221 return -EFAULT;
d7e09d03 2222
0a3bdb00 2223 return 0;
2224}
2225
2226/**
2227 * Get the Lustre file system name from \a sb. If \a buf is provided (non-NULL),
2228 * the fsname will be returned in this buffer; otherwise, a static buffer will
2229 * be used to store the fsname and returned to the caller.
2230 */
2231char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
2232{
2233 static char fsname_static[MTI_NAME_MAXLEN];
2234 struct lustre_sb_info *lsi = s2lsi(sb);
2235 char *ptr;
2236 int len;
2237
6e16818b 2238 if (!buf) {
2239		/* this means the caller wants to use the static buffer
2240		 * and doesn't care about races. Usually this is
2241		 * in the error reporting path */
2242 buf = fsname_static;
2243 buflen = sizeof(fsname_static);
2244 }
2245
2246 len = strlen(lsi->lsi_lmd->lmd_profile);
2247 ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
2248 if (ptr && (strcmp(ptr, "-client") == 0))
2249 len -= 7;
2250
2251 if (unlikely(len >= buflen))
2252 len = buflen - 1;
2253 strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
2254 buf[len] = '\0';
2255
2256 return buf;
2257}
2258
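/*
 * Warn that a dirty page is being discarded and, when possible, resolve a
 * path for the affected file to make the message more useful.  May run
 * under a spinlock, hence the GFP_ATOMIC allocation below.
 */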
2259void ll_dirty_page_discard_warn(struct page *page, int ioret)
2260{
2261 char *buf, *path = NULL;
2262 struct dentry *dentry = NULL;
2263 struct ccc_object *obj = cl_inode2ccc(page->mapping->host);
2264
2265	/* this can be called inside a spin lock, so use GFP_ATOMIC. */
2266 buf = (char *)__get_free_page(GFP_ATOMIC);
6e16818b 2267 if (buf) {
d7e09d03 2268 dentry = d_find_alias(page->mapping->host);
6e16818b 2269 if (dentry)
1ad581eb 2270 path = dentry_path_raw(dentry, buf, PAGE_SIZE);
2271 }
2272
73b89907 2273 CDEBUG(D_WARNING,
2274 "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
2275 ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
2276 s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
2277 PFID(&obj->cob_header.coh_lu.loh_fid),
2278 (path && !IS_ERR(path)) ? path : "", ioret);
d7e09d03 2279
6e16818b 2280 if (dentry)
2281 dput(dentry);
2282
6e16818b 2283 if (buf)
2284 free_page((unsigned long)buf);
2285}