/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl_object implementation for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */
37 #define DEBUG_SUBSYSTEM S_LLITE
39 #include "../../include/linux/libcfs/libcfs.h"
41 #include "../include/obd.h"
42 #include "../include/lustre_lite.h"
44 #include "llite_internal.h"
45 #include "vvp_internal.h"
47 /*****************************************************************************
53 int vvp_object_invariant(const struct cl_object
*obj
)
55 struct inode
*inode
= vvp_object_inode(obj
);
56 struct ll_inode_info
*lli
= ll_i2info(inode
);
58 return (S_ISREG(inode
->i_mode
) || inode
->i_mode
== 0) &&
62 static int vvp_object_print(const struct lu_env
*env
, void *cookie
,
63 lu_printer_t p
, const struct lu_object
*o
)
65 struct vvp_object
*obj
= lu2vvp(o
);
66 struct inode
*inode
= obj
->vob_inode
;
67 struct ll_inode_info
*lli
;
69 (*p
)(env
, cookie
, "(%s %d %d) inode: %p ",
70 list_empty(&obj
->vob_pending_list
) ? "-" : "+",
71 obj
->vob_transient_pages
, atomic_read(&obj
->vob_mmap_cnt
),
74 lli
= ll_i2info(inode
);
75 (*p
)(env
, cookie
, "%lu/%u %o %u %d %p "DFID
,
76 inode
->i_ino
, inode
->i_generation
, inode
->i_mode
,
77 inode
->i_nlink
, atomic_read(&inode
->i_count
),
78 lli
->lli_clob
, PFID(&lli
->lli_fid
));
83 static int vvp_attr_get(const struct lu_env
*env
, struct cl_object
*obj
,
86 struct inode
*inode
= vvp_object_inode(obj
);
89 * lov overwrites most of these fields in
90 * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
91 * attributes are newer.
94 attr
->cat_size
= i_size_read(inode
);
95 attr
->cat_mtime
= inode
->i_mtime
.tv_sec
;
96 attr
->cat_atime
= inode
->i_atime
.tv_sec
;
97 attr
->cat_ctime
= inode
->i_ctime
.tv_sec
;
98 attr
->cat_blocks
= inode
->i_blocks
;
99 attr
->cat_uid
= from_kuid(&init_user_ns
, inode
->i_uid
);
100 attr
->cat_gid
= from_kgid(&init_user_ns
, inode
->i_gid
);
101 /* KMS is not known by this layer */
102 return 0; /* layers below have to fill in the rest */
105 static int vvp_attr_set(const struct lu_env
*env
, struct cl_object
*obj
,
106 const struct cl_attr
*attr
, unsigned valid
)
108 struct inode
*inode
= vvp_object_inode(obj
);
111 inode
->i_uid
= make_kuid(&init_user_ns
, attr
->cat_uid
);
113 inode
->i_gid
= make_kgid(&init_user_ns
, attr
->cat_gid
);
114 if (valid
& CAT_ATIME
)
115 inode
->i_atime
.tv_sec
= attr
->cat_atime
;
116 if (valid
& CAT_MTIME
)
117 inode
->i_mtime
.tv_sec
= attr
->cat_mtime
;
118 if (valid
& CAT_CTIME
)
119 inode
->i_ctime
.tv_sec
= attr
->cat_ctime
;
120 if (0 && valid
& CAT_SIZE
)
121 i_size_write(inode
, attr
->cat_size
);
122 /* not currently necessary */
123 if (0 && valid
& (CAT_UID
| CAT_GID
| CAT_SIZE
))
124 mark_inode_dirty(inode
);
128 static int vvp_conf_set(const struct lu_env
*env
, struct cl_object
*obj
,
129 const struct cl_object_conf
*conf
)
131 struct ll_inode_info
*lli
= ll_i2info(conf
->coc_inode
);
133 if (conf
->coc_opc
== OBJECT_CONF_INVALIDATE
) {
134 CDEBUG(D_VFSTRACE
, DFID
": losing layout lock\n",
135 PFID(&lli
->lli_fid
));
137 ll_layout_version_set(lli
, LL_LAYOUT_GEN_NONE
);
139 /* Clean up page mmap for this inode.
140 * The reason for us to do this is that if the page has
141 * already been installed into memory space, the process
142 * can access it without interacting with lustre, so this
143 * page may be stale due to layout change, and the process
144 * will never be notified.
145 * This operation is expensive but mmap processes have to pay
146 * a price themselves.
148 unmap_mapping_range(conf
->coc_inode
->i_mapping
,
149 0, OBD_OBJECT_EOF
, 0);
154 if (conf
->coc_opc
!= OBJECT_CONF_SET
)
157 if (conf
->u
.coc_md
&& conf
->u
.coc_md
->lsm
) {
158 CDEBUG(D_VFSTRACE
, DFID
": layout version change: %u -> %u\n",
159 PFID(&lli
->lli_fid
), lli
->lli_layout_gen
,
160 conf
->u
.coc_md
->lsm
->lsm_layout_gen
);
162 lli
->lli_has_smd
= lsm_has_objects(conf
->u
.coc_md
->lsm
);
163 ll_layout_version_set(lli
, conf
->u
.coc_md
->lsm
->lsm_layout_gen
);
165 CDEBUG(D_VFSTRACE
, DFID
": layout nuked: %u.\n",
166 PFID(&lli
->lli_fid
), lli
->lli_layout_gen
);
168 lli
->lli_has_smd
= false;
169 ll_layout_version_set(lli
, LL_LAYOUT_GEN_EMPTY
);
174 static int vvp_prune(const struct lu_env
*env
, struct cl_object
*obj
)
176 struct inode
*inode
= vvp_object_inode(obj
);
179 rc
= cl_sync_file_range(inode
, 0, OBD_OBJECT_EOF
, CL_FSYNC_LOCAL
, 1);
181 CDEBUG(D_VFSTRACE
, DFID
": writeback failed: %d\n",
182 PFID(lu_object_fid(&obj
->co_lu
)), rc
);
186 truncate_inode_pages(inode
->i_mapping
, 0);
190 static int vvp_object_glimpse(const struct lu_env
*env
,
191 const struct cl_object
*obj
, struct ost_lvb
*lvb
)
193 struct inode
*inode
= vvp_object_inode(obj
);
195 lvb
->lvb_mtime
= LTIME_S(inode
->i_mtime
);
196 lvb
->lvb_atime
= LTIME_S(inode
->i_atime
);
197 lvb
->lvb_ctime
= LTIME_S(inode
->i_ctime
);
199 * LU-417: Add dirty pages block count lest i_blocks reports 0, some
200 * "cp" or "tar" on remote node may think it's a completely sparse file
203 if (lvb
->lvb_size
> 0 && lvb
->lvb_blocks
== 0)
204 lvb
->lvb_blocks
= dirty_cnt(inode
);
208 static const struct cl_object_operations vvp_ops
= {
209 .coo_page_init
= vvp_page_init
,
210 .coo_lock_init
= vvp_lock_init
,
211 .coo_io_init
= vvp_io_init
,
212 .coo_attr_get
= vvp_attr_get
,
213 .coo_attr_set
= vvp_attr_set
,
214 .coo_conf_set
= vvp_conf_set
,
215 .coo_prune
= vvp_prune
,
216 .coo_glimpse
= vvp_object_glimpse
219 static int vvp_object_init0(const struct lu_env
*env
,
220 struct vvp_object
*vob
,
221 const struct cl_object_conf
*conf
)
223 vob
->vob_inode
= conf
->coc_inode
;
224 vob
->vob_transient_pages
= 0;
225 cl_object_page_init(&vob
->vob_cl
, sizeof(struct vvp_page
));
229 static int vvp_object_init(const struct lu_env
*env
, struct lu_object
*obj
,
230 const struct lu_object_conf
*conf
)
232 struct vvp_device
*dev
= lu2vvp_dev(obj
->lo_dev
);
233 struct vvp_object
*vob
= lu2vvp(obj
);
234 struct lu_object
*below
;
235 struct lu_device
*under
;
238 under
= &dev
->vdv_next
->cd_lu_dev
;
239 below
= under
->ld_ops
->ldo_object_alloc(env
, obj
->lo_header
, under
);
241 const struct cl_object_conf
*cconf
;
243 cconf
= lu2cl_conf(conf
);
244 INIT_LIST_HEAD(&vob
->vob_pending_list
);
245 lu_object_add(obj
, below
);
246 result
= vvp_object_init0(env
, vob
, cconf
);
254 static void vvp_object_free(const struct lu_env
*env
, struct lu_object
*obj
)
256 struct vvp_object
*vob
= lu2vvp(obj
);
259 lu_object_header_fini(obj
->lo_header
);
260 kmem_cache_free(vvp_object_kmem
, vob
);
263 static const struct lu_object_operations vvp_lu_obj_ops
= {
264 .loo_object_init
= vvp_object_init
,
265 .loo_object_free
= vvp_object_free
,
266 .loo_object_print
= vvp_object_print
,
269 struct vvp_object
*cl_inode2vvp(struct inode
*inode
)
271 struct ll_inode_info
*lli
= ll_i2info(inode
);
272 struct cl_object
*obj
= lli
->lli_clob
;
273 struct lu_object
*lu
;
275 lu
= lu_object_locate(obj
->co_lu
.lo_header
, &vvp_device_type
);
280 struct lu_object
*vvp_object_alloc(const struct lu_env
*env
,
281 const struct lu_object_header
*unused
,
282 struct lu_device
*dev
)
284 struct vvp_object
*vob
;
285 struct lu_object
*obj
;
287 vob
= kmem_cache_zalloc(vvp_object_kmem
, GFP_NOFS
);
289 struct cl_object_header
*hdr
;
291 obj
= &vob
->vob_cl
.co_lu
;
292 hdr
= &vob
->vob_header
;
293 cl_object_header_init(hdr
);
294 hdr
->coh_page_bufsize
= cfs_size_round(sizeof(struct cl_page
));
296 lu_object_init(obj
, &hdr
->coh_lu
, dev
);
297 lu_object_add_top(&hdr
->coh_lu
, obj
);
299 vob
->vob_cl
.co_ops
= &vvp_ops
;
300 obj
->lo_ops
= &vvp_lu_obj_ops
;