]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/staging/lustre/lustre/llite/vvp_object.c
e4080ba73bf3da8aa02d3796c0cff0a87f19a3be
[mirror_ubuntu-bionic-kernel.git] / drivers / staging / lustre / lustre / llite / vvp_object.c
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22 /*
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2012, 2015, Intel Corporation.
27 */
28 /*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 *
32 * cl_object implementation for VVP layer.
33 *
34 * Author: Nikita Danilov <nikita.danilov@sun.com>
35 */
36
37 #define DEBUG_SUBSYSTEM S_LLITE
38
39 #include "../../include/linux/libcfs/libcfs.h"
40
41 #include "../include/obd.h"
42 #include "../include/lustre_lite.h"
43
44 #include "llite_internal.h"
45 #include "vvp_internal.h"
46
47 /*****************************************************************************
48 *
49 * Object operations.
50 *
51 */
52
53 int vvp_object_invariant(const struct cl_object *obj)
54 {
55 struct inode *inode = vvp_object_inode(obj);
56 struct ll_inode_info *lli = ll_i2info(inode);
57
58 return (S_ISREG(inode->i_mode) || inode->i_mode == 0) &&
59 lli->lli_clob == obj;
60 }
61
62 static int vvp_object_print(const struct lu_env *env, void *cookie,
63 lu_printer_t p, const struct lu_object *o)
64 {
65 struct vvp_object *obj = lu2vvp(o);
66 struct inode *inode = obj->vob_inode;
67 struct ll_inode_info *lli;
68
69 (*p)(env, cookie, "(%s %d %d) inode: %p ",
70 list_empty(&obj->vob_pending_list) ? "-" : "+",
71 obj->vob_transient_pages, atomic_read(&obj->vob_mmap_cnt),
72 inode);
73 if (inode) {
74 lli = ll_i2info(inode);
75 (*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
76 inode->i_ino, inode->i_generation, inode->i_mode,
77 inode->i_nlink, atomic_read(&inode->i_count),
78 lli->lli_clob, PFID(&lli->lli_fid));
79 }
80 return 0;
81 }
82
83 static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
84 struct cl_attr *attr)
85 {
86 struct inode *inode = vvp_object_inode(obj);
87
88 /*
89 * lov overwrites most of these fields in
90 * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
91 * attributes are newer.
92 */
93
94 attr->cat_size = i_size_read(inode);
95 attr->cat_mtime = inode->i_mtime.tv_sec;
96 attr->cat_atime = inode->i_atime.tv_sec;
97 attr->cat_ctime = inode->i_ctime.tv_sec;
98 attr->cat_blocks = inode->i_blocks;
99 attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
100 attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
101 /* KMS is not known by this layer */
102 return 0; /* layers below have to fill in the rest */
103 }
104
/*
 * cl_object_operations::coo_attr_set() for VVP: copy the attributes
 * selected by the CAT_* bits in @valid from @attr into the VFS inode.
 * Always returns 0.
 */
static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
			const struct cl_attr *attr, unsigned valid)
{
	struct inode *inode = vvp_object_inode(obj);

	if (valid & CAT_UID)
		inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
	if (valid & CAT_GID)
		inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
	if (valid & CAT_ATIME)
		inode->i_atime.tv_sec = attr->cat_atime;
	if (valid & CAT_MTIME)
		inode->i_mtime.tv_sec = attr->cat_mtime;
	if (valid & CAT_CTIME)
		inode->i_ctime.tv_sec = attr->cat_ctime;
	/*
	 * The "0 &&" below intentionally disables this branch: inode size
	 * is not updated from this path.  Kept as dead code to document
	 * what would otherwise belong here.
	 */
	if (0 && valid & CAT_SIZE)
		i_size_write(inode, attr->cat_size);
	/* not currently necessary */
	if (0 && valid & (CAT_UID | CAT_GID | CAT_SIZE))
		mark_inode_dirty(inode);
	return 0;
}
127
/*
 * cl_object_operations::coo_conf_set() for VVP: react to layout
 * configuration changes on the object.
 *
 * OBJECT_CONF_INVALIDATE drops the cached layout generation and tears
 * down existing mmap mappings; OBJECT_CONF_SET records the new layout
 * generation (or marks the layout empty when no stripe metadata is
 * supplied).  All other opcodes are ignored.  Always returns 0.
 */
static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
			const struct cl_object_conf *conf)
{
	struct ll_inode_info *lli = ll_i2info(conf->coc_inode);

	if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
		CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
		       PFID(&lli->lli_fid));

		/* Forget the cached layout generation entirely. */
		ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);

		/* Clean up page mmap for this inode.
		 * The reason for us to do this is that if the page has
		 * already been installed into memory space, the process
		 * can access it without interacting with lustre, so this
		 * page may be stale due to layout change, and the process
		 * will never be notified.
		 * This operation is expensive but mmap processes have to pay
		 * a price themselves.
		 */
		unmap_mapping_range(conf->coc_inode->i_mapping,
				    0, OBD_OBJECT_EOF, 0);

		return 0;
	}

	if (conf->coc_opc != OBJECT_CONF_SET)
		return 0;

	if (conf->u.coc_md && conf->u.coc_md->lsm) {
		/* New stripe metadata: remember whether it has objects and
		 * advance the layout generation to match.
		 */
		CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
		       PFID(&lli->lli_fid), lli->lli_layout_gen,
		       conf->u.coc_md->lsm->lsm_layout_gen);

		lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm);
		ll_layout_version_set(lli, conf->u.coc_md->lsm->lsm_layout_gen);
	} else {
		/* No stripe metadata: the file now has an empty layout. */
		CDEBUG(D_VFSTRACE, DFID ": layout nuked: %u.\n",
		       PFID(&lli->lli_fid), lli->lli_layout_gen);

		lli->lli_has_smd = false;
		ll_layout_version_set(lli, LL_LAYOUT_GEN_EMPTY);
	}
	return 0;
}
173
174 static int vvp_prune(const struct lu_env *env, struct cl_object *obj)
175 {
176 struct inode *inode = vvp_object_inode(obj);
177 int rc;
178
179 rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
180 if (rc < 0) {
181 CDEBUG(D_VFSTRACE, DFID ": writeback failed: %d\n",
182 PFID(lu_object_fid(&obj->co_lu)), rc);
183 return rc;
184 }
185
186 truncate_inode_pages(inode->i_mapping, 0);
187 return 0;
188 }
189
190 static int vvp_object_glimpse(const struct lu_env *env,
191 const struct cl_object *obj, struct ost_lvb *lvb)
192 {
193 struct inode *inode = vvp_object_inode(obj);
194
195 lvb->lvb_mtime = LTIME_S(inode->i_mtime);
196 lvb->lvb_atime = LTIME_S(inode->i_atime);
197 lvb->lvb_ctime = LTIME_S(inode->i_ctime);
198 /*
199 * LU-417: Add dirty pages block count lest i_blocks reports 0, some
200 * "cp" or "tar" on remote node may think it's a completely sparse file
201 * and skip it.
202 */
203 if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
204 lvb->lvb_blocks = dirty_cnt(inode);
205 return 0;
206 }
207
/* cl_object operation vector for the VVP (VFS) layer. */
static const struct cl_object_operations vvp_ops = {
	.coo_page_init = vvp_page_init,
	.coo_lock_init = vvp_lock_init,
	.coo_io_init = vvp_io_init,
	.coo_attr_get = vvp_attr_get,
	.coo_attr_set = vvp_attr_set,
	.coo_conf_set = vvp_conf_set,
	.coo_prune = vvp_prune,
	.coo_glimpse = vvp_object_glimpse
};
218
219 static int vvp_object_init0(const struct lu_env *env,
220 struct vvp_object *vob,
221 const struct cl_object_conf *conf)
222 {
223 vob->vob_inode = conf->coc_inode;
224 vob->vob_transient_pages = 0;
225 cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
226 return 0;
227 }
228
229 static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
230 const struct lu_object_conf *conf)
231 {
232 struct vvp_device *dev = lu2vvp_dev(obj->lo_dev);
233 struct vvp_object *vob = lu2vvp(obj);
234 struct lu_object *below;
235 struct lu_device *under;
236 int result;
237
238 under = &dev->vdv_next->cd_lu_dev;
239 below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
240 if (below) {
241 const struct cl_object_conf *cconf;
242
243 cconf = lu2cl_conf(conf);
244 INIT_LIST_HEAD(&vob->vob_pending_list);
245 lu_object_add(obj, below);
246 result = vvp_object_init0(env, vob, cconf);
247 } else {
248 result = -ENOMEM;
249 }
250
251 return result;
252 }
253
/*
 * lu_object_operations::loo_object_free() for VVP: finalize the object
 * slice and its header, then return the memory to the slab cache.
 * Note the order: the header is finalized after the object, and the
 * containing vvp_object is freed last.
 */
static void vvp_object_free(const struct lu_env *env, struct lu_object *obj)
{
	struct vvp_object *vob = lu2vvp(obj);

	lu_object_fini(obj);
	lu_object_header_fini(obj->lo_header);
	kmem_cache_free(vvp_object_kmem, vob);
}
262
/* lu_object operation vector for the VVP layer. */
static const struct lu_object_operations vvp_lu_obj_ops = {
	.loo_object_init = vvp_object_init,
	.loo_object_free = vvp_object_free,
	.loo_object_print = vvp_object_print,
};
268
269 struct vvp_object *cl_inode2vvp(struct inode *inode)
270 {
271 struct ll_inode_info *lli = ll_i2info(inode);
272 struct cl_object *obj = lli->lli_clob;
273 struct lu_object *lu;
274
275 lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
276 LASSERT(lu);
277 return lu2vvp(lu);
278 }
279
280 struct lu_object *vvp_object_alloc(const struct lu_env *env,
281 const struct lu_object_header *unused,
282 struct lu_device *dev)
283 {
284 struct vvp_object *vob;
285 struct lu_object *obj;
286
287 vob = kmem_cache_zalloc(vvp_object_kmem, GFP_NOFS);
288 if (vob) {
289 struct cl_object_header *hdr;
290
291 obj = &vob->vob_cl.co_lu;
292 hdr = &vob->vob_header;
293 cl_object_header_init(hdr);
294 hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
295
296 lu_object_init(obj, &hdr->coh_lu, dev);
297 lu_object_add_top(&hdr->coh_lu, obj);
298
299 vob->vob_cl.co_ops = &vvp_ops;
300 obj->lo_ops = &vvp_lu_obj_ops;
301 } else {
302 obj = NULL;
303 }
304 return obj;
305 }