]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - fs/ceph/cache.c
337f88673ed9f71a83ea39080bdcd726eb116a15
[mirror_ubuntu-bionic-kernel.git] / fs / ceph / cache.c
1 /*
2 * Ceph cache definitions.
3 *
4 * Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
5 * Written by Milosz Tanski (milosz@adfin.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to:
18 * Free Software Foundation
19 * 51 Franklin Street, Fifth Floor
20 * Boston, MA 02111-1301 USA
21 *
22 */
23
24 #include "super.h"
25 #include "cache.h"
26
/*
 * Auxiliary coherency data stored with each cached inode.  It is built in
 * ceph_fscache_inode_get_aux() and compared bytewise (memcmp of the whole
 * struct) in ceph_fscache_inode_check_aux(), so the layout must not change
 * without invalidating existing cache contents.
 */
struct ceph_aux_inode {
	u64 version;		/* ci->i_version when the data was cached */
	struct timespec mtime;	/* inode modification time */
	loff_t size;		/* i_size at cache time */
};
32
/*
 * Top-level fscache netfs definition for ceph; registered once at module
 * init via ceph_fscache_register() and the root of all ceph cookies.
 */
struct fscache_netfs ceph_cache_netfs = {
	.name		= "ceph",
	.version	= 0,
};
37
38 static DEFINE_MUTEX(ceph_fscache_lock);
39 static LIST_HEAD(ceph_fscache_list);
40
41 struct ceph_fscache_entry {
42 struct list_head list;
43 struct fscache_cookie *fscache;
44 struct ceph_fsid fsid;
45 size_t uniq_len;
46 char uniquifier[0];
47 };
48
49 static uint16_t ceph_fscache_session_get_key(const void *cookie_netfs_data,
50 void *buffer, uint16_t maxbuf)
51 {
52 const struct ceph_fs_client* fsc = cookie_netfs_data;
53 const char *fscache_uniq = fsc->mount_options->fscache_uniq;
54 uint16_t fsid_len, uniq_len;
55
56 fsid_len = sizeof(fsc->client->fsid);
57 uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
58 if (fsid_len + uniq_len > maxbuf)
59 return 0;
60
61 memcpy(buffer, &fsc->client->fsid, fsid_len);
62 if (uniq_len)
63 memcpy(buffer + fsid_len, fscache_uniq, uniq_len);
64
65 return fsid_len + uniq_len;
66 }
67
/* Cookie definition for the per-filesystem (fsid) index object. */
static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
	.name = "CEPH.fsid",
	.type = FSCACHE_COOKIE_TYPE_INDEX,
	.get_key = ceph_fscache_session_get_key,
};
73
/*
 * Register the ceph netfs with fscache (module init time).
 * Returns 0 or a negative errno from fscache_register_netfs().
 */
int ceph_fscache_register(void)
{
	return fscache_register_netfs(&ceph_cache_netfs);
}
78
/* Unregister the ceph netfs from fscache (module exit time). */
void ceph_fscache_unregister(void)
{
	fscache_unregister_netfs(&ceph_cache_netfs);
}
83
/*
 * Acquire a per-filesystem fscache cookie for this mount.
 *
 * Two mounts of the same cluster (same fsid) may not share a cookie unless
 * they are distinguished by a "fsc=<uniquifier>" mount option, so we first
 * scan ceph_fscache_list for an existing fsid+uniquifier match and fail
 * with -EBUSY if one is found.
 *
 * Returns 0 on success (and on cookie-acquisition failure, which is
 * deliberately non-fatal — see comment below) or -EBUSY/-ENOMEM.
 */
int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
{
	const struct ceph_fsid *fsid = &fsc->client->fsid;
	const char *fscache_uniq = fsc->mount_options->fscache_uniq;
	size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
	struct ceph_fscache_entry *ent;
	int err = 0;

	mutex_lock(&ceph_fscache_lock);
	/* Reject a duplicate fsid + uniquifier registration. */
	list_for_each_entry(ent, &ceph_fscache_list, list) {
		if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
			continue;
		if (ent->uniq_len != uniq_len)
			continue;
		if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
			continue;

		pr_err("fscache cookie already registered for fsid %pU\n", fsid);
		pr_err("  use fsc=%%s mount option to specify a uniquifier\n");
		err = -EBUSY;
		goto out_unlock;
	}

	/* sizeof(*ent) + uniq_len: uniquifier is stored inline at the tail. */
	ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
	if (!ent) {
		err = -ENOMEM;
		goto out_unlock;
	}

	fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
					      &ceph_fscache_fsid_object_def,
					      fsc, true);

	if (fsc->fscache) {
		memcpy(&ent->fsid, fsid, sizeof(*fsid));
		if (uniq_len > 0) {
			memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
			ent->uniq_len = uniq_len;
		}
		ent->fscache = fsc->fscache;
		list_add_tail(&ent->list, &ceph_fscache_list);
	} else {
		/* Acquisition failed: mount proceeds without a cache. */
		kfree(ent);
		pr_err("unable to register fscache cookie for fsid %pU\n",
		       fsid);
		/* all other fs ignore this error */
	}
out_unlock:
	mutex_unlock(&ceph_fscache_lock);
	return err;
}
135
136 static uint16_t ceph_fscache_inode_get_key(const void *cookie_netfs_data,
137 void *buffer, uint16_t maxbuf)
138 {
139 const struct ceph_inode_info* ci = cookie_netfs_data;
140 uint16_t klen;
141
142 /* use ceph virtual inode (id + snapshot) */
143 klen = sizeof(ci->i_vino);
144 if (klen > maxbuf)
145 return 0;
146
147 memcpy(buffer, &ci->i_vino, klen);
148 return klen;
149 }
150
151 static uint16_t ceph_fscache_inode_get_aux(const void *cookie_netfs_data,
152 void *buffer, uint16_t bufmax)
153 {
154 struct ceph_aux_inode aux;
155 const struct ceph_inode_info* ci = cookie_netfs_data;
156 const struct inode* inode = &ci->vfs_inode;
157
158 memset(&aux, 0, sizeof(aux));
159 aux.version = ci->i_version;
160 aux.mtime = inode->i_mtime;
161 aux.size = i_size_read(inode);
162
163 memcpy(buffer, &aux, sizeof(aux));
164
165 return sizeof(aux);
166 }
167
168 static void ceph_fscache_inode_get_attr(const void *cookie_netfs_data,
169 uint64_t *size)
170 {
171 const struct ceph_inode_info* ci = cookie_netfs_data;
172 *size = i_size_read(&ci->vfs_inode);
173 }
174
/*
 * fscache check_aux callback: decide whether previously cached data for
 * this inode is still usable by rebuilding the aux blob from the live
 * inode and comparing it bytewise against what the cache stored.
 */
static enum fscache_checkaux ceph_fscache_inode_check_aux(
	void *cookie_netfs_data, const void *data, uint16_t dlen)
{
	struct ceph_aux_inode aux;
	struct ceph_inode_info* ci = cookie_netfs_data;
	struct inode* inode = &ci->vfs_inode;

	/* Size mismatch means a different aux format: treat as stale. */
	if (dlen != sizeof(aux))
		return FSCACHE_CHECKAUX_OBSOLETE;

	/*
	 * memset before filling fields so struct padding compares equal
	 * under the whole-struct memcmp below (get_aux does the same).
	 */
	memset(&aux, 0, sizeof(aux));
	aux.version = ci->i_version;
	aux.mtime = inode->i_mtime;
	aux.size = i_size_read(inode);

	if (memcmp(data, &aux, sizeof(aux)) != 0)
		return FSCACHE_CHECKAUX_OBSOLETE;

	dout("ceph inode 0x%p cached okay", ci);
	return FSCACHE_CHECKAUX_OKAY;
}
196
/*
 * fscache now_uncached callback: the backing cache object is gone, so
 * clear PG_fscache on every page of the inode's mapping.  Walks the
 * mapping in pagevec-sized batches to bound memory use and latency.
 */
static void ceph_fscache_inode_now_uncached(void* cookie_netfs_data)
{
	struct ceph_inode_info* ci = cookie_netfs_data;
	struct pagevec pvec;
	pgoff_t first;
	int loop, nr_pages;

	pagevec_init(&pvec, 0);
	first = 0;

	dout("ceph inode 0x%p now uncached", ci);

	while (1) {
		nr_pages = pagevec_lookup(&pvec, ci->vfs_inode.i_mapping, first,
					  PAGEVEC_SIZE - pagevec_count(&pvec));

		if (!nr_pages)
			break;

		for (loop = 0; loop < nr_pages; loop++)
			ClearPageFsCache(pvec.pages[loop]);

		/* Resume the lookup just past the last page we saw. */
		first = pvec.pages[nr_pages - 1]->index + 1;

		/* pvec.nr must reflect the lookup count before release. */
		pvec.nr = nr_pages;
		pagevec_release(&pvec);
		cond_resched();
	}
}
226
/* Cookie definition for per-inode data-file objects. */
static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
	.name		= "CEPH.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key	= ceph_fscache_inode_get_key,
	.get_attr	= ceph_fscache_inode_get_attr,
	.get_aux	= ceph_fscache_inode_get_aux,
	.check_aux	= ceph_fscache_inode_check_aux,
	.now_uncached	= ceph_fscache_inode_now_uncached,
};
236
/*
 * Acquire (once) the per-inode fscache cookie.  No-op if the filesystem
 * has no cache, or the inode is not a regular file.  The cookie is
 * acquired disabled (enable == false); it is enabled later from
 * ceph_fscache_file_set_cookie() when the file is opened read-only.
 */
void ceph_fscache_register_inode_cookie(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);

	/* No caching for filesystem */
	if (fsc->fscache == NULL)
		return;

	/* Only cache for regular files that are read only */
	if (!S_ISREG(inode->i_mode))
		return;

	/* I_MUTEX_CHILD: lockdep nesting class, caller may hold the parent */
	inode_lock_nested(inode, I_MUTEX_CHILD);
	if (!ci->fscache) {
		ci->fscache = fscache_acquire_cookie(fsc->fscache,
					&ceph_fscache_inode_object_def,
					ci, false);
	}
	inode_unlock(inode);
}
258
259 void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
260 {
261 struct fscache_cookie* cookie;
262
263 if ((cookie = ci->fscache) == NULL)
264 return;
265
266 ci->fscache = NULL;
267
268 fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
269 fscache_relinquish_cookie(cookie, 0);
270 }
271
/*
 * fscache can_enable predicate: caching may only be enabled while nobody
 * holds the inode open for write.
 */
static bool ceph_fscache_can_enable(void *data)
{
	struct inode *inode = data;

	if (inode_is_open_for_write(inode))
		return false;
	return true;
}
277
278 void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
279 {
280 struct ceph_inode_info *ci = ceph_inode(inode);
281
282 if (!fscache_cookie_valid(ci->fscache))
283 return;
284
285 if (inode_is_open_for_write(inode)) {
286 dout("fscache_file_set_cookie %p %p disabling cache\n",
287 inode, filp);
288 fscache_disable_cookie(ci->fscache, false);
289 fscache_uncache_all_inode_pages(ci->fscache, inode);
290 } else {
291 fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable,
292 inode);
293 if (fscache_cookie_enabled(ci->fscache)) {
294 dout("fscache_file_set_cookie %p %p enabling cache\n",
295 inode, filp);
296 }
297 }
298 }
299
/*
 * Completion for fscache_read_or_alloc_page(s): mark the page up to date
 * on success and unlock it either way.
 */
static void ceph_readpage_from_fscache_complete(struct page *page, void *data,
						int error)
{
	if (error == 0)
		SetPageUptodate(page);

	unlock_page(page);
}
307
/*
 * The cache contents are valid only while the fscache generation matches
 * the read-cache generation (bumped when cached data is invalidated).
 */
static inline bool cache_valid(struct ceph_inode_info *ci)
{
	return ci->i_fscache_gen == ci->i_rdcache_gen;
}
312
313
314 /* Atempt to read from the fscache,
315 *
316 * This function is called from the readpage_nounlock context. DO NOT attempt to
317 * unlock the page here (or in the callback).
318 */
319 int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
320 {
321 struct ceph_inode_info *ci = ceph_inode(inode);
322 int ret;
323
324 if (!cache_valid(ci))
325 return -ENOBUFS;
326
327 ret = fscache_read_or_alloc_page(ci->fscache, page,
328 ceph_readpage_from_fscache_complete, NULL,
329 GFP_KERNEL);
330
331 switch (ret) {
332 case 0: /* Page found */
333 dout("page read submitted\n");
334 return 0;
335 case -ENOBUFS: /* Pages were not found, and can't be */
336 case -ENODATA: /* Pages were not found */
337 dout("page/inode not in cache\n");
338 return ret;
339 default:
340 dout("%s: unknown error ret = %i\n", __func__, ret);
341 return ret;
342 }
343 }
344
345 int ceph_readpages_from_fscache(struct inode *inode,
346 struct address_space *mapping,
347 struct list_head *pages,
348 unsigned *nr_pages)
349 {
350 struct ceph_inode_info *ci = ceph_inode(inode);
351 int ret;
352
353 if (!cache_valid(ci))
354 return -ENOBUFS;
355
356 ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
357 ceph_readpage_from_fscache_complete,
358 NULL, mapping_gfp_mask(mapping));
359
360 switch (ret) {
361 case 0: /* All pages found */
362 dout("all-page read submitted\n");
363 return 0;
364 case -ENOBUFS: /* Some pages were not found, and can't be */
365 case -ENODATA: /* some pages were not found */
366 dout("page/inode not in cache\n");
367 return ret;
368 default:
369 dout("%s: unknown error ret = %i\n", __func__, ret);
370 return ret;
371 }
372 }
373
374 void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
375 {
376 struct ceph_inode_info *ci = ceph_inode(inode);
377 int ret;
378
379 if (!PageFsCache(page))
380 return;
381
382 if (!cache_valid(ci))
383 return;
384
385 ret = fscache_write_page(ci->fscache, page, GFP_KERNEL);
386 if (ret)
387 fscache_uncache_page(ci->fscache, page);
388 }
389
390 void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
391 {
392 struct ceph_inode_info *ci = ceph_inode(inode);
393
394 if (!PageFsCache(page))
395 return;
396
397 fscache_wait_on_page_write(ci->fscache, page);
398 fscache_uncache_page(ci->fscache, page);
399 }
400
/*
 * Tear down the per-filesystem fscache cookie at unmount: remove and free
 * the matching ceph_fscache_entry (added by ceph_fscache_register_fs),
 * then relinquish the cookie.
 */
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
	if (fscache_cookie_valid(fsc->fscache)) {
		struct ceph_fscache_entry *ent;
		bool found = false;

		mutex_lock(&ceph_fscache_lock);
		list_for_each_entry(ent, &ceph_fscache_list, list) {
			if (ent->fscache == fsc->fscache) {
				list_del(&ent->list);
				kfree(ent);
				found = true;
				break;
			}
		}
		/* Every valid cookie should have a list entry. */
		WARN_ON_ONCE(!found);
		mutex_unlock(&ceph_fscache_lock);

		/*
		 * NOTE(review): calls the double-underscore variant directly;
		 * presumably because cookie validity was already checked
		 * above — confirm against the fscache wrapper's definition.
		 */
		__fscache_relinquish_cookie(fsc->fscache, 0);
	}
	fsc->fscache = NULL;
}
423
/*
 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
 */
void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
	/* Fast path: generations already match, nothing to do. */
	if (cache_valid(ci))
		return;

	/* reuse i_truncate_mutex. There should be no pending
	 * truncate while the caller holds CEPH_CAP_FILE_RD */
	mutex_lock(&ci->i_truncate_mutex);
	/* Re-check under the mutex: another task may have revalidated. */
	if (!cache_valid(ci)) {
		/* Drop cached data whose aux no longer matches the inode. */
		if (fscache_check_consistency(ci->fscache))
			fscache_invalidate(ci->fscache);
		spin_lock(&ci->i_ceph_lock);
		ci->i_fscache_gen = ci->i_rdcache_gen;
		spin_unlock(&ci->i_ceph_lock);
	}
	mutex_unlock(&ci->i_truncate_mutex);
}