/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->task_list);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(monitor->op);

	object = container_of(monitor->op->op.object,
			      struct cachefiles_object, fscache);

	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &monitor->op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(monitor->op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	kenter("{ino=%lx},{%lx,%lx}",
	       object->backer->d_inode->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		kleave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		kleave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		kleave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		kdebug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	kleave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	kleave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	struct pagevec pagevec;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", object->backer->d_inode->i_ino);

	pagevec_init(&pagevec, 0);

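	/* copy at most a small batch of completed reads per pass so that this
	 * worker doesn't monopolise the FS-Cache thread pool (see the
	 * need_resched check at the bottom of the loop) */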
	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		page_cache_release(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		page_cache_release(monitor->netfs_page);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage,
					    struct pagevec *pagevec)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	pagevec_reinit(pagevec);

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = object->backer->d_inode->i_mapping;
	newpage = NULL;

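	/* loop until we either find the backing page already in the backing
	 * inode's page cache or succeed in installing a newly allocated one;
	 * a racing insertion makes add_to_page_cache() return -EEXIST, in
	 * which case we go round again and pick up the winner's page */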
	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = page_cache_alloc_cold(bmapping);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache(newpage, bmapping,
					netpage->index, GFP_KERNEL);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to add it
	 * to the LRU list and start it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

	page_cache_get(backpage);
	pagevec_add(pagevec, backpage);
	__pagevec_lru_add_file(pagevec);

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	page_cache_get(monitor->netfs_page);
	page_cache_get(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;
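	/* from this point the monitor belongs to the read-completion path:
	 * cachefiles_read_waiter() will queue it and cachefiles_read_copier()
	 * will free it, so the cleanup at out: must not touch it */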

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		page_cache_release(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto out;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
	goto out;

nomem_page:
	page_cache_release(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		return -ENOBUFS;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	if (inode->i_sb->s_blocksize > PAGE_SIZE)
		return -ENOBUFS;

	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;
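	/* the data is copied into the netfs page asynchronously:
	 * cachefiles_read_waiter() queues the completed backing page and
	 * cachefiles_read_copier() then runs on the FS-Cache thread pool */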

	pagevec_init(&pagevec, 0);

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block0 = page->index;
	block0 <<= shift;
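	/* e.g. with 4KiB pages (PAGE_SHIFT == 12) on a backing filesystem
	 * using 1KiB blocks (s_blocksize_bits == 10), shift == 2, so page
	 * index 3 corresponds to backing block 12, which bmap() below
	 * translates to an on-disk block number (0 means a hole) */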

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page,
						       &pagevec);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct pagevec lru_pvec;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	pagevec_init(&lru_pvec, 0);

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = page_cache_alloc_cold(bmapping);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache(newpage, bmapping,
						netpage->index, GFP_KERNEL);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need to add it
		 * to the LRU list and start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

		page_cache_get(backpage);
		if (!pagevec_add(&lru_pvec, backpage))
			__pagevec_lru_add_file(&lru_pvec);

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

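		/* the netfs pages arrived on a bare list, so each one has to
		 * be inserted into the netfs inode's page cache here; if a
		 * racing read beat us to it (-EEXIST), that page is simply
		 * dropped and we move on to the next one */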
		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
					GFP_KERNEL);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				continue;
			}
			goto nomem;
		}

		page_cache_get(netpage);
		if (!pagevec_add(&lru_pvec, netpage))
			__pagevec_lru_add_file(&lru_pvec);

		/* install a monitor */
		page_cache_get(netpage);
		monitor->netfs_page = netpage;

		page_cache_get(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		page_cache_release(backpage);
		backpage = NULL;

		page_cache_release(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
					GFP_KERNEL);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		page_cache_release(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		page_cache_get(netpage);
		if (!pagevec_add(&lru_pvec, netpage))
			__pagevec_lru_add_file(&lru_pvec);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		page_cache_release(netpage);
		netpage = NULL;
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	pagevec_lru_add_file(&lru_pvec);

	if (newpage)
		page_cache_release(newpage);
	if (netpage)
		page_cache_release(netpage);
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		page_cache_release(netpage);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto out;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto out;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		return -ENOBUFS;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	if (inode->i_sb->s_blocksize > PAGE_SIZE)
		return -ENOBUFS;

	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec, 0);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			ret = -ENODATA;
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec, 0);

		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	mm_segment_t old_fs;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(S_ISREG(object->backer->d_inode->i_mode));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
	} else {
		ret = -EIO;
		if (file->f_op->write) {
			pos = (loff_t) page->index << PAGE_SHIFT;

			/* we mustn't write more data than we have, so we have
			 * to beware of a partial page at EOF */
			eof = object->fscache.store_limit_l;
			len = PAGE_SIZE;
			if (eof & ~PAGE_MASK) {
				ASSERTCMP(pos, <, eof);
				if (eof - pos < PAGE_SIZE) {
					_debug("cut short %llx to %llx",
					       pos, eof);
					len = eof - pos;
					ASSERTCMP(pos + len, ==, eof);
				}
			}
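			/* e.g. with 4KiB pages, a store limit of 0x1a00 and
			 * this page starting at pos 0x1000, only the final
			 * len = 0xa00 bytes are written out */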
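			/* ->write() takes a __user pointer, so the address
			 * limit is temporarily widened with set_fs(KERNEL_DS)
			 * to let it accept the kernel-mapped page buffer */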
			data = kmap(page);
			old_fs = get_fs();
			set_fs(KERNEL_DS);
			ret = file->f_op->write(
				file, (const void __user *) data, len, &pos);
			set_fs(old_fs);
			kunmap(page);
			if (ret != len)
				ret = -EIO;
		}
		fput(file);
	}

	if (ret < 0) {
		if (ret == -EIO)
			cachefiles_io_error_obj(
				object, "Write page to backing file failed");
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;

	object = container_of(_object, struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lu}", object, page->index);

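	/* the cookie lock is taken by the FS-Cache caller and is released
	 * here on its behalf */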
	spin_unlock(&object->fscache.cookie->lock);
}