// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

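/*
 * Writeback congestion thresholds, converting the congestion_kb mount
 * option into a page count.  The "off" threshold is 3/4 of the "on"
 * threshold, so the congestion state toggles with some hysteresis.
 */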
#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;
	int ret;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private.  Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	page->private = (unsigned long)snapc;
	SetPagePrivate(page);

	ret = __set_page_dirty_nobuffers(page);
	WARN_ON(!PageLocked(page));
	WARN_ON(!page->mapping);

	return ret;
}

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != PAGE_SIZE) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	ceph_invalidate_fscache_page(inode, page);

	WARN_ON(!PageLocked(page));
	if (!PagePrivate(page))
		return;

	ClearPageChecked(page);

	dout("%p invalidatepage %p idx %lu full dirty page\n",
	     inode, page, page->index);

	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
	page->private = 0;
	ClearPagePrivate(page);
}

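/*
 * A page can only be released once fscache is done with it and it no
 * longer carries a snap context reference in page->private.
 */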
static int ceph_releasepage(struct page *page, gfp_t g)
{
	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
	     page, page->index, PageDirty(page) ? "" : "not ");

	/* Can we release the page from the cache? */
	if (!ceph_release_fscache_page(page, g))
		return 0;

	return !PagePrivate(page);
}

/*
 * read a single page, without unlocking it.
 */
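/*
 * Returns 0 or a negative error; -EINPROGRESS means fscache has taken
 * ownership of the read and will complete (and unlock) the page itself.
 */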
static int ceph_do_readpage(struct file *filp, struct page *page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	int err = 0;
	u64 off = page_offset(page);
	u64 len = PAGE_SIZE;

	if (off >= i_size_read(inode)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0)
			return -EINVAL;
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	err = ceph_readpage_from_fscache(inode, page);
	if (err == 0)
		return -EINPROGRESS;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, off, &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1, 0);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		ceph_fscache_readpage_cancel(inode, page);
		if (err == -EBLACKLISTED)
			fsc->blacklisted = true;
		goto out;
	}
	if (err < PAGE_SIZE)
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_SIZE);
	else
		flush_dcache_page(page);

	SetPageUptodate(page);
	ceph_readpage_to_fscache(inode, page);

out:
	return err < 0 ? err : 0;
}

static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = ceph_do_readpage(filp, page);
	if (r != -EINPROGRESS)
		unlock_page(page);
	else
		r = 0;
	return r;
}

/*
 * Finish an async read(ahead) op.
 */
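/*
 * r_result holds the number of bytes actually read on success; any
 * tail of the page run beyond that is zero-filled before the pages
 * are marked uptodate and unlocked.
 */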
static void finish_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_data *osd_data;
	int rc = req->r_result <= 0 ? req->r_result : 0;
	int bytes = req->r_result >= 0 ? req->r_result : 0;
	int num_pages;
	int i;

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);
	if (rc == -EBLACKLISTED)
		ceph_inode_to_client(inode)->blacklisted = true;

	/* unlock all pages, zeroing any data we didn't read */
	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
				   (u64)osd_data->length);
	for (i = 0; i < num_pages; i++) {
		struct page *page = osd_data->pages[i];

		if (rc < 0 && rc != -ENOENT) {
			ceph_fscache_readpage_cancel(inode, page);
			goto unlock;
		}
		if (bytes < (int)PAGE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		ceph_readpage_to_fscache(inode, page);
unlock:
		unlock_page(page);
		put_page(page);
		bytes -= PAGE_SIZE;
	}
	kfree(osd_data->pages);
}

/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
 */
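/*
 * Only a contiguous run of indexes from page_list goes into a single
 * request.  If the caller holds no rw context, a Fc cap reference is
 * taken here and dropped once the locked pages are in the page cache.
 */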
static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
		      struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = lru_to_page(page_list);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int got = 0;
	int ret = 0;

	if (!rw_ctx) {
		/* caller of readpages does not hold buffer and read caps
		 * (fadvise, madvise and readahead cases) */
		int want = CEPH_CAP_FILE_CACHE;
		ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want,
					true, &got);
		if (ret < 0) {
			dout("start_read %p, error getting cap\n", inode);
		} else if (!(got & want)) {
			dout("start_read %p, no cache cap\n", inode);
			ret = 0;
		}
		if (ret <= 0) {
			if (got)
				ceph_put_cap_refs(ci, got);
			while (!list_empty(page_list)) {
				page = lru_to_page(page_list);
				list_del(&page->lru);
				put_page(page);
			}
			return ret;
		}
	}

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);
	vino = ceph_vino(inode);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
				    0, 1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	/* build page vector */
	nr_pages = calc_pages_for(0, len);
	pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_put;
	}
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_KERNEL)) {
			ceph_fscache_uncache_page(inode, page);
			put_page(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			if (nr_pages > 0) {
				len = nr_pages << PAGE_SHIFT;
				osd_req_op_extent_update(req, 0, len);
				break;
			}
			goto out_pages;
		}
		pages[i] = page;
	}
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_read;
	req->r_inode = inode;

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);

	/* After adding locked pages to page cache, the inode holds cache cap.
	 * So we can drop our cap refs. */
	if (got)
		ceph_put_cap_refs(ci, got);

	return nr_pages;

out_pages:
	for (i = 0; i < nr_pages; ++i) {
		ceph_fscache_readpage_cancel(inode, pages[i]);
		unlock_page(pages[i]);
	}
	ceph_put_page_vector(pages, nr_pages, false);
out_put:
	ceph_osdc_put_request(req);
out:
	if (got)
		ceph_put_cap_refs(ci, got);
	return ret;
}

/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_file_info *fi = file->private_data;
	struct ceph_rw_context *rw_ctx;
	int rc = 0;
	int max = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return -EINVAL;

	rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
					 &nr_pages);

	if (rc == 0)
		goto out;

	rw_ctx = ceph_find_rw_context(fi);
	max = fsc->mount_options->rsize >> PAGE_SHIFT;
	dout("readpages %p file %p ctx %p nr_pages %d max %d\n",
	     inode, file, rw_ctx, nr_pages, max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, rw_ctx, page_list, max);
		if (rc < 0)
			goto out;
	}
out:
	ceph_fscache_readpages_cancel(inode, page_list);

	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}

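/*
 * Size/truncate state sampled under i_ceph_lock so that one writeback
 * pass works against a consistent view for its snap context.
 */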
struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

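/*
 * How much of @page, starting at @start, may be written under the
 * page's snap context: bounded by the capsnap's frozen size for
 * snapped pages (live i_size otherwise), then by the end of the page.
 */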
static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = page_snap_context(page);
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);

	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > page_offset(page) + PAGE_SIZE)
		end = page_offset(page) + PAGE_SIZE;
	return end > start ? end - start : 0;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_fs_client *fsc;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err, len = PAGE_SIZE;
	struct ceph_writeback_ctl ceph_wbc;

	dout("writepage %p idx %lu\n", page, page->index);

	inode = page->mapping->host;
	ci = ceph_inode(inode);
	fsc = ceph_inode_to_client(inode);

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		dout("%p page eof %llu\n", page, ceph_wbc.i_size);
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u snapc %p seq %lld\n",
	     inode, page, page->index, page_off, len, snapc, snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	set_page_writeback(page);
	err = ceph_osdc_writepages(&fsc->client->osdc, ceph_vino(inode),
				   &ci->i_layout, snapc, page_off, len,
				   ceph_wbc.truncate_seq,
				   ceph_wbc.truncate_size,
				   &inode->i_mtime, &page, 1);
	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		if (err == -EBLACKLISTED)
			fsc->blacklisted = true;
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);  /* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLACKLISTED)
			fsc->blacklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
			break;

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				clear_bdi_congested(inode_to_bdi(inode),
						    BLK_RW_ASYNC);

			ceph_put_snap_context(page_snap_context(page));
			page->private = 0;
			ClearPagePrivate(page);
			dout("unlocking %p\n", page);
			end_page_writeback(page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages,
			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}

/*
 * initiate async writeback
 */
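/*
 * Find the oldest snap context with dirty data, batch contiguous dirty
 * pages into OSD write ops (splitting at stripe unit boundaries), and
 * loop until that context is clean; only then can newer snap contexts,
 * and finally the head context, be written back.
 */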
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool done = false;

	dout("writepages_start %p (mode=%s)\n", inode,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited(
				"writepage_start %p %lld forced umount\n",
				inode, ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	pagevec_init(&pvec);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			dout(" cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			dout(" not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}. Dirty pages
		 * in that range can be associated with newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' get written */
		if (index > 0)
			should_loop = true;
		dout(" non-head snapc, range whole\n");
	}

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, pvec_pages, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		pvec_pages = pagevec_lookup_range_nr_tag(&pvec, mapping, &index,
						end, PAGECACHE_TAG_DIRTY,
						max_pages - locked_pages);
		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				dout("page snapc %p %lld != oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				if (!should_loop &&
				    !ceph_wbc.head_snapc &&
				    wbc->sync_mode != WB_SYNC_NONE)
					should_loop = true;
				unlock_page(page);
				continue;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				dout("%p page eof %llu\n",
				     page, ceph_wbc.i_size);
				if ((ceph_wbc.size_stable ||
				    page_offset(page) >= i_size_read(inode)) &&
				    clear_page_dirty_for_io(page))
					mapping->a_ops->invalidatepage(page,
								0, PAGE_SIZE);
				unlock_page(page);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					dout("%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
1d3576fd 941 if (locked_pages == 0) {
5b64640c
YZ
942 u64 objnum;
943 u64 objoff;
dccbf080 944 u32 xlen;
5b64640c 945
1d3576fd 946 /* prepare async write request */
e5975c7c 947 offset = (u64)page_offset(page);
dccbf080
ID
948 ceph_calc_file_object_mapping(&ci->i_layout,
949 offset, wsize,
950 &objnum, &objoff,
951 &xlen);
952 len = xlen;
8c71897b 953
3fb99d48 954 num_ops = 1;
5b64640c 955 strip_unit_end = page->index +
09cbfeaf 956 ((len - 1) >> PAGE_SHIFT);
88486957 957
5b64640c 958 BUG_ON(pages);
88486957 959 max_pages = calc_pages_for(0, (u64)len);
6da2ec56
KC
960 pages = kmalloc_array(max_pages,
961 sizeof(*pages),
962 GFP_NOFS);
88486957
AE
963 if (!pages) {
964 pool = fsc->wb_pagevec_pool;
88486957 965 pages = mempool_alloc(pool, GFP_NOFS);
e5975c7c 966 BUG_ON(!pages);
88486957 967 }
5b64640c
YZ
968
969 len = 0;
970 } else if (page->index !=
09cbfeaf 971 (offset + len) >> PAGE_SHIFT) {
5b64640c
YZ
972 if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS :
973 CEPH_OSD_MAX_OPS)) {
974 redirty_page_for_writepage(wbc, page);
975 unlock_page(page);
976 break;
977 }
978
979 num_ops++;
980 offset = (u64)page_offset(page);
981 len = 0;
1d3576fd
SW
982 }
983
984 /* note position of first page in pvec */
1d3576fd
SW
985 dout("%p will write page %p idx %lu\n",
986 inode, page, page->index);
2baba250 987
5b64640c
YZ
988 if (atomic_long_inc_return(&fsc->writeback_count) >
989 CONGESTION_ON_THRESH(
3d14c5d2 990 fsc->mount_options->congestion_kb)) {
09dc9fc2 991 set_bdi_congested(inode_to_bdi(inode),
213c99ee 992 BLK_RW_ASYNC);
2baba250
YS
993 }
994
0713e5f2
YZ
995
996 pages[locked_pages++] = page;
997 pvec.pages[i] = NULL;
998
09cbfeaf 999 len += PAGE_SIZE;
1d3576fd
SW
1000 }
1001
1002 /* did we get anything? */
1003 if (!locked_pages)
1004 goto release_pvec_pages;
1005 if (i) {
0713e5f2
YZ
1006 unsigned j, n = 0;
1007 /* shift unused page to beginning of pvec */
1008 for (j = 0; j < pvec_pages; j++) {
1009 if (!pvec.pages[j])
1010 continue;
1011 if (n < j)
1012 pvec.pages[n] = pvec.pages[j];
1013 n++;
1014 }
1015 pvec.nr = n;
1d3576fd
SW
1016
1017 if (pvec_pages && i == pvec_pages &&
1018 locked_pages < max_pages) {
1019 dout("reached end pvec, trying for more\n");
0713e5f2 1020 pagevec_release(&pvec);
1d3576fd
SW
1021 goto get_more_pages;
1022 }
1d3576fd
SW
1023 }
1024
5b64640c 1025new_request:
e5975c7c 1026 offset = page_offset(pages[0]);
5b64640c
YZ
1027 len = wsize;
1028
1029 req = ceph_osdc_new_request(&fsc->client->osdc,
1030 &ci->i_layout, vino,
1031 offset, &len, 0, num_ops,
1f934b00
YZ
1032 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
1033 snapc, ceph_wbc.truncate_seq,
1034 ceph_wbc.truncate_size, false);
5b64640c
YZ
1035 if (IS_ERR(req)) {
1036 req = ceph_osdc_new_request(&fsc->client->osdc,
1037 &ci->i_layout, vino,
1038 offset, &len, 0,
1039 min(num_ops,
1040 CEPH_OSD_SLAB_OPS),
1041 CEPH_OSD_OP_WRITE,
54ea0046 1042 CEPH_OSD_FLAG_WRITE,
1f934b00
YZ
1043 snapc, ceph_wbc.truncate_seq,
1044 ceph_wbc.truncate_size, true);
5b64640c 1045 BUG_ON(IS_ERR(req));
e1966b49 1046 }
5b64640c 1047 BUG_ON(len < page_offset(pages[locked_pages - 1]) +
09cbfeaf 1048 PAGE_SIZE - offset);
5b64640c
YZ
1049
1050 req->r_callback = writepages_finish;
1051 req->r_inode = inode;
1d3576fd 1052
5b64640c
YZ
1053 /* Format the osd request message and submit the write */
1054 len = 0;
1055 data_pages = pages;
1056 op_idx = 0;
1057 for (i = 0; i < locked_pages; i++) {
1058 u64 cur_offset = page_offset(pages[i]);
1059 if (offset + len != cur_offset) {
3fb99d48 1060 if (op_idx + 1 == req->r_num_ops)
5b64640c
YZ
1061 break;
1062 osd_req_op_extent_dup_last(req, op_idx,
1063 cur_offset - offset);
1064 dout("writepages got pages at %llu~%llu\n",
1065 offset, len);
1066 osd_req_op_extent_osd_data_pages(req, op_idx,
1067 data_pages, len, 0,
a4ce40a9 1068 !!pool, false);
5b64640c 1069 osd_req_op_extent_update(req, op_idx, len);
e5975c7c 1070
5b64640c
YZ
1071 len = 0;
1072 offset = cur_offset;
1073 data_pages = pages + i;
1074 op_idx++;
1075 }
1076
1077 set_page_writeback(pages[i]);
09cbfeaf 1078 len += PAGE_SIZE;
5b64640c
YZ
1079 }
1080
1f934b00
YZ
1081 if (ceph_wbc.size_stable) {
1082 len = min(len, ceph_wbc.i_size - offset);
5b64640c
YZ
1083 } else if (i == locked_pages) {
1084 /* writepages_finish() clears writeback pages
1085 * according to the data length, so make sure
1086 * data length covers all locked pages */
09cbfeaf 1087 u64 min_len = len + 1 - PAGE_SIZE;
1f934b00
YZ
1088 len = get_writepages_data_length(inode, pages[i - 1],
1089 offset);
5b64640c
YZ
1090 len = max(len, min_len);
1091 }
1092 dout("writepages got pages at %llu~%llu\n", offset, len);
e5975c7c 1093
5b64640c
YZ
1094 osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
1095 0, !!pool, false);
1096 osd_req_op_extent_update(req, op_idx, len);
e5975c7c 1097
5b64640c
YZ
1098 BUG_ON(op_idx + 1 != req->r_num_ops);
1099
1100 pool = NULL;
1101 if (i < locked_pages) {
1102 BUG_ON(num_ops <= req->r_num_ops);
1103 num_ops -= req->r_num_ops;
5b64640c
YZ
1104 locked_pages -= i;
1105
1106 /* allocate new pages array for next request */
1107 data_pages = pages;
6da2ec56
KC
1108 pages = kmalloc_array(locked_pages, sizeof(*pages),
1109 GFP_NOFS);
5b64640c
YZ
1110 if (!pages) {
1111 pool = fsc->wb_pagevec_pool;
1112 pages = mempool_alloc(pool, GFP_NOFS);
1113 BUG_ON(!pages);
1114 }
1115 memcpy(pages, data_pages + i,
1116 locked_pages * sizeof(*pages));
1117 memset(data_pages + i, 0,
1118 locked_pages * sizeof(*pages));
1119 } else {
1120 BUG_ON(num_ops != req->r_num_ops);
1121 index = pages[i - 1]->index + 1;
1122 /* request message now owns the pages array */
1123 pages = NULL;
1124 }
e5975c7c 1125
fac02ddf 1126 req->r_mtime = inode->i_mtime;
9d6fcb08
SW
1127 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
1128 BUG_ON(rc);
1d3576fd
SW
1129 req = NULL;
1130
5b64640c
YZ
1131 wbc->nr_to_write -= i;
1132 if (pages)
1133 goto new_request;
1134
2a2d927e
YZ
1135 /*
1136 * We stop writing back only if we are not doing
1137 * integrity sync. In case of integrity sync we have to
1138 * keep going until we have written all the pages
1139 * we tagged for writeback prior to entering this loop.
1140 */
1141 if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
af9cc401 1142 done = true;
1d3576fd
SW
1143
1144release_pvec_pages:
1145 dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
1146 pvec.nr ? pvec.pages[0] : NULL);
1147 pagevec_release(&pvec);
1d3576fd
SW
1148 }
1149
1150 if (should_loop && !done) {
1151 /* more to do; loop back to beginning of file */
1152 dout("writepages looping back to beginning of file\n");
2a2d927e 1153 end = start_index - 1; /* OK even when start_index == 0 */
f275635e
YZ
1154
1155 /* to write dirty pages associated with next snapc,
1156 * we need to wait until current writes complete */
1157 if (wbc->sync_mode != WB_SYNC_NONE &&
1158 start_index == 0 && /* all dirty pages were checked */
1159 !ceph_wbc.head_snapc) {
1160 struct page *page;
1161 unsigned i, nr;
1162 index = 0;
1163 while ((index <= end) &&
1164 (nr = pagevec_lookup_tag(&pvec, mapping, &index,
67fd707f 1165 PAGECACHE_TAG_WRITEBACK))) {
f275635e
YZ
1166 for (i = 0; i < nr; i++) {
1167 page = pvec.pages[i];
1168 if (page_snap_context(page) != snapc)
1169 continue;
1170 wait_on_page_writeback(page);
1171 }
1172 pagevec_release(&pvec);
1173 cond_resched();
1174 }
1175 }
1176
2a2d927e 1177 start_index = 0;
1d3576fd
SW
1178 index = 0;
1179 goto retry;
1180 }
1181
1182 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1183 mapping->writeback_index = index;
1184
1185out:
3ed97d63 1186 ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}


/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
				      loff_t pos, unsigned len,
				      struct page *page)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	loff_t page_off = pos & PAGE_MASK;
	int pos_in_page = pos & ~PAGE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	int r;
	struct ceph_snap_context *snapc, *oldest;

	if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout(" page %p forced umount\n", page);
		unlock_page(page);
		return -EIO;
	}

retry_locked:
	/* writepages currently holds page lock, but if we change that later, */
	wait_on_page_writeback(page);

	snapc = page_snap_context(page);
	if (snapc && snapc != ci->i_head_snapc) {
		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n",
			     page, snapc);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context(snapc);
			unlock_page(page);
			ceph_queue_writeback(inode);
			r = wait_event_killable(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r == -ERESTARTSYS)
				return r;
			return -EAGAIN;
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_unlock;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_SIZE)
		return 0;

	/* past end of file? */
	i_size = i_size_read(inode);

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_SIZE);
		return 0;
	}

	/* we need to read it. */
	r = ceph_do_readpage(file, page);
	if (r < 0) {
		if (r == -EINPROGRESS)
			return -EAGAIN;
		goto fail_unlock;
	}
	goto retry_locked;
fail_unlock:
	unlock_page(page);
	return r;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct page *page;
	pgoff_t index = pos >> PAGE_SHIFT;
	int r;

	do {
		/* get a page */
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			return -ENOMEM;

		dout("write_begin file %p inode %p page %p %d~%d\n", file,
		     inode, page, (int)pos, (int)len);

		r = ceph_update_writeable_page(file, pos, len, page);
		if (r < 0)
			put_page(page);
		else
			*pagep = page;
	} while (r == -EAGAIN);

	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	bool check_cap = false;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}
		SetPageUptodate(page);
	}

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	set_page_dirty(page);

out:
	unlock_page(page);
	put_page(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};

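/*
 * Block every signal except SIGKILL across page-fault I/O, so waits
 * for caps and OSD replies can only be interrupted by a fatal signal.
 */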
static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * vm ops
 */
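/*
 * Fault handler: take Fc cap references, then let the generic
 * filemap_fault fill the page; inline-data inodes are instead served
 * from a page populated via getattr.
 */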
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct page *pinned_page = NULL;
	loff_t off = vmf->pgoff << PAGE_SHIFT;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	ceph_block_sigs(&oldset);

	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
	     inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1,
			    &got, &pinned_page);
	if (err < 0)
		goto out_restore;

	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    ci->i_inline_version == CEPH_INLINE_NONE) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
		dout("filemap_fault %p %llu~%zd drop cap refs %s ret %x\n",
		     inode, off, (size_t)PAGE_SIZE,
		     ceph_cap_string(got), ret);
	} else
		err = -EAGAIN;

	if (pinned_page)
		put_page(pinned_page);
	ceph_put_cap_refs(ci, got);

	if (err != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		struct address_space *mapping = inode->i_mapping;
		struct page *page = find_or_create_page(mapping, 0,
						mapping_gfp_constraint(mapping,
						~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			ret = vmf_error(err);
			goto out_inline;
		}
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		dout("filemap_fault %p %llu~%zd read inline data ret %x\n",
		     inode, off, (size_t)PAGE_SIZE, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (err < 0)
		ret = vmf_error(err);

	return ret;
}

/*
 * Reuse write_begin here for simplicity.
 */
static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	sb_start_pagefault(inode->i_sb);
	ceph_block_sigs(&oldset);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		struct page *locked_page = NULL;
		if (off == 0) {
			lock_page(page);
			locked_page = page;
		}
		err = ceph_uninline_data(vma->vm_file, locked_page);
		if (locked_page)
			unlock_page(locked_page);
		if (err < 0)
			goto out_free;
	}

	if (off + PAGE_SIZE <= size)
		len = PAGE_SIZE;
	else
		len = size & ~PAGE_MASK;

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len,
			    &got, NULL);
	if (err < 0)
		goto out_free;

	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);
	inode_inc_iversion_raw(inode);

	do {
		lock_page(page);

		if ((off > size) || (page->mapping != inode->i_mapping)) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		err = ceph_update_writeable_page(vma->vm_file, off, len, page);
		if (err >= 0) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
		}
	} while (err == -EAGAIN);

	if (ret == VM_FAULT_LOCKED ||
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	sb_end_pagefault(inode->i_sb);
	ceph_free_cap_flush(prealloc_cf);
	if (err < 0)
		ret = vmf_error(err);
	return ret;
}

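/*
 * Copy inline data received from the MDS into page 0 of the mapping
 * (or into @locked_page), zero-fill the rest of the page, and mark it
 * uptodate.
 */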
void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}

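/*
 * Migrate inline data out to the backing objects: create the object,
 * then write the data with a CMPXATTR guard on "inline_version" so a
 * racing uninline of a newer version is never overwritten.
 */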
1660int ceph_uninline_data(struct file *filp, struct page *locked_page)
1661{
1662 struct inode *inode = file_inode(filp);
1663 struct ceph_inode_info *ci = ceph_inode(inode);
1664 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1665 struct ceph_osd_request *req;
1666 struct page *page = NULL;
1667 u64 len, inline_version;
1668 int err = 0;
1669 bool from_pagecache = false;
1670
1671 spin_lock(&ci->i_ceph_lock);
1672 inline_version = ci->i_inline_version;
1673 spin_unlock(&ci->i_ceph_lock);
1674
1675 dout("uninline_data %p %llx.%llx inline_version %llu\n",
1676 inode, ceph_vinop(inode), inline_version);
1677
1678 if (inline_version == 1 || /* initial version, no data */
1679 inline_version == CEPH_INLINE_NONE)
1680 goto out;
1681
1682 if (locked_page) {
1683 page = locked_page;
1684 WARN_ON(!PageUptodate(page));
1685 } else if (ceph_caps_issued(ci) &
1686 (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
1687 page = find_get_page(inode->i_mapping, 0);
1688 if (page) {
1689 if (PageUptodate(page)) {
1690 from_pagecache = true;
1691 lock_page(page);
1692 } else {
09cbfeaf 1693 put_page(page);
28127bdd
YZ
1694 page = NULL;
1695 }
1696 }
1697 }
1698
1699 if (page) {
1700 len = i_size_read(inode);
09cbfeaf
KS
1701 if (len > PAGE_SIZE)
1702 len = PAGE_SIZE;
28127bdd
YZ
1703 } else {
1704 page = __page_cache_alloc(GFP_NOFS);
1705 if (!page) {
1706 err = -ENOMEM;
1707 goto out;
1708 }
1709 err = __ceph_do_getattr(inode, page,
1710 CEPH_STAT_CAP_INLINE_DATA, true);
1711 if (err < 0) {
1712 /* no inline data */
1713 if (err == -ENODATA)
1714 err = 0;
1715 goto out;
1716 }
1717 len = err;
1718 }
1719
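	/*
	 * Two OSD requests follow: a plain CREATE so the first object
	 * exists, then a compound request carrying the data write plus
	 * the xattr ops that record which inline_version was flushed.
	 */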
	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);

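	/*
	 * Op 1 (the extent write) was set up above; ops 0 and 2 below
	 * bracket it with a CMPXATTR guard and a SETXATTR update. The
	 * GT compare means the write is applied only when the version
	 * being flushed is newer than the "inline_version" xattr
	 * already stored on the object.
	 */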
	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
out_put:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out:
	if (page && page != locked_page) {
		if (from_pagecache) {
			unlock_page(page);
			put_page(page);
		} else
			__free_pages(page, 0);
	}

	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
	     inode, ceph_vinop(inode), inline_version, err);
	return err;
}

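/*
 * mmap() support: reads fault in pages via ceph_filemap_fault, and the
 * first write to a read-only page goes through ceph_page_mkwrite above,
 * so the page is dirtied with the proper cap references held.
 */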
static const struct vm_operations_struct ceph_vmops = {
	.fault = ceph_filemap_fault,
	.page_mkwrite = ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}
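
/*
 * ceph_mmap() is not called directly; it is installed as the ->mmap
 * handler in ceph's file_operations (see file.c), along the lines of:
 *
 *	const struct file_operations ceph_file_fops = {
 *		...
 *		.mmap = ceph_mmap,
 *		...
 *	};
 */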

enum {
	POOL_READ = 1,
	POOL_WRITE = 2,
};

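/*
 * Cached pool permissions. Results of past probes are kept in an rbtree
 * (mdsc->pool_perm_tree) keyed by pool id and then by pool namespace, so
 * each (pool, namespace) pair is probed against the OSDs only once. The
 * lookup below is double-checked: a search under the read lock first,
 * then a second search under the write lock before a new node is
 * inserted.
 */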
static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
		     pool, (int)pool_ns->len, pool_ns->str);
	else
		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

	wr_req->r_mtime = ci->vfs_inode.i_mtime;
	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	if (!err2)
		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

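	/*
	 * Interpret the probe results: for the read probe, -ENOENT still
	 * proves read access (the object merely doesn't exist); for the
	 * exclusive-create write probe, -EEXIST still proves write
	 * access. -EPERM in either direction just means the capability
	 * is missing, while any other error aborts the probe.
	 */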
	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM) {
		if (err == -EBLACKLISTED)
			fsc->blacklisted = true;
		goto out_unlock;
	}

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		if (err2 == -EBLACKLISTED)
			fsc->blacklisted = true;
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
		     pool, (int)pool_ns->len, pool_ns->str, err);
	else
		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
	return err;
}

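/*
 * Check that the inode's data pool grants this client the access 'need'
 * (CEPH_CAP_FILE_RD and/or CEPH_CAP_FILE_WR). The result is cached in
 * i_ceph_flags; a layout change while the probe ran is handled by
 * re-reading the pool and looping back to re-check.
 */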
int ceph_pool_perm_check(struct inode *inode, int need)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_string *pool_ns;
	s64 pool;
	int ret, flags;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * The pool permission check needs to write to the first
		 * object. But for a snapshot, the head of the first object
		 * may have already been deleted. Skip the check to avoid
		 * creating an orphan object.
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_client(inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			dout("ceph_pool_perm_check pool %lld no read perm\n",
			     pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			dout("ceph_pool_perm_check pool %lld no write perm\n",
			     pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}

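/*
 * Drop every cached pool-permission entry; invoked when the MDS client
 * is being torn down.
 */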
void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}