// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
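/*
 * congestion_kb is expressed in KiB; shifting right by (PAGE_SHIFT - 10)
 * converts it to a page count (e.g. with 4K pages, PAGE_SHIFT == 12, so a
 * congestion_kb of 65536 becomes 65536 >> 2 == 16384 pages).  The "off"
 * threshold is 3/4 of the "on" threshold, giving some hysteresis between
 * setting and clearing bdi congestion.
 */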
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;
	int ret;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private.  Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	page->private = (unsigned long)snapc;
	SetPagePrivate(page);

	ret = __set_page_dirty_nobuffers(page);
	WARN_ON(!PageLocked(page));
	WARN_ON(!page->mapping);

	return ret;
}
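/*
 * In effect, every dirty page now pins one i_wrbuffer_ref and one reference
 * on its snap context (stashed in page->private); both are dropped again
 * from ceph_invalidatepage() or when writeback of the page completes.
 */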

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != PAGE_SIZE) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	ceph_invalidate_fscache_page(inode, page);

	WARN_ON(!PageLocked(page));
	if (!PagePrivate(page))
		return;

	dout("%p invalidatepage %p idx %lu full dirty page\n",
	     inode, page, page->index);

	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
	page->private = 0;
	ClearPagePrivate(page);
}

static int ceph_releasepage(struct page *page, gfp_t g)
{
	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
	     page, page->index, PageDirty(page) ? "" : "not ");

	/* Can we release the page from the cache? */
	if (!ceph_release_fscache_page(page, g))
		return 0;

	return !PagePrivate(page);
}
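/*
 * Note that a page whose ->private still points at a snap context is still
 * part of the dirty-page accounting, which is why releasepage only succeeds
 * once PagePrivate has been cleared.
 */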

/* read a single page, without unlocking it. */
static int ceph_do_readpage(struct file *filp, struct page *page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	struct ceph_vino vino = ceph_vino(inode);
	int err = 0;
	u64 off = page_offset(page);
	u64 len = PAGE_SIZE;

	if (off >= i_size_read(inode)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0)
			return -EINVAL;
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	err = ceph_readpage_from_fscache(inode, page);
	if (err == 0)
		return -EINPROGRESS;

	dout("readpage ino %llx.%llx file %p off %llu len %llu page %p index %lu\n",
	     vino.ino, vino.snap, filp, off, len, page, page->index);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);

	err = ceph_osdc_start_request(osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(osdc, req);

	ceph_update_read_latency(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, err);

	ceph_osdc_put_request(req);
	dout("readpage result %d\n", err);

	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		ceph_fscache_readpage_cancel(inode, page);
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		goto out;
	}
	if (err < PAGE_SIZE)
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_SIZE);
	else
		flush_dcache_page(page);

	SetPageUptodate(page);
	ceph_readpage_to_fscache(inode, page);

out:
	return err < 0 ? err : 0;
}

static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = ceph_do_readpage(filp, page);
	if (r != -EINPROGRESS)
		unlock_page(page);
	else
		r = 0;
	return r;
}
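/*
 * -EINPROGRESS from ceph_do_readpage() means the page was handed off to
 * fscache, which will unlock it when the read completes; in that case the
 * page must not be unlocked (or treated as an error) here.
 */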

/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_data *osd_data;
	int rc = req->r_result <= 0 ? req->r_result : 0;
	int bytes = req->r_result >= 0 ? req->r_result : 0;
	int num_pages;
	int i;

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);
	if (rc == -EBLOCKLISTED)
		ceph_inode_to_client(inode)->blocklisted = true;

	/* unlock all pages, zeroing any data we didn't read */
	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
				   (u64)osd_data->length);
	for (i = 0; i < num_pages; i++) {
		struct page *page = osd_data->pages[i];

		if (rc < 0 && rc != -ENOENT) {
			ceph_fscache_readpage_cancel(inode, page);
			goto unlock;
		}
		if (bytes < (int)PAGE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		ceph_readpage_to_fscache(inode, page);
unlock:
		unlock_page(page);
		put_page(page);
		bytes -= PAGE_SIZE;
	}

	ceph_update_read_latency(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, rc);

	kfree(osd_data->pages);
}

/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
 */
static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
		      struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = lru_to_page(page_list);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int got = 0;
	int ret = 0;

	if (!rw_ctx) {
		/* caller of readpages does not hold buffer and read caps
		 * (fadvise, madvise and readahead cases) */
		int want = CEPH_CAP_FILE_CACHE;
		ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want,
					true, &got);
		if (ret < 0) {
			dout("start_read %p, error getting cap\n", inode);
		} else if (!(got & want)) {
			dout("start_read %p, no cache cap\n", inode);
			ret = 0;
		}
		if (ret <= 0) {
			if (got)
				ceph_put_cap_refs(ci, got);
			while (!list_empty(page_list)) {
				page = lru_to_page(page_list);
				list_del(&page->lru);
				put_page(page);
			}
			return ret;
		}
	}

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);
	vino = ceph_vino(inode);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
				    0, 1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	/* build page vector */
	nr_pages = calc_pages_for(0, len);
	pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_put;
	}
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_KERNEL)) {
			ceph_fscache_uncache_page(inode, page);
			put_page(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			if (nr_pages > 0) {
				len = nr_pages << PAGE_SHIFT;
				osd_req_op_extent_update(req, 0, len);
				break;
			}
			goto out_pages;
		}
		pages[i] = page;
	}
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_read;
	req->r_inode = inode;

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);

	/* After adding locked pages to page cache, the inode holds cache cap.
	 * So we can drop our cap refs. */
	if (got)
		ceph_put_cap_refs(ci, got);

	return nr_pages;

out_pages:
	for (i = 0; i < nr_pages; ++i) {
		ceph_fscache_readpage_cancel(inode, pages[i]);
		unlock_page(pages[i]);
	}
	ceph_put_page_vector(pages, nr_pages, false);
out_put:
	ceph_osdc_put_request(req);
out:
	if (got)
		ceph_put_cap_refs(ci, got);
	return ret;
}
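/*
 * Note on cap references: readahead callers do not come in holding Fc, so
 * start_read() takes a temporary CEPH_CAP_FILE_CACHE reference above and
 * drops it once the locked pages are in the page cache (at which point the
 * inode itself holds the cache cap).
 */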


/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_file_info *fi = file->private_data;
	struct ceph_rw_context *rw_ctx;
	int rc = 0;
	int max = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return -EINVAL;

	rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
					 &nr_pages);

	if (rc == 0)
		goto out;

	rw_ctx = ceph_find_rw_context(fi);
	max = fsc->mount_options->rsize >> PAGE_SHIFT;
	dout("readpages %p file %p ctx %p nr_pages %d max %d\n",
	     inode, file, rw_ctx, nr_pages, max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, rw_ctx, page_list, max);
		if (rc < 0)
			goto out;
	}
out:
	ceph_fscache_readpages_cancel(inode, page_list);

	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}

struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};
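/*
 * ceph_writeback_ctl carries a consistent view of i_size and the truncate
 * seq/size for the snap context being written back, filled in once under
 * i_ceph_lock by get_oldest_context() so the writeback paths don't have to
 * re-derive them for every page; head_snapc says whether it describes the
 * live (head) context.
 */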

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = page_snap_context(page);
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);

	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > page_offset(page) + PAGE_SIZE)
		end = page_offset(page) + PAGE_SIZE;
	return end > start ? end - start : 0;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err;
	loff_t len = PAGE_SIZE;
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;

	dout("writepage %p idx %lu\n", page, page->index);

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		dout("%p page eof %llu\n", page, ceph_wbc.i_size);
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
	     inode, page, page->index, page_off, len, snapc, snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	set_page_writeback(page);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
				    true);
	if (IS_ERR(req)) {
		redirty_page_for_writepage(wbc, page);
		end_page_writeback(page);
		return PTR_ERR(req);
	}

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > PAGE_SIZE);
	osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);
	dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(osdc, req, true);
	if (!err)
		err = ceph_osdc_wait_request(osdc, req);

	ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, err);

	ceph_osdc_put_request(req);
	if (err == 0)
		err = len;

	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLOCKLISTED)
			fsc->blocklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, rc);

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
			break;

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				clear_bdi_congested(inode_to_bdi(inode),
						    BLK_RW_ASYNC);

			ceph_put_snap_context(page_snap_context(page));
			page->private = 0;
			ClearPagePrivate(page);
			dout("unlocking %p\n", page);
			end_page_writeback(page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}
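/*
 * The pages themselves are released per write op via release_pages() above;
 * the page array that carried them is freed (or returned to
 * ceph_wb_pagevec_pool) only once at the end, since every op in the request
 * points into the same allocation.
 */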

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool done = false;

	dout("writepages_start %p (mode=%s)\n", inode,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited(
				"writepage_start %p %lld forced umount\n",
				inode, ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	pagevec_init(&pvec);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			dout(" cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			dout(" not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}. Dirty pages
		 * in that range can be associated with newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' get written */
		if (index > 0)
			should_loop = true;
		dout(" non-head snapc, range whole\n");
	}

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, pvec_pages, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;
		bool from_pool = false;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
						      end, PAGECACHE_TAG_DIRTY);
		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				dout("page snapc %p %lld != oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				if (!should_loop &&
				    !ceph_wbc.head_snapc &&
				    wbc->sync_mode != WB_SYNC_NONE)
					should_loop = true;
				unlock_page(page);
				continue;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				dout("%p page eof %llu\n",
				     page, ceph_wbc.i_size);
				if ((ceph_wbc.size_stable ||
				    page_offset(page) >= i_size_read(inode)) &&
				    clear_page_dirty_for_io(page))
					mapping->a_ops->invalidatepage(page,
								0, PAGE_SIZE);
				unlock_page(page);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					dout("%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;
				u32 xlen;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				ceph_calc_file_object_mapping(&ci->i_layout,
							      offset, wsize,
							      &objnum, &objoff,
							      &xlen);
				len = xlen;

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc_array(max_pages,
						      sizeof(*pages),
						      GFP_NOFS);
				if (!pages) {
					from_pool = true;
					pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (from_pool ?  CEPH_OSD_SLAB_OPS :
							     CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in pvec */
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(inode_to_bdi(inode),
						  BLK_RW_ASYNC);
			}


			pages[locked_pages++] = page;
			pvec.pages[i] = NULL;

			len += PAGE_SIZE;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			unsigned j, n = 0;
			/* shift unused page to beginning of pvec */
			for (j = 0; j < pvec_pages; j++) {
				if (!pvec.pages[j])
					continue;
				if (n < j)
					pvec.pages[n] = pvec.pages[j];
				n++;
			}
			pvec.nr = n;

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_release(&pvec);
				goto get_more_pages;
			}
		}

new_request:
		offset = page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					&ci->i_layout, vino,
					offset, &len, 0, num_ops,
					CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					snapc, ceph_wbc.truncate_seq,
					ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						&ci->i_layout, vino,
						offset, &len, 0,
						min(num_ops,
						    CEPH_OSD_SLAB_OPS),
						CEPH_OSD_OP_WRITE,
						CEPH_OSD_FLAG_WRITE,
						snapc, ceph_wbc.truncate_seq,
						ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
			     PAGE_SIZE - offset);

		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			u64 cur_offset = page_offset(pages[i]);
			if (offset + len != cur_offset) {
				if (op_idx + 1 == req->r_num_ops)
					break;
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				dout("writepages got pages at %llu~%llu\n",
				     offset, len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							from_pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(pages[i]);
			len += PAGE_SIZE;
		}

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - PAGE_SIZE;
			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		dout("writepages got pages at %llu~%llu\n", offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, from_pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		from_pool = false;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc_array(locked_pages, sizeof(*pages),
					      GFP_NOFS);
			if (!pages) {
				from_pool = true;
				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode->i_mtime;
		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = true;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;
			index = 0;
			while ((index <= end) &&
			       (nr = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_WRITEBACK))) {
				for (i = 0; i < nr; i++) {
					page = pvec.pages[i];
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				pagevec_release(&pvec);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	dout("writepages dend - startone, rc = %d\n", rc);
	return rc;
}
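/*
 * The retry loop above is what enforces snap-ordered writeback: once all
 * dirty pages belonging to the oldest snap context have been written,
 * get_oldest_context() hands back the next capsnap (and eventually the head
 * context), and the whole sweep restarts from index 0.
 */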


/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/**
 * ceph_find_incompatible - find an incompatible context and return it
 * @page: page being dirtied
 *
 * We are only allowed to write into/dirty a page if the page is
 * clean, or already dirty within the same snap context. Returns a
 * conflicting context if there is one, NULL if there isn't, or a
 * negative error code on other errors.
 *
 * Must be called with page lock held.
 */
static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
		dout(" page %p forced umount\n", page);
		return ERR_PTR(-EIO);
	}

	for (;;) {
		struct ceph_snap_context *snapc, *oldest;

		wait_on_page_writeback(page);

		snapc = page_snap_context(page);
		if (!snapc || snapc == ci->i_head_snapc)
			break;

		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			/* not writeable -- return it for the caller to deal with */
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n", page, snapc);
			return ceph_get_snap_context(snapc);
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n", page, snapc);
		if (clear_page_dirty_for_io(page)) {
			int r = writepage_nounlock(page, NULL);
			if (r < 0)
				return ERR_PTR(r);
		}
	}
	return NULL;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct page *page = NULL;
	pgoff_t index = pos >> PAGE_SHIFT;
	int pos_in_page = pos & ~PAGE_MASK;
	int r = 0;

	dout("write_begin file %p inode %p page %p %d~%d\n", file, inode, page, (int)pos, (int)len);

	for (;;) {
		page = grab_cache_page_write_begin(mapping, index, flags);
		if (!page) {
			r = -ENOMEM;
			break;
		}

		snapc = ceph_find_incompatible(page);
		if (snapc) {
			if (IS_ERR(snapc)) {
				r = PTR_ERR(snapc);
				break;
			}
			unlock_page(page);
			put_page(page);
			page = NULL;
			ceph_queue_writeback(inode);
			r = wait_event_killable(ci->i_cap_wq,
						context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r != 0)
				break;
			continue;
		}

		if (PageUptodate(page)) {
			dout(" page %p already uptodate\n", page);
			break;
		}

		/*
		 * In some cases we don't need to read at all:
		 * - full page write
		 * - write that lies completely beyond EOF
		 * - write that covers the page from start to EOF or beyond it
		 */
		if ((pos_in_page == 0 && len == PAGE_SIZE) ||
		    (pos >= i_size_read(inode)) ||
		    (pos_in_page == 0 && (pos + len) >= i_size_read(inode))) {
			zero_user_segments(page, 0, pos_in_page,
					   pos_in_page + len, PAGE_SIZE);
			break;
		}

		/*
		 * We need to read it. If we get back -EINPROGRESS, then the page was
		 * handed off to fscache and it will be unlocked when the read completes.
		 * Refind the page in that case so we can reacquire the page lock. Otherwise
		 * we got a hard error or the read was completed synchronously.
		 */
		r = ceph_do_readpage(file, page);
		if (r != -EINPROGRESS)
			break;
	}

	if (r < 0) {
		if (page) {
			unlock_page(page);
			put_page(page);
		}
	} else {
		*pagep = page;
	}
	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	bool check_cap = false;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}
		SetPageUptodate(page);
	}

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	set_page_dirty(page);

out:
	unlock_page(page);
	put_page(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};

static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
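/*
 * These helpers block every signal except SIGKILL for the duration of a page
 * fault, so that waiting for caps in the fault handlers below can only be
 * interrupted by a fatal signal.
 */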

/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct page *pinned_page = NULL;
	loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	ceph_block_sigs(&oldset);

	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
	     inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1,
			    &got, &pinned_page);
	if (err < 0)
		goto out_restore;

	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    ci->i_inline_version == CEPH_INLINE_NONE) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
		dout("filemap_fault %p %llu~%zd drop cap refs %s ret %x\n",
		     inode, off, (size_t)PAGE_SIZE,
		     ceph_cap_string(got), ret);
	} else
		err = -EAGAIN;

	if (pinned_page)
		put_page(pinned_page);
	ceph_put_cap_refs(ci, got);

	if (err != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		struct address_space *mapping = inode->i_mapping;
		struct page *page = find_or_create_page(mapping, 0,
						mapping_gfp_constraint(mapping,
						~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			ret = vmf_error(err);
			goto out_inline;
		}
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		dout("filemap_fault %p %llu~%zd read inline data ret %x\n",
		     inode, off, (size_t)PAGE_SIZE, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (err < 0)
		ret = vmf_error(err);

	return ret;
}

/*
 * Reuse write_begin here for simplicity.
 */
static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	sb_start_pagefault(inode->i_sb);
	ceph_block_sigs(&oldset);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		struct page *locked_page = NULL;
		if (off == 0) {
			lock_page(page);
			locked_page = page;
		}
		err = ceph_uninline_data(vma->vm_file, locked_page);
		if (locked_page)
			unlock_page(locked_page);
		if (err < 0)
			goto out_free;
	}

	if (off + PAGE_SIZE <= size)
		len = PAGE_SIZE;
	else
		len = size & ~PAGE_MASK;

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len,
			    &got, NULL);
	if (err < 0)
		goto out_free;

	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);
	inode_inc_iversion_raw(inode);

	do {
		struct ceph_snap_context *snapc;

		lock_page(page);

		if (page_mkwrite_check_truncate(page, inode) < 0) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		snapc = ceph_find_incompatible(page);
		if (!snapc) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
			break;
		}

		unlock_page(page);

		if (IS_ERR(snapc)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}

		ceph_queue_writeback(inode);
		err = wait_event_killable(ci->i_cap_wq,
				context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
	} while (err == 0);

	if (ret == VM_FAULT_LOCKED ||
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	sb_end_pagefault(inode->i_sb);
	ceph_free_cap_flush(prealloc_cf);
	if (err < 0)
		ret = vmf_error(err);
	return ret;
}

void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}
1718
28127bdd
YZ
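/*
 * Write the inline data out to the first RADOS object so the file can be
 * handled with regular object I/O from then on.  The object is created
 * first; the data write is then guarded by a CMPXATTR check on the
 * "inline_version" xattr (which is also updated) so that stale inline data
 * cannot overwrite newer object contents.  The caller subsequently marks
 * the inode as CEPH_INLINE_NONE.
 */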
1719int ceph_uninline_data(struct file *filp, struct page *locked_page)
1720{
1721 struct inode *inode = file_inode(filp);
1722 struct ceph_inode_info *ci = ceph_inode(inode);
1723 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1724 struct ceph_osd_request *req;
1725 struct page *page = NULL;
1726 u64 len, inline_version;
1727 int err = 0;
1728 bool from_pagecache = false;
1729
1730 spin_lock(&ci->i_ceph_lock);
1731 inline_version = ci->i_inline_version;
1732 spin_unlock(&ci->i_ceph_lock);
1733
1734 dout("uninline_data %p %llx.%llx inline_version %llu\n",
1735 inode, ceph_vinop(inode), inline_version);
1736
1737 if (inline_version == 1 || /* initial version, no data */
1738 inline_version == CEPH_INLINE_NONE)
1739 goto out;
1740
1741 if (locked_page) {
1742 page = locked_page;
1743 WARN_ON(!PageUptodate(page));
1744 } else if (ceph_caps_issued(ci) &
1745 (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
1746 page = find_get_page(inode->i_mapping, 0);
1747 if (page) {
1748 if (PageUptodate(page)) {
1749 from_pagecache = true;
1750 lock_page(page);
1751 } else {
09cbfeaf 1752 put_page(page);
28127bdd
YZ
1753 page = NULL;
1754 }
1755 }
1756 }
1757
1758 if (page) {
1759 len = i_size_read(inode);
09cbfeaf
KS
1760 if (len > PAGE_SIZE)
1761 len = PAGE_SIZE;
28127bdd
YZ
1762 } else {
1763 page = __page_cache_alloc(GFP_NOFS);
1764 if (!page) {
1765 err = -ENOMEM;
1766 goto out;
1767 }
1768 err = __ceph_do_getattr(inode, page,
1769 CEPH_STAT_CAP_INLINE_DATA, true);
1770 if (err < 0) {
1771 /* no inline data */
1772 if (err == -ENODATA)
1773 err = 0;
1774 goto out;
1775 }
1776 len = err;
1777 }
1778
1779 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1780 ceph_vino(inode), 0, &len, 0, 1,
54ea0046 1781 CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
34b759b4 1782 NULL, 0, 0, false);
28127bdd
YZ
1783 if (IS_ERR(req)) {
1784 err = PTR_ERR(req);
1785 goto out;
1786 }
1787
fac02ddf 1788 req->r_mtime = inode->i_mtime;
28127bdd
YZ
1789 err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1790 if (!err)
1791 err = ceph_osdc_wait_request(&fsc->client->osdc, req);
1792 ceph_osdc_put_request(req);
1793 if (err < 0)
1794 goto out;
1795
1796 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1797 ceph_vino(inode), 0, &len, 1, 3,
54ea0046 1798 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
34b759b4
ID
1799 NULL, ci->i_truncate_seq,
1800 ci->i_truncate_size, false);
28127bdd
YZ
1801 if (IS_ERR(req)) {
1802 err = PTR_ERR(req);
1803 goto out;
1804 }
1805
1806 osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);
1807
ec137c10
YZ
1808 {
1809 __le64 xattr_buf = cpu_to_le64(inline_version);
1810 err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
1811 "inline_version", &xattr_buf,
1812 sizeof(xattr_buf),
1813 CEPH_OSD_CMPXATTR_OP_GT,
1814 CEPH_OSD_CMPXATTR_MODE_U64);
1815 if (err)
1816 goto out_put;
1817 }
1818
1819 {
1820 char xattr_buf[32];
1821 int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
1822 "%llu", inline_version);
1823 err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
1824 "inline_version",
1825 xattr_buf, xattr_len, 0, 0);
1826 if (err)
1827 goto out_put;
1828 }
28127bdd 1829
fac02ddf 1830 req->r_mtime = inode->i_mtime;
28127bdd
YZ
1831 err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1832 if (!err)
1833 err = ceph_osdc_wait_request(&fsc->client->osdc, req);
97e27aaa
XL
1834
1835 ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
1836 req->r_end_latency, err);
1837
28127bdd
YZ
1838out_put:
1839 ceph_osdc_put_request(req);
1840 if (err == -ECANCELED)
1841 err = 0;
1842out:
1843 if (page && page != locked_page) {
1844 if (from_pagecache) {
1845 unlock_page(page);
09cbfeaf 1846 put_page(page);
28127bdd
YZ
1847 } else
1848 __free_pages(page, 0);
1849 }
1850
1851 dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
1852 inode, ceph_vinop(inode), inline_version, err);
1853 return err;
1854}
1855
7cbea8dc 1856static const struct vm_operations_struct ceph_vmops = {
61f68816 1857 .fault = ceph_filemap_fault,
1d3576fd
SW
1858 .page_mkwrite = ceph_page_mkwrite,
1859};
1860
1861int ceph_mmap(struct file *file, struct vm_area_struct *vma)
1862{
1863 struct address_space *mapping = file->f_mapping;
1864
1865 if (!mapping->a_ops->readpage)
1866 return -ENOEXEC;
1867 file_accessed(file);
1868 vma->vm_ops = &ceph_vmops;
1d3576fd
SW
1869 return 0;
1870}
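For reference, a minimal userspace sketch of how the hooks installed above get
exercised (the mount point and file name are assumptions, not part of this
file): mapping a CephFS file MAP_SHARED and storing to it faults through
ceph_filemap_fault() and ceph_page_mkwrite() before the dirtied page is
written back.

/* Illustrative userspace example only -- not part of fs/ceph/addr.c. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* assumed: a non-empty file on a CephFS mount */
	int fd = open("/mnt/cephfs/example.txt", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	p[0] = 'X';               /* write fault -> ceph_page_mkwrite() */
	msync(p, 4096, MS_SYNC);  /* force writeback of the dirtied page */

	munmap(p, 4096);
	close(fd);
	return 0;
}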
10183a69
YZ
1871
1872enum {
1873 POOL_READ = 1,
1874 POOL_WRITE = 2,
1875};
1876
779fe0fb
YZ
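/*
 * Look up (or establish) the client's access rights for a data pool and
 * namespace.  If nothing is cached in mdsc->pool_perm_tree, probe the pool
 * by issuing a STAT read and an exclusive CREATE write against the inode's
 * first object, interpret the results as POOL_READ/POOL_WRITE bits, and
 * cache them in the tree.
 */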
1877static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
1878 s64 pool, struct ceph_string *pool_ns)
10183a69
YZ
1879{
1880 struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
1881 struct ceph_mds_client *mdsc = fsc->mdsc;
1882 struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
1883 struct rb_node **p, *parent;
1884 struct ceph_pool_perm *perm;
1885 struct page **pages;
779fe0fb 1886 size_t pool_ns_len;
10183a69
YZ
1887 int err = 0, err2 = 0, have = 0;
1888
1889 down_read(&mdsc->pool_perm_rwsem);
1890 p = &mdsc->pool_perm_tree.rb_node;
1891 while (*p) {
1892 perm = rb_entry(*p, struct ceph_pool_perm, node);
1893 if (pool < perm->pool)
1894 p = &(*p)->rb_left;
1895 else if (pool > perm->pool)
1896 p = &(*p)->rb_right;
1897 else {
779fe0fb
YZ
1898 int ret = ceph_compare_string(pool_ns,
1899 perm->pool_ns,
1900 perm->pool_ns_len);
1901 if (ret < 0)
1902 p = &(*p)->rb_left;
1903 else if (ret > 0)
1904 p = &(*p)->rb_right;
1905 else {
1906 have = perm->perm;
1907 break;
1908 }
10183a69
YZ
1909 }
1910 }
1911 up_read(&mdsc->pool_perm_rwsem);
1912 if (*p)
1913 goto out;
1914
779fe0fb
YZ
1915 if (pool_ns)
1916 dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
1917 pool, (int)pool_ns->len, pool_ns->str);
1918 else
1919 dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
10183a69
YZ
1920
1921 down_write(&mdsc->pool_perm_rwsem);
779fe0fb 1922 p = &mdsc->pool_perm_tree.rb_node;
10183a69
YZ
1923 parent = NULL;
1924 while (*p) {
1925 parent = *p;
1926 perm = rb_entry(parent, struct ceph_pool_perm, node);
1927 if (pool < perm->pool)
1928 p = &(*p)->rb_left;
1929 else if (pool > perm->pool)
1930 p = &(*p)->rb_right;
1931 else {
779fe0fb
YZ
1932 int ret = ceph_compare_string(pool_ns,
1933 perm->pool_ns,
1934 perm->pool_ns_len);
1935 if (ret < 0)
1936 p = &(*p)->rb_left;
1937 else if (ret > 0)
1938 p = &(*p)->rb_right;
1939 else {
1940 have = perm->perm;
1941 break;
1942 }
10183a69
YZ
1943 }
1944 }
1945 if (*p) {
1946 up_write(&mdsc->pool_perm_rwsem);
1947 goto out;
1948 }
1949
34b759b4 1950 rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
10183a69
YZ
1951 1, false, GFP_NOFS);
1952 if (!rd_req) {
1953 err = -ENOMEM;
1954 goto out_unlock;
1955 }
1956
1957 rd_req->r_flags = CEPH_OSD_FLAG_READ;
1958 osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
1959 rd_req->r_base_oloc.pool = pool;
779fe0fb
YZ
1960 if (pool_ns)
1961 rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
d30291b9 1962 ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);
10183a69 1963
13d1ad16
ID
1964 err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
1965 if (err)
1966 goto out_unlock;
10183a69 1967
34b759b4 1968 wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
10183a69
YZ
1969 1, false, GFP_NOFS);
1970 if (!wr_req) {
1971 err = -ENOMEM;
1972 goto out_unlock;
1973 }
1974
54ea0046 1975 wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
10183a69 1976 osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
63244fa1 1977 ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
d30291b9 1978 ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);
10183a69 1979
13d1ad16
ID
1980 err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
1981 if (err)
1982 goto out_unlock;
10183a69
YZ
1983
1984 /* one page should be large enough for STAT data */
1985 pages = ceph_alloc_page_vector(1, GFP_KERNEL);
1986 if (IS_ERR(pages)) {
1987 err = PTR_ERR(pages);
1988 goto out_unlock;
1989 }
1990
1991 osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
1992 0, false, true);
10183a69
YZ
1993 err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
1994
fac02ddf 1995 wr_req->r_mtime = ci->vfs_inode.i_mtime;
10183a69
YZ
1996 err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
1997
1998 if (!err)
1999 err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
2000 if (!err2)
2001 err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
2002
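	/* -ENOENT still proves read access: the object simply doesn't exist */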
2003 if (err >= 0 || err == -ENOENT)
2004 have |= POOL_READ;
131d7eb4 2005 else if (err != -EPERM) {
0b98acd6
ID
2006 if (err == -EBLOCKLISTED)
2007 fsc->blocklisted = true;
10183a69 2008 goto out_unlock;
131d7eb4 2009 }
10183a69
YZ
2010
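	/* -EEXIST still proves write access: the object already existed */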
2011 if (err2 == 0 || err2 == -EEXIST)
2012 have |= POOL_WRITE;
2013 else if (err2 != -EPERM) {
0b98acd6
ID
2014 if (err2 == -EBLOCKLISTED)
2015 fsc->blocklisted = true;
10183a69
YZ
2016 err = err2;
2017 goto out_unlock;
2018 }
2019
779fe0fb
YZ
2020 pool_ns_len = pool_ns ? pool_ns->len : 0;
2021 perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
10183a69
YZ
2022 if (!perm) {
2023 err = -ENOMEM;
2024 goto out_unlock;
2025 }
2026
2027 perm->pool = pool;
2028 perm->perm = have;
779fe0fb
YZ
2029 perm->pool_ns_len = pool_ns_len;
2030 if (pool_ns_len > 0)
2031 memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
2032 perm->pool_ns[pool_ns_len] = 0;
2033
10183a69
YZ
2034 rb_link_node(&perm->node, parent, p);
2035 rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
2036 err = 0;
2037out_unlock:
2038 up_write(&mdsc->pool_perm_rwsem);
2039
3ed97d63
ID
2040 ceph_osdc_put_request(rd_req);
2041 ceph_osdc_put_request(wr_req);
10183a69
YZ
2042out:
2043 if (!err)
2044 err = have;
779fe0fb
YZ
2045 if (pool_ns)
2046 dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
2047 pool, (int)pool_ns->len, pool_ns->str, err);
2048 else
2049 dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
10183a69
YZ
2050 return err;
2051}
2052
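/*
 * Check (and cache on the inode as CEPH_I_POOL_* flags) whether the file's
 * data pool allows the access implied by @need.  The check is skipped for
 * snapshot inodes and when the "nopoolperm" mount option is set.
 */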
5e3ded1b 2053int ceph_pool_perm_check(struct inode *inode, int need)
10183a69 2054{
5e3ded1b 2055 struct ceph_inode_info *ci = ceph_inode(inode);
779fe0fb 2056 struct ceph_string *pool_ns;
5e3ded1b 2057 s64 pool;
10183a69
YZ
2058 int ret, flags;
2059
80e80fbb
YZ
2060 if (ci->i_vino.snap != CEPH_NOSNAP) {
2061 /*
2062 * Pool permission check needs to write to the first object.
 2063 * But for snapshots, the head of the first object may have already
 2064 * been deleted. Skip the check to avoid creating an orphan object.
2065 */
2066 return 0;
2067 }
2068
5e3ded1b 2069 if (ceph_test_mount_opt(ceph_inode_to_client(inode),
10183a69
YZ
2070 NOPOOLPERM))
2071 return 0;
2072
2073 spin_lock(&ci->i_ceph_lock);
2074 flags = ci->i_ceph_flags;
7627151e 2075 pool = ci->i_layout.pool_id;
10183a69
YZ
2076 spin_unlock(&ci->i_ceph_lock);
2077check:
2078 if (flags & CEPH_I_POOL_PERM) {
2079 if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
7627151e 2080 dout("ceph_pool_perm_check pool %lld no read perm\n",
10183a69
YZ
2081 pool);
2082 return -EPERM;
2083 }
2084 if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
7627151e 2085 dout("ceph_pool_perm_check pool %lld no write perm\n",
10183a69
YZ
2086 pool);
2087 return -EPERM;
2088 }
2089 return 0;
2090 }
2091
779fe0fb
YZ
2092 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
2093 ret = __ceph_pool_perm_get(ci, pool, pool_ns);
2094 ceph_put_string(pool_ns);
10183a69
YZ
2095 if (ret < 0)
2096 return ret;
2097
2098 flags = CEPH_I_POOL_PERM;
2099 if (ret & POOL_READ)
2100 flags |= CEPH_I_POOL_RD;
2101 if (ret & POOL_WRITE)
2102 flags |= CEPH_I_POOL_WR;
2103
2104 spin_lock(&ci->i_ceph_lock);
779fe0fb
YZ
2105 if (pool == ci->i_layout.pool_id &&
2106 pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
2107 ci->i_ceph_flags |= flags;
10183a69 2108 } else {
7627151e 2109 pool = ci->i_layout.pool_id;
10183a69
YZ
2110 flags = ci->i_ceph_flags;
2111 }
2112 spin_unlock(&ci->i_ceph_lock);
2113 goto check;
2114}
2115
2116void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
2117{
2118 struct ceph_pool_perm *perm;
2119 struct rb_node *n;
2120
2121 while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
2122 n = rb_first(&mdsc->pool_perm_tree);
2123 perm = rb_entry(n, struct ceph_pool_perm, node);
2124 rb_erase(n, &mdsc->pool_perm_tree);
2125 kfree(perm);
2126 }
2127}