// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/osd_client.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty.  (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped), writing the most recently dirtied pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
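
/*
 * An illustrative walk-through of the accounting above, with made-up
 * numbers:
 *
 *   1. Three pages are dirtied against the head context:
 *        i_wrbuffer_ref == i_wrbuffer_ref_head == 3
 *   2. A snapshot is taken.  The head count moves to the new capsnap:
 *        capsnap->dirty == 3, i_wrbuffer_ref_head == 0,
 *        i_wrbuffer_ref == 3 (it still counts all dirty pages)
 *   3. One more page is dirtied, now against the new head context:
 *        i_wrbuffer_ref == 4, i_wrbuffer_ref_head == 1
 *   4. Writeback flushes the capsnap's three pages first (snap order),
 *      then the one head page.
 */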

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
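
/*
 * For example (assuming 4K pages, i.e. PAGE_SHIFT == 12, and a
 * hypothetical congestion_kb of 8192): the "on" threshold is
 * 8192 >> 2 == 2048 dirty pages, and the "off" threshold is
 * 2048 - (2048 >> 2) == 1536 pages, giving 25% hysteresis so the
 * bdi does not flap between congested and uncongested.
 */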

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;
	int ret;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private.  Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	page->private = (unsigned long)snapc;
	SetPagePrivate(page);

	ret = __set_page_dirty_nobuffers(page);
	WARN_ON(!PageLocked(page));
	WARN_ON(!page->mapping);

	return ret;
}
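
/*
 * Note the lifetime rule implied above: the reference that
 * ceph_set_page_dirty() takes on the snap context and stashes in
 * page->private is dropped in exactly one place, either
 * ceph_invalidatepage() (the page is thrown away while still dirty)
 * or one of the writeback completion paths below (writepage_nounlock()
 * or writepages_finish()), each of which also releases the matching
 * wrbuffer cap reference.
 */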

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != PAGE_SIZE) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	ceph_invalidate_fscache_page(inode, page);

	WARN_ON(!PageLocked(page));
	if (!PagePrivate(page))
		return;

	ClearPageChecked(page);

	dout("%p invalidatepage %p idx %lu full dirty page\n",
	     inode, page, page->index);

	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
	page->private = 0;
	ClearPagePrivate(page);
}

static int ceph_releasepage(struct page *page, gfp_t g)
{
	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
	     page, page->index, PageDirty(page) ? "" : "not ");

	/* Can we release the page from the cache? */
	if (!ceph_release_fscache_page(page, g))
		return 0;

	return !PagePrivate(page);
}

/*
 * read a single page, without unlocking it.
 */
static int ceph_do_readpage(struct file *filp, struct page *page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int err = 0;
	u64 off = page_offset(page);
	u64 len = PAGE_SIZE;

	if (off >= i_size_read(inode)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0)
			return -EINVAL;
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	err = ceph_readpage_from_fscache(inode, page);
	if (err == 0)
		return -EINPROGRESS;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				  off, &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1, 0);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		ceph_fscache_readpage_cancel(inode, page);
		goto out;
	}
	if (err < PAGE_SIZE)
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_SIZE);
	else
		flush_dcache_page(page);

	SetPageUptodate(page);
	ceph_readpage_to_fscache(inode, page);

out:
	return err < 0 ? err : 0;
}

static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = ceph_do_readpage(filp, page);
	if (r != -EINPROGRESS)
		unlock_page(page);
	else
		r = 0;
	return r;
}

/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_data *osd_data;
	int rc = req->r_result <= 0 ? req->r_result : 0;
	int bytes = req->r_result >= 0 ? req->r_result : 0;
	int num_pages;
	int i;

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);

	/* unlock all pages, zeroing any data we didn't read */
	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
				   (u64)osd_data->length);
	for (i = 0; i < num_pages; i++) {
		struct page *page = osd_data->pages[i];

		if (rc < 0 && rc != -ENOENT) {
			ceph_fscache_readpage_cancel(inode, page);
			goto unlock;
		}
		if (bytes < (int)PAGE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		ceph_readpage_to_fscache(inode, page);
unlock:
		unlock_page(page);
		put_page(page);
		bytes -= PAGE_SIZE;
	}
	kfree(osd_data->pages);
}
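
/*
 * Worked example of the zero-fill loop above (hypothetical numbers,
 * 4K pages): a 3-page read that returns 5000 bytes leaves page 0
 * untouched (bytes == 5000), zeroes page 1 from byte 904 onward
 * (bytes == 904), and zeroes page 2 entirely (bytes == -3192, so
 * s == 0).  All three pages are still marked uptodate.
 */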

/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
 */
static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
		      struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = list_entry(page_list->prev, struct page, lru);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int got = 0;
	int ret = 0;

	if (!rw_ctx) {
		/* caller of readpages does not hold buffer and read caps
		 * (fadvise, madvise and readahead cases) */
		int want = CEPH_CAP_FILE_CACHE;
		ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got);
		if (ret < 0) {
			dout("start_read %p, error getting cap\n", inode);
		} else if (!(got & want)) {
			dout("start_read %p, no cache cap\n", inode);
			ret = 0;
		}
		if (ret <= 0) {
			if (got)
				ceph_put_cap_refs(ci, got);
			while (!list_empty(page_list)) {
				page = list_entry(page_list->prev,
						  struct page, lru);
				list_del(&page->lru);
				put_page(page);
			}
			return ret;
		}
	}

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);
	vino = ceph_vino(inode);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
				    0, 1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	/* build page vector */
	nr_pages = calc_pages_for(0, len);
	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_put;
	}
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_KERNEL)) {
			ceph_fscache_uncache_page(inode, page);
			put_page(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			if (nr_pages > 0) {
				len = nr_pages << PAGE_SHIFT;
				osd_req_op_extent_update(req, 0, len);
				break;
			}
			goto out_pages;
		}
		pages[i] = page;
	}
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_read;
	req->r_inode = inode;

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);

	/* After adding locked pages to page cache, the inode holds cache cap.
	 * So we can drop our cap refs. */
	if (got)
		ceph_put_cap_refs(ci, got);

	return nr_pages;

out_pages:
	for (i = 0; i < nr_pages; ++i) {
		ceph_fscache_readpage_cancel(inode, pages[i]);
		unlock_page(pages[i]);
	}
	ceph_put_page_vector(pages, nr_pages, false);
out_put:
	ceph_osdc_put_request(req);
out:
	if (got)
		ceph_put_cap_refs(ci, got);
	return ret;
}


/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_file_info *ci = file->private_data;
	struct ceph_rw_context *rw_ctx;
	int rc = 0;
	int max = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return -EINVAL;

	rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
					 &nr_pages);

	if (rc == 0)
		goto out;

	rw_ctx = ceph_find_rw_context(ci);
	max = fsc->mount_options->rsize >> PAGE_SHIFT;
	dout("readpages %p file %p ctx %p nr_pages %d max %d\n",
	     inode, file, rw_ctx, nr_pages, max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, rw_ctx, page_list, max);
		if (rc < 0)
			goto out;
	}
out:
	ceph_fscache_readpages_cancel(inode, page_list);

	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}

struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = page_snap_context(page);
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);

	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > page_offset(page) + PAGE_SIZE)
		end = page_offset(page) + PAGE_SIZE;
	return end > start ? end - start : 0;
}
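
/*
 * For instance (made-up numbers, 4K pages): with i_size == 10000 and a
 * head-context page at offset 8192, end is clamped to
 * min(10000, 8192 + 4096) == 10000, so a write starting at 8192 covers
 * 1808 bytes rather than a full page.
 */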

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_fs_client *fsc;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	long writeback_stat;
	int err, len = PAGE_SIZE;
	struct ceph_writeback_ctl ceph_wbc;

	dout("writepage %p idx %lu\n", page, page->index);

	inode = page->mapping->host;
	ci = ceph_inode(inode);
	fsc = ceph_inode_to_client(inode);

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		dout("%p page eof %llu\n", page, ceph_wbc.i_size);
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u snapc %p seq %lld\n",
	     inode, page, page->index, page_off, len, snapc, snapc->seq);

	writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
	if (writeback_stat >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	set_page_writeback(page);
	err = ceph_osdc_writepages(&fsc->client->osdc, ceph_vino(inode),
				   &ci->i_layout, snapc, page_off, len,
				   ceph_wbc.truncate_seq,
				   ceph_wbc.truncate_size,
				   &inode->i_mtime, &page, 1);
	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		SetPageError(page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */
	return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * lame release_pages helper.  release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec);
	for (i = 0; i < num; i++) {
		if (pagevec_add(&pvec, pages[i]) == 0)
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
			break;

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				clear_bdi_congested(inode_to_bdi(inode),
						    BLK_RW_ASYNC);

			ceph_put_snap_context(page_snap_context(page));
			page->private = 0;
			ClearPagePrivate(page);
			dout("unlocking %p\n", page);
			end_page_writeback(page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		ceph_release_pages(osd_data->pages, num_pages);
	}

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages,
			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool stop, done = false;

	dout("writepages_start %p (mode=%s)\n", inode,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited(
				"writepage_start %p %lld forced umount\n",
				inode, ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	pagevec_init(&pvec);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			dout(" cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			dout(" not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}.  Dirty pages
		 * in that range can be associated with a newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' get written. */
		if (index > 0 || wbc->sync_mode != WB_SYNC_NONE)
			should_loop = true;
		dout(" non-head snapc, range whole\n");
	}

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	stop = false;
	while (!stop && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, pvec_pages, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		pvec_pages = pagevec_lookup_range_nr_tag(&pvec, mapping, &index,
						end, PAGECACHE_TAG_DIRTY,
						max_pages - locked_pages);
		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				dout("%p page eof %llu\n",
				     page, ceph_wbc.i_size);
				/* not done if range_cyclic */
				stop = true;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					dout("%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}

			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				dout("page snapc %p %lld != oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				unlock_page(page);
				continue;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate the max possible write size and
			 * allocate a page array.
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				len = wsize;

				rc = ceph_calc_file_object_mapping(&ci->i_layout,
								   offset, len,
								   &objnum, &objoff,
								   &len);
				if (rc < 0) {
					unlock_page(page);
					break;
				}

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc(max_pages * sizeof (*pages),
						GFP_NOFS);
				if (!pages) {
					pool = fsc->wb_pagevec_pool;
					pages = mempool_alloc(pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (pool ?  CEPH_OSD_SLAB_OPS :
							CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in pvec */
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(inode_to_bdi(inode),
						  BLK_RW_ASYNC);
			}

			pages[locked_pages++] = page;
			pvec.pages[i] = NULL;

			len += PAGE_SIZE;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			unsigned j, n = 0;
			/* shift unused page to beginning of pvec */
			for (j = 0; j < pvec_pages; j++) {
				if (!pvec.pages[j])
					continue;
				if (n < j)
					pvec.pages[n] = pvec.pages[j];
				n++;
			}
			pvec.nr = n;

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_release(&pvec);
				goto get_more_pages;
			}
		}

new_request:
		offset = page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout, vino,
					    offset, &len, 0, num_ops,
					    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					    snapc, ceph_wbc.truncate_seq,
					    ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						    &ci->i_layout, vino,
						    offset, &len, 0,
						    min(num_ops,
							CEPH_OSD_SLAB_OPS),
						    CEPH_OSD_OP_WRITE,
						    CEPH_OSD_FLAG_WRITE,
						    snapc, ceph_wbc.truncate_seq,
						    ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
			     PAGE_SIZE - offset);

		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			u64 cur_offset = page_offset(pages[i]);
			if (offset + len != cur_offset) {
				if (op_idx + 1 == req->r_num_ops)
					break;
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				dout("writepages got pages at %llu~%llu\n",
				     offset, len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							!!pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(pages[i]);
			len += PAGE_SIZE;
		}

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - PAGE_SIZE;
			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		dout("writepages got pages at %llu~%llu\n", offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, !!pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		pool = NULL;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc(locked_pages * sizeof (*pages),
					GFP_NOFS);
			if (!pages) {
				pool = fsc->wb_pagevec_pool;
				pages = mempool_alloc(pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode->i_mtime;
		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = stop = true;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;
			index = 0;
			while ((index <= end) &&
			       (nr = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_WRITEBACK))) {
				for (i = 0; i < nr; i++) {
					page = pvec.pages[i];
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				pagevec_release(&pvec);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}

/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}
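
/*
 * In other words (using hypothetical seq numbers): if the oldest
 * flushable context has seq 5, a page dirtied under seq 4 was already
 * written back, and one dirtied under seq 5 may be written now; only
 * seq > 5 has to wait its turn in snap order.
 */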

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
				      loff_t pos, unsigned len,
				      struct page *page)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	loff_t page_off = pos & PAGE_MASK;
	int pos_in_page = pos & ~PAGE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	int r;
	struct ceph_snap_context *snapc, *oldest;

	if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout(" page %p forced umount\n", page);
		unlock_page(page);
		return -EIO;
	}

retry_locked:
	/* writepages currently holds the page lock; revisit this if that
	 * ever changes */
	wait_on_page_writeback(page);

	snapc = page_snap_context(page);
	if (snapc && snapc != ci->i_head_snapc) {
		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n",
			     page, snapc);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context(snapc);
			unlock_page(page);
			ceph_queue_writeback(inode);
			r = wait_event_killable(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r == -ERESTARTSYS)
				return r;
			return -EAGAIN;
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_unlock;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_SIZE)
		return 0;

	/* past end of file? */
	i_size = i_size_read(inode);

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_SIZE);
		return 0;
	}

	/* we need to read it. */
	r = ceph_do_readpage(file, page);
	if (r < 0) {
		if (r == -EINPROGRESS)
			return -EAGAIN;
		goto fail_unlock;
	}
	goto retry_locked;
fail_unlock:
	unlock_page(page);
	return r;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct page *page;
	pgoff_t index = pos >> PAGE_SHIFT;
	int r;

	do {
		/* get a page */
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			return -ENOMEM;

		dout("write_begin file %p inode %p page %p %d~%d\n", file,
		     inode, page, (int)pos, (int)len);

		r = ceph_update_writeable_page(file, pos, len, page);
		if (r < 0)
			put_page(page);
		else
			*pagep = page;
	} while (r == -EAGAIN);

	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	bool check_cap = false;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}
		SetPageUptodate(page);
	}

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	set_page_dirty(page);

out:
	unlock_page(page);
	put_page(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};

static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * vm ops
 */
static int ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct page *pinned_page = NULL;
	loff_t off = vmf->pgoff << PAGE_SHIFT;
	int want, got, ret;
	sigset_t oldset;

	ceph_block_sigs(&oldset);

	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
	     inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		goto out_restore;

	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    ci->i_inline_version == CEPH_INLINE_NONE) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
	} else
		ret = -EAGAIN;

	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret);
	if (pinned_page)
		put_page(pinned_page);
	ceph_put_cap_refs(ci, got);

	if (ret != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		int ret1;
		struct address_space *mapping = inode->i_mapping;
		struct page *page = find_or_create_page(mapping, 0,
						mapping_gfp_constraint(mapping,
						~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		ret1 = __ceph_do_getattr(inode, page,
					 CEPH_STAT_CAP_INLINE_DATA, true);
		if (ret1 < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			if (ret1 < 0)
				ret = ret1;
			else
				ret = VM_FAULT_SIGBUS;
			goto out_inline;
		}
		if (ret1 < PAGE_SIZE)
			zero_user_segment(page, ret1, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
		     inode, off, (size_t)PAGE_SIZE, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (ret < 0)
		ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;

	return ret;
}

/*
 * Reuse write_begin here for simplicity.
 */
static int ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, ret;
	sigset_t oldset;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	ceph_block_sigs(&oldset);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		struct page *locked_page = NULL;
		if (off == 0) {
			lock_page(page);
			locked_page = page;
		}
		ret = ceph_uninline_data(vma->vm_file, locked_page);
		if (locked_page)
			unlock_page(locked_page);
		if (ret < 0)
			goto out_free;
	}

	if (off + PAGE_SIZE <= size)
		len = PAGE_SIZE;
	else
		len = size & ~PAGE_MASK;

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
			    &got, NULL);
	if (ret < 0)
		goto out_free;

	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);

	do {
		lock_page(page);

		if ((off > size) || (page->mapping != inode->i_mapping)) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
		if (ret >= 0) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
		}
	} while (ret == -EAGAIN);

	if (ret == VM_FAULT_LOCKED ||
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	ceph_free_cap_flush(prealloc_cf);
	if (ret < 0)
		ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
	return ret;
}

void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}

int ceph_uninline_data(struct file *filp, struct page *locked_page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page *page = NULL;
	u64 len, inline_version;
	int err = 0;
	bool from_pagecache = false;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	dout("uninline_data %p %llx.%llx inline_version %llu\n",
	     inode, ceph_vinop(inode), inline_version);

	if (inline_version == 1 || /* initial version, no data */
	    inline_version == CEPH_INLINE_NONE)
		goto out;

	if (locked_page) {
		page = locked_page;
		WARN_ON(!PageUptodate(page));
	} else if (ceph_caps_issued(ci) &
		   (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
		page = find_get_page(inode->i_mapping, 0);
		if (page) {
			if (PageUptodate(page)) {
				from_pagecache = true;
				lock_page(page);
			} else {
				put_page(page);
				page = NULL;
			}
		}
	}

	if (page) {
		len = i_size_read(inode);
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
	} else {
		page = __page_cache_alloc(GFP_NOFS);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0) {
			/* no inline data */
			if (err == -ENODATA)
				err = 0;
			goto out;
		}
		len = err;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);

	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
out_put:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out:
	if (page && page != locked_page) {
		if (from_pagecache) {
			unlock_page(page);
			put_page(page);
		} else
			__free_pages(page, 0);
	}

	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
	     inode, ceph_vinop(inode), inline_version, err);
	return err;
}

static const struct vm_operations_struct ceph_vmops = {
	.fault = ceph_filemap_fault,
	.page_mkwrite = ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}

enum {
	POOL_READ	= 1,
	POOL_WRITE	= 2,
};

static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
		     pool, (int)pool_ns->len, pool_ns->str);
	else
		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

	wr_req->r_mtime = ci->vfs_inode.i_mtime;
	wr_req->r_abort_on_full = true;
	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	if (!err2)
		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM)
		goto out_unlock;

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
		     pool, (int)pool_ns->len, pool_ns->str, err);
	else
		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
	return err;
}

int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
{
	s64 pool;
	struct ceph_string *pool_ns;
	int ret, flags;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * Pool permission check needs to write to the first object.
		 * But for a snapshot, the head of the first object may have
		 * already been deleted.  Skip the check to avoid creating an
		 * orphan object.
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			dout("ceph_pool_perm_check pool %lld no read perm\n",
			     pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			dout("ceph_pool_perm_check pool %lld no write perm\n",
			     pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}

void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}