#include "ceph_debug.h"

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */


/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_client *client = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
	req->r_args.open.preferred = cpu_to_le32(-1);
out:
	return req;
}
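
/*
 * Illustrative mapping (derived from the checks above): an
 * open(path, O_RDWR|O_CREAT) becomes a CEPH_MDS_OP_CREATE request that
 * must go to the auth MDS (USE_AUTH_MDS), while a plain O_RDONLY open
 * becomes CEPH_MDS_OP_OPEN and may be sent to any MDS (USE_ANY_MDS).
 */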

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&inode->i_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have any caps.  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&inode->i_lock);
	if (__ceph_is_any_real_caps(ci)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}
	spin_unlock(&inode->i_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
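
/*
 * Example of the "satisfy locally" path above (cap names are illustrative):
 * a second read-only open of a file whose inode already holds the wanted
 * read/cache caps never blocks on the MDS; we only bump the fmode count,
 * and if the wanted set grew, ceph_check_caps() informs the MDS
 * asynchronously.
 */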


/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
				struct nameidata *nd, int mode,
				int locked_dir)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct file *file = nd->intent.open.file;
	struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
	struct ceph_mds_request *req;
	int err;
	int flags = nd->intent.open.flags - 1;  /* silly vfs! */

	dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
	     dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	if (!err)
		err = ceph_init_file(req->r_dentry->d_inode, file,
				     req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("ceph_lookup_open result=%p\n", dentry);
	return dentry;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

/*
 * build a vector of user pages
 */
static struct page **get_direct_page_vector(const char __user *data,
					    int num_pages,
					    loff_t off, size_t len)
{
	struct page **pages;
	int rc;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	down_read(&current->mm->mmap_sem);
	rc = get_user_pages(current, current->mm, (unsigned long)data,
			    num_pages, 0, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		goto fail;
	return pages;

fail:
	kfree(pages);
	return ERR_PTR(rc);
}

static void put_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	kfree(pages);
}
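
/*
 * Note the asymmetry between the neighboring helpers: put_page_vector()
 * above drops the references that get_user_pages() took on a caller's
 * O_DIRECT pages, while ceph_release_page_vector() below frees pages this
 * code allocated itself; the two must not be interchanged.
 */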

void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}

/*
 * copy user data into a page vector
 */
static int copy_user_to_page_vector(struct page **pages,
				    const char __user *data,
				    loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}

/*
 * copy user data from a page vector into a user pointer
 */
static int copy_page_vector_to_user(struct page **pages, char __user *data,
				    loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, left, PAGE_CACHE_SIZE-po);
		bad = copy_to_user(data, page_address(pages[i]) + po, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		if (po) {
			po += l - bad;
			if (po == PAGE_CACHE_SIZE)
				po = 0;
		}
		i++;
	}
	return len;
}
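
/*
 * Worked example for the copy helpers above (assuming 4 KB
 * PAGE_CACHE_SIZE): copying 6000 bytes at file offset 5000 starts at
 * po = 5000 & ~PAGE_CACHE_MASK = 904, so the first chunk is
 * min(4096 - 904, 6000) = 3192 bytes into page 0 and the remaining
 * 2808 bytes land at the start of page 1.
 */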

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
static void zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	off &= ~PAGE_CACHE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
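
/*
 * Worked example (assuming 4 KB PAGE_CACHE_SIZE):
 * zero_page_vector_range(1000, 10000, pages) zeroes bytes 1000..4095 of
 * page 0, all of page 1, and bytes 0..2807 of page 2.
 */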


/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof)
{
	struct ceph_client *client = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len;
	int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
	int left, pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;

more:
	this_len = left;
	ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left);
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	if (ret == -ENOENT)
		ret = 0;
	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret > 0) {
		int didpages =
			((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;

		if (read < pos - off) {
			dout(" zero gap %llu to %llu\n", off + read, pos);
			zero_page_vector_range(page_off + read,
					       pos - off - read, pages);
		}
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe? */
		if (left && hit_stripe)
			goto more;
	}

	if (was_short) {
		/* was original extent fully inside i_size? */
		if (pos + left <= inode->i_size) {
			dout("zero tail\n");
			zero_page_vector_range(page_off + read, len - read,
					       pages);
			read = len;
			goto out;
		}

		/* check i_size */
		*checkeof = 1;
	}

out:
	if (ret >= 0)
		ret = read;
	dout("striped_read returns %d\n", ret);
	return ret;
}
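
/*
 * Illustrative striping (assuming a hypothetical 4 MB object size and a
 * simple layout): a 6 MB read starting at offset 3 MB is clipped by
 * ceph_osdc_readpages() to the 1 MB left in the first object, hit_stripe
 * is set, and the loop above issues further reads of 4 MB and then 1 MB
 * against the next two objects.
 */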

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
			      unsigned len, loff_t *poff, int *checkeof)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page **pages;
	u64 off = *poff;
	int num_pages = calc_pages_for(off, len);
	int ret;

	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_DIRECT) {
		pages = get_direct_page_vector(data, num_pages, off, len);

		/*
		 * flush any page cache pages in this range.  this
		 * will make concurrent normal and O_DIRECT io slow,
		 * but it will at least behave sensibly when they are
		 * in sequence.
		 */
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	}
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		goto done;

	ret = striped_read(inode, off, len, pages, num_pages, checkeof);

	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
		ret = copy_page_vector_to_user(pages, data, off, ret);
	if (ret >= 0)
		*poff = off + ret;

done:
	if (file->f_flags & O_DIRECT)
		put_page_vector(pages, num_pages);
	else
		ceph_release_page_vector(pages, num_pages);
	dout("sync_read result %d\n", ret);
	return ret;
}
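
/*
 * Sketch of a buffered sync read (assuming 4 KB pages): a 10000-byte read
 * at offset 100 allocates calc_pages_for(100, 10000) = 3 pages covering
 * bytes 100..10099, striped_read() fills them (zero-filling gaps and any
 * tail that is still inside i_size), and the data is copied back to
 * userspace with copy_page_vector_to_user() since O_DIRECT is not set.
 */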

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
	spin_lock(&ci->i_unsafe_lock);
	list_del_init(&req->r_unsafe_item);
	spin_unlock(&ci->i_unsafe_lock);
	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
			       size_t left, loff_t *offset)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	long long unsigned pos;
	u64 len;
	int written = 0;
	int flags;
	int do_sync = 0;
	int check_caps = 0;
	int ret;
	struct timespec mtime = CURRENT_TIME;

	if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u %s\n", file, *offset,
	     (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_APPEND)
		pos = i_size_read(inode);
	else
		pos = *offset;

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + left) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;
	if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
		flags |= CEPH_OSD_FLAG_ACK;
	else
		do_sync = 1;

	/*
	 * we may need to do multiple writes here if we span an object
	 * boundary.  this isn't atomic, unfortunately.  :(
	 */
more:
	len = left;
	req = ceph_osdc_new_request(&client->osdc, &ci->i_layout,
				    ceph_vino(inode), pos, &len,
				    CEPH_OSD_OP_WRITE, flags,
				    ci->i_snap_realm->cached_context,
				    do_sync,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    &mtime, false, 2);
	if (!req)
		return -ENOMEM;

	num_pages = calc_pages_for(pos, len);

	if (file->f_flags & O_DIRECT) {
		pages = get_direct_page_vector(data, num_pages, pos, len);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+len) | (PAGE_CACHE_SIZE-1));
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}
		ret = copy_user_to_page_vector(pages, data, pos, len);
		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		if ((file->f_flags & O_SYNC) == 0) {
			/* get a second commit callback */
			req->r_safe_callback = sync_write_commit;
			req->r_own_pages = 1;
		}
	}
	req->r_pages = pages;
	req->r_num_pages = num_pages;
	req->r_inode = inode;

	ret = ceph_osdc_start_request(&client->osdc, req, false);
	if (!ret) {
		if (req->r_safe_callback) {
			/*
			 * Add to inode unsafe list only after we
			 * start_request so that a tid has been assigned.
			 */
			spin_lock(&ci->i_unsafe_lock);
			list_add(&ci->i_unsafe_writes, &req->r_unsafe_item);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		}
		ret = ceph_osdc_wait_request(&client->osdc, req);
	}

	if (file->f_flags & O_DIRECT)
		put_page_vector(pages, num_pages);
	else if (file->f_flags & O_SYNC)
		ceph_release_page_vector(pages, num_pages);

out:
	ceph_osdc_put_request(req);
	if (ret == 0) {
		pos += len;
		written += len;
		left -= len;
		if (left)
			goto more;

		ret = written;
		*offset = pos;
		if (pos > i_size_read(inode))
			check_caps = ceph_inode_set_size(inode, pos);
		if (check_caps)
			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
					NULL);
	}
	return ret;
}
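
/*
 * Note on the flag dance above: when neither O_SYNC nor O_DIRECT is set we
 * ask the OSD for both an ACK and an ONDISK reply, return to the caller on
 * the ack, and let sync_write_commit() drop the extra FILE_WR cap
 * reference once the data is reported safely committed.
 */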

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	loff_t *ppos = &iocb->ki_pos;
	size_t len = iov->iov_len;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	void *base = iov->iov_base;
	ssize_t ret;
	int got = 0;
	int checkeof = 0, read = 0;

	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
	__ceph_do_pending_vmtruncate(inode);
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
			    &got, -1);
	if (ret < 0)
		goto out;
	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len,
	     ceph_cap_string(got));

	if ((got & CEPH_CAP_FILE_CACHE) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS))
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
	else
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && *ppos < inode->i_size) {
			dout("aio_read sync_read hit hole, reading more\n");
			read += ret;
			base += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}
	if (ret >= 0)
		ret += read;

	return ret;
}
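
/*
 * Example of the checkeof handling above: if the sync read came up short
 * but a fresh getattr shows i_size is still beyond *ppos, the shortfall
 * was a hole (or a racing writer grew the file), so we loop back and read
 * the remainder instead of returning a spurious short read.
 */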

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc;
	loff_t endoff = pos + iov->iov_len;
	int want, got = 0;
	int ret, err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		return -ENOSPC;
	__ceph_do_pending_vmtruncate(inode);
	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
	if (ret < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS)) {
		ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
			&iocb->ki_pos);
	} else {
		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

		if ((ret >= 0 || ret == -EIOCBQUEUED) &&
		    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
		     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
			err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
			if (err < 0)
				ret = err;
		}
	}
	if (ret >= 0) {
		spin_lock(&inode->i_lock);
		__ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&inode->i_lock);
	}

out:
	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (ret == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
		goto retry_snap;
	}

	return ret;
}
876 | ||
877 | /* | |
878 | * llseek. be sure to verify file size on SEEK_END. | |
879 | */ | |
880 | static loff_t ceph_llseek(struct file *file, loff_t offset, int origin) | |
881 | { | |
882 | struct inode *inode = file->f_mapping->host; | |
883 | int ret; | |
884 | ||
885 | mutex_lock(&inode->i_mutex); | |
886 | __ceph_do_pending_vmtruncate(inode); | |
887 | switch (origin) { | |
888 | case SEEK_END: | |
889 | ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE); | |
890 | if (ret < 0) { | |
891 | offset = ret; | |
892 | goto out; | |
893 | } | |
894 | offset += inode->i_size; | |
895 | break; | |
896 | case SEEK_CUR: | |
897 | /* | |
898 | * Here we special-case the lseek(fd, 0, SEEK_CUR) | |
899 | * position-querying operation. Avoid rewriting the "same" | |
900 | * f_pos value back to the file because a concurrent read(), | |
901 | * write() or lseek() might have altered it | |
902 | */ | |
903 | if (offset == 0) { | |
904 | offset = file->f_pos; | |
905 | goto out; | |
906 | } | |
907 | offset += file->f_pos; | |
908 | break; | |
909 | } | |
910 | ||
911 | if (offset < 0 || offset > inode->i_sb->s_maxbytes) { | |
912 | offset = -EINVAL; | |
913 | goto out; | |
914 | } | |
915 | ||
916 | /* Special lock needed here? */ | |
917 | if (offset != file->f_pos) { | |
918 | file->f_pos = offset; | |
919 | file->f_version = 0; | |
920 | } | |
921 | ||
922 | out: | |
923 | mutex_unlock(&inode->i_mutex); | |
924 | return offset; | |
925 | } | |
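
/*
 * In the operations table below, .read/.write use the generic
 * do_sync_read/do_sync_write helpers, which wrap the buffer in a
 * single-segment iovec and call our .aio_read/.aio_write, so all file I/O
 * ultimately funnels through ceph_aio_read()/ceph_aio_write().
 */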

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
};