#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/aio.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */


/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

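	/* opens that may modify the inode are directed to the auth MDS */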
	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		/* First file open request creates the cookie, we want to keep
		 * this cookie around for the lifetime of the inode so as not
		 * to have to worry about fscache register / revoke / operation
		 * races.
		 *
		 * Also, if we know the operation is going to invalidate data
		 * (non-readonly) just nuke the cache right away.
		 */
		ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
		if ((fmode & CEPH_FILE_MODE_WR))
			ceph_fscache_invalidate(inode);
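		/* fall through: regular files and directories share setup */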
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
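		/* readdir offsets 0 and 1 are reserved for "." and ".." */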
		cf->next_offset = 2;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = NULL;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	if (flags & O_CREAT)
		parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	int err;

	dout("atomic_open %p dentry %p '%.*s' %s flags %d mode 0%o\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return err;

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	if (err)
		goto out_err;

	err = ceph_handle_snapdir(req, dentry, err);
	if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_unhashed(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_err;
	if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			ceph_init_acl(dentry, dentry->d_inode, dir);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_err:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof, bool o_direct,
			unsigned long buf_align)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len, left;
	int io_align, page_align;
	int pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;
	io_align = off & ~PAGE_MASK;

more:
	if (o_direct)
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
	else
		page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret >= 0) {
		int didpages;
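		/*
		 * A short read that still lands inside i_size means we
		 * straddled a hole (or a sparse object); zero-fill the
		 * gap so the caller sees zeros there.
		 */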
		if (was_short && (pos + ret < inode->i_size)) {
			u64 tmp = min(this_len - ret,
				      inode->i_size - pos - ret);
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + tmp);
			ceph_zero_page_vector_range(page_align + read + ret,
						    tmp, pages);
			ret += tmp;
		}

		didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit a stripe boundary and need to continue */
		if (left && hit_stripe && pos < inode->i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + left > inode->i_size)
			*checkeof = 1;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages, ret;
	size_t len = i->count;

	dout("sync_read on file %p %llu~%u %s\n", file, off,
	     (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

	if (file->f_flags & O_DIRECT) {
		while (iov_iter_count(i)) {
			void __user *data = i->iov[0].iov_base + i->iov_offset;
			size_t len = i->iov[0].iov_len - i->iov_offset;

			num_pages = calc_pages_for((unsigned long)data, len);
			pages = ceph_get_direct_page_vector(data,
							    num_pages, true);
			if (IS_ERR(pages))
				return PTR_ERR(pages);

			ret = striped_read(inode, off, len,
					   pages, num_pages, checkeof,
					   1, (unsigned long)data & ~PAGE_MASK);
			ceph_put_page_vector(pages, num_pages, true);

			if (ret <= 0)
				break;
			off += ret;
			iov_iter_advance(i, ret);
			if (ret < len)
				break;
		}
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages))
			return PTR_ERR(pages);
		ret = striped_read(inode, off, len, pages,
				   num_pages, checkeof, 0, 0);
		if (ret > 0) {
			int l, k = 0;
			size_t left = len = ret;

			while (left) {
				void __user *data = i->iov[0].iov_base
							+ i->iov_offset;
				l = min(i->iov[0].iov_len - i->iov_offset,
					left);

				ret = ceph_copy_page_vector_to_user(&pages[k],
								    data, off,
								    l);
				if (ret > 0) {
					iov_iter_advance(i, ret);
					left -= ret;
					off += ret;
					k = calc_pages_for(iocb->ki_pos,
							   len - left + 1) - 1;
					BUG_ON(k >= num_pages && left);
				} else
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %d\n", ret);
	return ret;
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
	     unsafe ? "un" : "");
	if (unsafe) {
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);
	} else {
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}


/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
		       unsigned long nr_segs, size_t count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int page_align;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	loff_t pos = iocb->ki_pos;
	struct iov_iter i;

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_write on file %p %lld~%u\n", file, pos,
	     (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + count) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;

	iov_iter_init(&i, WRITE, iov, nr_segs, count);

	while (iov_iter_count(&i) > 0) {
		void __user *data = i.iov->iov_base + i.iov_offset;
		u64 len = i.iov->iov_len - i.iov_offset;

		page_align = (unsigned long)data & ~PAGE_MASK;

		snapc = ci->i_snap_realm->cached_context;
		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len,
					    2, /* include a 'startsync' command */
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		num_pages = calc_pages_for(page_align, len);
		pages = ceph_get_direct_page_vector(data, num_pages, false);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+len) | (PAGE_CACHE_SIZE-1));
		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
						 false, false);

		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		ceph_put_page_vector(pages, num_pages, false);

out:
		ceph_osdc_put_request(req);
		if (ret == 0) {
			pos += len;
			written += len;
			iov_iter_advance(&i, (size_t)len);

			if (pos > i_size_read(inode)) {
				check_caps = ceph_inode_set_size(inode, pos);
				if (check_caps)
					ceph_check_caps(ceph_inode(inode),
							CHECK_CAPS_AUTHONLY,
							NULL);
			}
		} else
			break;
	}

	if (ret != -EOLDSNAPC && written > 0) {
		iocb->ki_pos = pos;
		ret = written;
	}
	return ret;
}


/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, size_t count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	loff_t pos = iocb->ki_pos;
	struct iov_iter i;

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + count) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE |
		CEPH_OSD_FLAG_ACK;

	iov_iter_init(&i, WRITE, iov, nr_segs, count);

	while ((len = iov_iter_count(&i)) > 0) {
		size_t left;
		int n;

		snapc = ci->i_snap_realm->cached_context;
		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, &i);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		/* get a second commit callback */
		req->r_unsafe_callback = ceph_sync_write_unsafe;
		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret == 0) {
			pos += len;
			written += len;

			if (pos > i_size_read(inode)) {
				check_caps = ceph_inode_set_size(inode, pos);
				if (check_caps)
					ceph_check_caps(ceph_inode(inode),
							CHECK_CAPS_AUTHONLY,
							NULL);
			}
		} else
			break;
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iocb->ki_nbytes;
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	ssize_t ret;
	int want, got = 0;
	int checkeof = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		/* hmm, this isn't really async... */
		ret = ceph_sync_read(iocb, to, &checkeof);
	} else {
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		ret = generic_file_read_iter(iocb, to);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

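	/*
	 * If a sync read stopped short, fetch the authoritative size
	 * from the MDS; if the file extends past what we read, loop
	 * and read the rest.
	 */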
	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode,
					      CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && iocb->ki_pos < inode->i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos,
			     inode->i_size);

			iov_iter_advance(to, ret);
			read += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	ssize_t count, written = 0;
	int err, want, got;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	mutex_lock(&inode->i_mutex);

	count = iov_length(iov, nr_segs);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = file->f_mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos + count);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
		mutex_unlock(&inode->i_mutex);
		if (file->f_flags & O_DIRECT)
			written = ceph_sync_direct_write(iocb, iov,
							 nr_segs, count);
		else
			written = ceph_sync_write(iocb, iov, nr_segs, count);
		if (written == -EOLDSNAPC) {
			dout("aio_write %p %llx.%llx %llu~%u "
			     "got EOLDSNAPC, retrying\n",
			     inode, ceph_vinop(inode),
			     pos, (unsigned)iov->iov_len);
			mutex_lock(&inode->i_mutex);
			goto retry_snap;
		}
	} else {
		loff_t old_size = inode->i_size;
		struct iov_iter from;
		/*
		 * No need to acquire the i_truncate_mutex: the MDS
		 * revokes Fwb caps before sending a truncate message
		 * to us, and we can't get the Fwb cap while there is
		 * a pending vmtruncate, so write and vmtruncate can
		 * never run at the same time.
		 */
		iov_iter_init(&from, WRITE, iov, nr_segs, count);
		written = generic_perform_write(file, &from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		if (inode->i_size > old_size)
			ceph_fscache_update_objectsize(inode);
		mutex_unlock(&inode->i_mutex);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written >= 0 &&
	    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
	     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
		err = vfs_fsync_range(file, pos, pos + written - 1, 1);
		if (err < 0)
			written = err;
	}

	goto out_unlocked;

out:
	mutex_unlock(&inode->i_mutex);
out_unlocked:
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
	}

	switch (whence) {
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
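	/*
	 * Minimal SEEK_DATA/SEEK_HOLE support: treat the file as one
	 * solid data extent, with data everywhere below i_size and
	 * the only hole starting at EOF.
	 */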
	case SEEK_DATA:
		if (offset >= inode->i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= inode->i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = inode->i_size;
		break;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_CACHE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
		unlock_page(page);
		page_cache_release(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_CACHE_SIZE) {
		loff_t size = round_down(length, PAGE_CACHE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    1, op,
				    CEPH_OSD_FLAG_WRITE |
				    CEPH_OSD_FLAG_ONDISK,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
				&inode->i_mtime);

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
	s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
	s32 object_size = ceph_file_layout_object_size(ci->i_layout);
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

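	/*
	 * Three passes: zero up to the next period boundary, drop
	 * whole objects for each full period, then zero the tail.
	 */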
	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}

static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		endoff = offset + length;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = new_sync_read,
	.write = do_sync_write,
	.read_iter = ceph_read_iter,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};