Commit | Line | Data |
---|---|---|
3d14c5d2 | 1 | #include <linux/ceph/ceph_debug.h> |
124e68e7 | 2 | |
3d14c5d2 | 3 | #include <linux/module.h> |
124e68e7 | 4 | #include <linux/sched.h> |
5a0e3ad6 | 5 | #include <linux/slab.h> |
124e68e7 | 6 | #include <linux/file.h> |
5ef50c3b | 7 | #include <linux/mount.h> |
124e68e7 SW |
8 | #include <linux/namei.h> |
9 | #include <linux/writeback.h> | |
ad7a60de | 10 | #include <linux/falloc.h> |
124e68e7 SW |
11 | |
12 | #include "super.h" | |
13 | #include "mds_client.h" | |
99ccbd22 | 14 | #include "cache.h" |
124e68e7 | 15 | |
f775ff7d AG |
16 | static __le32 ceph_flags_sys2wire(u32 flags) |
17 | { | |
18 | u32 wire_flags = 0; | |
19 | ||
20 | switch (flags & O_ACCMODE) { | |
21 | case O_RDONLY: | |
22 | wire_flags |= CEPH_O_RDONLY; | |
23 | break; | |
24 | case O_WRONLY: | |
25 | wire_flags |= CEPH_O_WRONLY; | |
26 | break; | |
27 | case O_RDWR: | |
28 | wire_flags |= CEPH_O_RDWR; | |
29 | break; | |
30 | } | |
31 | ||
32 | #define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; } | |
33 | ||
34 | ceph_sys2wire(O_CREAT); | |
35 | ceph_sys2wire(O_EXCL); | |
36 | ceph_sys2wire(O_TRUNC); | |
37 | ceph_sys2wire(O_DIRECTORY); | |
38 | ceph_sys2wire(O_NOFOLLOW); | |
39 | ||
40 | #undef ceph_sys2wire | |
41 | ||
42 | if (flags) | |
43 | dout("unused open flags: %x", flags); | |
44 | ||
45 | return cpu_to_le32(wire_flags); | |
46 | } | |
47 | ||
124e68e7 SW |
48 | /* |
49 | * Ceph file operations | |
50 | * | |
51 | * Implement basic open/close functionality, and implement | |
52 | * read/write. | |
53 | * | |
54 | * We implement three modes of file I/O: | |
55 | * - buffered uses the generic_file_aio_{read,write} helpers | |
56 | * | |
57 | * - synchronous is used when there is multi-client read/write | |
58 | * sharing, avoids the page cache, and synchronously waits for an | |
59 | * ack from the OSD. | |
60 | * | |
61 | * - direct io takes the variant of the sync path that references | |
62 | * user pages directly. | |
63 | * | |
64 | * fsync() flushes and waits on dirty pages, but just queues metadata | |
65 | * for writeback: since the MDS can recover size and mtime there is no | |
66 | * need to wait for MDS acknowledgement. | |
67 | */ | |
68 | ||
b5b98989 ZC |
69 | /* |
70 | * Calculate the length sum of direct io vectors that can | |
71 | * be combined into one page vector. | |
72 | */ | |
73 | static size_t dio_get_pagev_size(const struct iov_iter *it) | |
74 | { | |
75 | const struct iovec *iov = it->iov; | |
76 | const struct iovec *iovend = iov + it->nr_segs; | |
77 | size_t size; | |
78 | ||
79 | size = iov->iov_len - it->iov_offset; | |
80 | /* | |
81 | * An iov can be page vectored when both the current tail | |
82 | * and the next base are page aligned. | |
83 | */ | |
84 | while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) && | |
85 | (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) { | |
86 | size += iov->iov_len; | |
87 | } | |
88 | dout("dio_get_pagevlen len = %zu\n", size); | |
89 | return size; | |
90 | } | |
91 | ||
92 | /* | |
93 | * Allocate a page vector based on (@it, @nbytes). | |
94 | * The return value is the tuple describing a page vector, | |
95 | * that is (@pages, @page_align, @num_pages). | |
96 | */ | |
97 | static struct page ** | |
98 | dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes, | |
99 | size_t *page_align, int *num_pages) | |
100 | { | |
101 | struct iov_iter tmp_it = *it; | |
102 | size_t align; | |
103 | struct page **pages; | |
104 | int ret = 0, idx, npages; | |
105 | ||
106 | align = (unsigned long)(it->iov->iov_base + it->iov_offset) & | |
107 | (PAGE_SIZE - 1); | |
108 | npages = calc_pages_for(align, nbytes); | |
752ade68 MH |
109 | pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL); |
110 | if (!pages) | |
111 | return ERR_PTR(-ENOMEM); | |
b5b98989 ZC |
112 | |
113 | for (idx = 0; idx < npages; ) { | |
114 | size_t start; | |
115 | ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes, | |
116 | npages - idx, &start); | |
117 | if (ret < 0) | |
118 | goto fail; | |
119 | ||
120 | iov_iter_advance(&tmp_it, ret); | |
121 | nbytes -= ret; | |
122 | idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE; | |
123 | } | |
124 | ||
125 | BUG_ON(nbytes != 0); | |
126 | *num_pages = npages; | |
127 | *page_align = align; | |
128 | dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align); | |
129 | return pages; | |
130 | fail: | |
131 | ceph_put_page_vector(pages, idx, false); | |
132 | return ERR_PTR(ret); | |
133 | } | |
124e68e7 SW |
134 | |
135 | /* | |
136 | * Prepare an open request. Preallocate ceph_cap to avoid an | |
137 | * inopportune ENOMEM later. | |
138 | */ | |
139 | static struct ceph_mds_request * | |
140 | prepare_open_request(struct super_block *sb, int flags, int create_mode) | |
141 | { | |
3d14c5d2 YS |
142 | struct ceph_fs_client *fsc = ceph_sb_to_client(sb); |
143 | struct ceph_mds_client *mdsc = fsc->mdsc; | |
124e68e7 SW |
144 | struct ceph_mds_request *req; |
145 | int want_auth = USE_ANY_MDS; | |
146 | int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; | |
147 | ||
148 | if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC)) | |
149 | want_auth = USE_AUTH_MDS; | |
150 | ||
151 | req = ceph_mdsc_create_request(mdsc, op, want_auth); | |
152 | if (IS_ERR(req)) | |
153 | goto out; | |
154 | req->r_fmode = ceph_flags_to_mode(flags); | |
f775ff7d | 155 | req->r_args.open.flags = ceph_flags_sys2wire(flags); |
124e68e7 | 156 | req->r_args.open.mode = cpu_to_le32(create_mode); |
124e68e7 SW |
157 | out: |
158 | return req; | |
159 | } | |
160 | ||
161 | /* | |
162 | * initialize private struct file data. | |
163 | * if we fail, clean up by dropping fmode reference on the ceph_inode | |
164 | */ | |
165 | static int ceph_init_file(struct inode *inode, struct file *file, int fmode) | |
166 | { | |
167 | struct ceph_file_info *cf; | |
168 | int ret = 0; | |
169 | ||
170 | switch (inode->i_mode & S_IFMT) { | |
171 | case S_IFREG: | |
46b59b2b YZ |
172 | ceph_fscache_register_inode_cookie(inode); |
173 | ceph_fscache_file_set_cookie(inode, file); | |
124e68e7 SW |
174 | case S_IFDIR: |
175 | dout("init_file %p %p 0%o (regular)\n", inode, file, | |
176 | inode->i_mode); | |
99ec2697 | 177 | cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL); |
124e68e7 SW |
178 | if (cf == NULL) { |
179 | ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ | |
180 | return -ENOMEM; | |
181 | } | |
182 | cf->fmode = fmode; | |
183 | cf->next_offset = 2; | |
fdd4e158 | 184 | cf->readdir_cache_idx = -1; |
124e68e7 SW |
185 | file->private_data = cf; |
186 | BUG_ON(inode->i_fop->release != ceph_release); | |
187 | break; | |
188 | ||
189 | case S_IFLNK: | |
190 | dout("init_file %p %p 0%o (symlink)\n", inode, file, | |
191 | inode->i_mode); | |
192 | ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ | |
193 | break; | |
194 | ||
195 | default: | |
196 | dout("init_file %p %p 0%o (special)\n", inode, file, | |
197 | inode->i_mode); | |
198 | /* | |
199 | * we need to drop the open ref now, since we don't | |
200 | * have .release set to ceph_release. | |
201 | */ | |
202 | ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ | |
203 | BUG_ON(inode->i_fop->release == ceph_release); | |
204 | ||
205 | /* call the proper open fop */ | |
206 | ret = inode->i_fop->open(inode, file); | |
207 | } | |
208 | return ret; | |
209 | } | |
210 | ||
77310320 YZ |
211 | /* |
212 | * try renew caps after session gets killed. | |
213 | */ | |
214 | int ceph_renew_caps(struct inode *inode) | |
215 | { | |
216 | struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; | |
217 | struct ceph_inode_info *ci = ceph_inode(inode); | |
218 | struct ceph_mds_request *req; | |
219 | int err, flags, wanted; | |
220 | ||
221 | spin_lock(&ci->i_ceph_lock); | |
222 | wanted = __ceph_caps_file_wanted(ci); | |
223 | if (__ceph_is_any_real_caps(ci) && | |
8242c9f3 | 224 | (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) { |
77310320 YZ |
225 | int issued = __ceph_caps_issued(ci, NULL); |
226 | spin_unlock(&ci->i_ceph_lock); | |
227 | dout("renew caps %p want %s issued %s updating mds_wanted\n", | |
228 | inode, ceph_cap_string(wanted), ceph_cap_string(issued)); | |
229 | ceph_check_caps(ci, 0, NULL); | |
230 | return 0; | |
231 | } | |
232 | spin_unlock(&ci->i_ceph_lock); | |
233 | ||
234 | flags = 0; | |
235 | if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR)) | |
236 | flags = O_RDWR; | |
237 | else if (wanted & CEPH_CAP_FILE_RD) | |
238 | flags = O_RDONLY; | |
239 | else if (wanted & CEPH_CAP_FILE_WR) | |
240 | flags = O_WRONLY; | |
241 | #ifdef O_LAZY | |
242 | if (wanted & CEPH_CAP_FILE_LAZYIO) | |
243 | flags |= O_LAZY; | |
244 | #endif | |
245 | ||
246 | req = prepare_open_request(inode->i_sb, flags, 0); | |
247 | if (IS_ERR(req)) { | |
248 | err = PTR_ERR(req); | |
249 | goto out; | |
250 | } | |
251 | ||
252 | req->r_inode = inode; | |
253 | ihold(inode); | |
254 | req->r_num_caps = 1; | |
255 | req->r_fmode = -1; | |
256 | ||
257 | err = ceph_mdsc_do_request(mdsc, NULL, req); | |
258 | ceph_mdsc_put_request(req); | |
259 | out: | |
260 | dout("renew caps %p open result=%d\n", inode, err); | |
261 | return err < 0 ? err : 0; | |
262 | } | |
263 | ||
124e68e7 | 264 | /* |
124e68e7 SW |
265 | * If we already have the requisite capabilities, we can satisfy |
266 | * the open request locally (no need to request new caps from the | |
267 | * MDS). We do, however, need to inform the MDS (asynchronously) | |
268 | * if our wanted caps set expands. | |
269 | */ | |
270 | int ceph_open(struct inode *inode, struct file *file) | |
271 | { | |
272 | struct ceph_inode_info *ci = ceph_inode(inode); | |
3d14c5d2 YS |
273 | struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); |
274 | struct ceph_mds_client *mdsc = fsc->mdsc; | |
124e68e7 SW |
275 | struct ceph_mds_request *req; |
276 | struct ceph_file_info *cf = file->private_data; | |
124e68e7 SW |
277 | int err; |
278 | int flags, fmode, wanted; | |
279 | ||
280 | if (cf) { | |
281 | dout("open file %p is already opened\n", file); | |
282 | return 0; | |
283 | } | |
284 | ||
285 | /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */ | |
286 | flags = file->f_flags & ~(O_CREAT|O_EXCL); | |
287 | if (S_ISDIR(inode->i_mode)) | |
288 | flags = O_DIRECTORY; /* mds likes to know */ | |
289 | ||
290 | dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode, | |
291 | ceph_vinop(inode), file, flags, file->f_flags); | |
292 | fmode = ceph_flags_to_mode(flags); | |
293 | wanted = ceph_caps_for_mode(fmode); | |
294 | ||
295 | /* snapped files are read-only */ | |
296 | if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE)) | |
297 | return -EROFS; | |
298 | ||
299 | /* trivially open snapdir */ | |
300 | if (ceph_snap(inode) == CEPH_SNAPDIR) { | |
be655596 | 301 | spin_lock(&ci->i_ceph_lock); |
124e68e7 | 302 | __ceph_get_fmode(ci, fmode); |
be655596 | 303 | spin_unlock(&ci->i_ceph_lock); |
124e68e7 SW |
304 | return ceph_init_file(inode, file, fmode); |
305 | } | |
306 | ||
307 | /* | |
7421ab80 SW |
308 | * No need to block if we have caps on the auth MDS (for |
309 | * write) or any MDS (for read). Update wanted set | |
124e68e7 SW |
310 | * asynchronously. |
311 | */ | |
be655596 | 312 | spin_lock(&ci->i_ceph_lock); |
7421ab80 SW |
313 | if (__ceph_is_any_real_caps(ci) && |
314 | (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { | |
c1944fed | 315 | int mds_wanted = __ceph_caps_mds_wanted(ci, true); |
124e68e7 SW |
316 | int issued = __ceph_caps_issued(ci, NULL); |
317 | ||
318 | dout("open %p fmode %d want %s issued %s using existing\n", | |
319 | inode, fmode, ceph_cap_string(wanted), | |
320 | ceph_cap_string(issued)); | |
321 | __ceph_get_fmode(ci, fmode); | |
be655596 | 322 | spin_unlock(&ci->i_ceph_lock); |
124e68e7 SW |
323 | |
324 | /* adjust wanted? */ | |
325 | if ((issued & wanted) != wanted && | |
326 | (mds_wanted & wanted) != wanted && | |
327 | ceph_snap(inode) != CEPH_SNAPDIR) | |
328 | ceph_check_caps(ci, 0, NULL); | |
329 | ||
330 | return ceph_init_file(inode, file, fmode); | |
331 | } else if (ceph_snap(inode) != CEPH_NOSNAP && | |
332 | (ci->i_snap_caps & wanted) == wanted) { | |
333 | __ceph_get_fmode(ci, fmode); | |
be655596 | 334 | spin_unlock(&ci->i_ceph_lock); |
124e68e7 SW |
335 | return ceph_init_file(inode, file, fmode); |
336 | } | |
99ccbd22 | 337 | |
be655596 | 338 | spin_unlock(&ci->i_ceph_lock); |
124e68e7 SW |
339 | |
340 | dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); | |
341 | req = prepare_open_request(inode->i_sb, flags, 0); | |
342 | if (IS_ERR(req)) { | |
343 | err = PTR_ERR(req); | |
344 | goto out; | |
345 | } | |
70b666c3 SW |
346 | req->r_inode = inode; |
347 | ihold(inode); | |
99ccbd22 | 348 | |
124e68e7 | 349 | req->r_num_caps = 1; |
e36d571d | 350 | err = ceph_mdsc_do_request(mdsc, NULL, req); |
124e68e7 SW |
351 | if (!err) |
352 | err = ceph_init_file(inode, file, req->r_fmode); | |
353 | ceph_mdsc_put_request(req); | |
354 | dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); | |
355 | out: | |
356 | return err; | |
357 | } | |
358 | ||
359 | ||
360 | /* | |
5ef50c3b SW |
361 | * Do a lookup + open with a single request. If we get a non-existent |
362 | * file or symlink, return 1 so the VFS can retry. | |
124e68e7 | 363 | */ |
5ef50c3b | 364 | int ceph_atomic_open(struct inode *dir, struct dentry *dentry, |
30d90494 | 365 | struct file *file, unsigned flags, umode_t mode, |
d9585277 | 366 | int *opened) |
124e68e7 | 367 | { |
3d14c5d2 YS |
368 | struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); |
369 | struct ceph_mds_client *mdsc = fsc->mdsc; | |
124e68e7 | 370 | struct ceph_mds_request *req; |
5ef50c3b | 371 | struct dentry *dn; |
b1ee94aa | 372 | struct ceph_acls_info acls = {}; |
315f2408 | 373 | int mask; |
124e68e7 | 374 | int err; |
124e68e7 | 375 | |
a455589f AV |
376 | dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", |
377 | dir, dentry, dentry, | |
5ef50c3b SW |
378 | d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); |
379 | ||
380 | if (dentry->d_name.len > NAME_MAX) | |
381 | return -ENAMETOOLONG; | |
382 | ||
b1ee94aa YZ |
383 | if (flags & O_CREAT) { |
384 | err = ceph_pre_init_acls(dir, &mode, &acls); | |
385 | if (err < 0) | |
386 | return err; | |
387 | } | |
388 | ||
124e68e7 SW |
389 | /* do the open */ |
390 | req = prepare_open_request(dir->i_sb, flags, mode); | |
b1ee94aa YZ |
391 | if (IS_ERR(req)) { |
392 | err = PTR_ERR(req); | |
393 | goto out_acl; | |
394 | } | |
124e68e7 SW |
395 | req->r_dentry = dget(dentry); |
396 | req->r_num_caps = 2; | |
397 | if (flags & O_CREAT) { | |
398 | req->r_dentry_drop = CEPH_CAP_FILE_SHARED; | |
399 | req->r_dentry_unless = CEPH_CAP_FILE_EXCL; | |
b1ee94aa YZ |
400 | if (acls.pagelist) { |
401 | req->r_pagelist = acls.pagelist; | |
402 | acls.pagelist = NULL; | |
403 | } | |
124e68e7 | 404 | } |
315f2408 YZ |
405 | |
406 | mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; | |
407 | if (ceph_security_xattr_wanted(dir)) | |
408 | mask |= CEPH_CAP_XATTR_SHARED; | |
409 | req->r_args.open.mask = cpu_to_le32(mask); | |
410 | ||
3dd69aab JL |
411 | req->r_parent = dir; |
412 | set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); | |
acda7657 SW |
413 | err = ceph_mdsc_do_request(mdsc, |
414 | (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, | |
415 | req); | |
bf91c315 | 416 | err = ceph_handle_snapdir(req, dentry, err); |
79aec984 | 417 | if (err) |
b1ee94aa | 418 | goto out_req; |
79aec984 | 419 | |
a43137f7 | 420 | if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) |
124e68e7 | 421 | err = ceph_handle_notrace_create(dir, dentry); |
2d83bde9 | 422 | |
00699ad8 | 423 | if (d_in_lookup(dentry)) { |
5ef50c3b SW |
424 | dn = ceph_finish_lookup(req, dentry, err); |
425 | if (IS_ERR(dn)) | |
426 | err = PTR_ERR(dn); | |
427 | } else { | |
428 | /* we were given a hashed negative dentry */ | |
429 | dn = NULL; | |
430 | } | |
431 | if (err) | |
b1ee94aa | 432 | goto out_req; |
2b0143b5 | 433 | if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { |
5ef50c3b SW |
434 | /* make vfs retry on splice, ENOENT, or symlink */ |
435 | dout("atomic_open finish_no_open on dn %p\n", dn); | |
436 | err = finish_no_open(file, dn); | |
437 | } else { | |
438 | dout("atomic_open finish_open on dn %p\n", dn); | |
6e8575fa | 439 | if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { |
2b0143b5 | 440 | ceph_init_inode_acls(d_inode(dentry), &acls); |
6e8575fa SL |
441 | *opened |= FILE_CREATED; |
442 | } | |
5ef50c3b SW |
443 | err = finish_open(file, dentry, ceph_open, opened); |
444 | } | |
b1ee94aa | 445 | out_req: |
ab866549 YZ |
446 | if (!req->r_err && req->r_target_inode) |
447 | ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode); | |
5ef50c3b | 448 | ceph_mdsc_put_request(req); |
b1ee94aa YZ |
449 | out_acl: |
450 | ceph_release_acls_info(&acls); | |
5ef50c3b | 451 | dout("atomic_open result=%d\n", err); |
d9585277 | 452 | return err; |
124e68e7 SW |
453 | } |
454 | ||
455 | int ceph_release(struct inode *inode, struct file *file) | |
456 | { | |
457 | struct ceph_inode_info *ci = ceph_inode(inode); | |
458 | struct ceph_file_info *cf = file->private_data; | |
459 | ||
460 | dout("release inode %p file %p\n", inode, file); | |
461 | ceph_put_fmode(ci, cf->fmode); | |
462 | if (cf->last_readdir) | |
463 | ceph_mdsc_put_request(cf->last_readdir); | |
464 | kfree(cf->last_name); | |
465 | kfree(cf->dir_info); | |
124e68e7 | 466 | kmem_cache_free(ceph_file_cachep, cf); |
195d3ce2 SW |
467 | |
468 | /* wake up anyone waiting for caps on this inode */ | |
03066f23 | 469 | wake_up_all(&ci->i_cap_wq); |
124e68e7 SW |
470 | return 0; |
471 | } | |
472 | ||
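/*
 * Reasons ceph_read_iter() may retry or fix up a read; striped_read()
 * and ceph_sync_read() report them back through their *checkeof /
 * retry_op argument.
 */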
83701246 | 473 | enum { |
c8fe9b17 YZ |
474 | HAVE_RETRIED = 1, |
475 | CHECK_EOF = 2, | |
476 | READ_INLINE = 3, | |
83701246 YZ |
477 | }; |
478 | ||
124e68e7 SW |
479 | /* |
480 | * Read a range of bytes striped over one or more objects. Iterate over | |
481 | * objects we stripe over. (That's not atomic, but good enough for now.) | |
482 | * | |
483 | * If we get a short result from the OSD, check against i_size; we need to | |
484 | * only return a short read to the caller if we hit EOF. | |
485 | */ | |
486 | static int striped_read(struct inode *inode, | |
7ce469a5 | 487 | u64 pos, u64 len, |
6a026589 | 488 | struct page **pages, int num_pages, |
7ce469a5 | 489 | int page_align, int *checkeof) |
124e68e7 | 490 | { |
3d14c5d2 | 491 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); |
124e68e7 | 492 | struct ceph_inode_info *ci = ceph_inode(inode); |
7ce469a5 | 493 | u64 this_len; |
99c88e69 | 494 | loff_t i_size; |
7ce469a5 YZ |
495 | int page_idx; |
496 | int ret, read = 0; | |
124e68e7 SW |
497 | bool hit_stripe, was_short; |
498 | ||
499 | /* | |
500 | * we may need to do multiple reads. not atomic, unfortunately. | |
501 | */ | |
124e68e7 | 502 | more: |
7ce469a5 YZ |
503 | this_len = len; |
504 | page_idx = (page_align + read) >> PAGE_SHIFT; | |
3d14c5d2 | 505 | ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), |
124e68e7 | 506 | &ci->i_layout, pos, &this_len, |
7ce469a5 YZ |
507 | ci->i_truncate_seq, ci->i_truncate_size, |
508 | pages + page_idx, num_pages - page_idx, | |
509 | ((page_align + read) & ~PAGE_MASK)); | |
124e68e7 SW |
510 | if (ret == -ENOENT) |
511 | ret = 0; | |
7ce469a5 | 512 | hit_stripe = this_len < len; |
0e98728f | 513 | was_short = ret >= 0 && ret < this_len; |
7ce469a5 | 514 | dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read, |
124e68e7 SW |
515 | ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); |
516 | ||
99c88e69 | 517 | i_size = i_size_read(inode); |
02ae66d8 | 518 | if (ret >= 0) { |
99c88e69 YZ |
519 | if (was_short && (pos + ret < i_size)) { |
520 | int zlen = min(this_len - ret, i_size - pos - ret); | |
7ce469a5 | 521 | int zoff = page_align + read + ret; |
02ae66d8 | 522 | dout(" zero gap %llu to %llu\n", |
7ce469a5 | 523 | pos + ret, pos + ret + zlen); |
1487a688 YZ |
524 | ceph_zero_page_vector_range(zoff, zlen, pages); |
525 | ret += zlen; | |
124e68e7 | 526 | } |
02ae66d8 | 527 | |
7ce469a5 | 528 | read += ret; |
124e68e7 | 529 | pos += ret; |
7ce469a5 | 530 | len -= ret; |
124e68e7 | 531 | |
02ae66d8 | 532 | /* hit a stripe boundary and need to continue */ |
7ce469a5 | 533 | if (len && hit_stripe && pos < i_size) |
124e68e7 SW |
534 | goto more; |
535 | } | |
536 | ||
ee7289bf | 537 | if (read > 0) { |
02ae66d8 | 538 | ret = read; |
c3cd6283 | 539 | /* did we bounce off eof? */ |
7ce469a5 | 540 | if (pos + len > i_size) |
83701246 | 541 | *checkeof = CHECK_EOF; |
124e68e7 SW |
542 | } |
543 | ||
124e68e7 SW |
544 | dout("striped_read returns %d\n", ret); |
545 | return ret; | |
546 | } | |
547 | ||
548 | /* | |
549 | * Completely synchronous read and write methods. Direct from __user | |
550 | * buffer to osd, or directly to user pages (if O_DIRECT). | |
551 | * | |
552 | * If the read spans object boundary, just do multiple reads. | |
553 | */ | |
7ce469a5 YZ |
554 | static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to, |
555 | int *checkeof) | |
124e68e7 | 556 | { |
8eb4efb0 | 557 | struct file *file = iocb->ki_filp; |
496ad9aa | 558 | struct inode *inode = file_inode(file); |
124e68e7 | 559 | struct page **pages; |
8eb4efb0 | 560 | u64 off = iocb->ki_pos; |
7ce469a5 YZ |
561 | int num_pages; |
562 | ssize_t ret; | |
563 | size_t len = iov_iter_count(to); | |
124e68e7 | 564 | |
8eb4efb0 | 565 | dout("sync_read on file %p %llu~%u %s\n", file, off, |
566 | (unsigned)len, | |
124e68e7 | 567 | (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); |
d0d0db22 YZ |
568 | |
569 | if (!len) | |
570 | return 0; | |
e98b6fed SW |
571 | /* |
572 | * flush any page cache pages in this range. this | |
573 | * will make concurrent normal and sync io slow, | |
574 | * but it will at least behave sensibly when they are | |
575 | * in sequence. | |
576 | */ | |
8eb4efb0 | 577 | ret = filemap_write_and_wait_range(inode->i_mapping, off, |
578 | off + len); | |
29065a51 | 579 | if (ret < 0) |
8eb4efb0 | 580 | return ret; |
29065a51 | 581 | |
7ce469a5 YZ |
582 | if (unlikely(to->type & ITER_PIPE)) { |
583 | size_t page_off; | |
584 | ret = iov_iter_get_pages_alloc(to, &pages, len, | |
585 | &page_off); | |
586 | if (ret <= 0) | |
587 | return -ENOMEM; | |
588 | num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE); | |
589 | ||
590 | ret = striped_read(inode, off, ret, pages, num_pages, | |
591 | page_off, checkeof); | |
592 | if (ret > 0) { | |
593 | iov_iter_advance(to, ret); | |
594 | off += ret; | |
595 | } else { | |
596 | iov_iter_advance(to, 0); | |
597 | } | |
598 | ceph_put_page_vector(pages, num_pages, false); | |
599 | } else { | |
600 | num_pages = calc_pages_for(off, len); | |
601 | pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); | |
602 | if (IS_ERR(pages)) | |
603 | return PTR_ERR(pages); | |
604 | ||
605 | ret = striped_read(inode, off, len, pages, num_pages, | |
606 | (off & ~PAGE_MASK), checkeof); | |
607 | if (ret > 0) { | |
608 | int l, k = 0; | |
609 | size_t left = ret; | |
610 | ||
611 | while (left) { | |
612 | size_t page_off = off & ~PAGE_MASK; | |
613 | size_t copy = min_t(size_t, left, | |
614 | PAGE_SIZE - page_off); | |
615 | l = copy_page_to_iter(pages[k++], page_off, | |
616 | copy, to); | |
617 | off += l; | |
618 | left -= l; | |
619 | if (l < copy) | |
620 | break; | |
621 | } | |
8eb4efb0 | 622 | } |
7ce469a5 | 623 | ceph_release_page_vector(pages, num_pages); |
8eb4efb0 | 624 | } |
124e68e7 | 625 | |
8eb4efb0 | 626 | if (off > iocb->ki_pos) { |
627 | ret = off - iocb->ki_pos; | |
628 | iocb->ki_pos = off; | |
629 | } | |
124e68e7 | 630 | |
7ce469a5 | 631 | dout("sync_read result %zd\n", ret); |
124e68e7 SW |
632 | return ret; |
633 | } | |
634 | ||
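/*
 * Bookkeeping for asynchronous O_DIRECT I/O: ceph_aio_request ties the
 * user iocb to every OSD request issued on its behalf, and
 * ceph_aio_work lets a write that raced with a snapshot (-EOLDSNAPC)
 * be rebuilt and resubmitted from a workqueue.
 */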
c8fe9b17 YZ |
635 | struct ceph_aio_request { |
636 | struct kiocb *iocb; | |
637 | size_t total_len; | |
638 | int write; | |
639 | int error; | |
640 | struct list_head osd_reqs; | |
641 | unsigned num_reqs; | |
642 | atomic_t pending_reqs; | |
5be0389d | 643 | struct timespec mtime; |
c8fe9b17 YZ |
644 | struct ceph_cap_flush *prealloc_cf; |
645 | }; | |
646 | ||
5be0389d YZ |
647 | struct ceph_aio_work { |
648 | struct work_struct work; | |
649 | struct ceph_osd_request *req; | |
650 | }; | |
651 | ||
652 | static void ceph_aio_retry_work(struct work_struct *work); | |
653 | ||
c8fe9b17 YZ |
654 | static void ceph_aio_complete(struct inode *inode, |
655 | struct ceph_aio_request *aio_req) | |
656 | { | |
657 | struct ceph_inode_info *ci = ceph_inode(inode); | |
658 | int ret; | |
659 | ||
660 | if (!atomic_dec_and_test(&aio_req->pending_reqs)) | |
661 | return; | |
662 | ||
663 | ret = aio_req->error; | |
664 | if (!ret) | |
665 | ret = aio_req->total_len; | |
666 | ||
667 | dout("ceph_aio_complete %p rc %d\n", inode, ret); | |
668 | ||
669 | if (ret >= 0 && aio_req->write) { | |
670 | int dirty; | |
671 | ||
672 | loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len; | |
673 | if (endoff > i_size_read(inode)) { | |
674 | if (ceph_inode_set_size(inode, endoff)) | |
675 | ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); | |
676 | } | |
677 | ||
678 | spin_lock(&ci->i_ceph_lock); | |
679 | ci->i_inline_version = CEPH_INLINE_NONE; | |
680 | dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, | |
681 | &aio_req->prealloc_cf); | |
682 | spin_unlock(&ci->i_ceph_lock); | |
683 | if (dirty) | |
684 | __mark_inode_dirty(inode, dirty); | |
685 | ||
686 | } | |
687 | ||
688 | ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR : | |
689 | CEPH_CAP_FILE_RD)); | |
690 | ||
691 | aio_req->iocb->ki_complete(aio_req->iocb, ret, 0); | |
692 | ||
693 | ceph_free_cap_flush(aio_req->prealloc_cf); | |
694 | kfree(aio_req); | |
695 | } | |
696 | ||
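/*
 * Completion callback for one OSD request belonging to an async
 * O_DIRECT iocb: zero whatever part of a short read the OSD did not
 * fill, release the pinned pages, record the first error, and let
 * ceph_aio_complete() finish the iocb once all requests are accounted
 * for.  A write that returns -EOLDSNAPC is handed off to
 * ceph_aio_retry_work() instead.
 */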
85e084fe | 697 | static void ceph_aio_complete_req(struct ceph_osd_request *req) |
c8fe9b17 YZ |
698 | { |
699 | int rc = req->r_result; | |
700 | struct inode *inode = req->r_inode; | |
701 | struct ceph_aio_request *aio_req = req->r_priv; | |
702 | struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); | |
703 | int num_pages = calc_pages_for((u64)osd_data->alignment, | |
704 | osd_data->length); | |
705 | ||
706 | dout("ceph_aio_complete_req %p rc %d bytes %llu\n", | |
707 | inode, rc, osd_data->length); | |
708 | ||
709 | if (rc == -EOLDSNAPC) { | |
5be0389d YZ |
710 | struct ceph_aio_work *aio_work; |
711 | BUG_ON(!aio_req->write); | |
712 | ||
713 | aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS); | |
714 | if (aio_work) { | |
715 | INIT_WORK(&aio_work->work, ceph_aio_retry_work); | |
716 | aio_work->req = req; | |
717 | queue_work(ceph_inode_to_client(inode)->wb_wq, | |
718 | &aio_work->work); | |
719 | return; | |
720 | } | |
721 | rc = -ENOMEM; | |
722 | } else if (!aio_req->write) { | |
c8fe9b17 YZ |
723 | if (rc == -ENOENT) |
724 | rc = 0; | |
725 | if (rc >= 0 && osd_data->length > rc) { | |
726 | int zoff = osd_data->alignment + rc; | |
727 | int zlen = osd_data->length - rc; | |
728 | /* | |
729 | * If read is satisfied by single OSD request, | |
730 | * it can pass EOF. Otherwise read is within | |
731 | * i_size. | |
732 | */ | |
733 | if (aio_req->num_reqs == 1) { | |
734 | loff_t i_size = i_size_read(inode); | |
735 | loff_t endoff = aio_req->iocb->ki_pos + rc; | |
736 | if (endoff < i_size) | |
737 | zlen = min_t(size_t, zlen, | |
738 | i_size - endoff); | |
739 | aio_req->total_len = rc + zlen; | |
740 | } | |
741 | ||
742 | if (zlen > 0) | |
743 | ceph_zero_page_vector_range(zoff, zlen, | |
744 | osd_data->pages); | |
745 | } | |
746 | } | |
747 | ||
a22bd5ff | 748 | ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write); |
c8fe9b17 YZ |
749 | ceph_osdc_put_request(req); |
750 | ||
751 | if (rc < 0) | |
752 | cmpxchg(&aio_req->error, 0, rc); | |
753 | ||
754 | ceph_aio_complete(inode, aio_req); | |
755 | return; | |
756 | } | |
757 | ||
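/*
 * Worker for the -EOLDSNAPC case above: rebuild the write as a new OSD
 * request against the most recent snap context and resubmit it; if
 * that fails, complete the original aio request with the error.
 */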
5be0389d YZ |
758 | static void ceph_aio_retry_work(struct work_struct *work) |
759 | { | |
760 | struct ceph_aio_work *aio_work = | |
761 | container_of(work, struct ceph_aio_work, work); | |
762 | struct ceph_osd_request *orig_req = aio_work->req; | |
763 | struct ceph_aio_request *aio_req = orig_req->r_priv; | |
764 | struct inode *inode = orig_req->r_inode; | |
765 | struct ceph_inode_info *ci = ceph_inode(inode); | |
766 | struct ceph_snap_context *snapc; | |
767 | struct ceph_osd_request *req; | |
768 | int ret; | |
769 | ||
770 | spin_lock(&ci->i_ceph_lock); | |
771 | if (__ceph_have_pending_cap_snap(ci)) { | |
772 | struct ceph_cap_snap *capsnap = | |
773 | list_last_entry(&ci->i_cap_snaps, | |
774 | struct ceph_cap_snap, | |
775 | ci_item); | |
776 | snapc = ceph_get_snap_context(capsnap->context); | |
777 | } else { | |
778 | BUG_ON(!ci->i_head_snapc); | |
779 | snapc = ceph_get_snap_context(ci->i_head_snapc); | |
780 | } | |
781 | spin_unlock(&ci->i_ceph_lock); | |
782 | ||
783 | req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2, | |
784 | false, GFP_NOFS); | |
1418bf07 DC |
785 | if (!req) { |
786 | ret = -ENOMEM; | |
5be0389d YZ |
787 | req = orig_req; |
788 | goto out; | |
789 | } | |
790 | ||
54ea0046 | 791 | req->r_flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_WRITE; |
63244fa1 | 792 | ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc); |
d30291b9 | 793 | ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid); |
5be0389d | 794 | |
13d1ad16 ID |
795 | ret = ceph_osdc_alloc_messages(req, GFP_NOFS); |
796 | if (ret) { | |
797 | ceph_osdc_put_request(req); | |
798 | req = orig_req; | |
799 | goto out; | |
800 | } | |
5be0389d YZ |
801 | |
802 | req->r_ops[0] = orig_req->r_ops[0]; | |
803 | osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); | |
804 | ||
bb873b53 ID |
805 | req->r_mtime = aio_req->mtime; |
806 | req->r_data_offset = req->r_ops[0].extent.offset; | |
5be0389d | 807 | |
5be0389d YZ |
808 | ceph_osdc_put_request(orig_req); |
809 | ||
810 | req->r_callback = ceph_aio_complete_req; | |
811 | req->r_inode = inode; | |
812 | req->r_priv = aio_req; | |
a1f4020a | 813 | req->r_abort_on_full = true; |
5be0389d YZ |
814 | |
815 | ret = ceph_osdc_start_request(req->r_osdc, req, false); | |
816 | out: | |
817 | if (ret < 0) { | |
5be0389d | 818 | req->r_result = ret; |
85e084fe | 819 | ceph_aio_complete_req(req); |
5be0389d YZ |
820 | } |
821 | ||
db6aed70 | 822 | ceph_put_snap_context(snapc); |
5be0389d YZ |
823 | kfree(aio_work); |
824 | } | |
825 | ||
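/*
 * Common O_DIRECT read/write path.  The iov_iter is carved into
 * roughly one OSD request per object (dio_get_pagev_size() and
 * dio_get_pages_alloc() pin the corresponding user pages).  Synchronous
 * callers wait for each request in turn; AIO callers queue all the
 * requests and return -EIOCBQUEUED, with ceph_aio_complete_req()
 * finishing the iocb later.
 */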
e8344e66 | 826 | static ssize_t |
c8fe9b17 YZ |
827 | ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, |
828 | struct ceph_snap_context *snapc, | |
829 | struct ceph_cap_flush **pcf) | |
124e68e7 | 830 | { |
e8344e66 | 831 | struct file *file = iocb->ki_filp; |
496ad9aa | 832 | struct inode *inode = file_inode(file); |
124e68e7 | 833 | struct ceph_inode_info *ci = ceph_inode(inode); |
3d14c5d2 | 834 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); |
acead002 | 835 | struct ceph_vino vino; |
124e68e7 SW |
836 | struct ceph_osd_request *req; |
837 | struct page **pages; | |
c8fe9b17 YZ |
838 | struct ceph_aio_request *aio_req = NULL; |
839 | int num_pages = 0; | |
124e68e7 | 840 | int flags; |
124e68e7 | 841 | int ret; |
c2050a45 | 842 | struct timespec mtime = current_time(inode); |
c8fe9b17 YZ |
843 | size_t count = iov_iter_count(iter); |
844 | loff_t pos = iocb->ki_pos; | |
845 | bool write = iov_iter_rw(iter) == WRITE; | |
124e68e7 | 846 | |
c8fe9b17 | 847 | if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP) |
124e68e7 SW |
848 | return -EROFS; |
849 | ||
c8fe9b17 YZ |
850 | dout("sync_direct_read_write (%s) on file %p %lld~%u\n", |
851 | (write ? "write" : "read"), file, pos, (unsigned)count); | |
124e68e7 | 852 | |
e8344e66 | 853 | ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); |
29065a51 YS |
854 | if (ret < 0) |
855 | return ret; | |
856 | ||
c8fe9b17 | 857 | if (write) { |
5d7eb1a3 | 858 | int ret2 = invalidate_inode_pages2_range(inode->i_mapping, |
09cbfeaf KS |
859 | pos >> PAGE_SHIFT, |
860 | (pos + count) >> PAGE_SHIFT); | |
5d7eb1a3 | 861 | if (ret2 < 0) |
a380a031 | 862 | dout("invalidate_inode_pages2_range returned %d\n", ret2); |
29065a51 | 863 | |
54ea0046 | 864 | flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_WRITE; |
c8fe9b17 YZ |
865 | } else { |
866 | flags = CEPH_OSD_FLAG_READ; | |
867 | } | |
124e68e7 | 868 | |
c8fe9b17 YZ |
869 | while (iov_iter_count(iter) > 0) { |
870 | u64 size = dio_get_pagev_size(iter); | |
871 | size_t start = 0; | |
872 | ssize_t len; | |
e8344e66 | 873 | |
e8344e66 | 874 | vino = ceph_vino(inode); |
875 | req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, | |
c8fe9b17 YZ |
876 | vino, pos, &size, 0, |
877 | /*include a 'startsync' command*/ | |
878 | write ? 2 : 1, | |
879 | write ? CEPH_OSD_OP_WRITE : | |
880 | CEPH_OSD_OP_READ, | |
881 | flags, snapc, | |
e8344e66 | 882 | ci->i_truncate_seq, |
883 | ci->i_truncate_size, | |
884 | false); | |
885 | if (IS_ERR(req)) { | |
886 | ret = PTR_ERR(req); | |
eab87235 | 887 | break; |
e8344e66 | 888 | } |
124e68e7 | 889 | |
c8fe9b17 YZ |
890 | len = size; |
891 | pages = dio_get_pages_alloc(iter, len, &start, &num_pages); | |
b5b98989 | 892 | if (IS_ERR(pages)) { |
64c31311 | 893 | ceph_osdc_put_request(req); |
b5b98989 | 894 | ret = PTR_ERR(pages); |
64c31311 | 895 | break; |
124e68e7 SW |
896 | } |
897 | ||
898 | /* | |
c8fe9b17 YZ |
899 | * To simplify error handling, allow AIO when IO within i_size |
900 | * or IO can be satisfied by single OSD request. | |
124e68e7 | 901 | */ |
c8fe9b17 YZ |
902 | if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) && |
903 | (len == count || pos + count <= i_size_read(inode))) { | |
904 | aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL); | |
905 | if (aio_req) { | |
906 | aio_req->iocb = iocb; | |
907 | aio_req->write = write; | |
908 | INIT_LIST_HEAD(&aio_req->osd_reqs); | |
909 | if (write) { | |
5be0389d | 910 | aio_req->mtime = mtime; |
c8fe9b17 YZ |
911 | swap(aio_req->prealloc_cf, *pcf); |
912 | } | |
913 | } | |
914 | /* ignore error */ | |
915 | } | |
916 | ||
917 | if (write) { | |
918 | /* | |
919 | * throw out any page cache pages in this range. this | |
920 | * may block. | |
921 | */ | |
922 | truncate_inode_pages_range(inode->i_mapping, pos, | |
09cbfeaf | 923 | (pos+len) | (PAGE_SIZE - 1)); |
c8fe9b17 YZ |
924 | |
925 | osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); | |
bb873b53 | 926 | req->r_mtime = mtime; |
c8fe9b17 YZ |
927 | } |
928 | ||
c8fe9b17 YZ |
929 | osd_req_op_extent_osd_data_pages(req, 0, pages, len, start, |
930 | false, false); | |
e8344e66 | 931 | |
c8fe9b17 YZ |
932 | if (aio_req) { |
933 | aio_req->total_len += len; | |
934 | aio_req->num_reqs++; | |
935 | atomic_inc(&aio_req->pending_reqs); | |
936 | ||
937 | req->r_callback = ceph_aio_complete_req; | |
938 | req->r_inode = inode; | |
939 | req->r_priv = aio_req; | |
940 | list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs); | |
941 | ||
942 | pos += len; | |
943 | iov_iter_advance(iter, len); | |
944 | continue; | |
945 | } | |
946 | ||
947 | ret = ceph_osdc_start_request(req->r_osdc, req, false); | |
e8344e66 | 948 | if (!ret) |
949 | ret = ceph_osdc_wait_request(&fsc->client->osdc, req); | |
950 | ||
c8fe9b17 YZ |
951 | size = i_size_read(inode); |
952 | if (!write) { | |
953 | if (ret == -ENOENT) | |
954 | ret = 0; | |
955 | if (ret >= 0 && ret < len && pos + ret < size) { | |
956 | int zlen = min_t(size_t, len - ret, | |
957 | size - pos - ret); | |
958 | ceph_zero_page_vector_range(start + ret, zlen, | |
959 | pages); | |
960 | ret += zlen; | |
961 | } | |
962 | if (ret >= 0) | |
963 | len = ret; | |
964 | } | |
965 | ||
a22bd5ff | 966 | ceph_put_page_vector(pages, num_pages, !write); |
e8344e66 | 967 | |
e8344e66 | 968 | ceph_osdc_put_request(req); |
c8fe9b17 | 969 | if (ret < 0) |
e8344e66 | 970 | break; |
64c31311 | 971 | |
c8fe9b17 YZ |
972 | pos += len; |
973 | iov_iter_advance(iter, len); | |
974 | ||
975 | if (!write && pos >= size) | |
e8344e66 | 976 | break; |
64c31311 | 977 | |
c8fe9b17 YZ |
978 | if (write && pos > size) { |
979 | if (ceph_inode_set_size(inode, pos)) | |
64c31311 AV |
980 | ceph_check_caps(ceph_inode(inode), |
981 | CHECK_CAPS_AUTHONLY, | |
982 | NULL); | |
983 | } | |
e8344e66 | 984 | } |
985 | ||
c8fe9b17 | 986 | if (aio_req) { |
fc8c3892 YZ |
987 | LIST_HEAD(osd_reqs); |
988 | ||
c8fe9b17 YZ |
989 | if (aio_req->num_reqs == 0) { |
990 | kfree(aio_req); | |
991 | return ret; | |
992 | } | |
993 | ||
994 | ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR : | |
995 | CEPH_CAP_FILE_RD); | |
996 | ||
fc8c3892 YZ |
997 | list_splice(&aio_req->osd_reqs, &osd_reqs); |
998 | while (!list_empty(&osd_reqs)) { | |
999 | req = list_first_entry(&osd_reqs, | |
c8fe9b17 YZ |
1000 | struct ceph_osd_request, |
1001 | r_unsafe_item); | |
1002 | list_del_init(&req->r_unsafe_item); | |
1003 | if (ret >= 0) | |
1004 | ret = ceph_osdc_start_request(req->r_osdc, | |
1005 | req, false); | |
1006 | if (ret < 0) { | |
1007 | req->r_result = ret; | |
85e084fe | 1008 | ceph_aio_complete_req(req); |
c8fe9b17 YZ |
1009 | } |
1010 | } | |
1011 | return -EIOCBQUEUED; | |
1012 | } | |
1013 | ||
1014 | if (ret != -EOLDSNAPC && pos > iocb->ki_pos) { | |
1015 | ret = pos - iocb->ki_pos; | |
e8344e66 | 1016 | iocb->ki_pos = pos; |
e8344e66 | 1017 | } |
1018 | return ret; | |
1019 | } | |
1020 | ||
e8344e66 | 1021 | /* |
1022 | * Synchronous write, straight from __user pointer or user pages. | |
1023 | * | |
1024 | * If write spans object boundary, just do multiple writes. (For a | |
1025 | * correct atomic write, we should e.g. take write locks on all | |
1026 | * objects, rollback on failure, etc.) | |
1027 | */ | |
06fee30f | 1028 | static ssize_t |
5dda377c YZ |
1029 | ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, |
1030 | struct ceph_snap_context *snapc) | |
e8344e66 | 1031 | { |
1032 | struct file *file = iocb->ki_filp; | |
1033 | struct inode *inode = file_inode(file); | |
1034 | struct ceph_inode_info *ci = ceph_inode(inode); | |
1035 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); | |
e8344e66 | 1036 | struct ceph_vino vino; |
1037 | struct ceph_osd_request *req; | |
1038 | struct page **pages; | |
1039 | u64 len; | |
1040 | int num_pages; | |
1041 | int written = 0; | |
1042 | int flags; | |
1043 | int check_caps = 0; | |
1044 | int ret; | |
c2050a45 | 1045 | struct timespec mtime = current_time(inode); |
4908b822 | 1046 | size_t count = iov_iter_count(from); |
e8344e66 | 1047 | |
1048 | if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) | |
1049 | return -EROFS; | |
1050 | ||
1051 | dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count); | |
1052 | ||
1053 | ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); | |
1054 | if (ret < 0) | |
1055 | return ret; | |
1056 | ||
1057 | ret = invalidate_inode_pages2_range(inode->i_mapping, | |
09cbfeaf KS |
1058 | pos >> PAGE_SHIFT, |
1059 | (pos + count) >> PAGE_SHIFT); | |
e8344e66 | 1060 | if (ret < 0) |
1061 | dout("invalidate_inode_pages2_range returned %d\n", ret); | |
1062 | ||
54ea0046 | 1063 | flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_WRITE; |
e8344e66 | 1064 | |
4908b822 | 1065 | while ((len = iov_iter_count(from)) > 0) { |
e8344e66 | 1066 | size_t left; |
1067 | int n; | |
1068 | ||
e8344e66 | 1069 | vino = ceph_vino(inode); |
1070 | req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, | |
715e4cd4 | 1071 | vino, pos, &len, 0, 1, |
e8344e66 | 1072 | CEPH_OSD_OP_WRITE, flags, snapc, |
1073 | ci->i_truncate_seq, | |
1074 | ci->i_truncate_size, | |
1075 | false); | |
1076 | if (IS_ERR(req)) { | |
1077 | ret = PTR_ERR(req); | |
eab87235 | 1078 | break; |
e8344e66 | 1079 | } |
1080 | ||
1081 | /* | |
1082 | * write from beginning of first page, | |
1083 | * regardless of io alignment | |
1084 | */ | |
09cbfeaf | 1085 | num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
e8344e66 | 1086 | |
687265e5 | 1087 | pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); |
124e68e7 SW |
1088 | if (IS_ERR(pages)) { |
1089 | ret = PTR_ERR(pages); | |
1090 | goto out; | |
1091 | } | |
e8344e66 | 1092 | |
1093 | left = len; | |
1094 | for (n = 0; n < num_pages; n++) { | |
125d725c | 1095 | size_t plen = min_t(size_t, left, PAGE_SIZE); |
4908b822 | 1096 | ret = copy_page_from_iter(pages[n], 0, plen, from); |
e8344e66 | 1097 | if (ret != plen) { |
1098 | ret = -EFAULT; | |
1099 | break; | |
1100 | } | |
1101 | left -= ret; | |
e8344e66 | 1102 | } |
1103 | ||
124e68e7 SW |
1104 | if (ret < 0) { |
1105 | ceph_release_page_vector(pages, num_pages); | |
1106 | goto out; | |
1107 | } | |
1108 | ||
e8344e66 | 1109 | req->r_inode = inode; |
124e68e7 | 1110 | |
e8344e66 | 1111 | osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, |
1112 | false, true); | |
02ee07d3 | 1113 | |
bb873b53 | 1114 | req->r_mtime = mtime; |
e8344e66 | 1115 | ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); |
1116 | if (!ret) | |
1117 | ret = ceph_osdc_wait_request(&fsc->client->osdc, req); | |
124e68e7 SW |
1118 | |
1119 | out: | |
e8344e66 | 1120 | ceph_osdc_put_request(req); |
26544c62 JL |
1121 | if (ret != 0) { |
1122 | ceph_set_error_write(ci); | |
e8344e66 | 1123 | break; |
26544c62 JL |
1124 | } |
1125 | ||
1126 | ceph_clear_error_write(ci); | |
1127 | pos += len; | |
1128 | written += len; | |
1129 | if (pos > i_size_read(inode)) { | |
1130 | check_caps = ceph_inode_set_size(inode, pos); | |
1131 | if (check_caps) | |
1132 | ceph_check_caps(ceph_inode(inode), | |
1133 | CHECK_CAPS_AUTHONLY, | |
1134 | NULL); | |
1135 | } | |
1136 | ||
e8344e66 | 1137 | } |
124e68e7 | 1138 | |
e8344e66 | 1139 | if (ret != -EOLDSNAPC && written > 0) { |
124e68e7 | 1140 | ret = written; |
e8344e66 | 1141 | iocb->ki_pos = pos; |
124e68e7 SW |
1142 | } |
1143 | return ret; | |
1144 | } | |
1145 | ||
1146 | /* | |
1147 | * Wrap generic_file_aio_read with checks for cap bits on the inode. | |
1148 | * Atomically grab references, so that those bits are not released | |
1149 | * back to the MDS mid-read. | |
1150 | * | |
1151 | * Hmm, the sync read case isn't actually async... should it be? | |
1152 | */ | |
3644424d | 1153 | static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) |
124e68e7 SW |
1154 | { |
1155 | struct file *filp = iocb->ki_filp; | |
2962507c | 1156 | struct ceph_file_info *fi = filp->private_data; |
66ee59af | 1157 | size_t len = iov_iter_count(to); |
496ad9aa | 1158 | struct inode *inode = file_inode(filp); |
124e68e7 | 1159 | struct ceph_inode_info *ci = ceph_inode(inode); |
3738daa6 | 1160 | struct page *pinned_page = NULL; |
124e68e7 | 1161 | ssize_t ret; |
2962507c | 1162 | int want, got = 0; |
83701246 | 1163 | int retry_op = 0, read = 0; |
124e68e7 | 1164 | |
6a026589 | 1165 | again: |
8eb4efb0 | 1166 | dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", |
1167 | inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); | |
1168 | ||
2962507c SW |
1169 | if (fi->fmode & CEPH_FILE_MODE_LAZY) |
1170 | want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; | |
1171 | else | |
1172 | want = CEPH_CAP_FILE_CACHE; | |
3738daa6 | 1173 | ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page); |
124e68e7 | 1174 | if (ret < 0) |
8eb4efb0 | 1175 | return ret; |
124e68e7 | 1176 | |
2962507c | 1177 | if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || |
2ba48ce5 | 1178 | (iocb->ki_flags & IOCB_DIRECT) || |
8eb4efb0 | 1179 | (fi->flags & CEPH_F_SYNC)) { |
8eb4efb0 | 1180 | |
1181 | dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", | |
1182 | inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, | |
1183 | ceph_cap_string(got)); | |
1184 | ||
83701246 | 1185 | if (ci->i_inline_version == CEPH_INLINE_NONE) { |
c8fe9b17 YZ |
1186 | if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) { |
1187 | ret = ceph_direct_read_write(iocb, to, | |
1188 | NULL, NULL); | |
1189 | if (ret >= 0 && ret < len) | |
1190 | retry_op = CHECK_EOF; | |
1191 | } else { | |
1192 | ret = ceph_sync_read(iocb, to, &retry_op); | |
1193 | } | |
83701246 YZ |
1194 | } else { |
1195 | retry_op = READ_INLINE; | |
1196 | } | |
8eb4efb0 | 1197 | } else { |
8eb4efb0 | 1198 | dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n", |
3644424d | 1199 | inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, |
8eb4efb0 | 1200 | ceph_cap_string(got)); |
2b1ac852 | 1201 | current->journal_info = filp; |
3644424d | 1202 | ret = generic_file_read_iter(iocb, to); |
2b1ac852 | 1203 | current->journal_info = NULL; |
8eb4efb0 | 1204 | } |
124e68e7 SW |
1205 | dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", |
1206 | inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); | |
3738daa6 | 1207 | if (pinned_page) { |
09cbfeaf | 1208 | put_page(pinned_page); |
3738daa6 YZ |
1209 | pinned_page = NULL; |
1210 | } | |
124e68e7 | 1211 | ceph_put_cap_refs(ci, got); |
c8fe9b17 | 1212 | if (retry_op > HAVE_RETRIED && ret >= 0) { |
83701246 YZ |
1213 | int statret; |
1214 | struct page *page = NULL; | |
1215 | loff_t i_size; | |
1216 | if (retry_op == READ_INLINE) { | |
687265e5 | 1217 | page = __page_cache_alloc(GFP_KERNEL); |
83701246 YZ |
1218 | if (!page) |
1219 | return -ENOMEM; | |
1220 | } | |
6a026589 | 1221 | |
83701246 YZ |
1222 | statret = __ceph_do_getattr(inode, page, |
1223 | CEPH_STAT_CAP_INLINE_DATA, !!page); | |
1224 | if (statret < 0) { | |
0d7718f6 NB |
1225 | if (page) |
1226 | __free_page(page); | |
83701246 YZ |
1227 | if (statret == -ENODATA) { |
1228 | BUG_ON(retry_op != READ_INLINE); | |
1229 | goto again; | |
1230 | } | |
1231 | return statret; | |
1232 | } | |
6a026589 | 1233 | |
83701246 YZ |
1234 | i_size = i_size_read(inode); |
1235 | if (retry_op == READ_INLINE) { | |
fcc02d2a YZ |
1236 | BUG_ON(ret > 0 || read > 0); |
1237 | if (iocb->ki_pos < i_size && | |
09cbfeaf | 1238 | iocb->ki_pos < PAGE_SIZE) { |
83701246 YZ |
1239 | loff_t end = min_t(loff_t, i_size, |
1240 | iocb->ki_pos + len); | |
09cbfeaf | 1241 | end = min_t(loff_t, end, PAGE_SIZE); |
83701246 YZ |
1242 | if (statret < end) |
1243 | zero_user_segment(page, statret, end); | |
1244 | ret = copy_page_to_iter(page, | |
1245 | iocb->ki_pos & ~PAGE_MASK, | |
1246 | end - iocb->ki_pos, to); | |
1247 | iocb->ki_pos += ret; | |
fcc02d2a YZ |
1248 | read += ret; |
1249 | } | |
1250 | if (iocb->ki_pos < i_size && read < len) { | |
1251 | size_t zlen = min_t(size_t, len - read, | |
1252 | i_size - iocb->ki_pos); | |
1253 | ret = iov_iter_zero(zlen, to); | |
1254 | iocb->ki_pos += ret; | |
1255 | read += ret; | |
83701246 YZ |
1256 | } |
1257 | __free_pages(page, 0); | |
fcc02d2a | 1258 | return read; |
83701246 | 1259 | } |
6a026589 SW |
1260 | |
1261 | /* hit EOF or hole? */ | |
83701246 | 1262 | if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && |
fcc02d2a | 1263 | ret < len) { |
8eb4efb0 | 1264 | dout("sync_read hit hole, ppos %lld < size %lld" |
99c88e69 | 1265 | ", reading more\n", iocb->ki_pos, i_size); |
8eb4efb0 | 1266 | |
6a026589 | 1267 | read += ret; |
6a026589 | 1268 | len -= ret; |
c8fe9b17 | 1269 | retry_op = HAVE_RETRIED; |
6a026589 SW |
1270 | goto again; |
1271 | } | |
1272 | } | |
8eb4efb0 | 1273 | |
6a026589 SW |
1274 | if (ret >= 0) |
1275 | ret += read; | |
1276 | ||
124e68e7 SW |
1277 | return ret; |
1278 | } | |
1279 | ||
1280 | /* | |
1281 | * Take cap references to avoid releasing caps to MDS mid-write. | |
1282 | * | |
1283 | * If we are synchronous, and write with an old snap context, the OSD | |
1284 | * may return EOLDSNAPC. In that case, retry the write.. _after_ | |
1285 | * dropping our cap refs and allowing the pending snap to logically | |
1286 | * complete _before_ this write occurs. | |
1287 | * | |
1288 | * If we are near ENOSPC, write synchronously. | |
1289 | */ | |
4908b822 | 1290 | static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) |
124e68e7 SW |
1291 | { |
1292 | struct file *file = iocb->ki_filp; | |
33caad32 | 1293 | struct ceph_file_info *fi = file->private_data; |
496ad9aa | 1294 | struct inode *inode = file_inode(file); |
124e68e7 | 1295 | struct ceph_inode_info *ci = ceph_inode(inode); |
3d14c5d2 YS |
1296 | struct ceph_osd_client *osdc = |
1297 | &ceph_sb_to_client(inode->i_sb)->client->osdc; | |
f66fd9f0 | 1298 | struct ceph_cap_flush *prealloc_cf; |
3309dd04 | 1299 | ssize_t count, written = 0; |
03d254ed | 1300 | int err, want, got; |
3309dd04 | 1301 | loff_t pos; |
124e68e7 SW |
1302 | |
1303 | if (ceph_snap(inode) != CEPH_NOSNAP) | |
1304 | return -EROFS; | |
1305 | ||
f66fd9f0 YZ |
1306 | prealloc_cf = ceph_alloc_cap_flush(); |
1307 | if (!prealloc_cf) | |
1308 | return -ENOMEM; | |
1309 | ||
5955102c | 1310 | inode_lock(inode); |
03d254ed | 1311 | |
03d254ed | 1312 | /* We can write back this queue in page reclaim */ |
de1414a6 | 1313 | current->backing_dev_info = inode_to_bdi(inode); |
03d254ed | 1314 | |
55b0b31c YZ |
1315 | if (iocb->ki_flags & IOCB_APPEND) { |
1316 | err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); | |
1317 | if (err < 0) | |
1318 | goto out; | |
1319 | } | |
1320 | ||
3309dd04 AV |
1321 | err = generic_write_checks(iocb, from); |
1322 | if (err <= 0) | |
03d254ed YZ |
1323 | goto out; |
1324 | ||
3309dd04 AV |
1325 | pos = iocb->ki_pos; |
1326 | count = iov_iter_count(from); | |
5fa8e0a1 | 1327 | err = file_remove_privs(file); |
03d254ed YZ |
1328 | if (err) |
1329 | goto out; | |
1330 | ||
1331 | err = file_update_time(file); | |
1332 | if (err) | |
1333 | goto out; | |
1334 | ||
28127bdd YZ |
1335 | if (ci->i_inline_version != CEPH_INLINE_NONE) { |
1336 | err = ceph_uninline_data(file, NULL); | |
1337 | if (err < 0) | |
1338 | goto out; | |
1339 | } | |
1340 | ||
124e68e7 | 1341 | retry_snap: |
26544c62 | 1342 | /* FIXME: not complete since it doesn't account for being at quota */ |
b7ec35b3 | 1343 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) { |
03d254ed | 1344 | err = -ENOSPC; |
6070e0c1 YZ |
1345 | goto out; |
1346 | } | |
03d254ed | 1347 | |
ac7f29bf | 1348 | dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n", |
99c88e69 | 1349 | inode, ceph_vinop(inode), pos, count, i_size_read(inode)); |
7971bd92 SW |
1350 | if (fi->fmode & CEPH_FILE_MODE_LAZY) |
1351 | want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; | |
1352 | else | |
1353 | want = CEPH_CAP_FILE_BUFFER; | |
03d254ed | 1354 | got = 0; |
3738daa6 YZ |
1355 | err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count, |
1356 | &got, NULL); | |
03d254ed | 1357 | if (err < 0) |
37505d57 | 1358 | goto out; |
124e68e7 | 1359 | |
ac7f29bf | 1360 | dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", |
03d254ed | 1361 | inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); |
7971bd92 SW |
1362 | |
1363 | if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || | |
26544c62 JL |
1364 | (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) || |
1365 | (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) { | |
5dda377c | 1366 | struct ceph_snap_context *snapc; |
4908b822 | 1367 | struct iov_iter data; |
5955102c | 1368 | inode_unlock(inode); |
5dda377c YZ |
1369 | |
1370 | spin_lock(&ci->i_ceph_lock); | |
1371 | if (__ceph_have_pending_cap_snap(ci)) { | |
1372 | struct ceph_cap_snap *capsnap = | |
1373 | list_last_entry(&ci->i_cap_snaps, | |
1374 | struct ceph_cap_snap, | |
1375 | ci_item); | |
1376 | snapc = ceph_get_snap_context(capsnap->context); | |
1377 | } else { | |
1378 | BUG_ON(!ci->i_head_snapc); | |
1379 | snapc = ceph_get_snap_context(ci->i_head_snapc); | |
1380 | } | |
1381 | spin_unlock(&ci->i_ceph_lock); | |
1382 | ||
4908b822 AV |
1383 | /* we might need to revert back to that point */ |
1384 | data = *from; | |
2ba48ce5 | 1385 | if (iocb->ki_flags & IOCB_DIRECT) |
c8fe9b17 YZ |
1386 | written = ceph_direct_read_write(iocb, &data, snapc, |
1387 | &prealloc_cf); | |
e8344e66 | 1388 | else |
5dda377c | 1389 | written = ceph_sync_write(iocb, &data, pos, snapc); |
0e5dd45c | 1390 | if (written == -EOLDSNAPC) { |
1391 | dout("aio_write %p %llx.%llx %llu~%u" | |
1392 | "got EOLDSNAPC, retrying\n", | |
1393 | inode, ceph_vinop(inode), | |
4908b822 | 1394 | pos, (unsigned)count); |
5955102c | 1395 | inode_lock(inode); |
0e5dd45c | 1396 | goto retry_snap; |
1397 | } | |
4908b822 AV |
1398 | if (written > 0) |
1399 | iov_iter_advance(from, written); | |
5dda377c | 1400 | ceph_put_snap_context(snapc); |
7971bd92 | 1401 | } else { |
b0d7c223 YZ |
1402 | /* |
1403 | * No need to acquire the i_truncate_mutex. Because | |
1404 | * the MDS revokes Fwb caps before sending truncate | |
1405 | * message to us. We can't get Fwb cap while there | |
1406 | * are pending vmtruncate. So write and vmtruncate | |
1407 | * can not run at the same time | |
1408 | */ | |
4908b822 | 1409 | written = generic_perform_write(file, from, pos); |
aec605f4 AV |
1410 | if (likely(written >= 0)) |
1411 | iocb->ki_pos = pos + written; | |
5955102c | 1412 | inode_unlock(inode); |
7971bd92 | 1413 | } |
d8de9ab6 | 1414 | |
03d254ed | 1415 | if (written >= 0) { |
fca65b4a | 1416 | int dirty; |
be655596 | 1417 | spin_lock(&ci->i_ceph_lock); |
28127bdd | 1418 | ci->i_inline_version = CEPH_INLINE_NONE; |
f66fd9f0 YZ |
1419 | dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, |
1420 | &prealloc_cf); | |
be655596 | 1421 | spin_unlock(&ci->i_ceph_lock); |
fca65b4a SW |
1422 | if (dirty) |
1423 | __mark_inode_dirty(inode, dirty); | |
124e68e7 | 1424 | } |
7971bd92 | 1425 | |
124e68e7 | 1426 | dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", |
4908b822 | 1427 | inode, ceph_vinop(inode), pos, (unsigned)count, |
7971bd92 | 1428 | ceph_cap_string(got)); |
124e68e7 | 1429 | ceph_put_cap_refs(ci, got); |
7971bd92 | 1430 | |
6aa657c8 | 1431 | if (written >= 0) { |
b7ec35b3 | 1432 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL)) |
6aa657c8 CH |
1433 | iocb->ki_flags |= IOCB_DSYNC; |
1434 | ||
1435 | written = generic_write_sync(iocb, written); | |
6070e0c1 | 1436 | } |
03d254ed | 1437 | |
2f75e9e1 SW |
1438 | goto out_unlocked; |
1439 | ||
03d254ed | 1440 | out: |
5955102c | 1441 | inode_unlock(inode); |
2f75e9e1 | 1442 | out_unlocked: |
f66fd9f0 | 1443 | ceph_free_cap_flush(prealloc_cf); |
03d254ed | 1444 | current->backing_dev_info = NULL; |
03d254ed | 1445 | return written ? written : err; |
124e68e7 SW |
1446 | } |
1447 | ||
1448 | /* | |
1449 | * llseek. be sure to verify file size on SEEK_END. | |
1450 | */ | |
965c8e59 | 1451 | static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) |
124e68e7 SW |
1452 | { |
1453 | struct inode *inode = file->f_mapping->host; | |
99c88e69 | 1454 | loff_t i_size; |
955818cd | 1455 | loff_t ret; |
124e68e7 | 1456 | |
5955102c | 1457 | inode_lock(inode); |
6a82c47a | 1458 | |
965c8e59 | 1459 | if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { |
508b32d8 | 1460 | ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); |
955818cd | 1461 | if (ret < 0) |
124e68e7 | 1462 | goto out; |
06222e49 JB |
1463 | } |
1464 | ||
99c88e69 | 1465 | i_size = i_size_read(inode); |
965c8e59 | 1466 | switch (whence) { |
06222e49 | 1467 | case SEEK_END: |
99c88e69 | 1468 | offset += i_size; |
124e68e7 SW |
1469 | break; |
1470 | case SEEK_CUR: | |
1471 | /* | |
1472 | * Here we special-case the lseek(fd, 0, SEEK_CUR) | |
1473 | * position-querying operation. Avoid rewriting the "same" | |
1474 | * f_pos value back to the file because a concurrent read(), | |
1475 | * write() or lseek() might have altered it | |
1476 | */ | |
1477 | if (offset == 0) { | |
955818cd | 1478 | ret = file->f_pos; |
124e68e7 SW |
1479 | goto out; |
1480 | } | |
1481 | offset += file->f_pos; | |
1482 | break; | |
06222e49 | 1483 | case SEEK_DATA: |
99c88e69 | 1484 | if (offset >= i_size) { |
06222e49 JB |
1485 | ret = -ENXIO; |
1486 | goto out; | |
1487 | } | |
1488 | break; | |
1489 | case SEEK_HOLE: | |
99c88e69 | 1490 | if (offset >= i_size) { |
06222e49 JB |
1491 | ret = -ENXIO; |
1492 | goto out; | |
1493 | } | |
99c88e69 | 1494 | offset = i_size; |
06222e49 | 1495 | break; |
124e68e7 SW |
1496 | } |
1497 | ||
955818cd | 1498 | ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); |
124e68e7 SW |
1499 | |
1500 | out: | |
5955102c | 1501 | inode_unlock(inode); |
955818cd | 1502 | return ret; |
124e68e7 SW |
1503 | } |
1504 | ||
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

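/*
 * Zero an arbitrary byte range in the page cache: a partial page at
 * the head, whole pages in the middle (dropped via
 * truncate_pagecache_range()), and a partial page at the tail.
 */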
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

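/*
 * Zero a range within a single RADOS object.  A NULL @length drops the
 * whole object: it is deleted, or truncated to zero when it sits at
 * file offset 0.  Otherwise a ZERO op is issued; the request helper
 * may trim *@length to the object boundary, telling the caller how
 * much was actually covered.  -ENOENT (object never written) is
 * treated as success.
 */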
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

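/*
 * Punch a hole object by object: first the partial span up to the next
 * stripe-period boundary, then whole stripe periods with one request
 * per object, then whatever remains at the tail.
 */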
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}

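/*
 * fallocate(2): only FALLOC_FL_KEEP_SIZE and FALLOC_FL_PUNCH_HOLE are
 * supported.  Hole punching zeroes both the page cache and the backing
 * objects; extending the file past EOF only bumps i_size, no space is
 * preallocated.
 */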
static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		endoff = offset + length;
		ret = inode_newsize_ok(inode, endoff);
		if (ret)
			goto unlock;
	}

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
				CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

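/* file_operations used for regular CephFS files */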
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};