#include "ceph_debug.h"

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "super.h"
#include "osd_client.h"
#include "messenger.h"
#include "decode.h"

static const struct ceph_connection_operations osd_con_ops;

static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology or data layout
 * changes, and retry the affected requests when the communications
 * channel with an OSD is reset.
 */
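
/*
 * Illustrative request lifecycle (a sketch of a hypothetical caller;
 * ceph_osdc_readpages() below is a real one):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, ...);
 *	rc = ceph_osdc_start_request(osdc, req, false);
 *	if (!rc)
 *		rc = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 *
 * Completion is signaled from handle_reply(), via the request's
 * callback if one is set, or its r_completion otherwise.
 */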

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten the extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static void calc_layout(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	struct ceph_osd_op *op = (void *)(reqhead + 1);
	u64 orig_len = *plen;
	u64 objoff, objlen;    /* extent in object */
	u64 bno;

	reqhead->snapid = cpu_to_le64(vino.snap);

	/* object extent? */
	ceph_calc_file_object_mapping(layout, off, plen, &bno,
				      &objoff, &objlen);
	if (*plen < orig_len)
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);

	sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
	req->r_oid_len = strlen(req->r_oid);

	op->extent.offset = cpu_to_le64(objoff);
	op->extent.length = cpu_to_le64(objlen);
	req->r_num_pages = calc_pages_for(off, *plen);

	dout("calc_layout %s (%d) %llu~%llu (%d pages)\n",
	     req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages);
}
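
/*
 * Worked example (hypothetical numbers): with a layout of 4 MB
 * objects, a write of 8192 bytes at file offset 4194300 (4 MB - 4)
 * to inode 0x1234 crosses an object boundary, so *plen is trimmed
 * to 4 and the request targets block 0 of the file, i.e. the object
 * named "1234.00000000" per the "%llx.%08llx" format above.
 */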

/*
 * requests
 */
void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	dout("osdc put_request %p %d -> %d\n", req, atomic_read(&req->r_ref),
	     atomic_read(&req->r_ref)-1);
	BUG_ON(atomic_read(&req->r_ref) <= 0);
	if (atomic_dec_and_test(&req->r_ref)) {
		if (req->r_request)
			ceph_msg_put(req->r_request);
		if (req->r_reply)
			ceph_msg_put(req->r_reply);
		if (req->r_own_pages)
			ceph_release_page_vector(req->r_pages,
						 req->r_num_pages);
		ceph_put_snap_context(req->r_snapc);
		if (req->r_mempool)
			mempool_free(req, req->r_osdc->req_mempool);
		else
			kfree(req);
	}
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       int do_sync,
					       u32 truncate_seq,
					       u64 truncate_size,
					       struct timespec *mtime,
					       bool use_mempool, int num_reply)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	struct ceph_osd_request_head *head;
	struct ceph_osd_op *op;
	void *p;
	int do_trunc = truncate_seq && (off + *plen > truncate_size);
	int num_op = 1 + do_sync + do_trunc;
	size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
	int err, i;
	u64 prevofs;

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, GFP_NOFS);
		memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), GFP_NOFS);
	}
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	err = ceph_msgpool_resv(&osdc->msgpool_op_reply, num_reply);
	if (err) {
		ceph_osdc_put_request(req);
		return ERR_PTR(err);
	}

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	atomic_set(&req->r_ref, 1);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	req->r_flags = flags;

	WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);

	/* create message; allow space for oid */
	msg_size += 40;
	if (snapc)
		msg_size += sizeof(u64) * snapc->num_snaps;
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL);
	if (IS_ERR(msg)) {
		/* release the reply reservation taken above */
		ceph_msgpool_resv(&osdc->msgpool_op_reply, -num_reply);
		ceph_osdc_put_request(req);
		return ERR_PTR(PTR_ERR(msg));
	}
	msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
	memset(msg->front.iov_base, 0, msg->front.iov_len);
	head = msg->front.iov_base;
	op = (void *)(head + 1);
	p = (void *)(op + num_op);

	req->r_request = msg;
	req->r_snapc = ceph_get_snap_context(snapc);

	head->client_inc = cpu_to_le32(1); /* always, for now. */
	head->flags = cpu_to_le32(flags);
	if (flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(&head->mtime, mtime);
	head->num_ops = cpu_to_le16(num_op);
	op->op = cpu_to_le16(opcode);

	/* calculate max write size */
	calc_layout(osdc, vino, layout, off, plen, req);
	req->r_file_layout = *layout; /* keep a copy */

	if (flags & CEPH_OSD_FLAG_WRITE) {
		req->r_request->hdr.data_off = cpu_to_le16(off);
		req->r_request->hdr.data_len = cpu_to_le32(*plen);
		op->payload_len = cpu_to_le32(*plen);
	}

	/* fill in oid */
	head->object_len = cpu_to_le32(req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	/* additional ops */
	if (do_trunc) {
		op++;
		op->op = cpu_to_le16(opcode == CEPH_OSD_OP_READ ?
			     CEPH_OSD_OP_MASKTRUNC : CEPH_OSD_OP_SETTRUNC);
		op->trunc.truncate_seq = cpu_to_le32(truncate_seq);
		prevofs = le64_to_cpu((op-1)->extent.offset);
		op->trunc.truncate_size = cpu_to_le64(truncate_size -
						      (off-prevofs));
	}
	if (do_sync) {
		op++;
		op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC);
	}
	if (snapc) {
		head->snap_seq = cpu_to_le64(snapc->seq);
		head->num_snaps = cpu_to_le32(snapc->num_snaps);
		for (i = 0; i < snapc->num_snaps; i++) {
			put_unaligned_le64(snapc->snaps[i], p);
			p += sizeof(u64);
		}
	}

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	return req;
}
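
/*
 * Example call (a sketch mirroring the readpages path at the bottom
 * of this file): a snapshot-less synchronous read would use
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, truncate_seq, truncate_size,
 *				    NULL, false, 1);
 *
 * i.e. no snap context, no startsync, no mtime, no mempool, and one
 * reserved reply message.
 */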

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *new)
{
	struct rb_node **p = &osdc->requests.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_osd_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
						 u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

/*
 * find the request with the lowest tid >= @tid, or NULL if none
 */
static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
		    u64 tid)
{
	struct ceph_osd_request *req, *best = NULL;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid) {
			best = req;	/* candidate; look for a smaller tid */
			n = n->rb_left;
		} else if (tid > req->r_tid) {
			n = n->rb_right;
		} else {
			return req;
		}
	}
	return best;
}
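
/*
 * Example: with registered tids {3, 7}, __lookup_request_ge(osdc, 4)
 * returns the tid 7 request and __lookup_request_ge(osdc, 8) returns
 * NULL.  ceph_osdc_sync() relies on this to walk tids in order
 * without holding request_mutex across its waits.
 */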

/*
 * The messaging layer will reconnect to the osd as needed.  If the
 * session has dropped, the OSD will have dropped the session state,
 * and we'll get notified by the messaging layer.  If that happens, we
 * need to resubmit all requests for that osd.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	osd->o_incarnation++;
	down_read(&osdc->map_sem);
	kick_requests(osdc, osd);
	up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	INIT_LIST_HEAD(&osd->o_requests);
	osd->o_incarnation = 1;

	ceph_con_init(osdc->client->msgr, &osd->o_con);
	osd->o_con.private = osd;
	osd->o_con.ops = &osd_con_ops;
	osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;
	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref)) {
		ceph_con_shutdown(&osd->o_con);
		kfree(osd);
	}
}

/*
 * remove an osd from our map
 */
static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}

/*
 * reset osd connection
 */
static int reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	int ret = 0;

	dout("reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests)) {
		remove_osd(osdc, osd);
	} else {
		ceph_con_close(&osd->o_con);
		ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
		osd->o_incarnation++;
	}
	return ret;
}

static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
	struct ceph_osd *osd;
	struct rb_node *n = osdc->osds.rb_node;

	while (n) {
		osd = rb_entry(n, struct ceph_osd, o_node);
		if (o < osd->o_osd)
			n = n->rb_left;
		else if (o > osd->o_osd)
			n = n->rb_right;
		else
			return osd;
	}
	return NULL;
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void register_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *head = req->r_request->front.iov_base;

	mutex_lock(&osdc->request_mutex);
	req->r_tid = ++osdc->last_tid;
	head->tid = cpu_to_le64(req->r_tid);

	dout("register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);
	osdc->num_requests++;

	req->r_timeout_stamp =
		jiffies + osdc->client->mount_args.osd_timeout*HZ;

	if (osdc->num_requests == 1) {
		osdc->timeout_tid = req->r_tid;
		dout(" timeout on tid %llu at %lu\n", req->r_tid,
		     req->r_timeout_stamp);
		schedule_delayed_work(&osdc->timeout_work,
		      round_jiffies_relative(req->r_timeout_stamp - jiffies));
	}
	mutex_unlock(&osdc->request_mutex);
}
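
/*
 * Example: with an osd timeout of 60 seconds (a hypothetical
 * mount_args.osd_timeout value), a request registered at jiffies J
 * gets r_timeout_stamp = J + 60*HZ; if it is the only pending
 * request, timeout_work is scheduled to fire roughly 60 seconds
 * later, rounded by round_jiffies_relative() to batch wakeups.
 */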

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests))
			remove_osd(osdc, req->r_osd);
		req->r_osd = NULL;
	}

	ceph_osdc_put_request(req);

	if (req->r_tid == osdc->timeout_tid) {
		if (osdc->num_requests == 0) {
			dout("no requests, canceling timeout\n");
			osdc->timeout_tid = 0;
			cancel_delayed_work(&osdc->timeout_work);
		} else {
			req = rb_entry(rb_first(&osdc->requests),
				       struct ceph_osd_request, r_node);
			osdc->timeout_tid = req->r_tid;
			dout("rescheduled timeout on tid %llu at %lu\n",
			     req->r_tid, req->r_timeout_stamp);
			schedule_delayed_work(&osdc->timeout_work,
			      round_jiffies_relative(req->r_timeout_stamp -
						     jiffies));
		}
	}
}

/*
 * Cancel a previously queued request message.
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent) {
		ceph_con_revoke(&req->r_osd->o_con, req->r_request);
		req->r_sent = 0;
	}
}

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_osds(struct ceph_osd_client *osdc,
		      struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	union ceph_pg pgid;
	int o = -1;
	int err;
	struct ceph_osd *newosd = NULL;

	dout("map_osds %p tid %lld\n", req, req->r_tid);
	err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
				      &req->r_file_layout, osdc->osdmap);
	if (err)
		return err;
	pgid.pg64 = le64_to_cpu(reqhead->layout.ol_pgid);
	o = ceph_calc_pg_primary(osdc->osdmap, pgid);

	if ((req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_osds tid %llu pgid %llx pool %d osd%d (was osd%d)\n",
	     req->r_tid, pgid.pg64, pgid.pg.pool, o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests)) {
			/* try to re-use r_osd if possible */
			newosd = get_osd(req->r_osd);
			remove_osd(osdc, newosd);
		}
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		if (newosd) {
			req->r_osd = newosd;
			newosd = NULL;
		} else {
			err = -ENOMEM;
			req->r_osd = create_osd(osdc);
			if (!req->r_osd)
				goto out;
		}

		dout("map_osds osd %p is osd%d\n", req->r_osd, o);
		req->r_osd->o_osd = o;
		req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd)
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
	err = 1;  /* osd changed */

out:
	if (newosd)
		put_osd(newosd);
	return err;
}
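
/*
 * In brief, the mapping pipeline above: ceph_calc_object_layout()
 * hashes the object name into a placement group (pgid), and
 * ceph_calc_pg_primary() consults the osdmap (CRUSH) for that pg's
 * first up osd.  For example (hypothetical numbers), if a new map
 * moves a pg's primary from osd2 to osd5, the next __map_osds() call
 * drops the request from osd2's o_requests list and opens (or
 * reuses) a connection to osd5, returning 1 to signal the change.
 */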

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static int __send_request(struct ceph_osd_client *osdc,
			  struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead;
	int err;

	err = __map_osds(osdc, req);
	if (err < 0)
		return err;
	if (req->r_osd == NULL) {
		dout("send_request %p no up osds in pg\n", req);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
		return 0;
	}

	dout("send_request %p tid %llu to osd%d flags %d\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags);

	reqhead = req->r_request->front.iov_base;
	reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
	reqhead->flags |= cpu_to_le32(req->r_flags);  /* e.g., RETRY */
	reqhead->reassert_version = req->r_reassert_version;

	req->r_timeout_stamp = jiffies + osdc->client->mount_args.osd_timeout*HZ;

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
	return 0;
}

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests have been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests that have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in the future
 * (unless there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long timeout = osdc->client->mount_args.osd_timeout * HZ;
	unsigned long next_timeout = timeout + jiffies;
	struct rb_node *p;

	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_osd_request, r_node);

		if (req->r_resend) {
			int err;

			dout("osdc resending prev failed %lld\n", req->r_tid);
			err = __send_request(osdc, req);
			if (err)
				dout("osdc failed again on %lld\n", req->r_tid);
			else
				req->r_resend = false;
			continue;
		}
	}
	for (p = rb_first(&osdc->osds); p; p = rb_next(p)) {
		osd = rb_entry(p, struct ceph_osd, o_node);
		if (list_empty(&osd->o_requests))
			continue;
		req = list_first_entry(&osd->o_requests,
				       struct ceph_osd_request, r_osd_item);
		if (time_before(jiffies, req->r_timeout_stamp))
			continue;

		dout(" tid %llu (at least) timed out on osd%d\n",
		     req->r_tid, osd->o_osd);
		req->r_timeout_stamp = next_timeout;
		ceph_con_keepalive(&osd->o_con);
	}

	if (osdc->timeout_tid)
		schedule_delayed_work(&osdc->timeout_work,
				      round_jiffies_relative(timeout));

	mutex_unlock(&osdc->request_mutex);

	up_read(&osdc->map_sem);
}

/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	struct ceph_osd_reply_head *rhead = msg->front.iov_base;
	struct ceph_osd_request *req;
	u64 tid;
	int numops, object_len, flags;

	if (msg->front.iov_len < sizeof(*rhead))
		goto bad;
	tid = le64_to_cpu(rhead->tid);
	numops = le32_to_cpu(rhead->num_ops);
	object_len = le32_to_cpu(rhead->object_len);
	if (msg->front.iov_len != sizeof(*rhead) + object_len +
	    numops * sizeof(struct ceph_osd_op))
		goto bad;
	dout("handle_reply %p tid %llu\n", msg, tid);

	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		mutex_unlock(&osdc->request_mutex);
		return;
	}
	ceph_osdc_get_request(req);
	flags = le32_to_cpu(rhead->flags);

	if (req->r_reply) {
		/*
		 * once we see the message has been received, we don't
		 * need a ref (which is only needed for revoking
		 * pages)
		 */
		ceph_msg_put(req->r_reply);
		req->r_reply = NULL;
	}

	if (!req->r_got_reply) {
		unsigned bytes;

		req->r_result = le32_to_cpu(rhead->result);
		bytes = le32_to_cpu(msg->hdr.data_len);
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version = rhead->reassert_version;

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		dout("handle_reply tid %llu dup ack\n", tid);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	/* either this is a read, or we got the safe response */
	if ((flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	mutex_unlock(&osdc->request_mutex);

	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK) {
		if (req->r_safe_callback)
			req->r_safe_callback(req, msg);
		complete(&req->r_safe_completion);  /* fsync waiter */
	}

done:
	ceph_osdc_put_request(req);
	return;

bad:
	pr_err("corrupt osd_op_reply got %d %d expected %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
	       (int)sizeof(*rhead));
}
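
/*
 * Reply sequence for a write, as handled above (sketch):
 *
 *	ack (applied in memory)  -> complete(&req->r_completion)
 *	commit (ONDISK)          -> complete(&req->r_safe_completion)
 *
 * A read, or a write whose first reply already carries ONDISK, is
 * unregistered immediately; otherwise the request stays registered
 * until the on-disk commit arrives, and duplicate acks are ignored.
 */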

/*
 * Resubmit osd requests whose osd or osd address has changed.  Request
 * a new osd map if osds are down, or we are otherwise unable to determine
 * how to direct a request.
 *
 * Close connections to down osds.
 *
 * If @kickosd is specified, resubmit requests for that specific osd.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static void kick_requests(struct ceph_osd_client *osdc,
			  struct ceph_osd *kickosd)
{
	struct ceph_osd_request *req;
	struct rb_node *p, *n;
	int needmap = 0;
	int err;

	dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1);
	mutex_lock(&osdc->request_mutex);
	if (!kickosd) {
		for (p = rb_first(&osdc->osds); p; p = n) {
			struct ceph_osd *osd =
				rb_entry(p, struct ceph_osd, o_node);

			n = rb_next(p);
			if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
			    !ceph_entity_addr_equal(&osd->o_con.peer_addr,
						    ceph_osd_addr(osdc->osdmap,
								  osd->o_osd)))
				reset_osd(osdc, osd);
		}
	}

	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_osd_request, r_node);

		if (req->r_resend) {
			dout(" r_resend set on tid %llu\n", req->r_tid);
			goto kick;
		}
		if (req->r_osd && kickosd == req->r_osd)
			goto kick;

		err = __map_osds(osdc, req);
		if (err == 0)
			continue;  /* no change */
		if (err < 0) {
			/*
			 * FIXME: really, we should set the request
			 * error and fail if this isn't a 'nofail'
			 * request, but that's a fair bit more
			 * complicated to do.  So retry!
			 */
			dout(" setting r_resend on %llu\n", req->r_tid);
			req->r_resend = true;
			continue;
		}
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;  /* request a newer map */
			continue;
		}

kick:
		dout("kicking tid %llu osd%d\n", req->r_tid,
		     req->r_osd->o_osd);
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
		err = __send_request(osdc, req);
		if (err) {
			dout(" setting r_resend on %llu\n", req->r_tid);
			req->r_resend = true;
		}
	}
	mutex_unlock(&osdc->request_mutex);

	if (needmap) {
		dout("%d requests for down osds, need new map\n", needmap);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	}
}

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_fsid_compare(&fsid, &osdc->client->monc.monmap->fsid)) {
		pr_err("got osdmap with wrong fsid, ignoring\n");
		return;
	}

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		ceph_decode_32(&p, epoch);
		ceph_decode_32(&p, maplen);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	if (newmap)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		ceph_decode_32(&p, epoch);
		ceph_decode_32(&p, maplen);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap)
				ceph_osdmap_destroy(oldmap);
		}
		p += maplen;
		nr_maps--;
	}

done:
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
	if (newmap)
		kick_requests(osdc, NULL);
	up_read(&osdc->map_sem);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	up_write(&osdc->map_sem);
	return;
}
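
/*
 * Message layout decoded above (as this code reads it, not a wire
 * spec): fsid, then a u32 count of incremental maps, each framed as
 * (u32 epoch, u32 len, len bytes of data), then a u32 count of full
 * maps with the same framing.  Only an incremental map for exactly
 * epoch osdmap->epoch+1 is applied; failing that, the last (newest)
 * full map in the message is decoded and swapped in.
 */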

/*
 * A read request prepares specific pages that data is to be read into.
 * When a message is being read off the wire, we call prepare_pages to
 * find those pages.
 *
 * Returns 0 on success, -1 on failure.
 */
static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m,
			 int want)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	struct ceph_osd_reply_head *rhead = m->front.iov_base;
	struct ceph_osd_request *req;
	u64 tid;
	int ret = -1;
	int type = le16_to_cpu(m->hdr.type);

	if (!osd)
		return -1;
	osdc = osd->o_osdc;

	dout("prepare_pages on msg %p want %d\n", m, want);
	if (unlikely(type != CEPH_MSG_OSD_OPREPLY))
		return -1;  /* hmm! */

	tid = le64_to_cpu(rhead->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		dout("prepare_pages unknown tid %llu\n", tid);
		goto out;
	}
	dout("prepare_pages tid %llu has %d pages, want %d\n",
	     tid, req->r_num_pages, want);
	if (likely(req->r_num_pages >= want && !req->r_prepared_pages)) {
		m->pages = req->r_pages;
		m->nr_pages = req->r_num_pages;
		req->r_reply = m;  /* only for duration of read over socket */
		ceph_msg_get(m);
		req->r_prepared_pages = 1;
		ret = 0;  /* success */
	}
out:
	mutex_unlock(&osdc->request_mutex);
	return ret;
}

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc;

	req->r_request->pages = req->r_pages;
	req->r_request->nr_pages = req->r_num_pages;

	register_request(osdc, req);

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	rc = __send_request(osdc, req);
	if (rc) {
		if (nofail) {
			dout("osdc_start_request failed send, marking %lld\n",
			     req->r_tid);
			req->r_resend = true;
			rc = 0;
		} else {
			__unregister_request(osdc, req);
		}
	}
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		mutex_lock(&osdc->request_mutex);
		__cancel_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("wait_request tid %llu interrupted\n", req->r_tid);
		return rc;
	}

	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
	return req->r_result;
}

/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;
	u64 last_tid, next_tid = 0;

	mutex_lock(&osdc->request_mutex);
	last_tid = osdc->last_tid;
	while (1) {
		req = __lookup_request_ge(osdc, next_tid);
		if (!req)
			break;
		if (req->r_tid > last_tid)
			break;

		next_tid = req->r_tid + 1;
		if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
			continue;

		ceph_osdc_get_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("sync waiting on tid %llu (last is %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		mutex_lock(&osdc->request_mutex);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
	dout("sync done (thru tid %llu)\n", last_tid);
}
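
/*
 * Starvation avoidance, by example: we snapshot last_tid and walk
 * tids upward with __lookup_request_ge(), waiting only on writes
 * with tid <= the snapshot.  A write submitted while we sleep gets
 * a larger tid and is skipped, so a steady stream of new writers
 * cannot keep ceph_osdc_sync() from returning.
 */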

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	osdc->osdmap = NULL;
	init_rwsem(&osdc->map_sem);
	init_completion(&osdc->map_waiters);
	osdc->last_requested_map = 0;
	mutex_init(&osdc->request_mutex);
	osdc->timeout_tid = 0;
	osdc->last_tid = 0;
	osdc->osds = RB_ROOT;
	osdc->requests = RB_ROOT;
	osdc->num_requests = 0;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);

	osdc->req_mempool = mempool_create_kmalloc_pool(10,
					sizeof(struct ceph_osd_request));
	if (!osdc->req_mempool)
		return -ENOMEM;

	err = ceph_msgpool_init(&osdc->msgpool_op, 4096, 10, true);
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, 512, 0, false);
	if (err < 0)
		goto out_msgpool;
	return 0;

out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
	return err;
}
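
/*
 * Usage sketch (hypothetical caller; the real one is the client
 * setup/teardown path): init and stop come in pairs,
 *
 *	err = ceph_osdc_init(&client->osdc, client);
 *	if (err)
 *		return err;
 *	...
 *	ceph_osdc_stop(&client->osdc);
 */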

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	cancel_delayed_work_sync(&osdc->timeout_work);
	if (osdc->osdmap) {
		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = NULL;
	}
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, truncate_seq, truncate_size, NULL,
				    false, 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	req->r_pages = pages;
	num_pages = calc_pages_for(off, *plen);
	req->r_num_pages = num_pages;

	dout("readpages final extent is %llu~%llu (%d pages)\n",
	     off, *plen, req->r_num_pages);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages,
			 int flags, int do_sync, bool nofail)
{
	struct ceph_osd_request *req;
	int rc = 0;

	BUG_ON(vino.snap != CEPH_NOSNAP);
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
				    CEPH_OSD_OP_WRITE,
				    flags | CEPH_OSD_FLAG_ONDISK |
					    CEPH_OSD_FLAG_WRITE,
				    snapc, do_sync,
				    truncate_seq, truncate_size, mtime,
				    nofail, 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	req->r_pages = pages;
	req->r_num_pages = calc_pages_for(off, len);
	dout("writepages %llu~%llu (%d pages)\n", off, len,
	     req->r_num_pages);

	rc = ceph_osdc_start_request(osdc, req, nofail);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int type = le16_to_cpu(msg->hdr.type);

	if (!osd)
		return;
	osdc = osd->o_osdc;

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(hdr->type);

	switch (type) {
	case CEPH_MSG_OSD_OPREPLY:
		return ceph_msgpool_get(&osdc->msgpool_op_reply);
	}
	return ceph_alloc_msg(con, hdr);
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.alloc_msg = alloc_msg,
	.peer_reset = osd_reset,
	.alloc_middle = ceph_alloc_middle,
	.prepare_pages = prepare_pages,
};