#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
                        struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
                          struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);
#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
        bool wrlocked = true;

        if (unlikely(down_read_trylock(sem))) {
                wrlocked = false;
                up_read(sem);
        }

        return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        WARN_ON(!(mutex_is_locked(&osd->lock) &&
                  rwsem_is_locked(&osdc->lock)) &&
                !rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
        WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                       u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        int r;

        /* object extent? */
        r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                          objoff, objlen);
        if (r < 0)
                return r;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

        return 0;
}
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                        struct bio *bio, size_t bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio = bio;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */
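
/*
 * osd_req_op_data() evaluates to a pointer to the given data field of
 * op "whch" of request "oreq", BUG()ing if the op index is out of
 * range.  The statement-expression form keeps the bounds check inside
 * the accessor itself.
 */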
#define osd_req_op_data(oreq, whch, typ, fld)                           \
({                                                                      \
        struct ceph_osd_request *__oreq = (oreq);                       \
        unsigned int __whch = (whch);                                   \
        BUG_ON(__whch >= __oreq->r_num_ops);                            \
        &__oreq->r_ops[__whch].typ.fld;                                 \
})

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);

        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                           unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);
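
/*
 * The osd_req_op_*_data_* helpers below attach a previously prepared
 * data item (page vector, pagelist or bio) to a specific op of a
 * request.  They only record the buffers -- the data is hooked up to
 * the request or reply message later, in setup_request_data().
 */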
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                        unsigned int which, struct bio *bio, size_t bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */
static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
        osd_req->r_ops[which].cls.indata_len += pagelist->length;
        osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
        osd_req->r_ops[which].cls.indata_len += length;
        osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}
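
/*
 * Release any pages an op owns (own_pages) and reset its data item to
 * CEPH_OSD_DATA_TYPE_NONE.
 */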
static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                                (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        }
        ceph_osd_data_init(osd_data);
}
static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
        case CEPH_OSD_OP_STAT:
                ceph_osd_data_release(&op->raw_data_in);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                ceph_osd_data_release(&op->notify_ack.request_data);
                break;
        case CEPH_OSD_OP_NOTIFY:
                ceph_osd_data_release(&op->notify.request_data);
                ceph_osd_data_release(&op->notify.response_data);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                ceph_osd_data_release(&op->list_watchers.response_data);
                break;
        default:
                break;
        }
}
/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
        ceph_oid_init(&t->base_oid);
        ceph_oloc_init(&t->base_oloc);
        ceph_oid_init(&t->target_oid);
        ceph_oloc_init(&t->target_oloc);

        ceph_osds_init(&t->acting);
        ceph_osds_init(&t->up);
        t->size = -1;
        t->min_size = -1;

        t->osd = CEPH_HOMELESS_OSD;
}
static void target_copy(struct ceph_osd_request_target *dest,
                        const struct ceph_osd_request_target *src)
{
        ceph_oid_copy(&dest->base_oid, &src->base_oid);
        ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
        ceph_oid_copy(&dest->target_oid, &src->target_oid);
        ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

        dest->pgid = src->pgid; /* struct */
        dest->spgid = src->spgid; /* struct */
        dest->pg_num = src->pg_num;
        dest->pg_num_mask = src->pg_num_mask;
        ceph_osds_copy(&dest->acting, &src->acting);
        ceph_osds_copy(&dest->up, &src->up);
        dest->size = src->size;
        dest->min_size = src->min_size;
        dest->sort_bitwise = src->sort_bitwise;
        dest->recovery_deletes = src->recovery_deletes;

        dest->flags = src->flags;
        dest->paused = src->paused;

        dest->epoch = src->epoch;
        dest->last_force_resend = src->last_force_resend;

        dest->osd = src->osd;
}
static void target_destroy(struct ceph_osd_request_target *t)
{
        ceph_oid_destroy(&t->base_oid);
        ceph_oloc_destroy(&t->base_oloc);
        ceph_oid_destroy(&t->target_oid);
        ceph_oloc_destroy(&t->target_oloc);
}
static void request_release_checks(struct ceph_osd_request *req)
{
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
        WARN_ON(!list_empty(&req->r_unsafe_item));
        WARN_ON(req->r_osd);
}
static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        request_release_checks(req);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        target_destroy(&req->r_t);
        ceph_put_snap_context(req->r_snapc);
        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
                kmem_cache_free(ceph_osd_request_cache, req);
        else
                kfree(req);
}
void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             kref_read(&req->r_kref));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
                     kref_read(&req->r_kref));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
}
EXPORT_SYMBOL(ceph_osdc_put_request);
static void request_init(struct ceph_osd_request *req)
{
        /* req only, each op is zeroed in _osd_req_op_init() */
        memset(req, 0, sizeof(*req));

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        RB_CLEAR_NODE(&req->r_node);
        RB_CLEAR_NODE(&req->r_mc_node);
        INIT_LIST_HEAD(&req->r_unsafe_item);

        target_init(&req->r_t);
}
/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        bool mempool = req->r_mempool;
        unsigned int num_ops = req->r_num_ops;
        u64 snapid = req->r_snapid;
        struct ceph_snap_context *snapc = req->r_snapc;
        bool linger = req->r_linger;
        struct ceph_msg *request_msg = req->r_request;
        struct ceph_msg *reply_msg = req->r_reply;

        dout("%s req %p\n", __func__, req);
        WARN_ON(kref_read(&req->r_kref) != 1);
        request_release_checks(req);

        WARN_ON(kref_read(&request_msg->kref) != 1);
        WARN_ON(kref_read(&reply_msg->kref) != 1);
        target_destroy(&req->r_t);

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = snapid;
        req->r_snapc = snapc;
        req->r_linger = linger;
        req->r_request = request_msg;
        req->r_reply = reply_msg;
}
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;

        if (use_mempool) {
                BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
        } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
                req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
        } else {
                BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
                req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
                              gfp_flags);
        }
        if (unlikely(!req))
                return NULL;

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = CEPH_NOSNAP;
        req->r_snapc = ceph_get_snap_context(snapc);

        dout("%s req %p\n", __func__, req);
        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
        return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_msg *msg;
        int msg_size;

        WARN_ON(ceph_oid_empty(&req->r_base_oid));
        WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

        /* create request message */
        msg_size = CEPH_ENCODING_START_BLK_LEN +
                        CEPH_PGID_ENCODING_LEN + 1; /* spgid */
        msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        sizeof(struct ceph_osd_reqid); /* reqid */
        msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
        msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
        msg_size += 4 + req->r_base_oid.name_len; /* oid */
        msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8; /* snapid */
        msg_size += 8; /* snap_seq */
        msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
        msg_size += 4 + 8; /* retry_attempt, features */

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        memset(msg->front.iov_base, 0, msg->front.iov_len);
        req->r_request = msg;

        /* create reply message */
        msg_size = OSD_OPREPLY_FRONT_LEN;
        msg_size += req->r_base_oid.name_len;
        msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        req->r_reply = msg;

        return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
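
/*
 * Any opcode listed in __CEPH_FORALL_OSD_OPS (rados.h) is considered
 * valid; the GENERATE_CASE macro below expands to one switch case per
 * opcode.
 */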
static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
        op->flags = flags;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                     unsigned int which, u16 opcode, u32 flags)
{
        (void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);
void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                            unsigned int which, u16 opcode,
                            u64 offset, u64 length,
                            u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                              unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);
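
/*
 * Duplicate the extent op at slot "which" into slot which + 1,
 * advancing the duplicated extent by offset_inc bytes and shrinking
 * its length (and, for writes, its payload) accordingly.
 */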
void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 offset_inc)
{
        struct ceph_osd_req_op *op, *prev_op;

        BUG_ON(which + 1 >= osd_req->r_num_ops);

        prev_op = &osd_req->r_ops[which];
        op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
        op->extent = prev_op->extent;
        /* adjust offset */
        op->extent.offset += offset_inc;
        op->extent.length -= offset_inc;

        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        u16 opcode, const char *class, const char *method)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;

        BUG_ON(opcode != CEPH_OSD_OP_CALL);

        pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
        BUG_ON(!pagelist);
        ceph_pagelist_init(pagelist);

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ceph_pagelist_append(pagelist, class, size);
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ceph_pagelist_append(pagelist, method, size);
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;

        BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

        pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        ceph_pagelist_init(pagelist);

        payload_len = strlen(name);
        op->xattr.name_len = payload_len;
        ceph_pagelist_append(pagelist, name, payload_len);

        op->xattr.value_len = size;
        ceph_pagelist_append(pagelist, value, size);
        payload_len += size;

        op->xattr.cmp_op = cmp_op;
        op->xattr.cmp_mode = cmp_mode;

        ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
        op->indata_len = payload_len;
        return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
                                  u64 cookie, u8 watch_opcode)
{
        struct ceph_osd_req_op *op;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
        op->watch.cookie = cookie;
        op->watch.op = watch_opcode;
        op->watch.gen = 0;
}
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                unsigned int which,
                                u64 expected_object_size,
                                u64 expected_write_size)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      CEPH_OSD_OP_SETALLOCHINT,
                                                      0);

        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;

        /*
         * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
         * not worth a feature bit.  Set FAILOK per-op flag to make
         * sure older osds don't trip over an unsupported opcode.
         */
        op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                                struct ceph_osd_data *osd_data)
{
        u64 length = ceph_osd_data_length(osd_data);

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
                                        length, osd_data->alignment);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}
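
/*
 * Translate an in-memory op into its wire format, returning the
 * number of request data bytes the op implies.
 */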
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
                             const struct ceph_osd_req_op *src)
{
        if (WARN_ON(!osd_req_opcode_valid(src->op))) {
                pr_err("unrecognized osd opcode %d\n", src->op);

                return 0;
        }

        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
        case CEPH_OSD_OP_CALL:
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
                break;
        case CEPH_OSD_OP_STARTSYNC:
                break;
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(0);
                dst->watch.op = src->watch.op;
                dst->watch.gen = cpu_to_le32(src->watch.gen);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                break;
        case CEPH_OSD_OP_NOTIFY:
                dst->notify.cookie = cpu_to_le64(src->notify.cookie);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                dst->alloc_hint.expected_object_size =
                    cpu_to_le64(src->alloc_hint.expected_object_size);
                dst->alloc_hint.expected_write_size =
                    cpu_to_le64(src->alloc_hint.expected_write_size);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
                dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
                dst->xattr.cmp_op = src->xattr.cmp_op;
                dst->xattr.cmp_mode = src->xattr.cmp_mode;
                break;
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_DELETE:
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                        ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }

        dst->op = cpu_to_le16(src->op);
        dst->flags = cpu_to_le32(src->flags);
        dst->payload_len = cpu_to_le32(src->indata_len);

        return src->indata_len;
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               unsigned int which, int num_ops,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               bool use_mempool)
{
        struct ceph_osd_request *req;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
               opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
                                        GFP_NOFS);
        if (!req) {
                r = -ENOMEM;
                goto fail;
        }

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r)
                goto fail;

        if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
                osd_req_op_init(req, which, opcode, 0);
        } else {
                u32 object_size = layout->object_size;
                u32 object_base = off - objoff;
                if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
                        if (truncate_size <= object_base) {
                                truncate_size = 0;
                        } else {
                                truncate_size -= object_base;
                                if (truncate_size > object_size)
                                        truncate_size = object_size;
                        }
                }
                osd_req_op_extent_init(req, which, opcode, objoff, objlen,
                                       truncate_size, truncate_seq);
        }

        req->r_abort_on_full = true;
        req->r_flags = flags;
        req->r_base_oloc.pool = layout->pool_id;
        req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
        ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

        req->r_snapid = vino.snap;
        if (flags & CEPH_OSD_FLAG_WRITE)
                req->r_data_offset = off;

        r = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (r)
                goto fail;

        return req;

fail:
        ceph_osdc_put_request(req);
        return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
static bool osd_homeless(struct ceph_osd *osd)
{
        return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
        verify_osdc_locked(osd->o_osdc);

        return !RB_EMPTY_NODE(&osd->o_node);
}
1017 static void osd_init(struct ceph_osd
*osd
)
1019 refcount_set(&osd
->o_ref
, 1);
1020 RB_CLEAR_NODE(&osd
->o_node
);
1021 osd
->o_requests
= RB_ROOT
;
1022 osd
->o_linger_requests
= RB_ROOT
;
1023 osd
->o_backoff_mappings
= RB_ROOT
;
1024 osd
->o_backoffs_by_id
= RB_ROOT
;
1025 INIT_LIST_HEAD(&osd
->o_osd_lru
);
1026 INIT_LIST_HEAD(&osd
->o_keepalive_item
);
1027 osd
->o_incarnation
= 1;
1028 mutex_init(&osd
->lock
);
static void osd_cleanup(struct ceph_osd *osd)
{
        WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
        WARN_ON(!list_empty(&osd->o_osd_lru));
        WARN_ON(!list_empty(&osd->o_keepalive_item));

        if (osd->o_auth.authorizer) {
                WARN_ON(osd_homeless(osd));
                ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
        }
}
/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        WARN_ON(onum == CEPH_HOMELESS_OSD);

        osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
        osd_init(osd);
        osd->o_osdc = osdc;
        osd->o_osd = onum;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        return osd;
}
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (refcount_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
                     refcount_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
             refcount_read(&osd->o_ref) - 1);
        if (refcount_dec_and_test(&osd->o_ref)) {
                osd_cleanup(osd);
                kfree(osd);
        }
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
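
/*
 * Idle OSD sessions are parked on osdc->osd_lru with a lru_ttl of
 * osd_idle_ttl so that unused connections can be closed later.
 */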
static void __move_osd_to_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));

        spin_lock(&osdc->osd_lru_lock);
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        spin_unlock(&osdc->osd_lru_lock);

        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}
static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests))
                __move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        spin_lock(&osdc->osd_lru_lock);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
        spin_unlock(&osdc->osd_lru_lock);
}
/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct rb_node *n;

        verify_osdc_wrlocked(osdc);
        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        ceph_con_close(&osd->o_con);

        for (n = rb_first(&osd->o_requests); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);

                n = rb_next(n); /* unlink_request() */

                dout(" reassigning req %p tid %llu\n", req, req->r_tid);
                unlink_request(osd, req);
                link_request(&osdc->homeless_osd, req);
        }
        for (n = rb_first(&osd->o_linger_requests); n; ) {
                struct ceph_osd_linger_request *lreq =
                    rb_entry(n, struct ceph_osd_linger_request, node);

                n = rb_next(n); /* unlink_linger() */

                dout(" reassigning lreq %p linger_id %llu\n", lreq,
                     lreq->linger_id);
                unlink_linger(osd, lreq);
                link_linger(&osdc->homeless_osd, lreq);
        }
        clear_backoffs(osd);

        __remove_osd_from_lru(osd);
        erase_osd(&osdc->osds, osd);
        put_osd(osd);
}
static int reopen_osd(struct ceph_osd *osd)
{
        struct ceph_entity_addr *peer_addr;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests)) {
                close_osd(osd);
                return -ENODEV;
        }

        peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
        if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
            !ceph_con_opened(&osd->o_con)) {
                struct rb_node *n;

                dout("osd addr hasn't changed and connection never opened, "
                     "letting msgr retry\n");
                /* touch each r_stamp for handle_timeout()'s benefit */
                for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
                        struct ceph_osd_request *req =
                            rb_entry(n, struct ceph_osd_request, r_node);
                        req->r_stamp = jiffies;
                }

                return -EAGAIN;
        }

        ceph_con_close(&osd->o_con);
        ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
        osd->o_incarnation++;

        return 0;
}
static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
                                          bool wrlocked)
{
        struct ceph_osd *osd;

        if (wrlocked)
                verify_osdc_wrlocked(osdc);
        else
                verify_osdc_locked(osdc);

        if (o != CEPH_HOMELESS_OSD)
                osd = lookup_osd(&osdc->osds, o);
        else
                osd = &osdc->homeless_osd;
        if (!osd) {
                if (!wrlocked)
                        return ERR_PTR(-EAGAIN);

                osd = create_osd(osdc, o);
                insert_osd(&osdc->osds, osd);
                ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
                              &osdc->osdmap->osd_addr[osd->o_osd]);
        }

        dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
        return osd;
}
/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(!req->r_tid || req->r_osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        if (!osd_homeless(osd))
                __remove_osd_from_lru(osd);
        else
                atomic_inc(&osd->o_osdc->num_homeless);

        get_osd(osd);
        insert_request(&osd->o_requests, req);
        req->r_osd = osd;
}
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(req->r_osd != osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        req->r_osd = NULL;
        erase_request(&osd->o_requests, req);
        put_osd(osd);

        if (!osd_homeless(osd))
                maybe_move_osd_to_lru(osd);
        else
                atomic_dec(&osd->o_osdc->num_homeless);
}
static bool __pool_full(struct ceph_pg_pool_info *pi)
{
        return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
        struct rb_node *n;

        for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
                struct ceph_pg_pool_info *pi =
                    rb_entry(n, struct ceph_pg_pool_info, node);

                if (__pool_full(pi))
                        return true;
        }

        return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
        struct ceph_pg_pool_info *pi;

        pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
        if (!pi)
                return false;

        return __pool_full(pi);
}
/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
                                    const struct ceph_osd_request_target *t,
                                    struct ceph_pg_pool_info *pi)
{
        bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
        bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                       __pool_full(pi);

        WARN_ON(pi->id != t->target_oloc.pool);
        return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
               ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
               (osdc->osdmap->epoch < osdc->epoch_barrier);
}
enum calc_target_result {
        CALC_TARGET_NO_ACTION = 0,
        CALC_TARGET_NEED_RESEND,
        CALC_TARGET_POOL_DNE,
};
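
/*
 * Map a request target through the current osdmap: apply cache
 * tiering overlays, compute the PG and its up/acting sets, and decide
 * whether the request needs to be resent (new interval, split,
 * unpause, etc.) or whether the target pool doesn't exist.
 */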
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
                                           struct ceph_osd_request_target *t,
                                           struct ceph_connection *con,
                                           bool any_change)
{
        struct ceph_pg_pool_info *pi;
        struct ceph_pg pgid, last_pgid;
        struct ceph_osds up, acting;
        bool force_resend = false;
        bool unpaused = false;
        bool legacy_change;
        bool split = false;
        bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
        bool recovery_deletes = ceph_osdmap_flag(osdc,
                                                 CEPH_OSDMAP_RECOVERY_DELETES);
        enum calc_target_result ct_res;
        int ret;

        t->epoch = osdc->osdmap->epoch;
        pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
        if (!pi) {
                t->osd = CEPH_HOMELESS_OSD;
                ct_res = CALC_TARGET_POOL_DNE;
                goto out;
        }

        if (osdc->osdmap->epoch == pi->last_force_request_resend) {
                if (t->last_force_resend < pi->last_force_request_resend) {
                        t->last_force_resend = pi->last_force_request_resend;
                        force_resend = true;
                } else if (t->last_force_resend == 0) {
                        force_resend = true;
                }
        }

        /* apply tiering */
        ceph_oid_copy(&t->target_oid, &t->base_oid);
        ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
        if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
                if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
                        t->target_oloc.pool = pi->read_tier;
                if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
                        t->target_oloc.pool = pi->write_tier;

                pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
                if (!pi) {
                        t->osd = CEPH_HOMELESS_OSD;
                        ct_res = CALC_TARGET_POOL_DNE;
                        goto out;
                }
        }

        ret = __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc,
                                          &pgid);
        if (ret) {
                WARN_ON(ret != -ENOENT);
                t->osd = CEPH_HOMELESS_OSD;
                ct_res = CALC_TARGET_POOL_DNE;
                goto out;
        }
        last_pgid.pool = pgid.pool;
        last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

        ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
        if (any_change &&
            ceph_is_new_interval(&t->acting,
                                 &acting,
                                 &t->up,
                                 &up,
                                 t->size,
                                 pi->size,
                                 t->min_size,
                                 pi->min_size,
                                 t->pg_num,
                                 pi->pg_num,
                                 t->sort_bitwise,
                                 sort_bitwise,
                                 t->recovery_deletes,
                                 recovery_deletes,
                                 &last_pgid))
                force_resend = true;

        if (t->paused && !target_should_be_paused(osdc, t, pi)) {
                t->paused = false;
                unpaused = true;
        }
        legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
                        ceph_osds_changed(&t->acting, &acting, any_change);
        if (t->pg_num)
                split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

        if (legacy_change || force_resend || split) {
                t->pgid = pgid; /* struct */
                ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
                ceph_osds_copy(&t->acting, &acting);
                ceph_osds_copy(&t->up, &up);
                t->size = pi->size;
                t->min_size = pi->min_size;
                t->pg_num = pi->pg_num;
                t->pg_num_mask = pi->pg_num_mask;
                t->sort_bitwise = sort_bitwise;
                t->recovery_deletes = recovery_deletes;

                t->osd = acting.primary;
        }

        if (unpaused || legacy_change || force_resend ||
            (split && con && CEPH_HAVE_FEATURE(con->peer_features,
                                               RESEND_ON_SPLIT)))
                ct_res = CALC_TARGET_NEED_RESEND;
        else
                ct_res = CALC_TARGET_NO_ACTION;

out:
        dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
        return ct_res;
}
static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
        struct ceph_spg_mapping *spg;

        spg = kmalloc(sizeof(*spg), GFP_NOIO);
        if (!spg)
                return NULL;

        RB_CLEAR_NODE(&spg->node);
        spg->backoffs = RB_ROOT;
        return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
        WARN_ON(!RB_EMPTY_NODE(&spg->node));
        WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

        kfree(spg);
}
/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
                 RB_BYPTR, const struct ceph_spg *, node)
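
/*
 * The hoid helpers below mirror hobject_t's bitwise sort order:
 * is_max first, then pool, reversed hash, namespace, effective key
 * (explicit locator key if set, object name otherwise), oid and
 * snapid.
 */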
static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
        return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
                                   void **pkey, size_t *pkey_len)
{
        if (hoid->key_len) {
                *pkey = hoid->key;
                *pkey_len = hoid->key_len;
        } else {
                *pkey = hoid->oid;
                *pkey_len = hoid->oid_len;
        }
}
static int compare_names(const void *name1, size_t name1_len,
                         const void *name2, size_t name2_len)
{
        int ret;

        ret = memcmp(name1, name2, min(name1_len, name2_len));
        if (!ret) {
                if (name1_len < name2_len)
                        ret = -1;
                else if (name1_len > name2_len)
                        ret = 1;
        }
        return ret;
}
static int hoid_compare(const struct ceph_hobject_id *lhs,
                        const struct ceph_hobject_id *rhs)
{
        void *effective_key1, *effective_key2;
        size_t effective_key1_len, effective_key2_len;
        int ret;

        if (lhs->is_max < rhs->is_max)
                return -1;
        if (lhs->is_max > rhs->is_max)
                return 1;

        if (lhs->pool < rhs->pool)
                return -1;
        if (lhs->pool > rhs->pool)
                return 1;

        if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
                return -1;
        if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
                return 1;

        ret = compare_names(lhs->nspace, lhs->nspace_len,
                            rhs->nspace, rhs->nspace_len);
        if (ret)
                return ret;

        hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
        hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
        ret = compare_names(effective_key1, effective_key1_len,
                            effective_key2, effective_key2_len);
        if (ret)
                return ret;

        ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
        if (ret)
                return ret;

        if (lhs->snapid < rhs->snapid)
                return -1;
        if (lhs->snapid > rhs->snapid)
                return 1;

        return 0;
}
/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
        u8 struct_v;
        u32 struct_len;
        int ret;

        ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
                                  &struct_len);
        if (ret)
                return ret;

        if (struct_v < 4) {
                pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
                goto e_inval;
        }

        hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
                                                GFP_NOIO);
        if (IS_ERR(hoid->key)) {
                ret = PTR_ERR(hoid->key);
                hoid->key = NULL;
                return ret;
        }

        hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
                                                GFP_NOIO);
        if (IS_ERR(hoid->oid)) {
                ret = PTR_ERR(hoid->oid);
                hoid->oid = NULL;
                return ret;
        }

        ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
        ceph_decode_32_safe(p, end, hoid->hash, e_inval);
        ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

        hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
                                                   GFP_NOIO);
        if (IS_ERR(hoid->nspace)) {
                ret = PTR_ERR(hoid->nspace);
                hoid->nspace = NULL;
                return ret;
        }

        ceph_decode_64_safe(p, end, hoid->pool, e_inval);

        ceph_hoid_build_hash_cache(hoid);
        return 0;

e_inval:
        return -EINVAL;
}
static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
        return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
               4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
        ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
        ceph_encode_string(p, end, hoid->key, hoid->key_len);
        ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
        ceph_encode_64(p, hoid->snapid);
        ceph_encode_32(p, hoid->hash);
        ceph_encode_8(p, hoid->is_max);
        ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
        ceph_encode_64(p, hoid->pool);
}
static void free_hoid(struct ceph_hobject_id *hoid)
{
        if (hoid) {
                kfree(hoid->key);
                kfree(hoid->oid);
                kfree(hoid->nspace);
                kfree(hoid);
        }
}
static struct ceph_osd_backoff *alloc_backoff(void)
{
        struct ceph_osd_backoff *backoff;

        backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
        if (!backoff)
                return NULL;

        RB_CLEAR_NODE(&backoff->spg_node);
        RB_CLEAR_NODE(&backoff->id_node);
        return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
        WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
        WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

        free_hoid(backoff->begin);
        free_hoid(backoff->end);
        kfree(backoff);
}
/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
                        RB_BYVAL, spg_node);

static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
                        const struct ceph_hobject_id *hoid)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct ceph_osd_backoff *cur =
                    rb_entry(n, struct ceph_osd_backoff, spg_node);
                int cmp;

                cmp = hoid_compare(hoid, cur->begin);
                if (cmp < 0) {
                        n = n->rb_left;
                } else if (cmp > 0) {
                        if (hoid_compare(hoid, cur->end) < 0)
                                return cur;

                        n = n->rb_right;
                } else {
                        return cur;
                }
        }

        return NULL;
}

/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
static void clear_backoffs(struct ceph_osd *osd)
{
        while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
                struct ceph_spg_mapping *spg =
                    rb_entry(rb_first(&osd->o_backoff_mappings),
                             struct ceph_spg_mapping, node);

                while (!RB_EMPTY_ROOT(&spg->backoffs)) {
                        struct ceph_osd_backoff *backoff =
                            rb_entry(rb_first(&spg->backoffs),
                                     struct ceph_osd_backoff, spg_node);

                        erase_backoff(&spg->backoffs, backoff);
                        erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
                        free_backoff(backoff);
                }
                erase_spg_mapping(&osd->o_backoff_mappings, spg);
                free_spg_mapping(spg);
        }
}
/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
                                  const struct ceph_osd_request_target *t)
{
        hoid->key = NULL;
        hoid->key_len = 0;
        hoid->oid = t->target_oid.name;
        hoid->oid_len = t->target_oid.name_len;
        hoid->snapid = CEPH_NOSNAP;
        hoid->hash = t->pgid.seed;
        hoid->is_max = false;
        if (t->target_oloc.pool_ns) {
                hoid->nspace = t->target_oloc.pool_ns->str;
                hoid->nspace_len = t->target_oloc.pool_ns->len;
        } else {
                hoid->nspace = NULL;
                hoid->nspace_len = 0;
        }
        hoid->pool = t->target_oloc.pool;
        ceph_hoid_build_hash_cache(hoid);
}
static bool should_plug_request(struct ceph_osd_request *req)
{
        struct ceph_osd *osd = req->r_osd;
        struct ceph_spg_mapping *spg;
        struct ceph_osd_backoff *backoff;
        struct ceph_hobject_id hoid;

        spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
        if (!spg)
                return false;

        hoid_fill_from_target(&hoid, &req->r_t);
        backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
        if (!backoff)
                return false;

        dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
             __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
             backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
        return true;
}
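
/*
 * Hook up the data items of each op to the request message (for ops
 * that carry data to the OSD) and/or to the preallocated reply
 * message (for ops that return data).
 */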
static void setup_request_data(struct ceph_osd_request *req,
                               struct ceph_msg *msg)
{
        u32 data_len = 0;
        int i;

        if (!list_empty(&msg->data))
                return;

        WARN_ON(msg->data_length);
        for (i = 0; i < req->r_num_ops; i++) {
                struct ceph_osd_req_op *op = &req->r_ops[i];

                switch (op->op) {
                /* request */
                case CEPH_OSD_OP_WRITE:
                case CEPH_OSD_OP_WRITEFULL:
                        WARN_ON(op->indata_len != op->extent.length);
                        ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
                        break;
                case CEPH_OSD_OP_SETXATTR:
                case CEPH_OSD_OP_CMPXATTR:
                        WARN_ON(op->indata_len != op->xattr.name_len +
                                                  op->xattr.value_len);
                        ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
                        break;
                case CEPH_OSD_OP_NOTIFY_ACK:
                        ceph_osdc_msg_data_add(msg,
                                               &op->notify_ack.request_data);
                        break;

                /* reply */
                case CEPH_OSD_OP_STAT:
                        ceph_osdc_msg_data_add(req->r_reply,
                                               &op->raw_data_in);
                        break;
                case CEPH_OSD_OP_READ:
                        ceph_osdc_msg_data_add(req->r_reply,
                                               &op->extent.osd_data);
                        break;
                case CEPH_OSD_OP_LIST_WATCHERS:
                        ceph_osdc_msg_data_add(req->r_reply,
                                               &op->list_watchers.response_data);
                        break;

                /* both */
                case CEPH_OSD_OP_CALL:
                        WARN_ON(op->indata_len != op->cls.class_len +
                                                  op->cls.method_len +
                                                  op->cls.indata_len);
                        ceph_osdc_msg_data_add(msg, &op->cls.request_info);
                        /* optional, can be NONE */
                        ceph_osdc_msg_data_add(msg, &op->cls.request_data);
                        /* optional, can be NONE */
                        ceph_osdc_msg_data_add(req->r_reply,
                                               &op->cls.response_data);
                        break;
                case CEPH_OSD_OP_NOTIFY:
                        ceph_osdc_msg_data_add(msg,
                                               &op->notify.request_data);
                        ceph_osdc_msg_data_add(req->r_reply,
                                               &op->notify.response_data);
                        break;
                }

                data_len += op->indata_len;
        }

        WARN_ON(data_len != msg->data_length);
}
static void encode_pgid(void **p, const struct ceph_pg *pgid)
{
        ceph_encode_8(p, 1);
        ceph_encode_64(p, pgid->pool);
        ceph_encode_32(p, pgid->seed);
        ceph_encode_32(p, -1); /* preferred */
}

static void encode_spgid(void **p, const struct ceph_spg *spgid)
{
        ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
        encode_pgid(p, &spgid->pgid);
        ceph_encode_8(p, spgid->shard);
}
static void encode_oloc(void **p, void *end,
                        const struct ceph_object_locator *oloc)
{
        ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
        ceph_encode_64(p, oloc->pool);
        ceph_encode_32(p, -1); /* preferred */
        ceph_encode_32(p, 0);  /* key len */
        if (oloc->pool_ns)
                ceph_encode_string(p, end, oloc->pool_ns->str,
                                   oloc->pool_ns->len);
        else
                ceph_encode_32(p, 0);
}
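
/*
 * Encode an MOSDOp v8 message up to, but not including, the features
 * trailer.  The front is finalized in encode_request_finish() once
 * the peer's feature bits are known.
 */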
static void encode_request_partial(struct ceph_osd_request *req,
                                   struct ceph_msg *msg)
{
        void *p = msg->front.iov_base;
        void *const end = p + msg->front_alloc_len;
        u32 data_len = 0;
        int i;

        if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
                /* snapshots aren't writeable */
                WARN_ON(req->r_snapid != CEPH_NOSNAP);
        } else {
                WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
                        req->r_data_offset || req->r_snapc);
        }

        setup_request_data(req, msg);

        encode_spgid(&p, &req->r_t.spgid); /* actual spg */
        ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
        ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
        ceph_encode_32(&p, req->r_flags);

        /* reqid */
        ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
        memset(p, 0, sizeof(struct ceph_osd_reqid));
        p += sizeof(struct ceph_osd_reqid);

        /* trace */
        memset(p, 0, sizeof(struct ceph_blkin_trace_info));
        p += sizeof(struct ceph_blkin_trace_info);

        ceph_encode_32(&p, 0); /* client_inc, always 0 */
        ceph_encode_timespec(p, &req->r_mtime);
        p += sizeof(struct ceph_timespec);

        encode_oloc(&p, end, &req->r_t.target_oloc);
        ceph_encode_string(&p, end, req->r_t.target_oid.name,
                           req->r_t.target_oid.name_len);

        /* ops, can imply data */
        ceph_encode_16(&p, req->r_num_ops);
        for (i = 0; i < req->r_num_ops; i++) {
                data_len += osd_req_encode_op(p, &req->r_ops[i]);
                p += sizeof(struct ceph_osd_op);
        }

        ceph_encode_64(&p, req->r_snapid); /* snapid */
        if (req->r_snapc) {
                ceph_encode_64(&p, req->r_snapc->seq);
                ceph_encode_32(&p, req->r_snapc->num_snaps);
                for (i = 0; i < req->r_snapc->num_snaps; i++)
                        ceph_encode_64(&p, req->r_snapc->snaps[i]);
        } else {
                ceph_encode_64(&p, 0); /* snap_seq */
                ceph_encode_32(&p, 0); /* snaps len */
        }

        ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
        BUG_ON(p > end - 8); /* space for features */

        msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
        /* front_len is finalized in encode_request_finish() */
        msg->front.iov_len = p - msg->front.iov_base;
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
        msg->hdr.data_len = cpu_to_le32(data_len);
        /*
         * The header "data_off" is a hint to the receiver allowing it
         * to align received data into its buffers such that there's no
         * need to re-copy it before writing it to disk (direct I/O).
         */
        msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

        dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
             req->r_t.target_oid.name, req->r_t.target_oid.name_len);
}
static void encode_request_finish(struct ceph_msg *msg)
{
        void *p = msg->front.iov_base;
        void *const partial_end = p + msg->front.iov_len;
        void *const end = p + msg->front_alloc_len;

        if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
                /* luminous OSD -- encode features and be done */
                p = partial_end;
                ceph_encode_64(&p, msg->con->peer_features);
        } else {
                struct {
                        char spgid[CEPH_ENCODING_START_BLK_LEN +
                                   CEPH_PGID_ENCODING_LEN + 1];
                        __le32 hash;
                        __le32 epoch;
                        __le32 flags;
                        char reqid[CEPH_ENCODING_START_BLK_LEN +
                                   sizeof(struct ceph_osd_reqid)];
                        char trace[sizeof(struct ceph_blkin_trace_info)];
                        __le32 client_inc;
                        struct ceph_timespec mtime;
                } __packed head;
                struct ceph_pg pgid;
                void *oloc, *oid, *tail;
                int oloc_len, oid_len, tail_len;
                int len;

                /*
                 * Pre-luminous OSD -- reencode v8 into v4 using @head
                 * as a temporary buffer.  Encode the raw PG; the rest
                 * is just a matter of moving oloc, oid and tail blobs
                 * around.
                 */
                memcpy(&head, p, sizeof(head));
                p += sizeof(head);

                oloc = p;
                p += CEPH_ENCODING_START_BLK_LEN;
                pgid.pool = ceph_decode_64(&p);
                p += 4 + 4; /* preferred, key len */
                len = ceph_decode_32(&p);
                p += len;   /* nspace */
                oloc_len = p - oloc;

                oid = p;
                len = ceph_decode_32(&p);
                p += len;
                oid_len = p - oid;

                tail = p;
                tail_len = partial_end - p;

                p = msg->front.iov_base;
                ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
                ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
                ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
                ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));

                /* reassert_version */
                memset(p, 0, sizeof(struct ceph_eversion));
                p += sizeof(struct ceph_eversion);

                memmove(p, oloc, oloc_len);
                p += oloc_len;

                pgid.seed = le32_to_cpu(head.hash);
                encode_pgid(&p, &pgid); /* raw pg */

                memmove(p, oid, oid_len);
                p += oid_len;

                /* tail -- ops, snapid, snapc, retry_attempt */
                memmove(p, tail, tail_len);
                p += tail_len;

                msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
        }

        BUG_ON(p > end);
        msg->front.iov_len = p - msg->front.iov_base;
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

        dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
             le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
             le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
             le16_to_cpu(msg->hdr.version));
}
/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{
        struct ceph_osd *osd = req->r_osd;

        verify_osd_locked(osd);
        WARN_ON(osd->o_osd != req->r_t.osd);

        /* backoff? */
        if (should_plug_request(req))
                return;

        /*
         * We may have a previously queued request message hanging
         * around.  Cancel it to avoid corrupting the msgr.
         */
        if (req->r_sent)
                ceph_msg_revoke(req->r_request);

        req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
        if (req->r_attempts)
                req->r_flags |= CEPH_OSD_FLAG_RETRY;
        else
                WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);

        encode_request_partial(req, req->r_request);

        dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
             __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
             req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
             req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
             req->r_attempts);

        req->r_t.paused = false;
        req->r_stamp = jiffies;
        req->r_attempts++;

        req->r_sent = osd->o_incarnation;
        req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
        ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}
static void maybe_request_map(struct ceph_osd_client *osdc)
{
        bool continuous = false;

        verify_osdc_locked(osdc);
        WARN_ON(!osdc->osdmap->epoch);

        if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
            ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
            ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
                dout("%s osdc %p continuous\n", __func__, osdc);
                continuous = true;
        } else {
                dout("%s osdc %p onetime\n", __func__, osdc);
        }

        if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
                               osdc->osdmap->epoch + 1, continuous))
                ceph_monc_renew_subs(&osdc->client->monc);
}
static void complete_request(struct ceph_osd_request *req, int err);
static void send_map_check(struct ceph_osd_request *req);
static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_osd *osd;
        enum calc_target_result ct_res;
        bool need_send = false;
        bool promoted = false;
        bool need_abort = false;

        WARN_ON(req->r_tid);
        dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);

again:
        ct_res = calc_target(osdc, &req->r_t, NULL, false);
        if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
                goto promote;

        osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
        if (IS_ERR(osd)) {
                WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
                goto promote;
        }

        if (osdc->osdmap->epoch < osdc->epoch_barrier) {
                dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
                     osdc->epoch_barrier);
                req->r_t.paused = true;
                maybe_request_map(osdc);
        } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
                dout("req %p pausewr\n", req);
                req->r_t.paused = true;
                maybe_request_map(osdc);
        } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
                dout("req %p pauserd\n", req);
                req->r_t.paused = true;
                maybe_request_map(osdc);
        } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
                   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
                                     CEPH_OSD_FLAG_FULL_FORCE)) &&
                   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                    pool_full(osdc, req->r_t.base_oloc.pool))) {
                dout("req %p full/pool_full\n", req);
                pr_warn_ratelimited("FULL or reached pool quota\n");
                req->r_t.paused = true;
                maybe_request_map(osdc);
                if (req->r_abort_on_full)
                        need_abort = true;
        } else if (!osd_homeless(osd)) {
                need_send = true;
        } else {
                maybe_request_map(osdc);
        }

        mutex_lock(&osd->lock);
        /*
         * Assign the tid atomically with send_request() to protect
         * multiple writes to the same object from racing with each
         * other, resulting in out of order ops on the OSDs.
         */
        req->r_tid = atomic64_inc_return(&osdc->last_tid);
        link_request(osd, req);
        if (need_send)
                send_request(req);
        else if (need_abort)
                complete_request(req, -ENOSPC);
        mutex_unlock(&osd->lock);

        if (ct_res == CALC_TARGET_POOL_DNE)
                send_map_check(req);

        if (promoted)
                downgrade_write(&osdc->lock);
        return;

promote:
        up_read(&osdc->lock);
        down_write(&osdc->lock);
        wrlocked = true;
        promoted = true;
        goto again;
}
static void account_request(struct ceph_osd_request *req)
{
	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));

	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
	atomic_inc(&req->r_osdc->num_requests);

	req->r_start_stamp = jiffies;
}

static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	ceph_osdc_get_request(req);
	account_request(req);
	__submit_request(req, wrlocked);
}
static void finish_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	if (req->r_osd)
		unlink_request(req->r_osd, req);
	atomic_dec(&osdc->num_requests);

	/*
	 * If an OSD has failed or returned and a request has been sent
	 * twice, it's possible to get a reply and end up here while the
	 * request message is queued for delivery.  We will ignore the
	 * reply, so not a big deal, but better to try and catch it.
	 */
	ceph_msg_revoke(req->r_request);
	ceph_msg_revoke_incoming(req->r_reply);
}

static void __complete_request(struct ceph_osd_request *req)
{
	if (req->r_callback) {
		dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
		     req->r_tid, req->r_callback, req->r_result);
		req->r_callback(req);
	}
}

/*
 * This is open-coded in handle_reply().
 */
static void complete_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	req->r_result = err;
	finish_request(req);
	__complete_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}
static void cancel_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (!lookup_req)
		return;

	WARN_ON(lookup_req != req);
	erase_request_mc(&osdc->map_checks, req);
	ceph_osdc_put_request(req);
}

static void cancel_request(struct ceph_osd_request *req)
{
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	cancel_map_check(req);
	finish_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}

static void abort_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	cancel_map_check(req);
	complete_request(req, err);
}
static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	if (likely(eb > osdc->epoch_barrier)) {
		dout("updating epoch_barrier from %u to %u\n",
		     osdc->epoch_barrier, eb);
		osdc->epoch_barrier = eb;
		/* Request map if we're not to the barrier yet */
		if (eb > osdc->osdmap->epoch)
			maybe_request_map(osdc);
	}
}

void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	down_read(&osdc->lock);
	if (unlikely(eb > osdc->epoch_barrier)) {
		up_read(&osdc->lock);
		down_write(&osdc->lock);
		update_epoch_barrier(osdc, eb);
		up_write(&osdc->lock);
	} else {
		up_read(&osdc->lock);
	}
}
EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
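
/*
 * The epoch barrier keeps ENOSPC handling sane across resubmits: a
 * caller that saw requests aborted at map epoch N raises the barrier to
 * N so that nothing new is sent until the local osdmap has caught up.
 * For example, a hypothetical caller that had a write aborted at epoch
 * 42 would do
 *
 *	ceph_osdc_update_epoch_barrier(osdc, 42);
 *
 * and __submit_request() pauses anything submitted before
 * osdmap->epoch reaches 42.
 */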
/*
 * Drop all pending requests that are stalled waiting on a full condition to
 * clear, and complete them with ENOSPC as the return code.  Set the
 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
 * aborted.
 */
static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *m;
	bool victims = false;

	dout("enter abort_on_full\n");

	if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
		goto out;

	/* Scan list and see if there is anything to abort */
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		m = rb_first(&osd->o_requests);
		while (m) {
			struct ceph_osd_request *req = rb_entry(m,
					struct ceph_osd_request, r_node);

			if (req->r_abort_on_full) {
				victims = true;
				break;
			}
			m = rb_next(m);
		}
		if (victims)
			break;
	}

	if (!victims)
		goto out;

	/*
	 * Update the barrier to current epoch if it's behind that point,
	 * since we know we have some calls to be aborted in the tree.
	 */
	update_epoch_barrier(osdc, osdc->osdmap->epoch);

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		m = rb_first(&osd->o_requests);
		while (m) {
			struct ceph_osd_request *req = rb_entry(m,
					struct ceph_osd_request, r_node);

			m = rb_next(m); /* abort_request() */

			if (req->r_abort_on_full &&
			    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
			     pool_full(osdc, req->r_t.target_oloc.pool)))
				abort_request(req, -ENOSPC);
		}
	}
out:
	dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
}
static void check_pool_dne(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (req->r_attempts) {
		/*
		 * We sent a request earlier, which means that
		 * previously the pool existed, and now it does not
		 * (i.e., it was deleted).
		 */
		req->r_map_dne_bound = map->epoch;
		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
		     req->r_tid);
	} else {
		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
	}

	if (req->r_map_dne_bound) {
		if (map->epoch >= req->r_map_dne_bound) {
			/* we had a new enough map */
			pr_info_ratelimited("tid %llu pool does not exist\n",
					    req->r_tid);
			complete_request(req, -ENOENT);
		}
	} else {
		send_map_check(req);
	}
}

static void map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_request *req;
	u64 tid = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	req = lookup_request_mc(&osdc->map_checks, tid);
	if (!req) {
		dout("%s tid %llu dne\n", __func__, tid);
		goto out_unlock;
	}

	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
	if (!req->r_map_dne_bound)
		req->r_map_dne_bound = greq->u.newest;
	erase_request_mc(&osdc->map_checks, req);
	check_pool_dne(req);

	ceph_osdc_put_request(req);
out_unlock:
	up_write(&osdc->lock);
}

static void send_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (lookup_req) {
		WARN_ON(lookup_req != req);
		return;
	}

	ceph_osdc_get_request(req);
	insert_request_mc(&osdc->map_checks, req);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  map_check_cb, req->r_tid);
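	WARN_ON(ret);
}

/*
 * The three functions above implement pool-existence checking: when
 * calc_target() reports CALC_TARGET_POOL_DNE, send_map_check() asks the
 * monitor for the newest osdmap epoch, map_check_cb() records it in
 * r_map_dne_bound, and check_pool_dne() fails the request with -ENOENT
 * once a map at least that new has been seen and the pool is still gone.
 */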
/*
 * lingering requests, watch/notify v2 infrastructure
 */
static void linger_release(struct kref *kref)
{
	struct ceph_osd_linger_request *lreq =
	    container_of(kref, struct ceph_osd_linger_request, kref);

	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
	     lreq->reg_req, lreq->ping_req);
	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
	WARN_ON(!list_empty(&lreq->scan_item));
	WARN_ON(!list_empty(&lreq->pending_lworks));
	WARN_ON(lreq->osd);

	if (lreq->reg_req)
		ceph_osdc_put_request(lreq->reg_req);
	if (lreq->ping_req)
		ceph_osdc_put_request(lreq->ping_req);
	target_destroy(&lreq->t);
	kfree(lreq);
}

static void linger_put(struct ceph_osd_linger_request *lreq)
{
	if (lreq)
		kref_put(&lreq->kref, linger_release);
}

static struct ceph_osd_linger_request *
linger_get(struct ceph_osd_linger_request *lreq)
{
	kref_get(&lreq->kref);
	return lreq;
}

static struct ceph_osd_linger_request *
linger_alloc(struct ceph_osd_client *osdc)
{
	struct ceph_osd_linger_request *lreq;

	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
	if (!lreq)
		return NULL;

	kref_init(&lreq->kref);
	mutex_init(&lreq->lock);
	RB_CLEAR_NODE(&lreq->node);
	RB_CLEAR_NODE(&lreq->osdc_node);
	RB_CLEAR_NODE(&lreq->mc_node);
	INIT_LIST_HEAD(&lreq->scan_item);
	INIT_LIST_HEAD(&lreq->pending_lworks);
	init_completion(&lreq->reg_commit_wait);
	init_completion(&lreq->notify_finish_wait);

	lreq->osdc = osdc;
	target_init(&lreq->t);

	dout("%s lreq %p\n", __func__, lreq);
	return lreq;
}

DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
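
/*
 * A linger request can sit in up to three rbtrees at once, each keyed
 * by linger_id: the owning OSD session (node), the global osdc index
 * (osdc_node) and the map-check index (mc_node).
 */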
/*
 * Create linger request <-> OSD session relation.
 *
 * @lreq has to be registered, @osd may be homeless.
 */
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(!lreq->linger_id || lreq->osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_linger(&osd->o_linger_requests, lreq);
	lreq->osd = osd;
}

static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(lreq->osd != osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	lreq->osd = NULL;
	erase_linger(&osd->o_linger_requests, lreq);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __linger_registered(struct ceph_osd_linger_request *lreq)
{
	verify_osdc_locked(lreq->osdc);

	return !RB_EMPTY_NODE(&lreq->osdc_node);
}

static bool linger_registered(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	bool registered;

	down_read(&osdc->lock);
	registered = __linger_registered(lreq);
	up_read(&osdc->lock);

	return registered;
}

static void linger_register(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);
	WARN_ON(lreq->linger_id);

	linger_get(lreq);
	lreq->linger_id = ++osdc->last_linger_id;
	insert_linger_osdc(&osdc->linger_requests, lreq);
}

static void linger_unregister(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);

	erase_linger_osdc(&osdc->linger_requests, lreq);
	linger_put(lreq);
}

static void cancel_linger_request(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	WARN_ON(!req->r_linger);
	cancel_request(req);
	linger_put(lreq);
}
struct linger_work {
	struct work_struct work;
	struct ceph_osd_linger_request *lreq;
	struct list_head pending_item;
	unsigned long queued_stamp;

	union {
		struct {
			u64 notify_id;
			u64 notifier_id;
			void *payload; /* points into @msg front */
			size_t payload_len;
			struct ceph_msg *msg; /* for ceph_msg_put() */
		} notify;
		struct {
			int err;
		} error;
	};
};

static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
				       work_func_t workfn)
{
	struct linger_work *lwork;

	lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
	if (!lwork)
		return NULL;

	INIT_WORK(&lwork->work, workfn);
	INIT_LIST_HEAD(&lwork->pending_item);
	lwork->lreq = linger_get(lreq);

	return lwork;
}

static void lwork_free(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	mutex_lock(&lreq->lock);
	list_del(&lwork->pending_item);
	mutex_unlock(&lreq->lock);

	linger_put(lreq);
	kfree(lwork);
}

static void lwork_queue(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_lreq_locked(lreq);
	WARN_ON(!list_empty(&lwork->pending_item));

	lwork->queued_stamp = jiffies;
	list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
	queue_work(osdc->notify_wq, &lwork->work);
}
static void do_watch_notify(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	WARN_ON(!lreq->is_watch);
	dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
	     __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
	     lwork->notify.payload_len);
	lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
		  lwork->notify.notifier_id, lwork->notify.payload,
		  lwork->notify.payload_len);

out:
	ceph_msg_put(lwork->notify.msg);
	lwork_free(lwork);
}

static void do_watch_error(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
	lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);

out:
	lwork_free(lwork);
}

static void queue_watch_error(struct ceph_osd_linger_request *lreq)
{
	struct linger_work *lwork;

	lwork = lwork_alloc(lreq, do_watch_error);
	if (!lwork) {
		pr_err("failed to allocate error-lwork\n");
		return;
	}

	lwork->error.err = lreq->last_error;
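	lwork_queue(lwork);
}

/*
 * Watch/notify and watch-error callbacks are never invoked directly
 * from message-handling context.  They are wrapped in a linger_work
 * item and queued on osdc->notify_wq, so user callbacks run without
 * any osdc or session locks held.
 */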
static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
				       int result)
{
	if (!completion_done(&lreq->reg_commit_wait)) {
		lreq->reg_commit_error = (result <= 0 ? result : 0);
		complete_all(&lreq->reg_commit_wait);
	}
}

static void linger_commit_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
	     lreq->linger_id, req->r_result);
	linger_reg_commit_complete(lreq, req->r_result);
	lreq->committed = true;

	if (!lreq->is_watch) {
		struct ceph_osd_data *osd_data =
		    osd_req_op_data(req, 0, notify, response_data);
		void *p = page_address(osd_data->pages[0]);

		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
			osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);

		/* make note of the notify_id */
		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
			lreq->notify_id = ceph_decode_64(&p);
			dout("lreq %p notify_id %llu\n", lreq,
			     lreq->notify_id);
		} else {
			dout("lreq %p no notify_id\n", lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static int normalize_watch_error(int err)
{
	/*
	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
	 * notification and a failure to reconnect because we raced with
	 * the delete appear the same to the user.
	 */
	if (err == -ENOENT)
		err = -ENOTCONN;

	return err;
}

static void linger_reconnect_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
	if (req->r_result < 0) {
		if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}
static void send_linger(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req = lreq->reg_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	verify_osdc_wrlocked(req->r_osdc);
	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = lreq->t.flags;
	req->r_mtime = lreq->mtime;

	mutex_lock(&lreq->lock);
	if (lreq->is_watch && lreq->committed) {
		WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
			op->watch.cookie != lreq->linger_id);
		op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
		op->watch.gen = ++lreq->register_gen;
		dout("lreq %p reconnect register_gen %u\n", lreq,
		     op->watch.gen);
		req->r_callback = linger_reconnect_cb;
	} else {
		if (!lreq->is_watch)
			lreq->notify_id = 0;
		else
			WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
		dout("lreq %p register\n", lreq);
		req->r_callback = linger_commit_cb;
	}
	mutex_unlock(&lreq->lock);

	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	submit_request(req, true);
}
static void linger_ping_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
	     lreq->last_error);
	if (lreq->register_gen == req->r_ops[0].watch.gen) {
		if (!req->r_result) {
			lreq->watch_valid_thru = lreq->ping_sent;
		} else if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	} else {
		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
		     lreq->register_gen, req->r_ops[0].watch.gen);
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static void send_linger_ping(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_request *req = lreq->ping_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("%s PAUSERD\n", __func__);
		return;
	}

	lreq->ping_sent = jiffies;
	dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
	     __func__, lreq, lreq->linger_id, lreq->ping_sent,
	     lreq->register_gen);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	target_copy(&req->r_t, &lreq->t);

	WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
		op->watch.cookie != lreq->linger_id ||
		op->watch.op != CEPH_OSD_WATCH_OP_PING);
	op->watch.gen = lreq->register_gen;
	req->r_callback = linger_ping_cb;
	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	ceph_osdc_get_request(req);
	account_request(req);
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(lreq->osd, req);
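	send_request(req);
}

/*
 * register_gen ties a ping (and its pong) to a particular watch
 * registration.  linger_ping_cb() above discards pongs whose gen does
 * not match, so an error from a ping sent before a reconnect cannot
 * clobber the state of the new registration.
 */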
static void linger_submit(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd *osd;

	calc_target(osdc, &lreq->t, NULL, false);
	osd = lookup_create_osd(osdc, lreq->t.osd, true);
	link_linger(osd, lreq);

	send_linger(lreq);
}

static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (!lookup_lreq)
		return;

	WARN_ON(lookup_lreq != lreq);
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	linger_put(lreq);
}

/*
 * @lreq has to be both registered and linked.
 */
static void __linger_cancel(struct ceph_osd_linger_request *lreq)
{
	if (lreq->is_watch && lreq->ping_req->r_osd)
		cancel_linger_request(lreq->ping_req);
	if (lreq->reg_req->r_osd)
		cancel_linger_request(lreq->reg_req);
	cancel_linger_map_check(lreq);
	unlink_linger(lreq->osd, lreq);
	linger_unregister(lreq);
}

static void linger_cancel(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	down_write(&osdc->lock);
	if (__linger_registered(lreq))
		__linger_cancel(lreq);
	up_write(&osdc->lock);
}
static void send_linger_map_check(struct ceph_osd_linger_request *lreq);

static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (lreq->register_gen) {
		lreq->map_dne_bound = map->epoch;
		dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
		     lreq, lreq->linger_id);
	} else {
		dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
		     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
		     map->epoch);
	}

	if (lreq->map_dne_bound) {
		if (map->epoch >= lreq->map_dne_bound) {
			/* we had a new enough map */
			pr_info("linger_id %llu pool does not exist\n",
				lreq->linger_id);
			linger_reg_commit_complete(lreq, -ENOENT);
			__linger_cancel(lreq);
		}
	} else {
		send_linger_map_check(lreq);
	}
}

static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_linger_request *lreq;
	u64 linger_id = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
	if (!lreq) {
		dout("%s linger_id %llu dne\n", __func__, linger_id);
		goto out_unlock;
	}

	dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
	     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
	     greq->u.newest);
	if (!lreq->map_dne_bound)
		lreq->map_dne_bound = greq->u.newest;
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	check_linger_pool_dne(lreq);

	linger_put(lreq);
out_unlock:
	up_write(&osdc->lock);
}

static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (lookup_lreq) {
		WARN_ON(lookup_lreq != lreq);
		return;
	}

	linger_get(lreq);
	insert_linger_mc(&osdc->linger_map_checks, lreq);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  linger_map_check_cb, lreq->linger_id);
	WARN_ON(ret);
}

static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
	return ret ?: lreq->reg_commit_error;
}

static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
	return ret ?: lreq->notify_finish_error;
}
/*
 * Timeout callback, called every N seconds.  When 1 or more OSD
 * requests has been active for more than N seconds, we send a keepalive
 * (tag + timestamp) to its OSD to ensure any communications channel
 * reset is detected.
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_options *opts = osdc->client->options;
	unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
	unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
	LIST_HEAD(slow_osds);
	struct rb_node *n, *p;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
		bool found = false;

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_stamp, cutoff)) {
				dout(" req %p tid %llu on osd%d is laggy\n",
				     req, req->r_tid, osd->o_osd);
				found = true;
			}
			if (opts->osd_request_timeout &&
			    time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
						   req->r_tid, osd->o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
		for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
			struct ceph_osd_linger_request *lreq =
			    rb_entry(p, struct ceph_osd_linger_request, node);

			dout(" lreq %p linger_id %llu is served by osd%d\n",
			     lreq, lreq->linger_id, osd->o_osd);
			found = true;

			mutex_lock(&lreq->lock);
			if (lreq->is_watch && lreq->committed && !lreq->last_error)
				send_linger_ping(lreq);
			mutex_unlock(&lreq->lock);
		}

		if (found)
			list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}

	if (opts->osd_request_timeout) {
		for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p); /* abort_request() */

			if (time_before(req->r_start_stamp, expiry_cutoff)) {
				pr_err_ratelimited("tid %llu on osd%d timeout\n",
						   req->r_tid,
						   osdc->homeless_osd.o_osd);
				abort_request(req, -ETIMEDOUT);
			}
		}
	}

	if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
		maybe_request_map(osdc);

	while (!list_empty(&slow_osds)) {
		struct ceph_osd *osd = list_first_entry(&slow_osds,
							struct ceph_osd,
							o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
}

static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
	struct ceph_osd *osd, *nosd;

	dout("%s osdc %p\n", __func__, osdc);
	down_write(&osdc->lock);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;

		WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
		WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
		close_osd(osd);
	}

	up_write(&osdc->lock);
	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}
static int ceph_oloc_decode(void **p, void *end,
			    struct ceph_object_locator *oloc)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret = 0;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_v < 3) {
		pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	if (struct_cv > 6) {
		pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	oloc->pool = ceph_decode_64(p);
	*p += 4; /* skip preferred */

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_object_locator::key is set\n");
		goto e_inval;
	}

	if (struct_v >= 5) {
		bool changed = false;

		len = ceph_decode_32(p);
		if (len > 0) {
			ceph_decode_need(p, end, len, e_inval);
			if (!oloc->pool_ns ||
			    ceph_compare_string(oloc->pool_ns, *p, len))
				changed = true;
			*p += len;
		} else {
			if (oloc->pool_ns)
				changed = true;
		}
		if (changed) {
			/* redirect changes namespace */
			pr_warn("ceph_object_locator::nspace is changed\n");
			goto e_inval;
		}
	}

	if (struct_v >= 6) {
		s64 hash = ceph_decode_64(p);

		if (hash != -1) {
			pr_warn("ceph_object_locator::hash is set\n");
			goto e_inval;
		}
	}

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}

static int ceph_redirect_decode(void **p, void *end,
				struct ceph_request_redirect *redir)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret;

	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_cv > 1) {
		pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	ret = ceph_oloc_decode(p, end, &redir->oloc);
	if (ret)
		goto out;

	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_request_redirect::object_name is set\n");
		goto e_inval;
	}

	len = ceph_decode_32(p);
	*p += len; /* skip osd_instructions */

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}
struct MOSDOpReply {
	struct ceph_pg pgid;
	u64 flags;
	int result;
	u32 epoch;
	int num_ops;
	u32 outdata_len[CEPH_OSD_MAX_OPS];
	s32 rval[CEPH_OSD_MAX_OPS];
	int retry_attempt;
	struct ceph_eversion replay_version;
	u64 user_version;
	struct ceph_request_redirect redirect;
};

static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u16 version = le16_to_cpu(msg->hdr.version);
	struct ceph_eversion bad_replay_version;
	u8 decode_redir;
	u32 len;
	int ret;
	int i;

	ceph_decode_32_safe(&p, end, len, e_inval);
	ceph_decode_need(&p, end, len, e_inval);
	p += len; /* skip oid */

	ret = ceph_decode_pgid(&p, end, &m->pgid);
	if (ret)
		return ret;

	ceph_decode_64_safe(&p, end, m->flags, e_inval);
	ceph_decode_32_safe(&p, end, m->result, e_inval);
	ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
	memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
	p += sizeof(bad_replay_version);
	ceph_decode_32_safe(&p, end, m->epoch, e_inval);

	ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
	if (m->num_ops > ARRAY_SIZE(m->outdata_len))
		goto e_inval;

	ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
			 e_inval);
	for (i = 0; i < m->num_ops; i++) {
		struct ceph_osd_op *op = p;

		m->outdata_len[i] = le32_to_cpu(op->payload_len);
		p += sizeof(*op);
	}

	ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
	for (i = 0; i < m->num_ops; i++)
		ceph_decode_32_safe(&p, end, m->rval[i], e_inval);

	if (version >= 5) {
		ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
		memcpy(&m->replay_version, p, sizeof(m->replay_version));
		p += sizeof(m->replay_version);
		ceph_decode_64_safe(&p, end, m->user_version, e_inval);
	} else {
		m->replay_version = bad_replay_version; /* struct */
		m->user_version = le64_to_cpu(m->replay_version.version);
	}

	if (version >= 6) {
		if (version >= 7)
			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
		else
			decode_redir = 1;
	} else {
		decode_redir = 0;
	}

	if (decode_redir) {
		ret = ceph_redirect_decode(&p, end, &m->redirect);
		if (ret)
			return ret;
	} else {
		ceph_oloc_init(&m->redirect.oloc);
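	}

	return 0;

e_inval:
	return -EINVAL;
}

/*
 * decode_MOSDOpReply() accepts several wire versions: v4 is the
 * baseline, v5 adds replay_version/user_version, v6 always carries a
 * redirect, and v7+ makes the redirect conditional on the decode_redir
 * byte.
 */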
/*
 * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
 * specified.
 */
static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_osd_request *req;
	struct MOSDOpReply m;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 data_len = 0;
	int ret;
	int i;

	dout("%s msg %p tid %llu\n", __func__, msg, tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
		goto out_unlock_session;
	}

	m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
	ret = decode_MOSDOpReply(msg, &m);
	m.redirect.oloc.pool_ns = NULL;
	if (ret) {
		pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
		       req->r_tid, ret);
		ceph_msg_dump(msg);
		goto fail_request;
	}
	dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
	     m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
	     le64_to_cpu(m.replay_version.version), m.user_version);

	if (m.retry_attempt >= 0) {
		if (m.retry_attempt != req->r_attempts - 1) {
			dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
			     req, req->r_tid, m.retry_attempt,
			     req->r_attempts - 1);
			goto out_unlock_session;
		}
	} else {
		WARN_ON(1); /* MOSDOpReply v4 is assumed */
	}

	if (!ceph_oloc_empty(&m.redirect.oloc)) {
		dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
		     m.redirect.oloc.pool);
		unlink_request(osd, req);
		mutex_unlock(&osd->lock);

		/*
		 * Not ceph_oloc_copy() - changing pool_ns is not
		 * supported.
		 */
		req->r_t.target_oloc.pool = m.redirect.oloc.pool;
		req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
		req->r_tid = 0;
		__submit_request(req, false);
		goto out_unlock_osdc;
	}

	if (m.num_ops != req->r_num_ops) {
		pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
		       req->r_num_ops, req->r_tid);
		goto fail_request;
	}
	for (i = 0; i < req->r_num_ops; i++) {
		dout(" req %p tid %llu op %d rval %d len %u\n", req,
		     req->r_tid, i, m.rval[i], m.outdata_len[i]);
		req->r_ops[i].rval = m.rval[i];
		req->r_ops[i].outdata_len = m.outdata_len[i];
		data_len += m.outdata_len[i];
	}
	if (data_len != le32_to_cpu(msg->hdr.data_len)) {
		pr_err("sum of lens %u != %u for tid %llu\n", data_len,
		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
		goto fail_request;
	}
	dout("%s req %p tid %llu result %d data_len %u\n", __func__,
	     req, req->r_tid, m.result, data_len);

	/*
	 * Since we only ever request ONDISK, we should only ever get
	 * one (type of) reply back.
	 */
	WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
	req->r_result = m.result ?: data_len;
	finish_request(req);
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);

	__complete_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
	return;

fail_request:
	complete_request(req, -EIO);
out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
}
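
/*
 * The success path above open-codes complete_request():
 * finish_request() runs with the session mutex and osdc lock held, but
 * the callback and completion fire only after both are dropped, so a
 * callback that resubmits or waits cannot deadlock against
 * handle_reply().
 */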
static void set_pool_was_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		pi->was_full = __pool_full(pi);
	}
}

static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return pi->was_full && !__pool_full(pi);
}

static enum calc_target_result
recalc_linger_target(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	enum calc_target_result ct_res;

	ct_res = calc_target(osdc, &lreq->t, NULL, true);
	if (ct_res == CALC_TARGET_NEED_RESEND) {
		struct ceph_osd *osd;

		osd = lookup_create_osd(osdc, lreq->t.osd, true);
		if (osd != lreq->osd) {
			unlink_linger(lreq->osd, lreq);
			link_linger(osd, lreq);
		}
	}

	return ct_res;
}
/*
 * Requeue requests whose mapping to an OSD has changed.
 */
static void scan_requests(struct ceph_osd *osd,
			  bool force_resend,
			  bool cleared_full,
			  bool check_pool_cleared_full,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;
	bool force_resend_writes;

	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* recalc_linger_target() */

		dout("%s lreq %p linger_id %llu\n", __func__, lreq,
		     lreq->linger_id);
		ct_res = recalc_linger_target(lreq);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
			if (!force_resend && !force_resend_writes)
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_linger_map_check(lreq);
			/*
			 * scan_requests() for the previous epoch(s)
			 * may have already added it to the list, since
			 * it's not unlinked here.
			 */
			if (list_empty(&lreq->scan_item))
				list_add_tail(&lreq->scan_item, need_resend_linger);
			break;
		case CALC_TARGET_POOL_DNE:
			list_del_init(&lreq->scan_item);
			check_linger_pool_dne(lreq);
			break;
		}
	}

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* unlink_request(), check_pool_dne() */

		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
		ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
				     false);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
			if (!force_resend &&
			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
			     !force_resend_writes))
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_map_check(req);
			unlink_request(osd, req);
			insert_request(need_resend, req);
			break;
		case CALC_TARGET_POOL_DNE:
			check_pool_dne(req);
			break;
		}
	}
}
static int handle_one_map(struct ceph_osd_client *osdc,
			  void *p, void *end, bool incremental,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osdmap *newmap;
	struct rb_node *n;
	bool skipped_map = false;
	bool was_full;

	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	set_pool_was_full(osdc);

	if (incremental)
		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
	else
		newmap = ceph_osdmap_decode(&p, end);
	if (IS_ERR(newmap))
		return PTR_ERR(newmap);

	if (newmap != osdc->osdmap) {
		/*
		 * Preserve ->was_full before destroying the old map.
		 * For pools that weren't in the old map, ->was_full
		 * should be false.
		 */
		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
			struct ceph_pg_pool_info *pi =
			    rb_entry(n, struct ceph_pg_pool_info, node);
			struct ceph_pg_pool_info *old_pi;

			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
			if (old_pi)
				pi->was_full = old_pi->was_full;
			else
				WARN_ON(pi->was_full);
		}

		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 < newmap->epoch) {
			WARN_ON(incremental);
			skipped_map = true;
		}

		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = newmap;
	}

	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
		      need_resend, need_resend_linger);

	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n); /* close_osd() */

		scan_requests(osd, skipped_map, was_full, true, need_resend,
			      need_resend_linger);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
			   sizeof(struct ceph_entity_addr)))
			close_osd(osd);
	}

	return 0;
}

static void kick_requests(struct ceph_osd_client *osdc,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_linger_request *lreq, *nlreq;
	enum calc_target_result ct_res;
	struct rb_node *n;

	/* make sure need_resend targets reflect latest map */
	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n);

		if (req->r_t.epoch < osdc->osdmap->epoch) {
			ct_res = calc_target(osdc, &req->r_t, NULL, false);
			if (ct_res == CALC_TARGET_POOL_DNE) {
				erase_request(need_resend, req);
				check_pool_dne(req);
			}
		}
	}

	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		struct ceph_osd *osd;

		n = rb_next(n);
		erase_request(need_resend, req); /* before link_request() */

		osd = lookup_create_osd(osdc, req->r_t.osd, true);
		link_request(osd, req);
		if (!req->r_linger) {
			if (!osd_homeless(osd) && !req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}

	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
		if (!osd_homeless(lreq->osd))
			send_linger(lreq);

		list_del_init(&lreq->scan_item);
	}
}
/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_fsid fsid;
	struct rb_root need_resend = RB_ROOT;
	LIST_HEAD(need_resend_linger);
	bool handled_incremental = false;
	bool was_pauserd, was_pausewr;
	bool pauserd, pausewr;
	int err;

	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
	down_write(&osdc->lock);

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		goto bad;

	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		      have_pool_full(osdc);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, true,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
			handled_incremental = true;
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p += maplen;
		nr_maps--;
	}
	if (handled_incremental)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, false,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
		}
		p += maplen;
		nr_maps--;
	}

done:
	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		  have_pool_full(osdc);
	if (was_pauserd || was_pausewr || pauserd || pausewr ||
	    osdc->osdmap->epoch < osdc->epoch_barrier)
		maybe_request_map(osdc);

	kick_requests(osdc, &need_resend, &need_resend_linger);

	ceph_osdc_abort_on_full(osdc);
	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			  osdc->osdmap->epoch);
	up_write(&osdc->lock);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->lock);
}
/*
 * Resubmit requests pending on the given osd.
 */
static void kick_osd_requests(struct ceph_osd *osd)
{
	struct rb_node *n;

	clear_backoffs(osd);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* cancel_linger_request() */

		if (!req->r_linger) {
			if (!req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}
	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		send_linger(lreq);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_fault(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	down_write(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock;
	}

	if (!reopen_osd(osd))
		kick_osd_requests(osd);
	maybe_request_map(osdc);

out_unlock:
	up_write(&osdc->lock);
}
struct MOSDBackoff {
	struct ceph_spg spgid;
	u32 map_epoch;
	u8 op;
	u64 id;
	struct ceph_hobject_id *begin;
	struct ceph_hobject_id *end;
};

static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
	if (ret)
		return ret;
	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
	if (ret)
		return ret;
	ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);

	ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
	ceph_decode_8_safe(&p, end, m->op, e_inval);
	ceph_decode_64_safe(&p, end, m->id, e_inval);

	m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
	if (!m->begin)
		return -ENOMEM;

	ret = decode_hoid(&p, end, m->begin);
	if (ret) {
		free_hoid(m->begin);
		return ret;
	}

	m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
	if (!m->end) {
		free_hoid(m->begin);
		return -ENOMEM;
	}

	ret = decode_hoid(&p, end, m->end);
	if (ret) {
		free_hoid(m->begin);
		free_hoid(m->end);
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_msg *create_backoff_message(
				const struct ceph_osd_backoff *backoff,
				u32 map_epoch)
{
	struct ceph_msg *msg;
	void *p, *end;
	int msg_size;

	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 1 + 8; /* map_epoch, op, id */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->begin);
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->end);

	msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
	if (!msg)
		return NULL;

	p = msg->front.iov_base;
	end = p + msg->front_alloc_len;

	encode_spgid(&p, &backoff->spgid);
	ceph_encode_32(&p, map_epoch);
	ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
	ceph_encode_64(&p, backoff->id);
	encode_hoid(&p, end, backoff->begin);
	encode_hoid(&p, end, backoff->end);
	BUG_ON(p != end);

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}
static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_msg *msg;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
	if (!spg) {
		spg = alloc_spg_mapping();
		if (!spg) {
			pr_err("%s failed to allocate spg\n", __func__);
			return;
		}
		spg->spgid = m->spgid; /* struct */
		insert_spg_mapping(&osd->o_backoff_mappings, spg);
	}

	backoff = alloc_backoff();
	if (!backoff) {
		pr_err("%s failed to allocate backoff\n", __func__);
		return;
	}
	backoff->spgid = m->spgid; /* struct */
	backoff->id = m->id;
	backoff->begin = m->begin;
	m->begin = NULL; /* backoff now owns this */
	backoff->end = m->end;
	m->end = NULL;   /* ditto */

	insert_backoff(&spg->backoffs, backoff);
	insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);

	/*
	 * Ack with original backoff's epoch so that the OSD can
	 * discard this if there was a PG split.
	 */
	msg = create_backoff_message(backoff, m->map_epoch);
	if (!msg) {
		pr_err("%s failed to allocate msg\n", __func__);
		return;
	}
	ceph_con_send(&osd->o_con, msg);
}

static bool target_contained_by(const struct ceph_osd_request_target *t,
				const struct ceph_hobject_id *begin,
				const struct ceph_hobject_id *end)
{
	struct ceph_hobject_id hoid;
	int cmp;

	hoid_fill_from_target(&hoid, t);
	cmp = hoid_compare(&hoid, begin);
	return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
}

static void handle_backoff_unblock(struct ceph_osd *osd,
				   const struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct rb_node *n;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
	if (!backoff) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		return;
	}

	if (hoid_compare(backoff->begin, m->begin) &&
	    hoid_compare(backoff->end, m->end)) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		/* unblock it anyway... */
	}

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
	BUG_ON(!spg);

	erase_backoff(&spg->backoffs, backoff);
	erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
	free_backoff(backoff);

	if (RB_EMPTY_ROOT(&spg->backoffs)) {
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}

	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
			/*
			 * Match against @m, not @backoff -- the PG may
			 * have split on the OSD.
			 */
			if (target_contained_by(&req->r_t, m->begin, m->end)) {
				/*
				 * If no other installed backoff applies,
				 * resend.
				 */
				send_request(req);
			}
		}
	}
}
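
/*
 * target_contained_by() treats the backoff range as half-open,
 * [begin, end): a target equal to begin is blocked, one equal to end
 * is not.  The unblock path above deliberately compares requests
 * against the range in the unblock message rather than the stored
 * backoff, since the PG may have split on the OSD side.
 */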
static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct MOSDBackoff m;
	int ret;

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		up_read(&osdc->lock);
		return;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	ret = decode_MOSDBackoff(msg, &m);
	if (ret) {
		pr_err("failed to decode MOSDBackoff: %d\n", ret);
		ceph_msg_dump(msg);
		goto out_unlock;
	}

	switch (m.op) {
	case CEPH_OSD_BACKOFF_OP_BLOCK:
		handle_backoff_block(osd, &m);
		break;
	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
		handle_backoff_unblock(osd, &m);
		break;
	default:
		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
	}

	free_hoid(m.begin);
	free_hoid(m.end);

out_unlock:
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);
}
/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	struct ceph_osd_linger_request *lreq;
	struct linger_work *lwork;
	u8 proto_ver, opcode;
	u64 cookie, notify_id;
	u64 notifier_id = 0;
	s32 return_code = 0;
	void *payload = NULL;
	u32 payload_len = 0;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	p += 8; /* skip ver */
	ceph_decode_64_safe(&p, end, notify_id, bad);

	if (proto_ver >= 1) {
		ceph_decode_32_safe(&p, end, payload_len, bad);
		ceph_decode_need(&p, end, payload_len, bad);
		payload = p;
		p += payload_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 2)
		ceph_decode_32_safe(&p, end, return_code, bad);

	if (le16_to_cpu(msg->hdr.version) >= 3)
		ceph_decode_64_safe(&p, end, notifier_id, bad);

	down_read(&osdc->lock);
	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
	if (!lreq) {
		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
		     cookie);
		goto out_unlock_osdc;
	}

	mutex_lock(&lreq->lock);
	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
	     opcode, cookie, lreq, lreq->is_watch);
	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
		if (!lreq->last_error) {
			lreq->last_error = -ENOTCONN;
			queue_watch_error(lreq);
		}
	} else if (!lreq->is_watch) {
		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
		if (lreq->notify_id && lreq->notify_id != notify_id) {
			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
			     lreq->notify_id, notify_id);
		} else if (!completion_done(&lreq->notify_finish_wait)) {
			struct ceph_msg_data *data =
			    list_first_entry_or_null(&msg->data,
						     struct ceph_msg_data,
						     links);

			if (data) {
				if (lreq->preply_pages) {
					WARN_ON(data->type !=
							CEPH_MSG_DATA_PAGES);
					*lreq->preply_pages = data->pages;
					*lreq->preply_len = data->length;
				} else {
					ceph_release_page_vector(data->pages,
					       calc_pages_for(0, data->length));
				}
			}
			lreq->notify_finish_error = return_code;
			complete_all(&lreq->notify_finish_wait);
		}
	} else {
		/* CEPH_WATCH_EVENT_NOTIFY */
		lwork = lwork_alloc(lreq, do_watch_notify);
		if (!lwork) {
			pr_err("failed to allocate notify-lwork\n");
			goto out_unlock_lreq;
		}

		lwork->notify.notify_id = notify_id;
		lwork->notify.notifier_id = notifier_id;
		lwork->notify.payload = payload;
		lwork->notify.payload_len = payload_len;
		lwork->notify.msg = ceph_msg_get(msg);
		lwork_queue(lwork);
	}

out_unlock_lreq:
	mutex_unlock(&lreq->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	down_read(&osdc->lock);
	submit_request(req, false);
	up_read(&osdc->lock);

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * Unregister a registered request.  The request is not completed:
 * ->r_result isn't set and __complete_request() isn't called.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	down_write(&osdc->lock);
	if (req->r_osd)
		cancel_request(req);
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);

/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
				unsigned long timeout)
{
	long left;

	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
	left = wait_for_completion_killable_timeout(&req->r_completion,
						ceph_timeout_jiffies(timeout));
	if (left <= 0) {
		left = left ?: -ETIMEDOUT;
		ceph_osdc_cancel_request(req);
	} else {
		left = req->r_result; /* completed */
	}

	return left;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
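
/*
 * Note that wait_request_timeout() cancels the request on a timeout or
 * a fatal signal, so the caller never sees a completion racing with
 * its own cleanup; a 0 timeout means "wait indefinitely" (but still
 * killable).
 */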
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *p;
	u64 last_tid = atomic64_read(&osdc->last_tid);

again:
	down_read(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		mutex_lock(&osd->lock);
		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			if (req->r_tid > last_tid)
				break;

			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
				continue;

			ceph_osdc_get_request(req);
			mutex_unlock(&osd->lock);
			up_read(&osdc->lock);
			dout("%s waiting on req %p tid %llu last_tid %llu\n",
			     __func__, req, req->r_tid, last_tid);
			wait_for_completion(&req->r_completion);
			ceph_osdc_put_request(req);
			goto again;
		}

		mutex_unlock(&osd->lock);
	}

	up_read(&osdc->lock);
	dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
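/*
 * Illustrative sketch (not part of the original file): a typical
 * shutdown-path pairing -- flush outstanding writes, then drain any
 * pending watch/notify callbacks.
 */
#if 0
	ceph_osdc_sync(osdc);		/* wait for in-flight writes */
	ceph_osdc_flush_notifies(osdc);	/* drain notify_wq callbacks */
#endif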
static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return NULL;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);

	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	return req;
}
/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
		struct ceph_object_id *oid,
		struct ceph_object_locator *oloc,
		rados_watchcb2_t wcb,
		rados_watcherrcb_t errcb,
		void *data)
{
	struct ceph_osd_linger_request *lreq;
	int ret;

	lreq = linger_alloc(osdc);
	if (!lreq)
		return ERR_PTR(-ENOMEM);

	lreq->is_watch = true;
	lreq->wcb = wcb;
	lreq->errcb = errcb;
	lreq->data = data;
	lreq->watch_valid_thru = jiffies;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts(&lreq->mtime);

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	lreq->ping_req = alloc_linger_request(lreq);
	if (!lreq->ping_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	down_write(&osdc->lock);
	linger_register(lreq); /* before osd_req_op_* */
	osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_WATCH);
	osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_PING);
	linger_submit(lreq);
	up_write(&osdc->lock);

	ret = linger_reg_commit_wait(lreq);
	if (ret) {
		linger_cancel(lreq);
		goto err_put_lreq;
	}

	return lreq;

err_put_lreq:
	linger_put(lreq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);
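/*
 * Illustrative sketch (not part of the original file): establishing
 * and tearing down a watch, in the style of rbd.  The my_* names are
 * hypothetical.
 */
#if 0
static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
			u64 notifier_id, void *data, size_t data_len)
{
	/* runs from osdc->notify_wq; ack via ceph_osdc_notify_ack() */
}

static void my_watch_errcb(void *arg, u64 cookie, int err)
{
	/* watch is broken (e.g. -ENOTCONN); arrange to re-watch */
}

static int my_watch_example(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc)
{
	struct ceph_osd_linger_request *lreq;

	lreq = ceph_osdc_watch(osdc, oid, oloc, my_watch_cb,
			       my_watch_errcb, NULL);
	if (IS_ERR(lreq))
		return PTR_ERR(lreq);

	/* ... watch is live; later, drop it (releases our ref) ... */
	return ceph_osdc_unwatch(osdc, lreq);
}
#endif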
/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{
	struct ceph_options *opts = osdc->client->options;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts(&req->r_mtime);
	osd_req_op_watch_init(req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_UNWATCH);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);
static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
				      u64 notify_id, u64 cookie, void *payload,
				      size_t payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

	pl = kmalloc(sizeof(*pl), GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ceph_pagelist_init(pl);
	ret = ceph_pagelist_encode_64(pl, notify_id);
	ret |= ceph_pagelist_encode_64(pl, cookie);
	if (payload) {
		ret |= ceph_pagelist_encode_32(pl, payload_len);
		ret |= ceph_pagelist_append(pl, payload, payload_len);
	} else {
		ret |= ceph_pagelist_encode_32(pl, 0);
	}
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}
int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 size_t payload_len)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
					 payload_len);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);
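/*
 * Illustrative sketch (not part of the original file): a watch
 * callback (rados_watchcb2_t) normally acks each notify so that the
 * notifier's ceph_osdc_notify() can complete.  The empty payload
 * mirrors what rbd does.
 */
#if 0
	ret = ceph_osdc_notify_ack(osdc, oid, oloc, notify_id, cookie,
				   NULL, 0);
	if (ret)
		pr_err("notify_ack failed: %d\n", ret);
#endif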
static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, size_t payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = kmalloc(sizeof(*pl), GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ceph_pagelist_init(pl);
	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}
/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     size_t payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}

	down_write(&osdc->lock);
	linger_register(lreq); /* before osd_req_op_* */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
				     timeout, payload, payload_len);
	if (ret) {
		linger_unregister(lreq);
		up_write(&osdc->lock);
		ceph_release_page_vector(pages, 1);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);
	linger_submit(lreq);
	up_write(&osdc->lock);

	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);
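/*
 * Illustrative sketch (not part of the original file): sending a
 * notify and releasing the reply page vector as the comment above
 * requires.  The payload and 10s timeout are placeholders.
 */
#if 0
	struct page **reply_pages;
	size_t reply_len;
	int ret;

	ret = ceph_osdc_notify(osdc, oid, oloc, payload, payload_len,
			       10, &reply_pages, &reply_len);
	/* reply_pages may be set on success and on error, so always check */
	if (reply_pages)
		ceph_release_page_vector(reply_pages,
					 calc_pages_for(0, reply_len));
#endif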
/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}
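/*
 * Illustrative sketch (not part of the original file): a periodic
 * keepalive check.  A negative return means the watch is no longer
 * valid and must be re-established; a positive return is an upper
 * bound in ms since the last confirmation.
 */
#if 0
	int ret = ceph_osdc_watch_check(osdc, lreq);

	if (ret < 0) {
		ceph_osdc_unwatch(osdc, lreq);
		/* ... establish a fresh watch with ceph_osdc_watch() ... */
	}
#endif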
static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ceph_decode_copy(p, &item->name, sizeof(item->name));
	item->cookie = ceph_decode_64(p);
	*p += 4; /* skip timeout_seconds */
	if (struct_v >= 2) {
		ceph_decode_copy(p, &item->addr, sizeof(item->addr));
		ceph_decode_addr(&item->addr);
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr.in_addr));
	return 0;
}
static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}
/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);
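/*
 * Illustrative sketch (not part of the original file): enumerating
 * watchers and freeing the array as the comment above requires.
 */
#if 0
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	int ret, i;

	ret = ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
				      &num_watchers);
	if (ret)
		return ret;

	for (i = 0; i < num_watchers; i++)
		dout("watcher %s%llu cookie %llu\n",
		     ENTITY_NAME(watchers[i].name), watchers[i].cookie);
	kfree(watchers);
#endif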
/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);
void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page *resp_page, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_page)
		osd_req_op_cls_response_data_pages(req, 0, &resp_page,
						   *resp_len, 0, false, false);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_page)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);
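/*
 * Illustrative sketch (not part of the original file): invoking an OSD
 * class method in the style of rbd's helpers.  The "rbd"/"get_size"
 * class/method pair and the u64-sized input are examples only; error
 * handling for the page allocations is omitted.
 */
#if 0
	struct page *req_page = alloc_page(GFP_NOIO);
	struct page *resp_page = alloc_page(GFP_NOIO);
	size_t resp_len = PAGE_SIZE;
	int ret;

	/* ... encode input args into page_address(req_page) ... */
	ret = ceph_osdc_call(osdc, oid, oloc, "rbd", "get_size",
			     CEPH_OSD_FLAG_READ, req_page, sizeof(u64),
			     resp_page, &resp_len);
	/* on success ret is the method's rval, resp_len the reply size */

	__free_page(req_page);
	__free_page(resp_page);
#endif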
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, 10, true, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, 10, true, "osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0,
				pages, *plen, page_align, false, false);

	dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
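/*
 * Illustrative sketch (not part of the original file): a single-page
 * synchronous read in the style of fs/ceph callers.  vino, layout,
 * off, truncate_seq/truncate_size and page are assumed to come from
 * the caller's inode.
 */
#if 0
	u64 len = PAGE_SIZE;
	int rc;

	rc = ceph_osdc_readpages(osdc, vino, layout, off, &len,
				 truncate_seq, truncate_size, &page, 1, 0);
	if (rc >= 0)
		dout("read %d bytes (len may have been shortened)\n", rc);
#endif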
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
				false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	req->r_mtime = *mtime;
	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
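/*
 * Illustrative sketch (not part of the original file): the matching
 * synchronous write.  snapc and the page vector are assumed to be
 * supplied by the caller; current_kernel_time() is one way to obtain
 * the timespec mtime this interface expects.  On success the byte
 * count is returned.
 */
#if 0
	struct timespec mtime = current_kernel_time();
	int rc;

	rc = ceph_osdc_writepages(osdc, vino, layout, snapc, off, len,
				  truncate_seq, truncate_size, &mtime,
				  pages, num_pages);
#endif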
int ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(ceph_osdc_setup);
void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}
EXPORT_SYMBOL(ceph_osdc_cleanup);
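/*
 * Illustrative sketch (not part of the original file): module-level
 * pairing, as done by libceph's init/exit paths -- the request cache
 * must exist before any ceph_osdc_init() call and may only be
 * destroyed after the last ceph_osdc_stop().
 */
#if 0
	ret = ceph_osdc_setup();
	if (ret)
		return ret;

	/* ... ceph_osdc_init()/ceph_osdc_stop() per ceph_client ... */

	ceph_osdc_cleanup();
#endif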
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		break;
	case CEPH_MSG_OSD_BACKOFF:
		handle_backoff(osd, msg);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}
/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}
/*
 * TODO: switch to a msg-owned pagelist
 */
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new(type, front_len, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;
		struct ceph_osd_data osd_data;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
					 false);
		ceph_osdc_msg_data_add(m, &osd_data);
	}

	return m;
}
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_OSD_BACKOFF:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}
/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}
static void osd_reencode_message(struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	if (type == CEPH_MSG_OSD_OP)
		encode_request_finish(msg);
}
static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}
static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}
static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.reencode_message = osd_reencode_message,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,