1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15 #include "Objecter.h"
16 #include "osd/OSDMap.h"
17 #include "Filer.h"
18
19 #include "mon/MonClient.h"
20
21 #include "msg/Messenger.h"
22 #include "msg/Message.h"
23
24 #include "messages/MPing.h"
25 #include "messages/MOSDOp.h"
26 #include "messages/MOSDOpReply.h"
27 #include "messages/MOSDBackoff.h"
28 #include "messages/MOSDMap.h"
29
30 #include "messages/MPoolOp.h"
31 #include "messages/MPoolOpReply.h"
32
33 #include "messages/MGetPoolStats.h"
34 #include "messages/MGetPoolStatsReply.h"
35 #include "messages/MStatfs.h"
36 #include "messages/MStatfsReply.h"
37
38 #include "messages/MMonCommand.h"
39
40 #include "messages/MCommand.h"
41 #include "messages/MCommandReply.h"
42
43 #include "messages/MWatchNotify.h"
44
45 #include <errno.h>
46
47 #include "common/config.h"
48 #include "common/perf_counters.h"
49 #include "common/scrub_types.h"
50 #include "include/str_list.h"
51 #include "common/errno.h"
52 #include "common/EventTrace.h"
53
54 using ceph::real_time;
55 using ceph::real_clock;
56
57 using ceph::mono_clock;
58 using ceph::mono_time;
59
60 using ceph::timespan;
61
62
63 #define dout_subsys ceph_subsys_objecter
64 #undef dout_prefix
65 #define dout_prefix *_dout << messenger->get_myname() << ".objecter "
66
67
68 enum {
69 l_osdc_first = 123200,
70 l_osdc_op_active,
71 l_osdc_op_laggy,
72 l_osdc_op_send,
73 l_osdc_op_send_bytes,
74 l_osdc_op_resend,
75 l_osdc_op_reply,
76
77 l_osdc_op,
78 l_osdc_op_r,
79 l_osdc_op_w,
80 l_osdc_op_rmw,
81 l_osdc_op_pg,
82
83 l_osdc_osdop_stat,
84 l_osdc_osdop_create,
85 l_osdc_osdop_read,
86 l_osdc_osdop_write,
87 l_osdc_osdop_writefull,
88 l_osdc_osdop_writesame,
89 l_osdc_osdop_append,
90 l_osdc_osdop_zero,
91 l_osdc_osdop_truncate,
92 l_osdc_osdop_delete,
93 l_osdc_osdop_mapext,
94 l_osdc_osdop_sparse_read,
95 l_osdc_osdop_clonerange,
96 l_osdc_osdop_getxattr,
97 l_osdc_osdop_setxattr,
98 l_osdc_osdop_cmpxattr,
99 l_osdc_osdop_rmxattr,
100 l_osdc_osdop_resetxattrs,
101 l_osdc_osdop_tmap_up,
102 l_osdc_osdop_tmap_put,
103 l_osdc_osdop_tmap_get,
104 l_osdc_osdop_call,
105 l_osdc_osdop_watch,
106 l_osdc_osdop_notify,
107 l_osdc_osdop_src_cmpxattr,
108 l_osdc_osdop_pgls,
109 l_osdc_osdop_pgls_filter,
110 l_osdc_osdop_other,
111
112 l_osdc_linger_active,
113 l_osdc_linger_send,
114 l_osdc_linger_resend,
115 l_osdc_linger_ping,
116
117 l_osdc_poolop_active,
118 l_osdc_poolop_send,
119 l_osdc_poolop_resend,
120
121 l_osdc_poolstat_active,
122 l_osdc_poolstat_send,
123 l_osdc_poolstat_resend,
124
125 l_osdc_statfs_active,
126 l_osdc_statfs_send,
127 l_osdc_statfs_resend,
128
129 l_osdc_command_active,
130 l_osdc_command_send,
131 l_osdc_command_resend,
132
133 l_osdc_map_epoch,
134 l_osdc_map_full,
135 l_osdc_map_inc,
136
137 l_osdc_osd_sessions,
138 l_osdc_osd_session_open,
139 l_osdc_osd_session_close,
140 l_osdc_osd_laggy,
141
142 l_osdc_osdop_omap_wr,
143 l_osdc_osdop_omap_rd,
144 l_osdc_osdop_omap_del,
145
146 l_osdc_last,
147 };
148
149
150 // config obs ----------------------------
151
152 static const char *config_keys[] = {
153 "crush_location",
154 NULL
155 };
156
157 class Objecter::RequestStateHook : public AdminSocketHook {
158 Objecter *m_objecter;
159 public:
160 explicit RequestStateHook(Objecter *objecter);
161 bool call(std::string command, cmdmap_t& cmdmap, std::string format,
162 bufferlist& out) override;
163 };
164
165 /**
166 * This is a more limited form of C_Contexts, which we can't use here
167 * because it requires a CephContext that we don't have.
168 */
169 class ObjectOperation::C_TwoContexts : public Context {
170 Context *first;
171 Context *second;
172 public:
173 C_TwoContexts(Context *first, Context *second) :
174 first(first), second(second) {}
175 void finish(int r) override {
176 first->complete(r);
177 second->complete(r);
178 first = NULL;
179 second = NULL;
180 }
181
182 ~C_TwoContexts() override {
183 delete first;
184 delete second;
185 }
186 };
187
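// Chain an extra completion onto the most recently added op's output
// handler: if a handler is already registered in that slot, wrap both in a
// C_TwoContexts so they complete together.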
188 void ObjectOperation::add_handler(Context *extra) {
189 size_t last = out_handler.size() - 1;
190 Context *orig = out_handler[last];
191 if (orig) {
192 Context *wrapper = new C_TwoContexts(orig, extra);
193 out_handler[last] = wrapper;
194 } else {
195 out_handler[last] = extra;
196 }
197 }
198
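// Pick the completion lock for an object by hashing its name into the
// session's fixed pool of completion_locks.  The lock is returned with
// std::defer_lock (not yet acquired); an empty name yields a null lock.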
199 Objecter::OSDSession::unique_completion_lock Objecter::OSDSession::get_lock(
200 object_t& oid)
201 {
202 if (oid.name.empty())
203 return unique_completion_lock();
204
205 static constexpr uint32_t HASH_PRIME = 1021;
206 uint32_t h = ceph_str_hash_linux(oid.name.c_str(), oid.name.size())
207 % HASH_PRIME;
208
209 return unique_completion_lock(completion_locks[h % num_locks],
210 std::defer_lock);
211 }
212
213 const char** Objecter::get_tracked_conf_keys() const
214 {
215 return config_keys;
216 }
217
218
219 void Objecter::handle_conf_change(const struct md_config_t *conf,
220 const std::set <std::string> &changed)
221 {
222 if (changed.count("crush_location")) {
223 update_crush_location();
224 }
225 }
226
227 void Objecter::update_crush_location()
228 {
229 unique_lock wl(rwlock);
230 crush_location = cct->crush_location.get_location();
231 }
232
233 // messages ------------------------------
234
235 /*
236 * initialize only internal data structures, don't initiate cluster interaction
237 */
238 void Objecter::init()
239 {
240 assert(!initialized);
241
242 if (!logger) {
243 PerfCountersBuilder pcb(cct, "objecter", l_osdc_first, l_osdc_last);
244
245 pcb.add_u64(l_osdc_op_active, "op_active", "Operations active", "actv",
246 PerfCountersBuilder::PRIO_CRITICAL);
247 pcb.add_u64(l_osdc_op_laggy, "op_laggy", "Laggy operations");
248 pcb.add_u64_counter(l_osdc_op_send, "op_send", "Sent operations");
249 pcb.add_u64_counter(l_osdc_op_send_bytes, "op_send_bytes", "Sent data");
250 pcb.add_u64_counter(l_osdc_op_resend, "op_resend", "Resent operations");
251 pcb.add_u64_counter(l_osdc_op_reply, "op_reply", "Operation reply");
252
253 pcb.add_u64_counter(l_osdc_op, "op", "Operations");
254 pcb.add_u64_counter(l_osdc_op_r, "op_r", "Read operations", "rd",
255 PerfCountersBuilder::PRIO_CRITICAL);
256 pcb.add_u64_counter(l_osdc_op_w, "op_w", "Write operations", "wr",
257 PerfCountersBuilder::PRIO_CRITICAL);
258 pcb.add_u64_counter(l_osdc_op_rmw, "op_rmw", "Read-modify-write operations",
259 "rdwr", PerfCountersBuilder::PRIO_INTERESTING);
260 pcb.add_u64_counter(l_osdc_op_pg, "op_pg", "PG operation");
261
262 pcb.add_u64_counter(l_osdc_osdop_stat, "osdop_stat", "Stat operations");
263 pcb.add_u64_counter(l_osdc_osdop_create, "osdop_create",
264 "Create object operations");
265 pcb.add_u64_counter(l_osdc_osdop_read, "osdop_read", "Read operations");
266 pcb.add_u64_counter(l_osdc_osdop_write, "osdop_write", "Write operations");
267 pcb.add_u64_counter(l_osdc_osdop_writefull, "osdop_writefull",
268 "Write full object operations");
269 pcb.add_u64_counter(l_osdc_osdop_writesame, "osdop_writesame",
270 "Write same operations");
271 pcb.add_u64_counter(l_osdc_osdop_append, "osdop_append",
272 "Append operation");
273 pcb.add_u64_counter(l_osdc_osdop_zero, "osdop_zero",
274 "Set object to zero operations");
275 pcb.add_u64_counter(l_osdc_osdop_truncate, "osdop_truncate",
276 "Truncate object operations");
277 pcb.add_u64_counter(l_osdc_osdop_delete, "osdop_delete",
278 "Delete object operations");
279 pcb.add_u64_counter(l_osdc_osdop_mapext, "osdop_mapext",
280 "Map extent operations");
281 pcb.add_u64_counter(l_osdc_osdop_sparse_read, "osdop_sparse_read",
282 "Sparse read operations");
283 pcb.add_u64_counter(l_osdc_osdop_clonerange, "osdop_clonerange",
284 "Clone range operations");
285 pcb.add_u64_counter(l_osdc_osdop_getxattr, "osdop_getxattr",
286 "Get xattr operations");
287 pcb.add_u64_counter(l_osdc_osdop_setxattr, "osdop_setxattr",
288 "Set xattr operations");
289 pcb.add_u64_counter(l_osdc_osdop_cmpxattr, "osdop_cmpxattr",
290 "Xattr comparison operations");
291 pcb.add_u64_counter(l_osdc_osdop_rmxattr, "osdop_rmxattr",
292 "Remove xattr operations");
293 pcb.add_u64_counter(l_osdc_osdop_resetxattrs, "osdop_resetxattrs",
294 "Reset xattr operations");
295 pcb.add_u64_counter(l_osdc_osdop_tmap_up, "osdop_tmap_up",
296 "TMAP update operations");
297 pcb.add_u64_counter(l_osdc_osdop_tmap_put, "osdop_tmap_put",
298 "TMAP put operations");
299 pcb.add_u64_counter(l_osdc_osdop_tmap_get, "osdop_tmap_get",
300 "TMAP get operations");
301 pcb.add_u64_counter(l_osdc_osdop_call, "osdop_call",
302 "Call (execute) operations");
303 pcb.add_u64_counter(l_osdc_osdop_watch, "osdop_watch",
304 "Watch by object operations");
305 pcb.add_u64_counter(l_osdc_osdop_notify, "osdop_notify",
306 "Notify about object operations");
307 pcb.add_u64_counter(l_osdc_osdop_src_cmpxattr, "osdop_src_cmpxattr",
308 "Extended attribute comparison in multi operations");
309 pcb.add_u64_counter(l_osdc_osdop_pgls, "osdop_pgls");
310 pcb.add_u64_counter(l_osdc_osdop_pgls_filter, "osdop_pgls_filter");
311 pcb.add_u64_counter(l_osdc_osdop_other, "osdop_other", "Other operations");
312
313 pcb.add_u64(l_osdc_linger_active, "linger_active",
314 "Active lingering operations");
315 pcb.add_u64_counter(l_osdc_linger_send, "linger_send",
316 "Sent lingering operations");
317 pcb.add_u64_counter(l_osdc_linger_resend, "linger_resend",
318 "Resent lingering operations");
319 pcb.add_u64_counter(l_osdc_linger_ping, "linger_ping",
320 "Sent pings to lingering operations");
321
322 pcb.add_u64(l_osdc_poolop_active, "poolop_active",
323 "Active pool operations");
324 pcb.add_u64_counter(l_osdc_poolop_send, "poolop_send",
325 "Sent pool operations");
326 pcb.add_u64_counter(l_osdc_poolop_resend, "poolop_resend",
327 "Resent pool operations");
328
329 pcb.add_u64(l_osdc_poolstat_active, "poolstat_active",
330 "Active get pool stat operations");
331 pcb.add_u64_counter(l_osdc_poolstat_send, "poolstat_send",
332 "Pool stat operations sent");
333 pcb.add_u64_counter(l_osdc_poolstat_resend, "poolstat_resend",
334 "Resent pool stats");
335
336 pcb.add_u64(l_osdc_statfs_active, "statfs_active", "Statfs operations");
337 pcb.add_u64_counter(l_osdc_statfs_send, "statfs_send", "Sent FS stats");
338 pcb.add_u64_counter(l_osdc_statfs_resend, "statfs_resend",
339 "Resent FS stats");
340
341 pcb.add_u64(l_osdc_command_active, "command_active", "Active commands");
342 pcb.add_u64_counter(l_osdc_command_send, "command_send",
343 "Sent commands");
344 pcb.add_u64_counter(l_osdc_command_resend, "command_resend",
345 "Resent commands");
346
347 pcb.add_u64(l_osdc_map_epoch, "map_epoch", "OSD map epoch");
348 pcb.add_u64_counter(l_osdc_map_full, "map_full",
349 "Full OSD maps received");
350 pcb.add_u64_counter(l_osdc_map_inc, "map_inc",
351 "Incremental OSD maps received");
352
353 pcb.add_u64(l_osdc_osd_sessions, "osd_sessions",
354 "Open sessions"); // open sessions
355 pcb.add_u64_counter(l_osdc_osd_session_open, "osd_session_open",
356 "Sessions opened");
357 pcb.add_u64_counter(l_osdc_osd_session_close, "osd_session_close",
358 "Sessions closed");
359 pcb.add_u64(l_osdc_osd_laggy, "osd_laggy", "Laggy OSD sessions");
360
361 pcb.add_u64_counter(l_osdc_osdop_omap_wr, "omap_wr",
362 "OSD OMAP write operations");
363 pcb.add_u64_counter(l_osdc_osdop_omap_rd, "omap_rd",
364 "OSD OMAP read operations");
365 pcb.add_u64_counter(l_osdc_osdop_omap_del, "omap_del",
366 "OSD OMAP delete operations");
367
368 logger = pcb.create_perf_counters();
369 cct->get_perfcounters_collection()->add(logger);
370 }
371
372 m_request_state_hook = new RequestStateHook(this);
373 AdminSocket* admin_socket = cct->get_admin_socket();
374 int ret = admin_socket->register_command("objecter_requests",
375 "objecter_requests",
376 m_request_state_hook,
377 "show in-progress osd requests");
378
379 /* Don't warn on EEXIST, happens if multiple ceph clients
380 * are instantiated from one process */
381 if (ret < 0 && ret != -EEXIST) {
382 lderr(cct) << "error registering admin socket command: "
383 << cpp_strerror(ret) << dendl;
384 }
385
386 update_crush_location();
387
388 cct->_conf->add_observer(this);
389
390 initialized = true;
391 }
392
393 /*
394 * ok, cluster interaction can happen
395 */
396 void Objecter::start(const OSDMap* o)
397 {
398 shared_lock rl(rwlock);
399
400 start_tick();
401 if (o) {
402 osdmap->deepish_copy_from(*o);
403 } else if (osdmap->get_epoch() == 0) {
404 _maybe_request_map();
405 }
406 }
407
408 void Objecter::shutdown()
409 {
410 assert(initialized);
411
412 unique_lock wl(rwlock);
413
414 initialized = false;
415
416 cct->_conf->remove_observer(this);
417
418 map<int,OSDSession*>::iterator p;
419 while (!osd_sessions.empty()) {
420 p = osd_sessions.begin();
421 close_session(p->second);
422 }
423
424 while(!check_latest_map_lingers.empty()) {
425 map<uint64_t, LingerOp*>::iterator i = check_latest_map_lingers.begin();
426 i->second->put();
427 check_latest_map_lingers.erase(i->first);
428 }
429
430 while(!check_latest_map_ops.empty()) {
431 map<ceph_tid_t, Op*>::iterator i = check_latest_map_ops.begin();
432 i->second->put();
433 check_latest_map_ops.erase(i->first);
434 }
435
436 while(!check_latest_map_commands.empty()) {
437 map<ceph_tid_t, CommandOp*>::iterator i
438 = check_latest_map_commands.begin();
439 i->second->put();
440 check_latest_map_commands.erase(i->first);
441 }
442
443 while(!poolstat_ops.empty()) {
444 map<ceph_tid_t,PoolStatOp*>::iterator i = poolstat_ops.begin();
445 delete i->second;
446 poolstat_ops.erase(i->first);
447 }
448
449 while(!statfs_ops.empty()) {
450 map<ceph_tid_t, StatfsOp*>::iterator i = statfs_ops.begin();
451 delete i->second;
452 statfs_ops.erase(i->first);
453 }
454
455 while(!pool_ops.empty()) {
456 map<ceph_tid_t, PoolOp*>::iterator i = pool_ops.begin();
457 delete i->second;
458 pool_ops.erase(i->first);
459 }
460
461 ldout(cct, 20) << __func__ << " clearing up homeless session..." << dendl;
462 while(!homeless_session->linger_ops.empty()) {
463 std::map<uint64_t, LingerOp*>::iterator i
464 = homeless_session->linger_ops.begin();
465 ldout(cct, 10) << " linger_op " << i->first << dendl;
466 LingerOp *lop = i->second;
467 {
468 OSDSession::unique_lock swl(homeless_session->lock);
469 _session_linger_op_remove(homeless_session, lop);
470 }
471 linger_ops.erase(lop->linger_id);
472 linger_ops_set.erase(lop);
473 lop->put();
474 }
475
476 while(!homeless_session->ops.empty()) {
477 std::map<ceph_tid_t, Op*>::iterator i = homeless_session->ops.begin();
478 ldout(cct, 10) << " op " << i->first << dendl;
479 Op *op = i->second;
480 {
481 OSDSession::unique_lock swl(homeless_session->lock);
482 _session_op_remove(homeless_session, op);
483 }
484 op->put();
485 }
486
487 while(!homeless_session->command_ops.empty()) {
488 std::map<ceph_tid_t, CommandOp*>::iterator i
489 = homeless_session->command_ops.begin();
490 ldout(cct, 10) << " command_op " << i->first << dendl;
491 CommandOp *cop = i->second;
492 {
493 OSDSession::unique_lock swl(homeless_session->lock);
494 _session_command_op_remove(homeless_session, cop);
495 }
496 cop->put();
497 }
498
499 if (tick_event) {
500 if (timer.cancel_event(tick_event)) {
501 ldout(cct, 10) << " successfully canceled tick" << dendl;
502 }
503 tick_event = 0;
504 }
505
506 if (logger) {
507 cct->get_perfcounters_collection()->remove(logger);
508 delete logger;
509 logger = NULL;
510 }
511
512 // Let go of Objecter write lock so timer thread can shutdown
513 wl.unlock();
514
515 // Outside of lock to avoid cycle WRT calls to RequestStateHook
516 // This is safe because we guarantee no concurrent calls to
517 // shutdown() with the ::initialized check at start.
518 if (m_request_state_hook) {
519 AdminSocket* admin_socket = cct->get_admin_socket();
520 admin_socket->unregister_command("objecter_requests");
521 delete m_request_state_hook;
522 m_request_state_hook = NULL;
523 }
524 }
525
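// Send (or resend) the watch/notify registration op for a linger.  The op is
// built fresh each time and marked should_resend = false, since any resend is
// done by constructing a new registration op here.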
526 void Objecter::_send_linger(LingerOp *info,
527 shunique_lock& sul)
528 {
529 assert(sul.owns_lock() && sul.mutex() == &rwlock);
530
531 vector<OSDOp> opv;
532 Context *oncommit = NULL;
533 LingerOp::shared_lock watchl(info->watch_lock);
534 bufferlist *poutbl = NULL;
535 if (info->registered && info->is_watch) {
536 ldout(cct, 15) << "send_linger " << info->linger_id << " reconnect"
537 << dendl;
538 opv.push_back(OSDOp());
539 opv.back().op.op = CEPH_OSD_OP_WATCH;
540 opv.back().op.watch.cookie = info->get_cookie();
541 opv.back().op.watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
542 opv.back().op.watch.gen = ++info->register_gen;
543 oncommit = new C_Linger_Reconnect(this, info);
544 } else {
545 ldout(cct, 15) << "send_linger " << info->linger_id << " register"
546 << dendl;
547 opv = info->ops;
548 C_Linger_Commit *c = new C_Linger_Commit(this, info);
549 if (!info->is_watch) {
550 info->notify_id = 0;
551 poutbl = &c->outbl;
552 }
553 oncommit = c;
554 }
555 watchl.unlock();
556 Op *o = new Op(info->target.base_oid, info->target.base_oloc,
557 opv, info->target.flags | CEPH_OSD_FLAG_READ,
558 oncommit, info->pobjver);
559 o->outbl = poutbl;
560 o->snapid = info->snap;
561 o->snapc = info->snapc;
562 o->mtime = info->mtime;
563
564 o->target = info->target;
565 o->tid = ++last_tid;
566
567 // do not resend this; we will send a new op to reregister
568 o->should_resend = false;
569
570 if (info->register_tid) {
571 // repeat send; cancel the old registration op, if any.
572 OSDSession::unique_lock sl(info->session->lock);
573 if (info->session->ops.count(info->register_tid)) {
574 Op *o = info->session->ops[info->register_tid];
575 _op_cancel_map_check(o);
576 _cancel_linger_op(o);
577 }
578 sl.unlock();
579
580 _op_submit(o, sul, &info->register_tid);
581 } else {
582 // first send
583 _op_submit_with_budget(o, sul, &info->register_tid);
584 }
585
586 logger->inc(l_osdc_linger_send);
587 }
588
589 void Objecter::_linger_commit(LingerOp *info, int r, bufferlist& outbl)
590 {
591 LingerOp::unique_lock wl(info->watch_lock);
592 ldout(cct, 10) << "_linger_commit " << info->linger_id << dendl;
593 if (info->on_reg_commit) {
594 info->on_reg_commit->complete(r);
595 info->on_reg_commit = NULL;
596 }
597
598 // only tell the user the first time we do this
599 info->registered = true;
600 info->pobjver = NULL;
601
602 if (!info->is_watch) {
603 // make note of the notify_id
604 bufferlist::iterator p = outbl.begin();
605 try {
606 ::decode(info->notify_id, p);
607 ldout(cct, 10) << "_linger_commit notify_id=" << info->notify_id
608 << dendl;
609 }
610 catch (buffer::error& e) {
611 }
612 }
613 }
614
615 struct C_DoWatchError : public Context {
616 Objecter *objecter;
617 Objecter::LingerOp *info;
618 int err;
619 C_DoWatchError(Objecter *o, Objecter::LingerOp *i, int r)
620 : objecter(o), info(i), err(r) {
621 info->get();
622 info->_queued_async();
623 }
624 void finish(int r) override {
625 Objecter::unique_lock wl(objecter->rwlock);
626 bool canceled = info->canceled;
627 wl.unlock();
628
629 if (!canceled) {
630 info->watch_context->handle_error(info->get_cookie(), err);
631 }
632
633 info->finished_async();
634 info->put();
635 }
636 };
637
638 int Objecter::_normalize_watch_error(int r)
639 {
640 // translate ENOENT -> ENOTCONN so that a delete->disconnection
641 // notification and a failure to reconnect becuase we raced with
642 // the delete appear the same to the user.
643 if (r == -ENOENT)
644 r = -ENOTCONN;
645 return r;
646 }
647
648 void Objecter::_linger_reconnect(LingerOp *info, int r)
649 {
650 ldout(cct, 10) << __func__ << " " << info->linger_id << " = " << r
651 << " (last_error " << info->last_error << ")" << dendl;
652 if (r < 0) {
653 LingerOp::unique_lock wl(info->watch_lock);
654 if (!info->last_error) {
655 r = _normalize_watch_error(r);
656 info->last_error = r;
657 if (info->watch_context) {
658 finisher->queue(new C_DoWatchError(this, info, r));
659 }
660 }
661 wl.unlock();
662 }
663 }
664
665 void Objecter::_send_linger_ping(LingerOp *info)
666 {
667 // rwlock is locked unique
668 // info->session->lock is locked
669
670 if (cct->_conf->objecter_inject_no_watch_ping) {
671 ldout(cct, 10) << __func__ << " " << info->linger_id << " SKIPPING"
672 << dendl;
673 return;
674 }
675 if (osdmap->test_flag(CEPH_OSDMAP_PAUSERD)) {
676 ldout(cct, 10) << __func__ << " PAUSERD" << dendl;
677 return;
678 }
679
680 ceph::mono_time now = ceph::mono_clock::now();
681 ldout(cct, 10) << __func__ << " " << info->linger_id << " now " << now
682 << dendl;
683
684 vector<OSDOp> opv(1);
685 opv[0].op.op = CEPH_OSD_OP_WATCH;
686 opv[0].op.watch.cookie = info->get_cookie();
687 opv[0].op.watch.op = CEPH_OSD_WATCH_OP_PING;
688 opv[0].op.watch.gen = info->register_gen;
689 C_Linger_Ping *onack = new C_Linger_Ping(this, info);
690 Op *o = new Op(info->target.base_oid, info->target.base_oloc,
691 opv, info->target.flags | CEPH_OSD_FLAG_READ,
692 onack, NULL, NULL);
693 o->target = info->target;
694 o->should_resend = false;
695 _send_op_account(o);
696 MOSDOp *m = _prepare_osd_op(o);
697 o->tid = ++last_tid;
698 _session_op_assign(info->session, o);
699 _send_op(o, m);
700 info->ping_tid = o->tid;
701
702 onack->sent = now;
703 logger->inc(l_osdc_linger_ping);
704 }
705
706 void Objecter::_linger_ping(LingerOp *info, int r, mono_time sent,
707 uint32_t register_gen)
708 {
709 LingerOp::unique_lock l(info->watch_lock);
710 ldout(cct, 10) << __func__ << " " << info->linger_id
711 << " sent " << sent << " gen " << register_gen << " = " << r
712 << " (last_error " << info->last_error
713 << " register_gen " << info->register_gen << ")" << dendl;
714 if (info->register_gen == register_gen) {
715 if (r == 0) {
716 info->watch_valid_thru = sent;
717 } else if (r < 0 && !info->last_error) {
718 r = _normalize_watch_error(r);
719 info->last_error = r;
720 if (info->watch_context) {
721 finisher->queue(new C_DoWatchError(this, info, r));
722 }
723 }
724 } else {
725 ldout(cct, 20) << " ignoring old gen" << dendl;
726 }
727 }
728
729 int Objecter::linger_check(LingerOp *info)
730 {
731 LingerOp::shared_lock l(info->watch_lock);
732
733 mono_time stamp = info->watch_valid_thru;
734 if (!info->watch_pending_async.empty())
735 stamp = MIN(info->watch_valid_thru, info->watch_pending_async.front());
736 auto age = mono_clock::now() - stamp;
737
738 ldout(cct, 10) << __func__ << " " << info->linger_id
739 << " err " << info->last_error
740 << " age " << age << dendl;
741 if (info->last_error)
742 return info->last_error;
743 // return a safe upper bound (we are truncating to ms)
744 return
745 1 + std::chrono::duration_cast<std::chrono::milliseconds>(age).count();
746 }
747
748 void Objecter::linger_cancel(LingerOp *info)
749 {
750 unique_lock wl(rwlock);
751 _linger_cancel(info);
752 info->put();
753 }
754
755 void Objecter::_linger_cancel(LingerOp *info)
756 {
757 // rwlock is locked unique
758 ldout(cct, 20) << __func__ << " linger_id=" << info->linger_id << dendl;
759 if (!info->canceled) {
760 OSDSession *s = info->session;
761 OSDSession::unique_lock sl(s->lock);
762 _session_linger_op_remove(s, info);
763 sl.unlock();
764
765 linger_ops.erase(info->linger_id);
766 linger_ops_set.erase(info);
767 assert(linger_ops.size() == linger_ops_set.size());
768
769 info->canceled = true;
770 info->put();
771
772 logger->dec(l_osdc_linger_active);
773 }
774 }
775
776
777
778 Objecter::LingerOp *Objecter::linger_register(const object_t& oid,
779 const object_locator_t& oloc,
780 int flags)
781 {
782 LingerOp *info = new LingerOp;
783 info->target.base_oid = oid;
784 info->target.base_oloc = oloc;
785 if (info->target.base_oloc.key == oid)
786 info->target.base_oloc.key.clear();
787 info->target.flags = flags;
788 info->watch_valid_thru = mono_clock::now();
789
790 unique_lock l(rwlock);
791
792 // Acquire linger ID
793 info->linger_id = ++max_linger_id;
794 ldout(cct, 10) << __func__ << " info " << info
795 << " linger_id " << info->linger_id
796 << " cookie " << info->get_cookie()
797 << dendl;
798 linger_ops[info->linger_id] = info;
799 linger_ops_set.insert(info);
800 assert(linger_ops.size() == linger_ops_set.size());
801
802 info->get(); // for the caller
803 return info;
804 }
805
806 ceph_tid_t Objecter::linger_watch(LingerOp *info,
807 ObjectOperation& op,
808 const SnapContext& snapc,
809 real_time mtime,
810 bufferlist& inbl,
811 Context *oncommit,
812 version_t *objver)
813 {
814 info->is_watch = true;
815 info->snapc = snapc;
816 info->mtime = mtime;
817 info->target.flags |= CEPH_OSD_FLAG_WRITE;
818 info->ops = op.ops;
819 info->inbl = inbl;
820 info->poutbl = NULL;
821 info->pobjver = objver;
822 info->on_reg_commit = oncommit;
823
824 shunique_lock sul(rwlock, ceph::acquire_unique);
825 _linger_submit(info, sul);
826 logger->inc(l_osdc_linger_active);
827
828 return info->linger_id;
829 }
830
831 ceph_tid_t Objecter::linger_notify(LingerOp *info,
832 ObjectOperation& op,
833 snapid_t snap, bufferlist& inbl,
834 bufferlist *poutbl,
835 Context *onfinish,
836 version_t *objver)
837 {
838 info->snap = snap;
839 info->target.flags |= CEPH_OSD_FLAG_READ;
840 info->ops = op.ops;
841 info->inbl = inbl;
842 info->poutbl = poutbl;
843 info->pobjver = objver;
844 info->on_reg_commit = onfinish;
845
846 shunique_lock sul(rwlock, ceph::acquire_unique);
847 _linger_submit(info, sul);
848 logger->inc(l_osdc_linger_active);
849
850 return info->linger_id;
851 }
852
853 void Objecter::_linger_submit(LingerOp *info, shunique_lock& sul)
854 {
855 assert(sul.owns_lock() && sul.mutex() == &rwlock);
856 assert(info->linger_id);
857
858 // Populate Op::target
859 OSDSession *s = NULL;
860 _calc_target(&info->target, nullptr);
861
862 // Create LingerOp<->OSDSession relation
863 int r = _get_session(info->target.osd, &s, sul);
864 assert(r == 0);
865 OSDSession::unique_lock sl(s->lock);
866 _session_linger_op_assign(s, info);
867 sl.unlock();
868 put_session(s);
869
870 _send_linger(info, sul);
871 }
872
873 struct C_DoWatchNotify : public Context {
874 Objecter *objecter;
875 Objecter::LingerOp *info;
876 MWatchNotify *msg;
877 C_DoWatchNotify(Objecter *o, Objecter::LingerOp *i, MWatchNotify *m)
878 : objecter(o), info(i), msg(m) {
879 info->get();
880 info->_queued_async();
881 msg->get();
882 }
883 void finish(int r) override {
884 objecter->_do_watch_notify(info, msg);
885 }
886 };
887
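// Entry point for watch/notify events from the OSD.  The LingerOp is looked
// up via the message cookie; disconnect errors are queued to the finisher as
// C_DoWatchError, notify completions are delivered inline, and watch notify
// events are dispatched asynchronously via C_DoWatchNotify.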
888 void Objecter::handle_watch_notify(MWatchNotify *m)
889 {
890 shared_lock l(rwlock);
891 if (!initialized) {
892 return;
893 }
894
895 LingerOp *info = reinterpret_cast<LingerOp*>(m->cookie);
896 if (linger_ops_set.count(info) == 0) {
897 ldout(cct, 7) << __func__ << " cookie " << m->cookie << " dne" << dendl;
898 return;
899 }
900 LingerOp::unique_lock wl(info->watch_lock);
901 if (m->opcode == CEPH_WATCH_EVENT_DISCONNECT) {
902 if (!info->last_error) {
903 info->last_error = -ENOTCONN;
904 if (info->watch_context) {
905 finisher->queue(new C_DoWatchError(this, info, -ENOTCONN));
906 }
907 }
908 } else if (!info->is_watch) {
909 // we have CEPH_WATCH_EVENT_NOTIFY_COMPLETE; we can do this inline
910 // since we know the only user (librados) is safe to call in
911 // fast-dispatch context
912 if (info->notify_id &&
913 info->notify_id != m->notify_id) {
914 ldout(cct, 10) << __func__ << " reply notify " << m->notify_id
915 << " != " << info->notify_id << ", ignoring" << dendl;
916 } else if (info->on_notify_finish) {
917 info->notify_result_bl->claim(m->get_data());
918 info->on_notify_finish->complete(m->return_code);
919
920 // if we race with reconnect we might get a second notify; only
921 // notify the caller once!
922 info->on_notify_finish = NULL;
923 }
924 } else {
925 finisher->queue(new C_DoWatchNotify(this, info, m));
926 }
927 }
928
929 void Objecter::_do_watch_notify(LingerOp *info, MWatchNotify *m)
930 {
931 ldout(cct, 10) << __func__ << " " << *m << dendl;
932
933 shared_lock l(rwlock);
934 assert(initialized);
935
936 if (info->canceled) {
937 l.unlock();
938 goto out;
939 }
940
941 // notify completion?
942 assert(info->is_watch);
943 assert(info->watch_context);
944 assert(m->opcode != CEPH_WATCH_EVENT_DISCONNECT);
945
946 l.unlock();
947
948 switch (m->opcode) {
949 case CEPH_WATCH_EVENT_NOTIFY:
950 info->watch_context->handle_notify(m->notify_id, m->cookie,
951 m->notifier_gid, m->bl);
952 break;
953 }
954
955 out:
956 info->finished_async();
957 info->put();
958 m->put();
959 }
960
961 bool Objecter::ms_dispatch(Message *m)
962 {
963 ldout(cct, 10) << __func__ << " " << cct << " " << *m << dendl;
964 if (!initialized)
965 return false;
966
967 switch (m->get_type()) {
968 // these we exclusively handle
969 case CEPH_MSG_OSD_OPREPLY:
970 handle_osd_op_reply(static_cast<MOSDOpReply*>(m));
971 return true;
972
973 case CEPH_MSG_OSD_BACKOFF:
974 handle_osd_backoff(static_cast<MOSDBackoff*>(m));
975 return true;
976
977 case CEPH_MSG_WATCH_NOTIFY:
978 handle_watch_notify(static_cast<MWatchNotify*>(m));
979 m->put();
980 return true;
981
982 case MSG_COMMAND_REPLY:
983 if (m->get_source().type() == CEPH_ENTITY_TYPE_OSD) {
984 handle_command_reply(static_cast<MCommandReply*>(m));
985 return true;
986 } else {
987 return false;
988 }
989
990 case MSG_GETPOOLSTATSREPLY:
991 handle_get_pool_stats_reply(static_cast<MGetPoolStatsReply*>(m));
992 return true;
993
994 case CEPH_MSG_POOLOP_REPLY:
995 handle_pool_op_reply(static_cast<MPoolOpReply*>(m));
996 return true;
997
998 case CEPH_MSG_STATFS_REPLY:
999 handle_fs_stats_reply(static_cast<MStatfsReply*>(m));
1000 return true;
1001
1002 // these we give others a chance to inspect
1003
1004 // MDS, OSD
1005 case CEPH_MSG_OSD_MAP:
1006 handle_osd_map(static_cast<MOSDMap*>(m));
1007 return false;
1008 }
1009 return false;
1010 }
1011
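// Recompute targets for every linger op, regular op, and command on this
// session, collecting anything that must be resent (or whose pool no longer
// exists) into the containers supplied by handle_osd_map().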
1012 void Objecter::_scan_requests(OSDSession *s,
1013 bool force_resend,
1014 bool cluster_full,
1015 map<int64_t, bool> *pool_full_map,
1016 map<ceph_tid_t, Op*>& need_resend,
1017 list<LingerOp*>& need_resend_linger,
1018 map<ceph_tid_t, CommandOp*>& need_resend_command,
1019 shunique_lock& sul)
1020 {
1021 assert(sul.owns_lock() && sul.mutex() == &rwlock);
1022
1023 list<LingerOp*> unregister_lingers;
1024
1025 OSDSession::unique_lock sl(s->lock);
1026
1027 // check for changed linger mappings (_before_ regular ops)
1028 map<ceph_tid_t,LingerOp*>::iterator lp = s->linger_ops.begin();
1029 while (lp != s->linger_ops.end()) {
1030 LingerOp *op = lp->second;
1031 assert(op->session == s);
1032 // check_linger_pool_dne() may touch linger_ops; prevent iterator
1033 // invalidation
1034 ++lp;
1035 ldout(cct, 10) << " checking linger op " << op->linger_id << dendl;
1036 bool unregister, force_resend_writes = cluster_full;
1037 int r = _recalc_linger_op_target(op, sul);
1038 if (pool_full_map)
1039 force_resend_writes = force_resend_writes ||
1040 (*pool_full_map)[op->target.base_oloc.pool];
1041 switch (r) {
1042 case RECALC_OP_TARGET_NO_ACTION:
1043 if (!force_resend && !force_resend_writes)
1044 break;
1045 // -- fall-thru --
1046 case RECALC_OP_TARGET_NEED_RESEND:
1047 need_resend_linger.push_back(op);
1048 _linger_cancel_map_check(op);
1049 break;
1050 case RECALC_OP_TARGET_POOL_DNE:
1051 _check_linger_pool_dne(op, &unregister);
1052 if (unregister) {
1053 ldout(cct, 10) << " need to unregister linger op "
1054 << op->linger_id << dendl;
1055 op->get();
1056 unregister_lingers.push_back(op);
1057 }
1058 break;
1059 }
1060 }
1061
1062 // check for changed request mappings
1063 map<ceph_tid_t,Op*>::iterator p = s->ops.begin();
1064 while (p != s->ops.end()) {
1065 Op *op = p->second;
1066 ++p; // check_op_pool_dne() may touch ops; prevent iterator invalidation
1067 ldout(cct, 10) << " checking op " << op->tid << dendl;
1068 bool force_resend_writes = cluster_full;
1069 if (pool_full_map)
1070 force_resend_writes = force_resend_writes ||
1071 (*pool_full_map)[op->target.base_oloc.pool];
1072 int r = _calc_target(&op->target,
1073 op->session ? op->session->con.get() : nullptr);
1074 switch (r) {
1075 case RECALC_OP_TARGET_NO_ACTION:
1076 if (!force_resend && !(force_resend_writes && op->respects_full()))
1077 break;
1078 // -- fall-thru --
1079 case RECALC_OP_TARGET_NEED_RESEND:
1080 if (op->session) {
1081 _session_op_remove(op->session, op);
1082 }
1083 need_resend[op->tid] = op;
1084 _op_cancel_map_check(op);
1085 break;
1086 case RECALC_OP_TARGET_POOL_DNE:
1087 _check_op_pool_dne(op, &sl);
1088 break;
1089 }
1090 }
1091
1092 // commands
1093 map<ceph_tid_t,CommandOp*>::iterator cp = s->command_ops.begin();
1094 while (cp != s->command_ops.end()) {
1095 CommandOp *c = cp->second;
1096 ++cp;
1097 ldout(cct, 10) << " checking command " << c->tid << dendl;
1098 bool force_resend_writes = cluster_full;
1099 if (pool_full_map)
1100 force_resend_writes = force_resend_writes ||
1101 (*pool_full_map)[c->target_pg.pool()];
1102 int r = _calc_command_target(c, sul);
1103 switch (r) {
1104 case RECALC_OP_TARGET_NO_ACTION:
1105 // resend if skipped map; otherwise do nothing.
1106 if (!force_resend && !force_resend_writes)
1107 break;
1108 // -- fall-thru --
1109 case RECALC_OP_TARGET_NEED_RESEND:
1110 need_resend_command[c->tid] = c;
1111 if (c->session) {
1112 _session_command_op_remove(c->session, c);
1113 }
1114 _command_cancel_map_check(c);
1115 break;
1116 case RECALC_OP_TARGET_POOL_DNE:
1117 case RECALC_OP_TARGET_OSD_DNE:
1118 case RECALC_OP_TARGET_OSD_DOWN:
1119 _check_command_map_dne(c);
1120 break;
1121 }
1122 }
1123
1124 sl.unlock();
1125
1126 for (list<LingerOp*>::iterator iter = unregister_lingers.begin();
1127 iter != unregister_lingers.end();
1128 ++iter) {
1129 _linger_cancel(*iter);
1130 (*iter)->put();
1131 }
1132 }
1133
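// Apply the incremental/full maps carried by the message, rescan every
// session for ops whose placement changed, resend what needs resending, and
// complete any contexts waiting for an epoch we have now reached.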
1134 void Objecter::handle_osd_map(MOSDMap *m)
1135 {
1136 shunique_lock sul(rwlock, acquire_unique);
1137 if (!initialized)
1138 return;
1139
1140 assert(osdmap);
1141
1142 if (m->fsid != monc->get_fsid()) {
1143 ldout(cct, 0) << "handle_osd_map fsid " << m->fsid
1144 << " != " << monc->get_fsid() << dendl;
1145 return;
1146 }
1147
1148 bool was_pauserd = osdmap->test_flag(CEPH_OSDMAP_PAUSERD);
1149 bool cluster_full = _osdmap_full_flag();
1150 bool was_pausewr = osdmap->test_flag(CEPH_OSDMAP_PAUSEWR) || cluster_full ||
1151 _osdmap_has_pool_full();
1152 map<int64_t, bool> pool_full_map;
1153 for (map<int64_t, pg_pool_t>::const_iterator it
1154 = osdmap->get_pools().begin();
1155 it != osdmap->get_pools().end(); ++it)
1156 pool_full_map[it->first] = _osdmap_pool_full(it->second);
1157
1158
1159 list<LingerOp*> need_resend_linger;
1160 map<ceph_tid_t, Op*> need_resend;
1161 map<ceph_tid_t, CommandOp*> need_resend_command;
1162
1163 if (m->get_last() <= osdmap->get_epoch()) {
1164 ldout(cct, 3) << "handle_osd_map ignoring epochs ["
1165 << m->get_first() << "," << m->get_last()
1166 << "] <= " << osdmap->get_epoch() << dendl;
1167 } else {
1168 ldout(cct, 3) << "handle_osd_map got epochs ["
1169 << m->get_first() << "," << m->get_last()
1170 << "] > " << osdmap->get_epoch() << dendl;
1171
1172 if (osdmap->get_epoch()) {
1173 bool skipped_map = false;
1174 // we want incrementals
1175 for (epoch_t e = osdmap->get_epoch() + 1;
1176 e <= m->get_last();
1177 e++) {
1178
1179 if (osdmap->get_epoch() == e-1 &&
1180 m->incremental_maps.count(e)) {
1181 ldout(cct, 3) << "handle_osd_map decoding incremental epoch " << e
1182 << dendl;
1183 OSDMap::Incremental inc(m->incremental_maps[e]);
1184 osdmap->apply_incremental(inc);
1185
1186 emit_blacklist_events(inc);
1187
1188 logger->inc(l_osdc_map_inc);
1189 }
1190 else if (m->maps.count(e)) {
1191 ldout(cct, 3) << "handle_osd_map decoding full epoch " << e << dendl;
1192 OSDMap *new_osdmap = new OSDMap();
1193 new_osdmap->decode(m->maps[e]);
1194
1195 emit_blacklist_events(*osdmap, *new_osdmap);
1196
1197 osdmap = new_osdmap;
1198
1199 logger->inc(l_osdc_map_full);
1200 }
1201 else {
1202 if (e >= m->get_oldest()) {
1203 ldout(cct, 3) << "handle_osd_map requesting missing epoch "
1204 << osdmap->get_epoch()+1 << dendl;
1205 _maybe_request_map();
1206 break;
1207 }
1208 ldout(cct, 3) << "handle_osd_map missing epoch "
1209 << osdmap->get_epoch()+1
1210 << ", jumping to " << m->get_oldest() << dendl;
1211 e = m->get_oldest() - 1;
1212 skipped_map = true;
1213 continue;
1214 }
1215 logger->set(l_osdc_map_epoch, osdmap->get_epoch());
1216
1217 cluster_full = cluster_full || _osdmap_full_flag();
1218 update_pool_full_map(pool_full_map);
1219
1220 // check all outstanding requests on every epoch
1221 _scan_requests(homeless_session, skipped_map, cluster_full,
1222 &pool_full_map, need_resend,
1223 need_resend_linger, need_resend_command, sul);
1224 for (map<int,OSDSession*>::iterator p = osd_sessions.begin();
1225 p != osd_sessions.end(); ) {
1226 OSDSession *s = p->second;
1227 _scan_requests(s, skipped_map, cluster_full,
1228 &pool_full_map, need_resend,
1229 need_resend_linger, need_resend_command, sul);
1230 ++p;
1231 // osd down or addr change?
1232 if (!osdmap->is_up(s->osd) ||
1233 (s->con &&
1234 s->con->get_peer_addr() != osdmap->get_inst(s->osd).addr)) {
1235 close_session(s);
1236 }
1237 }
1238
1239 assert(e == osdmap->get_epoch());
1240 }
1241
1242 } else {
1243 // first map. we want the full thing.
1244 if (m->maps.count(m->get_last())) {
1245 for (map<int,OSDSession*>::iterator p = osd_sessions.begin();
1246 p != osd_sessions.end(); ++p) {
1247 OSDSession *s = p->second;
1248 _scan_requests(s, false, false, NULL, need_resend,
1249 need_resend_linger, need_resend_command, sul);
1250 }
1251 ldout(cct, 3) << "handle_osd_map decoding full epoch "
1252 << m->get_last() << dendl;
1253 osdmap->decode(m->maps[m->get_last()]);
1254
1255 _scan_requests(homeless_session, false, false, NULL,
1256 need_resend, need_resend_linger,
1257 need_resend_command, sul);
1258 } else {
1259 ldout(cct, 3) << "handle_osd_map hmm, i want a full map, requesting"
1260 << dendl;
1261 monc->sub_want("osdmap", 0, CEPH_SUBSCRIBE_ONETIME);
1262 monc->renew_subs();
1263 }
1264 }
1265 }
1266
1267 // make sure need_resend targets reflect latest map
1268 for (auto p = need_resend.begin(); p != need_resend.end(); ) {
1269 Op *op = p->second;
1270 if (op->target.epoch < osdmap->get_epoch()) {
1271 ldout(cct, 10) << __func__ << " checking op " << p->first << dendl;
1272 int r = _calc_target(&op->target, nullptr);
1273 if (r == RECALC_OP_TARGET_POOL_DNE) {
1274 p = need_resend.erase(p);
1275 _check_op_pool_dne(op, nullptr);
1276 } else {
1277 ++p;
1278 }
1279 } else {
1280 ++p;
1281 }
1282 }
1283
1284 bool pauserd = osdmap->test_flag(CEPH_OSDMAP_PAUSERD);
1285 bool pausewr = osdmap->test_flag(CEPH_OSDMAP_PAUSEWR) || _osdmap_full_flag()
1286 || _osdmap_has_pool_full();
1287
1288 // was/is paused?
1289 if (was_pauserd || was_pausewr || pauserd || pausewr ||
1290 osdmap->get_epoch() < epoch_barrier) {
1291 _maybe_request_map();
1292 }
1293
1294 // resend requests
1295 for (map<ceph_tid_t, Op*>::iterator p = need_resend.begin();
1296 p != need_resend.end(); ++p) {
1297 Op *op = p->second;
1298 OSDSession *s = op->session;
1299 bool mapped_session = false;
1300 if (!s) {
1301 int r = _map_session(&op->target, &s, sul);
1302 assert(r == 0);
1303 mapped_session = true;
1304 } else {
1305 get_session(s);
1306 }
1307 OSDSession::unique_lock sl(s->lock);
1308 if (mapped_session) {
1309 _session_op_assign(s, op);
1310 }
1311 if (op->should_resend) {
1312 if (!op->session->is_homeless() && !op->target.paused) {
1313 logger->inc(l_osdc_op_resend);
1314 _send_op(op);
1315 }
1316 } else {
1317 _op_cancel_map_check(op);
1318 _cancel_linger_op(op);
1319 }
1320 sl.unlock();
1321 put_session(s);
1322 }
1323 for (list<LingerOp*>::iterator p = need_resend_linger.begin();
1324 p != need_resend_linger.end(); ++p) {
1325 LingerOp *op = *p;
1326 if (!op->session) {
1327 _calc_target(&op->target, nullptr);
1328 OSDSession *s = NULL;
1329 int const r = _get_session(op->target.osd, &s, sul);
1330 assert(r == 0);
1331 assert(s != NULL);
1332 op->session = s;
1333 put_session(s);
1334 }
1335 if (!op->session->is_homeless()) {
1336 logger->inc(l_osdc_linger_resend);
1337 _send_linger(op, sul);
1338 }
1339 }
1340 for (map<ceph_tid_t,CommandOp*>::iterator p = need_resend_command.begin();
1341 p != need_resend_command.end(); ++p) {
1342 CommandOp *c = p->second;
1343 if (c->target.osd >= 0) {
1344 _assign_command_session(c, sul);
1345 if (c->session && !c->session->is_homeless()) {
1346 _send_command(c);
1347 }
1348 }
1349 }
1350
1351 _dump_active();
1352
1353 // finish any Contexts that were waiting on a map update
1354 map<epoch_t,list< pair< Context*, int > > >::iterator p =
1355 waiting_for_map.begin();
1356 while (p != waiting_for_map.end() &&
1357 p->first <= osdmap->get_epoch()) {
1358 //go through the list and call the onfinish methods
1359 for (list<pair<Context*, int> >::iterator i = p->second.begin();
1360 i != p->second.end(); ++i) {
1361 i->first->complete(i->second);
1362 }
1363 waiting_for_map.erase(p++);
1364 }
1365
1366 monc->sub_got("osdmap", osdmap->get_epoch());
1367
1368 if (!waiting_for_map.empty()) {
1369 _maybe_request_map();
1370 }
1371 }
1372
1373 void Objecter::enable_blacklist_events()
1374 {
1375 unique_lock wl(rwlock);
1376
1377 blacklist_events_enabled = true;
1378 }
1379
1380 void Objecter::consume_blacklist_events(std::set<entity_addr_t> *events)
1381 {
1382 unique_lock wl(rwlock);
1383
1384 if (events->empty()) {
1385 events->swap(blacklist_events);
1386 } else {
1387 for (const auto &i : blacklist_events) {
1388 events->insert(i);
1389 }
1390 blacklist_events.clear();
1391 }
1392 }
1393
1394 void Objecter::emit_blacklist_events(const OSDMap::Incremental &inc)
1395 {
1396 if (!blacklist_events_enabled) {
1397 return;
1398 }
1399
1400 for (const auto &i : inc.new_blacklist) {
1401 blacklist_events.insert(i.first);
1402 }
1403 }
1404
1405 void Objecter::emit_blacklist_events(const OSDMap &old_osd_map,
1406 const OSDMap &new_osd_map)
1407 {
1408 if (!blacklist_events_enabled) {
1409 return;
1410 }
1411
1412 std::set<entity_addr_t> old_set;
1413 std::set<entity_addr_t> new_set;
1414
1415 old_osd_map.get_blacklist(&old_set);
1416 new_osd_map.get_blacklist(&new_set);
1417
1418 std::set<entity_addr_t> delta_set;
1419 std::set_difference(
1420 new_set.begin(), new_set.end(), old_set.begin(), old_set.end(),
1421 std::inserter(delta_set, delta_set.begin()));
1422 blacklist_events.insert(delta_set.begin(), delta_set.end());
1423 }
1424
1425 // op pool check
1426
1427 void Objecter::C_Op_Map_Latest::finish(int r)
1428 {
1429 if (r == -EAGAIN || r == -ECANCELED)
1430 return;
1431
1432 lgeneric_subdout(objecter->cct, objecter, 10)
1433 << "op_map_latest r=" << r << " tid=" << tid
1434 << " latest " << latest << dendl;
1435
1436 Objecter::unique_lock wl(objecter->rwlock);
1437
1438 map<ceph_tid_t, Op*>::iterator iter =
1439 objecter->check_latest_map_ops.find(tid);
1440 if (iter == objecter->check_latest_map_ops.end()) {
1441 lgeneric_subdout(objecter->cct, objecter, 10)
1442 << "op_map_latest op "<< tid << " not found" << dendl;
1443 return;
1444 }
1445
1446 Op *op = iter->second;
1447 objecter->check_latest_map_ops.erase(iter);
1448
1449 lgeneric_subdout(objecter->cct, objecter, 20)
1450 << "op_map_latest op "<< op << dendl;
1451
1452 if (op->map_dne_bound == 0)
1453 op->map_dne_bound = latest;
1454
1455 OSDSession::unique_lock sl(op->session->lock, defer_lock);
1456 objecter->_check_op_pool_dne(op, &sl);
1457
1458 op->put();
1459 }
1460
1461 int Objecter::pool_snap_by_name(int64_t poolid, const char *snap_name,
1462 snapid_t *snap) const
1463 {
1464 shared_lock rl(rwlock);
1465
1466 auto& pools = osdmap->get_pools();
1467 auto iter = pools.find(poolid);
1468 if (iter == pools.end()) {
1469 return -ENOENT;
1470 }
1471 const pg_pool_t& pg_pool = iter->second;
1472 for (auto p = pg_pool.snaps.begin();
1473 p != pg_pool.snaps.end();
1474 ++p) {
1475 if (p->second.name == snap_name) {
1476 *snap = p->first;
1477 return 0;
1478 }
1479 }
1480 return -ENOENT;
1481 }
1482
1483 int Objecter::pool_snap_get_info(int64_t poolid, snapid_t snap,
1484 pool_snap_info_t *info) const
1485 {
1486 shared_lock rl(rwlock);
1487
1488 auto& pools = osdmap->get_pools();
1489 auto iter = pools.find(poolid);
1490 if (iter == pools.end()) {
1491 return -ENOENT;
1492 }
1493 const pg_pool_t& pg_pool = iter->second;
1494 auto p = pg_pool.snaps.find(snap);
1495 if (p == pg_pool.snaps.end())
1496 return -ENOENT;
1497 *info = p->second;
1498
1499 return 0;
1500 }
1501
1502 int Objecter::pool_snap_list(int64_t poolid, vector<uint64_t> *snaps)
1503 {
1504 shared_lock rl(rwlock);
1505
1506 const pg_pool_t *pi = osdmap->get_pg_pool(poolid);
1507 if (!pi)
1508 return -ENOENT;
1509 for (map<snapid_t,pool_snap_info_t>::const_iterator p = pi->snaps.begin();
1510 p != pi->snaps.end();
1511 ++p) {
1512 snaps->push_back(p->first);
1513 }
1514 return 0;
1515 }
1516
1517 // sl may be unlocked.
1518 void Objecter::_check_op_pool_dne(Op *op, unique_lock *sl)
1519 {
1520 // rwlock is locked unique
1521
1522 if (op->target.pool_ever_existed) {
1523 // the pool previously existed and now it does not, which means it
1524 // was deleted.
1525 op->map_dne_bound = osdmap->get_epoch();
1526 ldout(cct, 10) << "check_op_pool_dne tid " << op->tid
1527 << " pool previously exists but now does not"
1528 << dendl;
1529 } else {
1530 ldout(cct, 10) << "check_op_pool_dne tid " << op->tid
1531 << " current " << osdmap->get_epoch()
1532 << " map_dne_bound " << op->map_dne_bound
1533 << dendl;
1534 }
1535 if (op->map_dne_bound > 0) {
1536 if (osdmap->get_epoch() >= op->map_dne_bound) {
1537 // we had a new enough map
1538 ldout(cct, 10) << "check_op_pool_dne tid " << op->tid
1539 << " concluding pool " << op->target.base_pgid.pool()
1540 << " dne" << dendl;
1541 if (op->onfinish) {
1542 op->onfinish->complete(-ENOENT);
1543 }
1544
1545 OSDSession *s = op->session;
1546 if (s) {
1547 assert(s != NULL);
1548 assert(sl->mutex() == &s->lock);
1549 bool session_locked = sl->owns_lock();
1550 if (!session_locked) {
1551 sl->lock();
1552 }
1553 _finish_op(op, 0);
1554 if (!session_locked) {
1555 sl->unlock();
1556 }
1557 } else {
1558 _finish_op(op, 0); // no session
1559 }
1560 }
1561 } else {
1562 _send_op_map_check(op);
1563 }
1564 }
1565
1566 void Objecter::_send_op_map_check(Op *op)
1567 {
1568 // rwlock is locked unique
1569 // ask the monitor
1570 if (check_latest_map_ops.count(op->tid) == 0) {
1571 op->get();
1572 check_latest_map_ops[op->tid] = op;
1573 C_Op_Map_Latest *c = new C_Op_Map_Latest(this, op->tid);
1574 monc->get_version("osdmap", &c->latest, NULL, c);
1575 }
1576 }
1577
1578 void Objecter::_op_cancel_map_check(Op *op)
1579 {
1580 // rwlock is locked unique
1581 map<ceph_tid_t, Op*>::iterator iter =
1582 check_latest_map_ops.find(op->tid);
1583 if (iter != check_latest_map_ops.end()) {
1584 Op *op = iter->second;
1585 op->put();
1586 check_latest_map_ops.erase(iter);
1587 }
1588 }
1589
1590 // linger pool check
1591
1592 void Objecter::C_Linger_Map_Latest::finish(int r)
1593 {
1594 if (r == -EAGAIN || r == -ECANCELED) {
1595 // ignore callback; we will retry in resend_mon_ops()
1596 return;
1597 }
1598
1599 unique_lock wl(objecter->rwlock);
1600
1601 map<uint64_t, LingerOp*>::iterator iter =
1602 objecter->check_latest_map_lingers.find(linger_id);
1603 if (iter == objecter->check_latest_map_lingers.end()) {
1604 return;
1605 }
1606
1607 LingerOp *op = iter->second;
1608 objecter->check_latest_map_lingers.erase(iter);
1609
1610 if (op->map_dne_bound == 0)
1611 op->map_dne_bound = latest;
1612
1613 bool unregister;
1614 objecter->_check_linger_pool_dne(op, &unregister);
1615
1616 if (unregister) {
1617 objecter->_linger_cancel(op);
1618 }
1619
1620 op->put();
1621 }
1622
1623 void Objecter::_check_linger_pool_dne(LingerOp *op, bool *need_unregister)
1624 {
1625 // rwlock is locked unique
1626
1627 *need_unregister = false;
1628
1629 if (op->register_gen > 0) {
1630 ldout(cct, 10) << "_check_linger_pool_dne linger_id " << op->linger_id
1631 << " pool previously existed but now does not"
1632 << dendl;
1633 op->map_dne_bound = osdmap->get_epoch();
1634 } else {
1635 ldout(cct, 10) << "_check_linger_pool_dne linger_id " << op->linger_id
1636 << " current " << osdmap->get_epoch()
1637 << " map_dne_bound " << op->map_dne_bound
1638 << dendl;
1639 }
1640 if (op->map_dne_bound > 0) {
1641 if (osdmap->get_epoch() >= op->map_dne_bound) {
1642 if (op->on_reg_commit) {
1643 op->on_reg_commit->complete(-ENOENT);
1644 }
1645 *need_unregister = true;
1646 }
1647 } else {
1648 _send_linger_map_check(op);
1649 }
1650 }
1651
1652 void Objecter::_send_linger_map_check(LingerOp *op)
1653 {
1654 // ask the monitor
1655 if (check_latest_map_lingers.count(op->linger_id) == 0) {
1656 op->get();
1657 check_latest_map_lingers[op->linger_id] = op;
1658 C_Linger_Map_Latest *c = new C_Linger_Map_Latest(this, op->linger_id);
1659 monc->get_version("osdmap", &c->latest, NULL, c);
1660 }
1661 }
1662
1663 void Objecter::_linger_cancel_map_check(LingerOp *op)
1664 {
1665 // rwlock is locked unique
1666
1667 map<uint64_t, LingerOp*>::iterator iter =
1668 check_latest_map_lingers.find(op->linger_id);
1669 if (iter != check_latest_map_lingers.end()) {
1670 LingerOp *op = iter->second;
1671 op->put();
1672 check_latest_map_lingers.erase(iter);
1673 }
1674 }
1675
1676 // command pool check
1677
1678 void Objecter::C_Command_Map_Latest::finish(int r)
1679 {
1680 if (r == -EAGAIN || r == -ECANCELED) {
1681 // ignore callback; we will retry in resend_mon_ops()
1682 return;
1683 }
1684
1685 unique_lock wl(objecter->rwlock);
1686
1687 map<uint64_t, CommandOp*>::iterator iter =
1688 objecter->check_latest_map_commands.find(tid);
1689 if (iter == objecter->check_latest_map_commands.end()) {
1690 return;
1691 }
1692
1693 CommandOp *c = iter->second;
1694 objecter->check_latest_map_commands.erase(iter);
1695
1696 if (c->map_dne_bound == 0)
1697 c->map_dne_bound = latest;
1698
1699 objecter->_check_command_map_dne(c);
1700
1701 c->put();
1702 }
1703
1704 void Objecter::_check_command_map_dne(CommandOp *c)
1705 {
1706 // rwlock is locked unique
1707
1708 ldout(cct, 10) << "_check_command_map_dne tid " << c->tid
1709 << " current " << osdmap->get_epoch()
1710 << " map_dne_bound " << c->map_dne_bound
1711 << dendl;
1712 if (c->map_dne_bound > 0) {
1713 if (osdmap->get_epoch() >= c->map_dne_bound) {
1714 _finish_command(c, c->map_check_error, c->map_check_error_str);
1715 }
1716 } else {
1717 _send_command_map_check(c);
1718 }
1719 }
1720
1721 void Objecter::_send_command_map_check(CommandOp *c)
1722 {
1723 // rwlock is locked unique
1724
1725 // ask the monitor
1726 if (check_latest_map_commands.count(c->tid) == 0) {
1727 c->get();
1728 check_latest_map_commands[c->tid] = c;
1729 C_Command_Map_Latest *f = new C_Command_Map_Latest(this, c->tid);
1730 monc->get_version("osdmap", &f->latest, NULL, f);
1731 }
1732 }
1733
1734 void Objecter::_command_cancel_map_check(CommandOp *c)
1735 {
1736 // rwlock is locked unique
1737
1738 map<uint64_t, CommandOp*>::iterator iter =
1739 check_latest_map_commands.find(c->tid);
1740 if (iter != check_latest_map_commands.end()) {
1741 CommandOp *c = iter->second;
1742 c->put();
1743 check_latest_map_commands.erase(iter);
1744 }
1745 }
1746
1747
1748 /**
1749 * Look up OSDSession by OSD id.
1750 *
1751 * @returns 0 on success, or -EAGAIN if the lock context requires
1752 * promotion to write.
1753 */
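// (A caller holding rwlock only shared must, on -EAGAIN, re-take the lock
// exclusively and retry; the retry itself lives at the call sites.)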
1754 int Objecter::_get_session(int osd, OSDSession **session, shunique_lock& sul)
1755 {
1756 assert(sul && sul.mutex() == &rwlock);
1757
1758 if (osd < 0) {
1759 *session = homeless_session;
1760 ldout(cct, 20) << __func__ << " osd=" << osd << " returning homeless"
1761 << dendl;
1762 return 0;
1763 }
1764
1765 map<int,OSDSession*>::iterator p = osd_sessions.find(osd);
1766 if (p != osd_sessions.end()) {
1767 OSDSession *s = p->second;
1768 s->get();
1769 *session = s;
1770 ldout(cct, 20) << __func__ << " s=" << s << " osd=" << osd << " "
1771 << s->get_nref() << dendl;
1772 return 0;
1773 }
1774 if (!sul.owns_lock()) {
1775 return -EAGAIN;
1776 }
1777 OSDSession *s = new OSDSession(cct, osd);
1778 osd_sessions[osd] = s;
1779 s->con = messenger->get_connection(osdmap->get_inst(osd));
1780 s->con->set_priv(s->get());
1781 logger->inc(l_osdc_osd_session_open);
1782 logger->set(l_osdc_osd_sessions, osd_sessions.size());
1783 s->get();
1784 *session = s;
1785 ldout(cct, 20) << __func__ << " s=" << s << " osd=" << osd << " "
1786 << s->get_nref() << dendl;
1787 return 0;
1788 }
1789
1790 void Objecter::put_session(Objecter::OSDSession *s)
1791 {
1792 if (s && !s->is_homeless()) {
1793 ldout(cct, 20) << __func__ << " s=" << s << " osd=" << s->osd << " "
1794 << s->get_nref() << dendl;
1795 s->put();
1796 }
1797 }
1798
1799 void Objecter::get_session(Objecter::OSDSession *s)
1800 {
1801 assert(s != NULL);
1802
1803 if (!s->is_homeless()) {
1804 ldout(cct, 20) << __func__ << " s=" << s << " osd=" << s->osd << " "
1805 << s->get_nref() << dendl;
1806 s->get();
1807 }
1808 }
1809
1810 void Objecter::_reopen_session(OSDSession *s)
1811 {
1812 // s->lock is locked
1813
1814 entity_inst_t inst = osdmap->get_inst(s->osd);
1815 ldout(cct, 10) << "reopen_session osd." << s->osd << " session, addr now "
1816 << inst << dendl;
1817 if (s->con) {
1818 s->con->set_priv(NULL);
1819 s->con->mark_down();
1820 logger->inc(l_osdc_osd_session_close);
1821 }
1822 s->con = messenger->get_connection(inst);
1823 s->con->set_priv(s->get());
1824 s->incarnation++;
1825 logger->inc(l_osdc_osd_session_open);
1826 }
1827
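// Tear down the session's connection and re-home its outstanding ops,
// linger ops, and commands onto the homeless session.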
1828 void Objecter::close_session(OSDSession *s)
1829 {
1830 // rwlock is locked unique
1831
1832 ldout(cct, 10) << "close_session for osd." << s->osd << dendl;
1833 if (s->con) {
1834 s->con->set_priv(NULL);
1835 s->con->mark_down();
1836 logger->inc(l_osdc_osd_session_close);
1837 }
1838 OSDSession::unique_lock sl(s->lock);
1839
1840 std::list<LingerOp*> homeless_lingers;
1841 std::list<CommandOp*> homeless_commands;
1842 std::list<Op*> homeless_ops;
1843
1844 while (!s->linger_ops.empty()) {
1845 std::map<uint64_t, LingerOp*>::iterator i = s->linger_ops.begin();
1846 ldout(cct, 10) << " linger_op " << i->first << dendl;
1847 homeless_lingers.push_back(i->second);
1848 _session_linger_op_remove(s, i->second);
1849 }
1850
1851 while (!s->ops.empty()) {
1852 std::map<ceph_tid_t, Op*>::iterator i = s->ops.begin();
1853 ldout(cct, 10) << " op " << i->first << dendl;
1854 homeless_ops.push_back(i->second);
1855 _session_op_remove(s, i->second);
1856 }
1857
1858 while (!s->command_ops.empty()) {
1859 std::map<ceph_tid_t, CommandOp*>::iterator i = s->command_ops.begin();
1860 ldout(cct, 10) << " command_op " << i->first << dendl;
1861 homeless_commands.push_back(i->second);
1862 _session_command_op_remove(s, i->second);
1863 }
1864
1865 osd_sessions.erase(s->osd);
1866 sl.unlock();
1867 put_session(s);
1868
1869 // Assign any leftover ops to the homeless session
1870 {
1871 OSDSession::unique_lock hsl(homeless_session->lock);
1872 for (std::list<LingerOp*>::iterator i = homeless_lingers.begin();
1873 i != homeless_lingers.end(); ++i) {
1874 _session_linger_op_assign(homeless_session, *i);
1875 }
1876 for (std::list<Op*>::iterator i = homeless_ops.begin();
1877 i != homeless_ops.end(); ++i) {
1878 _session_op_assign(homeless_session, *i);
1879 }
1880 for (std::list<CommandOp*>::iterator i = homeless_commands.begin();
1881 i != homeless_commands.end(); ++i) {
1882 _session_command_op_assign(homeless_session, *i);
1883 }
1884 }
1885
1886 logger->set(l_osdc_osd_sessions, osd_sessions.size());
1887 }
1888
1889 void Objecter::wait_for_osd_map()
1890 {
1891 unique_lock l(rwlock);
1892 if (osdmap->get_epoch()) {
1893 l.unlock();
1894 return;
1895 }
1896
1897 // Leave this since it goes with C_SafeCond
1898 Mutex lock("");
1899 Cond cond;
1900 bool done;
1901 lock.Lock();
1902 C_SafeCond *context = new C_SafeCond(&lock, &cond, &done, NULL);
1903 waiting_for_map[0].push_back(pair<Context*, int>(context, 0));
1904 l.unlock();
1905 while (!done)
1906 cond.Wait(lock);
1907 lock.Unlock();
1908 }
1909
1910 struct C_Objecter_GetVersion : public Context {
1911 Objecter *objecter;
1912 uint64_t oldest, newest;
1913 Context *fin;
1914 C_Objecter_GetVersion(Objecter *o, Context *c)
1915 : objecter(o), oldest(0), newest(0), fin(c) {}
1916 void finish(int r) override {
1917 if (r >= 0) {
1918 objecter->get_latest_version(oldest, newest, fin);
1919 } else if (r == -EAGAIN) { // try again as instructed
1920 objecter->wait_for_latest_osdmap(fin);
1921 } else {
1922 // it doesn't return any other error codes!
1923 ceph_abort();
1924 }
1925 }
1926 };
1927
1928 void Objecter::wait_for_latest_osdmap(Context *fin)
1929 {
1930 ldout(cct, 10) << __func__ << dendl;
1931 C_Objecter_GetVersion *c = new C_Objecter_GetVersion(this, fin);
1932 monc->get_version("osdmap", &c->newest, &c->oldest, c);
1933 }
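// A minimal blocking-caller sketch (an illustration only, assuming C_SaferCond
// from common/Cond.h, which does not delete itself on complete()):
//
//   C_SaferCond fin;
//   objecter->wait_for_latest_osdmap(&fin);
//   int r = fin.wait();  // returns once the newest osdmap has been handled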
1934
1935 void Objecter::get_latest_version(epoch_t oldest, epoch_t newest, Context *fin)
1936 {
1937 unique_lock wl(rwlock);
1938 _get_latest_version(oldest, newest, fin);
1939 }
1940
1941 void Objecter::_get_latest_version(epoch_t oldest, epoch_t newest,
1942 Context *fin)
1943 {
1944 // rwlock is locked unique
1945 if (osdmap->get_epoch() >= newest) {
1946 ldout(cct, 10) << __func__ << " latest " << newest << ", have it" << dendl;
1947 if (fin)
1948 fin->complete(0);
1949 return;
1950 }
1951
1952 ldout(cct, 10) << __func__ << " latest " << newest << ", waiting" << dendl;
1953 _wait_for_new_map(fin, newest, 0);
1954 }
1955
1956 void Objecter::maybe_request_map()
1957 {
1958 shared_lock rl(rwlock);
1959 _maybe_request_map();
1960 }
1961
1962 void Objecter::_maybe_request_map()
1963 {
1964 // rwlock is locked
1965 int flag = 0;
1966 if (_osdmap_full_flag()
1967 || osdmap->test_flag(CEPH_OSDMAP_PAUSERD)
1968 || osdmap->test_flag(CEPH_OSDMAP_PAUSEWR)) {
1969 ldout(cct, 10) << "_maybe_request_map subscribing (continuous) to next "
1970 "osd map (FULL or pause flag is set)" << dendl;
1971 } else {
1972 ldout(cct, 10)
1973 << "_maybe_request_map subscribing (onetime) to next osd map" << dendl;
1974 flag = CEPH_SUBSCRIBE_ONETIME;
1975 }
1976 epoch_t epoch = osdmap->get_epoch() ? osdmap->get_epoch()+1 : 0;
1977 if (monc->sub_want("osdmap", epoch, flag)) {
1978 monc->renew_subs();
1979 }
1980 }
1981
1982 void Objecter::_wait_for_new_map(Context *c, epoch_t epoch, int err)
1983 {
1984 // rwlock is locked unique
1985 waiting_for_map[epoch].push_back(pair<Context *, int>(c, err));
1986 _maybe_request_map();
1987 }
1988
1989
1990 /**
1991 * Use this together with wait_for_map: this is a pre-check to avoid
1992 * allocating a Context for wait_for_map if we can see that we
1993 * definitely already have the epoch.
1994 *
1995 * This does *not* replace the need to handle the return value of
1996 * wait_for_map: just because we don't have it in this pre-check
1997 * doesn't mean we won't have it when calling back into wait_for_map,
1998 * since the objecter lock is dropped in between.
1999 */
2000 bool Objecter::have_map(const epoch_t epoch)
2001 {
2002 shared_lock rl(rwlock);
2003 if (osdmap->get_epoch() >= epoch) {
2004 return true;
2005 } else {
2006 return false;
2007 }
2008 }
2009
2010 bool Objecter::wait_for_map(epoch_t epoch, Context *c, int err)
2011 {
2012 unique_lock wl(rwlock);
2013 if (osdmap->get_epoch() >= epoch) {
2014 return true;
2015 }
2016 _wait_for_new_map(c, epoch, err);
2017 return false;
2018 }
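// A caller-side sketch of the pre-check pattern described above (illustration
// only; C_SaferCond from common/Cond.h is assumed, and `e` is whatever epoch
// the caller cares about):
//
//   C_SaferCond waiter;
//   if (!objecter->have_map(e) &&            // cheap shared-lock pre-check
//       !objecter->wait_for_map(e, &waiter, 0)) {
//     waiter.wait();                         // epoch e (or newer) arrived
//   }
//   // at this point osdmap epoch >= e has been observed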
2019
2020 void Objecter::kick_requests(OSDSession *session)
2021 {
2022 ldout(cct, 10) << "kick_requests for osd." << session->osd << dendl;
2023
2024 map<uint64_t, LingerOp *> lresend;
2025 unique_lock wl(rwlock);
2026
2027 OSDSession::unique_lock sl(session->lock);
2028 _kick_requests(session, lresend);
2029 sl.unlock();
2030
2031 _linger_ops_resend(lresend, wl);
2032 }
2033
2034 void Objecter::_kick_requests(OSDSession *session,
2035 map<uint64_t, LingerOp *>& lresend)
2036 {
2037 // rwlock is locked unique
2038
2039 // clear backoffs
2040 session->backoffs.clear();
2041 session->backoffs_by_id.clear();
2042
2043 // resend ops
2044 map<ceph_tid_t,Op*> resend; // resend in tid order
2045 for (map<ceph_tid_t, Op*>::iterator p = session->ops.begin();
2046 p != session->ops.end();) {
2047 Op *op = p->second;
2048 ++p;
2049 logger->inc(l_osdc_op_resend);
2050 if (op->should_resend) {
2051 if (!op->target.paused)
2052 resend[op->tid] = op;
2053 } else {
2054 _op_cancel_map_check(op);
2055 _cancel_linger_op(op);
2056 }
2057 }
2058
2059 while (!resend.empty()) {
2060 _send_op(resend.begin()->second);
2061 resend.erase(resend.begin());
2062 }
2063
2064 // resend lingers
2065 for (map<ceph_tid_t, LingerOp*>::iterator j = session->linger_ops.begin();
2066 j != session->linger_ops.end(); ++j) {
2067 LingerOp *op = j->second;
2068 op->get();
2069 logger->inc(l_osdc_linger_resend);
2070 assert(lresend.count(j->first) == 0);
2071 lresend[j->first] = op;
2072 }
2073
2074 // resend commands
2075 map<uint64_t,CommandOp*> cresend; // resend in order
2076 for (map<ceph_tid_t, CommandOp*>::iterator k = session->command_ops.begin();
2077 k != session->command_ops.end(); ++k) {
2078 logger->inc(l_osdc_command_resend);
2079 cresend[k->first] = k->second;
2080 }
2081 while (!cresend.empty()) {
2082 _send_command(cresend.begin()->second);
2083 cresend.erase(cresend.begin());
2084 }
2085 }
2086
2087 void Objecter::_linger_ops_resend(map<uint64_t, LingerOp *>& lresend,
2088 unique_lock& ul)
2089 {
2090 assert(ul.owns_lock());
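// _send_linger() expects a shunique_lock, so temporarily move the caller's
// unique_lock into one and hand exclusive ownership back (via
// release_to_unique()) before returning.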
2091 shunique_lock sul(std::move(ul));
2092 while (!lresend.empty()) {
2093 LingerOp *op = lresend.begin()->second;
2094 if (!op->canceled) {
2095 _send_linger(op, sul);
2096 }
2097 op->put();
2098 lresend.erase(lresend.begin());
2099 }
2100 ul = unique_lock(sul.release_to_unique());
2101 }
2102
2103 void Objecter::start_tick()
2104 {
2105 assert(tick_event == 0);
2106 tick_event =
2107 timer.add_event(ceph::make_timespan(cct->_conf->objecter_tick_interval),
2108 &Objecter::tick, this);
2109 }
2110
2111 void Objecter::tick()
2112 {
2113 shared_lock rl(rwlock);
2114
2115 ldout(cct, 10) << "tick" << dendl;
2116
2117 // we are only called from the tick timer event (see start_tick)
2118 tick_event = 0;
2119
2120 if (!initialized) {
2121 // we raced with shutdown
2122 ldout(cct, 10) << __func__ << " raced with shutdown" << dendl;
2123 return;
2124 }
2125
2126 set<OSDSession*> toping;
2127
2128
2129 // look for laggy requests
2130 auto cutoff = ceph::mono_clock::now();
2131 cutoff -= ceph::make_timespan(cct->_conf->objecter_timeout); // timeout
2132
2133 unsigned laggy_ops = 0;
2134
2135 for (map<int,OSDSession*>::iterator siter = osd_sessions.begin();
2136 siter != osd_sessions.end(); ++siter) {
2137 OSDSession *s = siter->second;
2138 OSDSession::lock_guard l(s->lock);
2139 bool found = false;
2140 for (map<ceph_tid_t,Op*>::iterator p = s->ops.begin();
2141 p != s->ops.end();
2142 ++p) {
2143 Op *op = p->second;
2144 assert(op->session);
2145 if (op->stamp < cutoff) {
2146 ldout(cct, 2) << " tid " << p->first << " on osd." << op->session->osd
2147 << " is laggy" << dendl;
2148 found = true;
2149 ++laggy_ops;
2150 }
2151 }
2152 for (map<uint64_t,LingerOp*>::iterator p = s->linger_ops.begin();
2153 p != s->linger_ops.end();
2154 ++p) {
2155 LingerOp *op = p->second;
2156 LingerOp::unique_lock wl(op->watch_lock);
2157 assert(op->session);
2158 ldout(cct, 10) << " pinging osd that serves lingering tid " << p->first
2159 << " (osd." << op->session->osd << ")" << dendl;
2160 found = true;
2161 if (op->is_watch && op->registered && !op->last_error)
2162 _send_linger_ping(op);
2163 }
2164 for (map<uint64_t,CommandOp*>::iterator p = s->command_ops.begin();
2165 p != s->command_ops.end();
2166 ++p) {
2167 CommandOp *op = p->second;
2168 assert(op->session);
2169 ldout(cct, 10) << " pinging osd that serves command tid " << p->first
2170 << " (osd." << op->session->osd << ")" << dendl;
2171 found = true;
2172 }
2173 if (found)
2174 toping.insert(s);
2175 }
2176 if (num_homeless_ops || !toping.empty()) {
2177 _maybe_request_map();
2178 }
2179
2180 logger->set(l_osdc_op_laggy, laggy_ops);
2181 logger->set(l_osdc_osd_laggy, toping.size());
2182
2183 if (!toping.empty()) {
2184 // send a ping to these osds, to ensure we detect any session resets
2185 // (osd reply message policy is lossy)
2186 for (set<OSDSession*>::const_iterator i = toping.begin();
2187 i != toping.end();
2188 ++i) {
2189 (*i)->con->send_message(new MPing);
2190 }
2191 }
2192
2193 // Make sure we don't reschedule if we wake up after shutdown
2194 if (initialized) {
2195 tick_event = timer.reschedule_me(ceph::make_timespan(
2196 cct->_conf->objecter_tick_interval));
2197 }
2198 }
2199
2200 void Objecter::resend_mon_ops()
2201 {
2202 unique_lock wl(rwlock);
2203
2204 ldout(cct, 10) << "resend_mon_ops" << dendl;
2205
2206 for (map<ceph_tid_t,PoolStatOp*>::iterator p = poolstat_ops.begin();
2207 p != poolstat_ops.end();
2208 ++p) {
2209 _poolstat_submit(p->second);
2210 logger->inc(l_osdc_poolstat_resend);
2211 }
2212
2213 for (map<ceph_tid_t,StatfsOp*>::iterator p = statfs_ops.begin();
2214 p != statfs_ops.end();
2215 ++p) {
2216 _fs_stats_submit(p->second);
2217 logger->inc(l_osdc_statfs_resend);
2218 }
2219
2220 for (map<ceph_tid_t,PoolOp*>::iterator p = pool_ops.begin();
2221 p != pool_ops.end();
2222 ++p) {
2223 _pool_op_submit(p->second);
2224 logger->inc(l_osdc_poolop_resend);
2225 }
2226
2227 for (map<ceph_tid_t, Op*>::iterator p = check_latest_map_ops.begin();
2228 p != check_latest_map_ops.end();
2229 ++p) {
2230 C_Op_Map_Latest *c = new C_Op_Map_Latest(this, p->second->tid);
2231 monc->get_version("osdmap", &c->latest, NULL, c);
2232 }
2233
2234 for (map<uint64_t, LingerOp*>::iterator p = check_latest_map_lingers.begin();
2235 p != check_latest_map_lingers.end();
2236 ++p) {
2237 C_Linger_Map_Latest *c
2238 = new C_Linger_Map_Latest(this, p->second->linger_id);
2239 monc->get_version("osdmap", &c->latest, NULL, c);
2240 }
2241
2242 for (map<uint64_t, CommandOp*>::iterator p
2243 = check_latest_map_commands.begin();
2244 p != check_latest_map_commands.end();
2245 ++p) {
2246 C_Command_Map_Latest *c = new C_Command_Map_Latest(this, p->second->tid);
2247 monc->get_version("osdmap", &c->latest, NULL, c);
2248 }
2249 }
2250
2251 // read | write ---------------------------
2252
2253 void Objecter::op_submit(Op *op, ceph_tid_t *ptid, int *ctx_budget)
2254 {
2255 shunique_lock rl(rwlock, ceph::acquire_shared);
2256 ceph_tid_t tid = 0;
2257 if (!ptid)
2258 ptid = &tid;
2259 op->trace.event("op submit");
2260 _op_submit_with_budget(op, rl, ptid, ctx_budget);
2261 }
2262
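// Note on ctx-budgeted ops: callers that manage one budget per logical
// context (e.g. object listing) submit their first op with *ctx_budget == -1;
// that op takes a budget below and publishes it through *ctx_budget, and
// follow-up ops with op->ctx_budgeted set reuse it and skip _take_op_budget().
// The context owner releases the reservation itself with
// put_op_budget_bytes() (see put_nlist_context_budget()).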
2263 void Objecter::_op_submit_with_budget(Op *op, shunique_lock& sul,
2264 ceph_tid_t *ptid,
2265 int *ctx_budget)
2266 {
2267 assert(initialized);
2268
2269 assert(op->ops.size() == op->out_bl.size());
2270 assert(op->ops.size() == op->out_rval.size());
2271 assert(op->ops.size() == op->out_handler.size());
2272
2273 // Throttle before we look at any state, because
2274 // _take_op_budget() may drop our lock while it blocks.
2275 if (!op->ctx_budgeted || (ctx_budget && (*ctx_budget == -1))) {
2276 int op_budget = _take_op_budget(op, sul);
2277 // take and pass out the budget for the first OP
2278 // in the context session
2279 if (ctx_budget && (*ctx_budget == -1)) {
2280 *ctx_budget = op_budget;
2281 }
2282 }
2283
2284 if (osd_timeout > timespan(0)) {
2285 if (op->tid == 0)
2286 op->tid = ++last_tid;
2287 auto tid = op->tid;
2288 op->ontimeout = timer.add_event(osd_timeout,
2289 [this, tid]() {
2290 op_cancel(tid, -ETIMEDOUT); });
2291 }
2292
2293 _op_submit(op, sul, ptid);
2294 }
2295
2296 void Objecter::_send_op_account(Op *op)
2297 {
2298 inflight_ops++;
2299
2300 // add to gather set(s)
2301 if (op->onfinish) {
2302 num_in_flight++;
2303 } else {
2304 ldout(cct, 20) << " note: not requesting reply" << dendl;
2305 }
2306
2307 logger->inc(l_osdc_op_active);
2308 logger->inc(l_osdc_op);
2309
2310 if ((op->target.flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)) ==
2311 (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE))
2312 logger->inc(l_osdc_op_rmw);
2313 else if (op->target.flags & CEPH_OSD_FLAG_WRITE)
2314 logger->inc(l_osdc_op_w);
2315 else if (op->target.flags & CEPH_OSD_FLAG_READ)
2316 logger->inc(l_osdc_op_r);
2317
2318 if (op->target.flags & CEPH_OSD_FLAG_PGOP)
2319 logger->inc(l_osdc_op_pg);
2320
2321 for (vector<OSDOp>::iterator p = op->ops.begin(); p != op->ops.end(); ++p) {
2322 int code = l_osdc_osdop_other;
2323 switch (p->op.op) {
2324 case CEPH_OSD_OP_STAT: code = l_osdc_osdop_stat; break;
2325 case CEPH_OSD_OP_CREATE: code = l_osdc_osdop_create; break;
2326 case CEPH_OSD_OP_READ: code = l_osdc_osdop_read; break;
2327 case CEPH_OSD_OP_WRITE: code = l_osdc_osdop_write; break;
2328 case CEPH_OSD_OP_WRITEFULL: code = l_osdc_osdop_writefull; break;
2329 case CEPH_OSD_OP_WRITESAME: code = l_osdc_osdop_writesame; break;
2330 case CEPH_OSD_OP_APPEND: code = l_osdc_osdop_append; break;
2331 case CEPH_OSD_OP_ZERO: code = l_osdc_osdop_zero; break;
2332 case CEPH_OSD_OP_TRUNCATE: code = l_osdc_osdop_truncate; break;
2333 case CEPH_OSD_OP_DELETE: code = l_osdc_osdop_delete; break;
2334 case CEPH_OSD_OP_MAPEXT: code = l_osdc_osdop_mapext; break;
2335 case CEPH_OSD_OP_SPARSE_READ: code = l_osdc_osdop_sparse_read; break;
2336 case CEPH_OSD_OP_GETXATTR: code = l_osdc_osdop_getxattr; break;
2337 case CEPH_OSD_OP_SETXATTR: code = l_osdc_osdop_setxattr; break;
2338 case CEPH_OSD_OP_CMPXATTR: code = l_osdc_osdop_cmpxattr; break;
2339 case CEPH_OSD_OP_RMXATTR: code = l_osdc_osdop_rmxattr; break;
2340 case CEPH_OSD_OP_RESETXATTRS: code = l_osdc_osdop_resetxattrs; break;
2341 case CEPH_OSD_OP_TMAPUP: code = l_osdc_osdop_tmap_up; break;
2342 case CEPH_OSD_OP_TMAPPUT: code = l_osdc_osdop_tmap_put; break;
2343 case CEPH_OSD_OP_TMAPGET: code = l_osdc_osdop_tmap_get; break;
2344
2345 // OMAP read operations
2346 case CEPH_OSD_OP_OMAPGETVALS:
2347 case CEPH_OSD_OP_OMAPGETKEYS:
2348 case CEPH_OSD_OP_OMAPGETHEADER:
2349 case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
2350 case CEPH_OSD_OP_OMAP_CMP: code = l_osdc_osdop_omap_rd; break;
2351
2352 // OMAP write operations
2353 case CEPH_OSD_OP_OMAPSETVALS:
2354 case CEPH_OSD_OP_OMAPSETHEADER: code = l_osdc_osdop_omap_wr; break;
2355
2356 // OMAP del operations
2357 case CEPH_OSD_OP_OMAPCLEAR:
2358 case CEPH_OSD_OP_OMAPRMKEYS: code = l_osdc_osdop_omap_del; break;
2359
2360 case CEPH_OSD_OP_CALL: code = l_osdc_osdop_call; break;
2361 case CEPH_OSD_OP_WATCH: code = l_osdc_osdop_watch; break;
2362 case CEPH_OSD_OP_NOTIFY: code = l_osdc_osdop_notify; break;
2363 }
2364 if (code)
2365 logger->inc(code);
2366 }
2367 }
2368
2369 void Objecter::_op_submit(Op *op, shunique_lock& sul, ceph_tid_t *ptid)
2370 {
2371 // rwlock is locked
2372
2373 ldout(cct, 10) << __func__ << " op " << op << dendl;
2374
2375 // pick target
2376 assert(op->session == NULL);
2377 OSDSession *s = NULL;
2378
2379 bool check_for_latest_map = _calc_target(&op->target, nullptr)
2380 == RECALC_OP_TARGET_POOL_DNE;
2381
2382 // Try to get a session, including a retry if we need to take the write lock
2383 int r = _get_session(op->target.osd, &s, sul);
2384 if (r == -EAGAIN ||
2385 (check_for_latest_map && sul.owns_lock_shared())) {
2386 epoch_t orig_epoch = osdmap->get_epoch();
2387 sul.unlock();
2388 if (cct->_conf->objecter_debug_inject_relock_delay) {
2389 sleep(1);
2390 }
2391 sul.lock();
2392 if (orig_epoch != osdmap->get_epoch()) {
2393 // map changed; recalculate mapping
2394 ldout(cct, 10) << __func__ << " relock raced with osdmap, recalc target"
2395 << dendl;
2396 check_for_latest_map = _calc_target(&op->target, nullptr)
2397 == RECALC_OP_TARGET_POOL_DNE;
2398 if (s) {
2399 put_session(s);
2400 s = NULL;
2401 r = -EAGAIN;
2402 }
2403 }
2404 }
2405 if (r == -EAGAIN) {
2406 assert(s == NULL);
2407 r = _get_session(op->target.osd, &s, sul);
2408 }
2409 assert(r == 0);
2410 assert(s); // may be homeless
2411
2412 _send_op_account(op);
2413
2414 // send?
2415
2416 assert(op->target.flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE));
2417
2418 if (osdmap_full_try) {
2419 op->target.flags |= CEPH_OSD_FLAG_FULL_TRY;
2420 }
2421
2422 bool need_send = false;
2423
2424 if (osdmap->get_epoch() < epoch_barrier) {
2425 ldout(cct, 10) << " barrier, paused " << op << " tid " << op->tid
2426 << dendl;
2427 op->target.paused = true;
2428 _maybe_request_map();
2429 } else if ((op->target.flags & CEPH_OSD_FLAG_WRITE) &&
2430 osdmap->test_flag(CEPH_OSDMAP_PAUSEWR)) {
2431 ldout(cct, 10) << " paused modify " << op << " tid " << op->tid
2432 << dendl;
2433 op->target.paused = true;
2434 _maybe_request_map();
2435 } else if ((op->target.flags & CEPH_OSD_FLAG_READ) &&
2436 osdmap->test_flag(CEPH_OSDMAP_PAUSERD)) {
2437 ldout(cct, 10) << " paused read " << op << " tid " << op->tid
2438 << dendl;
2439 op->target.paused = true;
2440 _maybe_request_map();
2441 } else if (op->respects_full() &&
2442 (_osdmap_full_flag() ||
2443 _osdmap_pool_full(op->target.base_oloc.pool))) {
2444 ldout(cct, 0) << " FULL, paused modify " << op << " tid "
2445 << op->tid << dendl;
2446 op->target.paused = true;
2447 _maybe_request_map();
2448 } else if (!s->is_homeless()) {
2449 need_send = true;
2450 } else {
2451 _maybe_request_map();
2452 }
2453
2454 MOSDOp *m = NULL;
2455 if (need_send) {
2456 m = _prepare_osd_op(op);
2457 }
2458
2459 OSDSession::unique_lock sl(s->lock);
2460 if (op->tid == 0)
2461 op->tid = ++last_tid;
2462
2463 ldout(cct, 10) << "_op_submit oid " << op->target.base_oid
2464 << " '" << op->target.base_oloc << "' '"
2465 << op->target.target_oloc << "' " << op->ops << " tid "
2466 << op->tid << " osd." << (!s->is_homeless() ? s->osd : -1)
2467 << dendl;
2468
2469 _session_op_assign(s, op);
2470
2471 if (need_send) {
2472 _send_op(op, m);
2473 }
2474
2475 // Last chance to touch the Op here: once we give up the session lock it
2476 // can be freed at any time by the response handler.
2477 ceph_tid_t tid = op->tid;
2478 if (check_for_latest_map) {
2479 _send_op_map_check(op);
2480 }
2481 if (ptid)
2482 *ptid = tid;
2483 op = NULL;
2484
2485 sl.unlock();
2486 put_session(s);
2487
2488 ldout(cct, 5) << num_in_flight << " in flight" << dendl;
2489 }
2490
2491 int Objecter::op_cancel(OSDSession *s, ceph_tid_t tid, int r)
2492 {
2493 assert(initialized);
2494
2495 OSDSession::unique_lock sl(s->lock);
2496
2497 map<ceph_tid_t, Op*>::iterator p = s->ops.find(tid);
2498 if (p == s->ops.end()) {
2499 ldout(cct, 10) << __func__ << " tid " << tid << " dne in session "
2500 << s->osd << dendl;
2501 return -ENOENT;
2502 }
2503
2504 if (s->con) {
2505 ldout(cct, 20) << " revoking rx buffer for " << tid
2506 << " on " << s->con << dendl;
2507 s->con->revoke_rx_buffer(tid);
2508 }
2509
2510 ldout(cct, 10) << __func__ << " tid " << tid << " in session " << s->osd
2511 << dendl;
2512 Op *op = p->second;
2513 if (op->onfinish) {
2514 num_in_flight--;
2515 op->onfinish->complete(r);
2516 op->onfinish = NULL;
2517 }
2518 _op_cancel_map_check(op);
2519 _finish_op(op, r);
2520 sl.unlock();
2521
2522 return 0;
2523 }
2524
2525 int Objecter::op_cancel(ceph_tid_t tid, int r)
2526 {
2527 int ret = 0;
2528
2529 unique_lock wl(rwlock);
2530 ret = _op_cancel(tid, r);
2531
2532 return ret;
2533 }
2534
2535 int Objecter::_op_cancel(ceph_tid_t tid, int r)
2536 {
2537 int ret = 0;
2538
2539 ldout(cct, 5) << __func__ << ": cancelling tid " << tid << " r=" << r
2540 << dendl;
2541
2542 start:
2543
2544 for (map<int, OSDSession *>::iterator siter = osd_sessions.begin();
2545 siter != osd_sessions.end(); ++siter) {
2546 OSDSession *s = siter->second;
2547 OSDSession::shared_lock sl(s->lock);
2548 if (s->ops.find(tid) != s->ops.end()) {
2549 sl.unlock();
2550 ret = op_cancel(s, tid, r);
2551 if (ret == -ENOENT) {
2552 /* oh no! raced, maybe tid moved to another session, restarting */
2553 goto start;
2554 }
2555 return ret;
2556 }
2557 }
2558
2559 ldout(cct, 5) << __func__ << ": tid " << tid
2560 << " not found in live sessions" << dendl;
2561
2562 // Handle case where the op is in homeless session
2563 OSDSession::shared_lock sl(homeless_session->lock);
2564 if (homeless_session->ops.find(tid) != homeless_session->ops.end()) {
2565 sl.unlock();
2566 ret = op_cancel(homeless_session, tid, r);
2567 if (ret == -ENOENT) {
2568 /* oh no! raced, maybe tid moved to another session, restarting */
2569 goto start;
2570 } else {
2571 return ret;
2572 }
2573 } else {
2574 sl.unlock();
2575 }
2576
2577 ldout(cct, 5) << __func__ << ": tid " << tid
2578 << " not found in homeless session" << dendl;
2579
2580 return ret;
2581 }
2582
2583
2584 epoch_t Objecter::op_cancel_writes(int r, int64_t pool)
2585 {
2586 unique_lock wl(rwlock);
2587
2588 std::vector<ceph_tid_t> to_cancel;
2589 bool found = false;
2590
2591 for (map<int, OSDSession *>::iterator siter = osd_sessions.begin();
2592 siter != osd_sessions.end(); ++siter) {
2593 OSDSession *s = siter->second;
2594 OSDSession::shared_lock sl(s->lock);
2595 for (map<ceph_tid_t, Op*>::iterator op_i = s->ops.begin();
2596 op_i != s->ops.end(); ++op_i) {
2597 if (op_i->second->target.flags & CEPH_OSD_FLAG_WRITE
2598 && (pool == -1 || op_i->second->target.target_oloc.pool == pool)) {
2599 to_cancel.push_back(op_i->first);
2600 }
2601 }
2602 sl.unlock();
2603
2604 for (std::vector<ceph_tid_t>::iterator titer = to_cancel.begin();
2605 titer != to_cancel.end();
2606 ++titer) {
2607 int cancel_result = op_cancel(s, *titer, r);
2608 // We hold rwlock across search and cancellation, so cancels
2609 // should always succeed
2610 assert(cancel_result == 0);
2611 }
2612 if (!found && to_cancel.size())
2613 found = true;
2614 to_cancel.clear();
2615 }
2616
2617 const epoch_t epoch = osdmap->get_epoch();
2618
2619 wl.unlock();
2620
2621 if (found) {
2622 return epoch;
2623 } else {
2624 return -1;
2625 }
2626 }
2627
2628 bool Objecter::is_pg_changed(
2629 int oldprimary,
2630 const vector<int>& oldacting,
2631 int newprimary,
2632 const vector<int>& newacting,
2633 bool any_change)
2634 {
2635 if (OSDMap::primary_changed(
2636 oldprimary,
2637 oldacting,
2638 newprimary,
2639 newacting))
2640 return true;
2641 if (any_change && oldacting != newacting)
2642 return true;
2643 return false; // same primary (tho replicas may have changed)
2644 }
2645
2646 bool Objecter::target_should_be_paused(op_target_t *t)
2647 {
2648 const pg_pool_t *pi = osdmap->get_pg_pool(t->base_oloc.pool);
2649 bool pauserd = osdmap->test_flag(CEPH_OSDMAP_PAUSERD);
2650 bool pausewr = osdmap->test_flag(CEPH_OSDMAP_PAUSEWR) ||
2651 _osdmap_full_flag() || _osdmap_pool_full(*pi);
2652
2653 return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
2654 (t->flags & CEPH_OSD_FLAG_WRITE && pausewr) ||
2655 (osdmap->get_epoch() < epoch_barrier);
2656 }
2657
2658 /**
2659 * Locking public accessor for _osdmap_full_flag
2660 */
2661 bool Objecter::osdmap_full_flag() const
2662 {
2663 shared_lock rl(rwlock);
2664
2665 return _osdmap_full_flag();
2666 }
2667
2668 bool Objecter::osdmap_pool_full(const int64_t pool_id) const
2669 {
2670 shared_lock rl(rwlock);
2671
2672 if (_osdmap_full_flag()) {
2673 return true;
2674 }
2675
2676 return _osdmap_pool_full(pool_id);
2677 }
2678
2679 bool Objecter::_osdmap_pool_full(const int64_t pool_id) const
2680 {
2681 const pg_pool_t *pool = osdmap->get_pg_pool(pool_id);
2682 if (pool == NULL) {
2683 ldout(cct, 4) << __func__ << ": DNE pool " << pool_id << dendl;
2684 return false;
2685 }
2686
2687 return _osdmap_pool_full(*pool);
2688 }
2689
2690 bool Objecter::_osdmap_has_pool_full() const
2691 {
2692 for (map<int64_t, pg_pool_t>::const_iterator it
2693 = osdmap->get_pools().begin();
2694 it != osdmap->get_pools().end(); ++it) {
2695 if (_osdmap_pool_full(it->second))
2696 return true;
2697 }
2698 return false;
2699 }
2700
2701 bool Objecter::_osdmap_pool_full(const pg_pool_t &p) const
2702 {
2703 return p.has_flag(pg_pool_t::FLAG_FULL) && honor_osdmap_full;
2704 }
2705
2706 /**
2707 * Wrapper around osdmap->test_flag for special handling of the FULL flag.
2708 */
2709 bool Objecter::_osdmap_full_flag() const
2710 {
2711 // Ignore the FULL flag if the caller has not set honor_osdmap_full
2712 return osdmap->test_flag(CEPH_OSDMAP_FULL) && honor_osdmap_full;
2713 }
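// Note: when honor_osdmap_full is false this returns false even while the
// cluster-wide FULL flag is set; such ops are instead sent with
// CEPH_OSD_FLAG_FULL_FORCE (see _prepare_osd_op()) and fullness is handled
// on the OSD side.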
2714
2715 void Objecter::update_pool_full_map(map<int64_t, bool>& pool_full_map)
2716 {
2717 for (map<int64_t, pg_pool_t>::const_iterator it
2718 = osdmap->get_pools().begin();
2719 it != osdmap->get_pools().end(); ++it) {
2720 if (pool_full_map.find(it->first) == pool_full_map.end()) {
2721 pool_full_map[it->first] = _osdmap_pool_full(it->second);
2722 } else {
2723 pool_full_map[it->first] = _osdmap_pool_full(it->second) ||
2724 pool_full_map[it->first];
2725 }
2726 }
2727 }
2728
2729 int64_t Objecter::get_object_hash_position(int64_t pool, const string& key,
2730 const string& ns)
2731 {
2732 shared_lock rl(rwlock);
2733 const pg_pool_t *p = osdmap->get_pg_pool(pool);
2734 if (!p)
2735 return -ENOENT;
2736 return p->hash_key(key, ns);
2737 }
2738
2739 int64_t Objecter::get_object_pg_hash_position(int64_t pool, const string& key,
2740 const string& ns)
2741 {
2742 shared_lock rl(rwlock);
2743 const pg_pool_t *p = osdmap->get_pg_pool(pool);
2744 if (!p)
2745 return -ENOENT;
2746 return p->raw_hash_to_pg(p->hash_key(key, ns));
2747 }
2748
2749 int Objecter::_calc_target(op_target_t *t, Connection *con, bool any_change)
2750 {
2751 // rwlock is locked
2752 bool is_read = t->flags & CEPH_OSD_FLAG_READ;
2753 bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
2754 t->epoch = osdmap->get_epoch();
2755 ldout(cct,20) << __func__ << " epoch " << t->epoch
2756 << " base " << t->base_oid << " " << t->base_oloc
2757 << " precalc_pgid " << (int)t->precalc_pgid
2758 << " pgid " << t->base_pgid
2759 << (is_read ? " is_read" : "")
2760 << (is_write ? " is_write" : "")
2761 << dendl;
2762
2763 const pg_pool_t *pi = osdmap->get_pg_pool(t->base_oloc.pool);
2764 if (!pi) {
2765 t->osd = -1;
2766 return RECALC_OP_TARGET_POOL_DNE;
2767 }
2768 ldout(cct,30) << __func__ << " base pi " << pi
2769 << " pg_num " << pi->get_pg_num() << dendl;
2770
2771 bool force_resend = false;
2772 if (osdmap->get_epoch() == pi->last_force_op_resend) {
2773 if (t->last_force_resend < pi->last_force_op_resend) {
2774 t->last_force_resend = pi->last_force_op_resend;
2775 force_resend = true;
2776 } else if (t->last_force_resend == 0) {
2777 force_resend = true;
2778 }
2779 }
2780
2781 // apply tiering
2782 t->target_oid = t->base_oid;
2783 t->target_oloc = t->base_oloc;
2784 if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
2785 if (is_read && pi->has_read_tier())
2786 t->target_oloc.pool = pi->read_tier;
2787 if (is_write && pi->has_write_tier())
2788 t->target_oloc.pool = pi->write_tier;
2789 pi = osdmap->get_pg_pool(t->target_oloc.pool);
2790 if (!pi) {
2791 t->osd = -1;
2792 return RECALC_OP_TARGET_POOL_DNE;
2793 }
2794 }
2795
2796 pg_t pgid;
2797 if (t->precalc_pgid) {
2798 assert(t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY);
2799 assert(t->base_oid.name.empty()); // make sure this is a pg op
2800 assert(t->base_oloc.pool == (int64_t)t->base_pgid.pool());
2801 pgid = t->base_pgid;
2802 } else {
2803 int ret = osdmap->object_locator_to_pg(t->target_oid, t->target_oloc,
2804 pgid);
2805 if (ret == -ENOENT) {
2806 t->osd = -1;
2807 return RECALC_OP_TARGET_POOL_DNE;
2808 }
2809 }
2810 ldout(cct,20) << __func__ << " target " << t->target_oid << " "
2811 << t->target_oloc << " -> pgid " << pgid << dendl;
2812 ldout(cct,30) << __func__ << " target pi " << pi
2813 << " pg_num " << pi->get_pg_num() << dendl;
2814 t->pool_ever_existed = true;
2815
2816 int size = pi->size;
2817 int min_size = pi->min_size;
2818 unsigned pg_num = pi->get_pg_num();
2819 int up_primary, acting_primary;
2820 vector<int> up, acting;
2821 osdmap->pg_to_up_acting_osds(pgid, &up, &up_primary,
2822 &acting, &acting_primary);
2823 bool sort_bitwise = osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE);
2824 unsigned prev_seed = ceph_stable_mod(pgid.ps(), t->pg_num, t->pg_num_mask);
2825 pg_t prev_pgid(prev_seed, pgid.pool());
2826 if (any_change && PastIntervals::is_new_interval(
2827 t->acting_primary,
2828 acting_primary,
2829 t->acting,
2830 acting,
2831 t->up_primary,
2832 up_primary,
2833 t->up,
2834 up,
2835 t->size,
2836 size,
2837 t->min_size,
2838 min_size,
2839 t->pg_num,
2840 pg_num,
2841 t->sort_bitwise,
2842 sort_bitwise,
2843 prev_pgid)) {
2844 force_resend = true;
2845 }
2846
2847 bool unpaused = false;
2848 if (t->paused && !target_should_be_paused(t)) {
2849 t->paused = false;
2850 unpaused = true;
2851 }
2852
2853 bool legacy_change =
2854 t->pgid != pgid ||
2855 is_pg_changed(
2856 t->acting_primary, t->acting, acting_primary, acting,
2857 t->used_replica || any_change);
2858 bool split = false;
2859 if (t->pg_num) {
2860 split = prev_pgid.is_split(t->pg_num, pg_num, nullptr);
2861 }
2862
2863 if (legacy_change || split || force_resend) {
2864 t->pgid = pgid;
2865 t->acting = acting;
2866 t->acting_primary = acting_primary;
2867 t->up_primary = up_primary;
2868 t->up = up;
2869 t->size = size;
2870 t->min_size = min_size;
2871 t->pg_num = pg_num;
2872 t->pg_num_mask = pi->get_pg_num_mask();
2873 osdmap->get_primary_shard(
2874 pg_t(ceph_stable_mod(pgid.ps(), t->pg_num, t->pg_num_mask), pgid.pool()),
2875 &t->actual_pgid);
2876 t->sort_bitwise = sort_bitwise;
2877 ldout(cct, 10) << __func__ << " "
2878 << " raw pgid " << pgid << " -> actual " << t->actual_pgid
2879 << " acting " << acting
2880 << " primary " << acting_primary << dendl;
2881 t->used_replica = false;
2882 if (acting_primary == -1) {
2883 t->osd = -1;
2884 } else {
2885 int osd;
2886 bool read = is_read && !is_write;
2887 if (read && (t->flags & CEPH_OSD_FLAG_BALANCE_READS)) {
2888 int p = rand() % acting.size();
2889 if (p)
2890 t->used_replica = true;
2891 osd = acting[p];
2892 ldout(cct, 10) << " chose random osd." << osd << " of " << acting
2893 << dendl;
2894 } else if (read && (t->flags & CEPH_OSD_FLAG_LOCALIZE_READS) &&
2895 acting.size() > 1) {
2896 // look for a local replica. prefer the primary if the
2897 // distance is the same.
2898 int best = -1;
2899 int best_locality = 0;
2900 for (unsigned i = 0; i < acting.size(); ++i) {
2901 int locality = osdmap->crush->get_common_ancestor_distance(
2902 cct, acting[i], crush_location);
2903 ldout(cct, 20) << __func__ << " localize: rank " << i
2904 << " osd." << acting[i]
2905 << " locality " << locality << dendl;
2906 if (i == 0 ||
2907 (locality >= 0 && best_locality >= 0 &&
2908 locality < best_locality) ||
2909 (best_locality < 0 && locality >= 0)) {
2910 best = i;
2911 best_locality = locality;
2912 if (i)
2913 t->used_replica = true;
2914 }
2915 }
2916 assert(best >= 0);
2917 osd = acting[best];
2918 } else {
2919 osd = acting_primary;
2920 }
2921 t->osd = osd;
2922 }
2923 }
2924 if (legacy_change || unpaused || force_resend) {
2925 return RECALC_OP_TARGET_NEED_RESEND;
2926 }
2927 if (split && con && con->has_features(CEPH_FEATUREMASK_RESEND_ON_SPLIT)) {
2928 return RECALC_OP_TARGET_NEED_RESEND;
2929 }
2930 return RECALC_OP_TARGET_NO_ACTION;
2931 }
2932
2933 int Objecter::_map_session(op_target_t *target, OSDSession **s,
2934 shunique_lock& sul)
2935 {
2936 _calc_target(target, nullptr);
2937 return _get_session(target->osd, s, sul);
2938 }
2939
2940 void Objecter::_session_op_assign(OSDSession *to, Op *op)
2941 {
2942 // to->lock is locked
2943 assert(op->session == NULL);
2944 assert(op->tid);
2945
2946 get_session(to);
2947 op->session = to;
2948 to->ops[op->tid] = op;
2949
2950 if (to->is_homeless()) {
2951 num_homeless_ops++;
2952 }
2953
2954 ldout(cct, 15) << __func__ << " " << to->osd << " " << op->tid << dendl;
2955 }
2956
2957 void Objecter::_session_op_remove(OSDSession *from, Op *op)
2958 {
2959 assert(op->session == from);
2960 // from->lock is locked
2961
2962 if (from->is_homeless()) {
2963 num_homeless_ops--;
2964 }
2965
2966 from->ops.erase(op->tid);
2967 put_session(from);
2968 op->session = NULL;
2969
2970 ldout(cct, 15) << __func__ << " " << from->osd << " " << op->tid << dendl;
2971 }
2972
2973 void Objecter::_session_linger_op_assign(OSDSession *to, LingerOp *op)
2974 {
2975 // to lock is locked unique
2976 assert(op->session == NULL);
2977
2978 if (to->is_homeless()) {
2979 num_homeless_ops++;
2980 }
2981
2982 get_session(to);
2983 op->session = to;
2984 to->linger_ops[op->linger_id] = op;
2985
2986 ldout(cct, 15) << __func__ << " " << to->osd << " " << op->linger_id
2987 << dendl;
2988 }
2989
2990 void Objecter::_session_linger_op_remove(OSDSession *from, LingerOp *op)
2991 {
2992 assert(from == op->session);
2993 // from->lock is locked unique
2994
2995 if (from->is_homeless()) {
2996 num_homeless_ops--;
2997 }
2998
2999 from->linger_ops.erase(op->linger_id);
3000 put_session(from);
3001 op->session = NULL;
3002
3003 ldout(cct, 15) << __func__ << " " << from->osd << " " << op->linger_id
3004 << dendl;
3005 }
3006
3007 void Objecter::_session_command_op_remove(OSDSession *from, CommandOp *op)
3008 {
3009 assert(from == op->session);
3010 // from->lock is locked
3011
3012 if (from->is_homeless()) {
3013 num_homeless_ops--;
3014 }
3015
3016 from->command_ops.erase(op->tid);
3017 put_session(from);
3018 op->session = NULL;
3019
3020 ldout(cct, 15) << __func__ << " " << from->osd << " " << op->tid << dendl;
3021 }
3022
3023 void Objecter::_session_command_op_assign(OSDSession *to, CommandOp *op)
3024 {
3025 // to->lock is locked
3026 assert(op->session == NULL);
3027 assert(op->tid);
3028
3029 if (to->is_homeless()) {
3030 num_homeless_ops++;
3031 }
3032
3033 get_session(to);
3034 op->session = to;
3035 to->command_ops[op->tid] = op;
3036
3037 ldout(cct, 15) << __func__ << " " << to->osd << " " << op->tid << dendl;
3038 }
3039
3040 int Objecter::_recalc_linger_op_target(LingerOp *linger_op,
3041 shunique_lock& sul)
3042 {
3043 // rwlock is locked unique
3044
3045 int r = _calc_target(&linger_op->target, nullptr, true);
3046 if (r == RECALC_OP_TARGET_NEED_RESEND) {
3047 ldout(cct, 10) << "recalc_linger_op_target tid " << linger_op->linger_id
3048 << " pgid " << linger_op->target.pgid
3049 << " acting " << linger_op->target.acting << dendl;
3050
3051 OSDSession *s = NULL;
3052 r = _get_session(linger_op->target.osd, &s, sul);
3053 assert(r == 0);
3054
3055 if (linger_op->session != s) {
3056 // NB locking two sessions (s and linger_op->session) at the
3057 // same time here is only safe because we are the only one that
3058 // takes two, and we are holding rwlock for write. Disable
3059 // lockdep because it doesn't know that.
3060 OSDSession::unique_lock sl(s->lock);
3061 _session_linger_op_remove(linger_op->session, linger_op);
3062 _session_linger_op_assign(s, linger_op);
3063 }
3064
3065 put_session(s);
3066 return RECALC_OP_TARGET_NEED_RESEND;
3067 }
3068 return r;
3069 }
3070
3071 void Objecter::_cancel_linger_op(Op *op)
3072 {
3073 ldout(cct, 15) << "cancel_op " << op->tid << dendl;
3074
3075 assert(!op->should_resend);
3076 if (op->onfinish) {
3077 delete op->onfinish;
3078 num_in_flight--;
3079 }
3080
3081 _finish_op(op, 0);
3082 }
3083
3084 void Objecter::_finish_op(Op *op, int r)
3085 {
3086 ldout(cct, 15) << "finish_op " << op->tid << dendl;
3087
3088 // op->session->lock is locked unique or op->session is null
3089
3090 if (!op->ctx_budgeted && op->budgeted)
3091 put_op_budget(op);
3092
3093 if (op->ontimeout && r != -ETIMEDOUT)
3094 timer.cancel_event(op->ontimeout);
3095
3096 if (op->session) {
3097 _session_op_remove(op->session, op);
3098 }
3099
3100 logger->dec(l_osdc_op_active);
3101
3102 assert(check_latest_map_ops.find(op->tid) == check_latest_map_ops.end());
3103
3104 inflight_ops--;
3105
3106 op->put();
3107 }
3108
3109 void Objecter::finish_op(OSDSession *session, ceph_tid_t tid)
3110 {
3111 ldout(cct, 15) << "finish_op " << tid << dendl;
3112 shared_lock rl(rwlock);
3113
3114 OSDSession::unique_lock wl(session->lock);
3115
3116 map<ceph_tid_t, Op *>::iterator iter = session->ops.find(tid);
3117 if (iter == session->ops.end())
3118 return;
3119
3120 Op *op = iter->second;
3121
3122 _finish_op(op, 0);
3123 }
3124
3125 MOSDOp *Objecter::_prepare_osd_op(Op *op)
3126 {
3127 // rwlock is locked
3128
3129 int flags = op->target.flags;
3130 flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
3131
3132 // Nothing checks this any longer, but needed for compatibility with
3133 // pre-luminous osds
3134 flags |= CEPH_OSD_FLAG_ONDISK;
3135
3136 if (!honor_osdmap_full)
3137 flags |= CEPH_OSD_FLAG_FULL_FORCE;
3138
3139 op->target.paused = false;
3140 op->stamp = ceph::mono_clock::now();
3141
3142 hobject_t hobj = op->target.get_hobj();
3143 MOSDOp *m = new MOSDOp(client_inc, op->tid,
3144 hobj, op->target.actual_pgid,
3145 osdmap->get_epoch(),
3146 flags, op->features);
3147
3148 m->set_snapid(op->snapid);
3149 m->set_snap_seq(op->snapc.seq);
3150 m->set_snaps(op->snapc.snaps);
3151
3152 m->ops = op->ops;
3153 m->set_mtime(op->mtime);
3154 m->set_retry_attempt(op->attempts++);
3155
3156 if (!op->trace.valid() && cct->_conf->osdc_blkin_trace_all) {
3157 op->trace.init("op", &trace_endpoint);
3158 }
3159
3160 if (op->priority)
3161 m->set_priority(op->priority);
3162 else
3163 m->set_priority(cct->_conf->osd_client_op_priority);
3164
3165 if (op->reqid != osd_reqid_t()) {
3166 m->set_reqid(op->reqid);
3167 }
3168
3169 logger->inc(l_osdc_op_send);
3170 logger->inc(l_osdc_op_send_bytes, m->get_data().length());
3171
3172 return m;
3173 }
3174
3175 void Objecter::_send_op(Op *op, MOSDOp *m)
3176 {
3177 // rwlock is locked
3178 // op->session->lock is locked
3179
3180 // backoff?
3181 hobject_t hoid = op->target.get_hobj();
3182 auto p = op->session->backoffs.find(op->target.actual_pgid);
3183 if (p != op->session->backoffs.end()) {
3184 auto q = p->second.lower_bound(hoid);
3185 if (q != p->second.begin()) {
3186 --q;
3187 if (hoid >= q->second.end) {
3188 ++q;
3189 }
3190 }
3191 if (q != p->second.end()) {
3192 ldout(cct, 20) << __func__ << " ? " << q->first << " [" << q->second.begin
3193 << "," << q->second.end << ")" << dendl;
3194 int r = cmp(hoid, q->second.begin);
3195 if (r == 0 || (r > 0 && hoid < q->second.end)) {
3196 ldout(cct, 10) << __func__ << " backoff " << op->target.actual_pgid
3197 << " id " << q->second.id << " on " << hoid
3198 << ", queuing " << op << " tid " << op->tid << dendl;
3199 return;
3200 }
3201 }
3202 }
3203
3204 if (!m) {
3205 assert(op->tid > 0);
3206 m = _prepare_osd_op(op);
3207 }
3208
3209 if (op->target.actual_pgid != m->get_spg()) {
3210 ldout(cct, 10) << __func__ << " " << op->tid << " pgid change from "
3211 << m->get_spg() << " to " << op->target.actual_pgid
3212 << ", updating and reencoding" << dendl;
3213 m->set_spg(op->target.actual_pgid);
3214 m->clear_payload(); // reencode
3215 }
3216
3217 ldout(cct, 15) << "_send_op " << op->tid << " to "
3218 << op->target.actual_pgid << " on osd." << op->session->osd
3219 << dendl;
3220
3221 ConnectionRef con = op->session->con;
3222 assert(con);
3223
3224 // preallocated rx buffer?
3225 if (op->con) {
3226 ldout(cct, 20) << " revoking rx buffer for " << op->tid << " on "
3227 << op->con << dendl;
3228 op->con->revoke_rx_buffer(op->tid);
3229 }
3230 if (op->outbl &&
3231 op->ontimeout == 0 && // only post rx_buffer if no timeout; see #9582
3232 op->outbl->length()) {
3233 ldout(cct, 20) << " posting rx buffer for " << op->tid << " on " << con
3234 << dendl;
3235 op->con = con;
3236 op->con->post_rx_buffer(op->tid, *op->outbl);
3237 }
3238
3239 op->incarnation = op->session->incarnation;
3240
3241 m->set_tid(op->tid);
3242
3243 if (op->trace.valid()) {
3244 m->trace.init("op msg", nullptr, &op->trace);
3245 }
3246 op->session->con->send_message(m);
3247 }
3248
3249 int Objecter::calc_op_budget(Op *op)
3250 {
3251 int op_budget = 0;
3252 for (vector<OSDOp>::iterator i = op->ops.begin();
3253 i != op->ops.end();
3254 ++i) {
3255 if (i->op.op & CEPH_OSD_OP_MODE_WR) {
3256 op_budget += i->indata.length();
3257 } else if (ceph_osd_op_mode_read(i->op.op)) {
3258 if (ceph_osd_op_type_data(i->op.op)) {
3259 if ((int64_t)i->op.extent.length > 0)
3260 op_budget += (int64_t)i->op.extent.length;
3261 } else if (ceph_osd_op_type_attr(i->op.op)) {
3262 op_budget += i->op.xattr.name_len + i->op.xattr.value_len;
3263 }
3264 }
3265 }
3266 return op_budget;
3267 }
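// Worked example (illustrative sizes): an Op carrying one CEPH_OSD_OP_WRITE
// with 4 MiB of indata plus one CEPH_OSD_OP_READ with extent.length = 64 KiB
// is budgeted at 4194304 + 65536 = 4259840 bytes; _throttle_op() below then
// charges that amount against op_throttle_bytes (and 1 op against
// op_throttle_ops) before the submission may proceed.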
3268
3269 void Objecter::_throttle_op(Op *op,
3270 shunique_lock& sul,
3271 int op_budget)
3272 {
3273 assert(sul && sul.mutex() == &rwlock);
3274 bool locked_for_write = sul.owns_lock();
3275
3276 if (!op_budget)
3277 op_budget = calc_op_budget(op);
3278 if (!op_throttle_bytes.get_or_fail(op_budget)) { //couldn't take right now
3279 sul.unlock();
3280 op_throttle_bytes.get(op_budget);
3281 if (locked_for_write)
3282 sul.lock();
3283 else
3284 sul.lock_shared();
3285 }
3286 if (!op_throttle_ops.get_or_fail(1)) { //couldn't take right now
3287 sul.unlock();
3288 op_throttle_ops.get(1);
3289 if (locked_for_write)
3290 sul.lock();
3291 else
3292 sul.lock_shared();
3293 }
3294 }
3295
3296 void Objecter::unregister_op(Op *op)
3297 {
3298 OSDSession::unique_lock sl(op->session->lock);
3299 op->session->ops.erase(op->tid);
3300 sl.unlock();
3301 put_session(op->session);
3302 op->session = NULL;
3303
3304 inflight_ops--;
3305 }
3306
3307 /* This function DOES put the passed message before returning */
3308 void Objecter::handle_osd_op_reply(MOSDOpReply *m)
3309 {
3310 ldout(cct, 10) << "in handle_osd_op_reply" << dendl;
3311
3312 // look up the op this reply is for by tid
3313 ceph_tid_t tid = m->get_tid();
3314
3315 shunique_lock sul(rwlock, ceph::acquire_shared);
3316 if (!initialized) {
3317 m->put();
3318 return;
3319 }
3320
3321 ConnectionRef con = m->get_connection();
3322 OSDSession *s = static_cast<OSDSession*>(con->get_priv());
3323 if (!s || s->con != con) {
3324 ldout(cct, 7) << __func__ << " no session on con " << con << dendl;
3325 if (s) {
3326 s->put();
3327 }
3328 m->put();
3329 return;
3330 }
3331
3332 OSDSession::unique_lock sl(s->lock);
3333
3334 map<ceph_tid_t, Op *>::iterator iter = s->ops.find(tid);
3335 if (iter == s->ops.end()) {
3336 ldout(cct, 7) << "handle_osd_op_reply " << tid
3337 << (m->is_ondisk() ? " ondisk" : (m->is_onnvram() ?
3338 " onnvram" : " ack"))
3339 << " ... stray" << dendl;
3340 sl.unlock();
3341 put_session(s);
3342 m->put();
3343 return;
3344 }
3345
3346 ldout(cct, 7) << "handle_osd_op_reply " << tid
3347 << (m->is_ondisk() ? " ondisk" :
3348 (m->is_onnvram() ? " onnvram" : " ack"))
3349 << " uv " << m->get_user_version()
3350 << " in " << m->get_pg()
3351 << " attempt " << m->get_retry_attempt()
3352 << dendl;
3353 Op *op = iter->second;
3354 op->trace.event("osd op reply");
3355
3356 if (retry_writes_after_first_reply && op->attempts == 1 &&
3357 (op->target.flags & CEPH_OSD_FLAG_WRITE)) {
3358 ldout(cct, 7) << "retrying write after first reply: " << tid << dendl;
3359 if (op->onfinish) {
3360 num_in_flight--;
3361 }
3362 _session_op_remove(s, op);
3363 sl.unlock();
3364 put_session(s);
3365
3366 _op_submit(op, sul, NULL);
3367 m->put();
3368 return;
3369 }
3370
3371 if (m->get_retry_attempt() >= 0) {
3372 if (m->get_retry_attempt() != (op->attempts - 1)) {
3373 ldout(cct, 7) << " ignoring reply from attempt "
3374 << m->get_retry_attempt()
3375 << " from " << m->get_source_inst()
3376 << "; last attempt " << (op->attempts - 1) << " sent to "
3377 << op->session->con->get_peer_addr() << dendl;
3378 m->put();
3379 sl.unlock();
3380 put_session(s);
3381 return;
3382 }
3383 } else {
3384 // we don't know the request attempt because the server is old, so
3385 // just accept this one. we may do ACK callbacks we shouldn't
3386 // have, but that is better than doing callbacks out of order.
3387 }
3388
3389 Context *onfinish = 0;
3390
3391 int rc = m->get_result();
3392
3393 if (m->is_redirect_reply()) {
3394 ldout(cct, 5) << " got redirect reply; redirecting" << dendl;
3395 if (op->onfinish)
3396 num_in_flight--;
3397 _session_op_remove(s, op);
3398 sl.unlock();
3399 put_session(s);
3400
3401 // FIXME: two redirects could race and reorder
3402
3403 op->tid = 0;
3404 m->get_redirect().combine_with_locator(op->target.target_oloc,
3405 op->target.target_oid.name);
3406 op->target.flags |= CEPH_OSD_FLAG_REDIRECTED;
3407 _op_submit(op, sul, NULL);
3408 m->put();
3409 return;
3410 }
3411
3412 if (rc == -EAGAIN) {
3413 ldout(cct, 7) << " got -EAGAIN, resubmitting" << dendl;
3414
3415 // new tid
3416 s->ops.erase(op->tid);
3417 op->tid = ++last_tid;
3418
3419 _send_op(op);
3420 sl.unlock();
3421 put_session(s);
3422 m->put();
3423 return;
3424 }
3425
3426 sul.unlock();
3427
3428 if (op->objver)
3429 *op->objver = m->get_user_version();
3430 if (op->reply_epoch)
3431 *op->reply_epoch = m->get_map_epoch();
3432 if (op->data_offset)
3433 *op->data_offset = m->get_header().data_off;
3434
3435 // got data?
3436 if (op->outbl) {
3437 if (op->con)
3438 op->con->revoke_rx_buffer(op->tid);
3439 m->claim_data(*op->outbl);
3440 op->outbl = 0;
3441 }
3442
3443 // per-op result demuxing
3444 vector<OSDOp> out_ops;
3445 m->claim_ops(out_ops);
3446
3447 if (out_ops.size() != op->ops.size())
3448 ldout(cct, 0) << "WARNING: tid " << op->tid << " reply ops " << out_ops
3449 << " != request ops " << op->ops
3450 << " from " << m->get_source_inst() << dendl;
3451
3452 vector<bufferlist*>::iterator pb = op->out_bl.begin();
3453 vector<int*>::iterator pr = op->out_rval.begin();
3454 vector<Context*>::iterator ph = op->out_handler.begin();
3455 assert(op->out_bl.size() == op->out_rval.size());
3456 assert(op->out_bl.size() == op->out_handler.size());
3457 vector<OSDOp>::iterator p = out_ops.begin();
3458 for (unsigned i = 0;
3459 p != out_ops.end() && pb != op->out_bl.end();
3460 ++i, ++p, ++pb, ++pr, ++ph) {
3461 ldout(cct, 10) << " op " << i << " rval " << p->rval
3462 << " len " << p->outdata.length() << dendl;
3463 if (*pb)
3464 **pb = p->outdata;
3465 // set rval before running handlers so that handlers
3466 // can change it if e.g. decoding fails
3467 if (*pr)
3468 **pr = ceph_to_hostos_errno(p->rval);
3469 if (*ph) {
3470 ldout(cct, 10) << " op " << i << " handler " << *ph << dendl;
3471 (*ph)->complete(ceph_to_hostos_errno(p->rval));
3472 *ph = NULL;
3473 }
3474 }
3475
3476 // NOTE: since we only ever request ONDISK, we will only ever get back
3477 // one (type of) ack.
3478
3479 if (op->onfinish) {
3480 num_in_flight--;
3481 onfinish = op->onfinish;
3482 op->onfinish = NULL;
3483 }
3484 logger->inc(l_osdc_op_reply);
3485
3486 /* get it before we call _finish_op() */
3487 auto completion_lock = s->get_lock(op->target.base_oid);
3488
3489 ldout(cct, 15) << "handle_osd_op_reply completed tid " << tid << dendl;
3490 _finish_op(op, 0);
3491
3492 ldout(cct, 5) << num_in_flight << " in flight" << dendl;
3493
3494 // serialize completions
3495 if (completion_lock.mutex()) {
3496 completion_lock.lock();
3497 }
3498 sl.unlock();
3499
3500 // do callbacks
3501 if (onfinish) {
3502 onfinish->complete(rc);
3503 }
3504 if (completion_lock.mutex()) {
3505 completion_lock.unlock();
3506 }
3507
3508 m->put();
3509 put_session(s);
3510 }
3511
3512 void Objecter::handle_osd_backoff(MOSDBackoff *m)
3513 {
3514 ldout(cct, 10) << __func__ << " " << *m << dendl;
3515 shunique_lock sul(rwlock, ceph::acquire_shared);
3516 if (!initialized) {
3517 m->put();
3518 return;
3519 }
3520
3521 ConnectionRef con = m->get_connection();
3522 OSDSession *s = static_cast<OSDSession*>(con->get_priv());
3523 if (!s || s->con != con) {
3524 ldout(cct, 7) << __func__ << " no session on con " << con << dendl;
3525 if (s)
3526 s->put();
3527 m->put();
3528 return;
3529 }
3530
3531 get_session(s);
3532 s->put(); // from get_priv() above
3533
3534 OSDSession::unique_lock sl(s->lock);
3535
3536 switch (m->op) {
3537 case CEPH_OSD_BACKOFF_OP_BLOCK:
3538 {
3539 // register
3540 OSDBackoff& b = s->backoffs[m->pgid][m->begin];
3541 s->backoffs_by_id.insert(make_pair(m->id, &b));
3542 b.pgid = m->pgid;
3543 b.id = m->id;
3544 b.begin = m->begin;
3545 b.end = m->end;
3546
3547 // ack with original backoff's epoch so that the osd can discard this if
3548 // there was a pg split.
3549 Message *r = new MOSDBackoff(m->pgid,
3550 m->map_epoch,
3551 CEPH_OSD_BACKOFF_OP_ACK_BLOCK,
3552 m->id, m->begin, m->end);
3553 // this priority must match the MOSDOps from _prepare_osd_op
3554 r->set_priority(cct->_conf->osd_client_op_priority);
3555 con->send_message(r);
3556 }
3557 break;
3558
3559 case CEPH_OSD_BACKOFF_OP_UNBLOCK:
3560 {
3561 auto p = s->backoffs_by_id.find(m->id);
3562 if (p != s->backoffs_by_id.end()) {
3563 OSDBackoff *b = p->second;
3564 if (b->begin != m->begin &&
3565 b->end != m->end) {
3566 lderr(cct) << __func__ << " got " << m->pgid << " id " << m->id
3567 << " unblock on ["
3568 << m->begin << "," << m->end << ") but backoff is ["
3569 << b->begin << "," << b->end << ")" << dendl;
3570 // hrmpf, unblock it anyway.
3571 }
3572 ldout(cct, 10) << __func__ << " unblock backoff " << b->pgid
3573 << " id " << b->id
3574 << " [" << b->begin << "," << b->end
3575 << ")" << dendl;
3576 auto spgp = s->backoffs.find(b->pgid);
3577 assert(spgp != s->backoffs.end());
3578 spgp->second.erase(b->begin);
3579 if (spgp->second.empty()) {
3580 s->backoffs.erase(spgp);
3581 }
3582 s->backoffs_by_id.erase(p);
3583
3584 // check for any ops to resend
3585 for (auto& q : s->ops) {
3586 if (q.second->target.actual_pgid == m->pgid) {
3587 int r = q.second->target.contained_by(m->begin, m->end);
3588 ldout(cct, 20) << __func__ << " contained_by " << r << " on "
3589 << q.second->target.get_hobj() << dendl;
3590 if (r) {
3591 _send_op(q.second);
3592 }
3593 }
3594 }
3595 } else {
3596 lderr(cct) << __func__ << " " << m->pgid << " id " << m->id
3597 << " unblock on ["
3598 << m->begin << "," << m->end << ") but backoff dne" << dendl;
3599 }
3600 }
3601 break;
3602
3603 default:
3604 ldout(cct, 10) << __func__ << " unrecognized op " << (int)m->op << dendl;
3605 }
3606
3607 sul.unlock();
3608 sl.unlock();
3609
3610 m->put();
3611 put_session(s);
3612 }
3613
3614 uint32_t Objecter::list_nobjects_seek(NListContext *list_context,
3615 uint32_t pos)
3616 {
3617 shared_lock rl(rwlock);
3618 list_context->pos = hobject_t(object_t(), string(), CEPH_NOSNAP,
3619 pos, list_context->pool_id, string());
3620 ldout(cct, 10) << __func__ << list_context
3621 << " pos " << pos << " -> " << list_context->pos << dendl;
3622 pg_t actual = osdmap->raw_pg_to_pg(pg_t(pos, list_context->pool_id));
3623 list_context->current_pg = actual.ps();
3624 list_context->at_end_of_pool = false;
3625 return pos;
3626 }
3627
3628 uint32_t Objecter::list_nobjects_seek(NListContext *list_context,
3629 const hobject_t& cursor)
3630 {
3631 shared_lock rl(rwlock);
3632 ldout(cct, 10) << "list_nobjects_seek " << list_context << dendl;
3633 list_context->pos = cursor;
3634 list_context->at_end_of_pool = false;
3635 pg_t actual = osdmap->raw_pg_to_pg(pg_t(cursor.get_hash(), list_context->pool_id));
3636 list_context->current_pg = actual.ps();
3637 list_context->sort_bitwise = true;
3638 return list_context->current_pg;
3639 }
3640
3641 void Objecter::list_nobjects_get_cursor(NListContext *list_context,
3642 hobject_t *cursor)
3643 {
3644 shared_lock rl(rwlock);
3645 if (list_context->list.empty()) {
3646 *cursor = list_context->pos;
3647 } else {
3648 const librados::ListObjectImpl& entry = list_context->list.front();
3649 const string *key = (entry.locator.empty() ? &entry.oid : &entry.locator);
3650 uint32_t h = osdmap->get_pg_pool(list_context->pool_id)->hash_key(*key, entry.nspace);
3651 *cursor = hobject_t(entry.oid, entry.locator, list_context->pool_snap_seq, h, list_context->pool_id, entry.nspace);
3652 }
3653 }
3654
3655 void Objecter::list_nobjects(NListContext *list_context, Context *onfinish)
3656 {
3657 ldout(cct, 10) << __func__ << " pool_id " << list_context->pool_id
3658 << " pool_snap_seq " << list_context->pool_snap_seq
3659 << " max_entries " << list_context->max_entries
3660 << " list_context " << list_context
3661 << " onfinish " << onfinish
3662 << " current_pg " << list_context->current_pg
3663 << " pos " << list_context->pos << dendl;
3664
3665 shared_lock rl(rwlock);
3666 const pg_pool_t *pool = osdmap->get_pg_pool(list_context->pool_id);
3667 if (!pool) { // pool is gone
3668 rl.unlock();
3669 put_nlist_context_budget(list_context);
3670 onfinish->complete(-ENOENT);
3671 return;
3672 }
3673 int pg_num = pool->get_pg_num();
3674 bool sort_bitwise = osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE);
3675
3676 if (list_context->pos.is_min()) {
3677 list_context->starting_pg_num = 0;
3678 list_context->sort_bitwise = sort_bitwise;
3679 list_context->starting_pg_num = pg_num;
3680 }
3681 if (list_context->sort_bitwise != sort_bitwise) {
3682 list_context->pos = hobject_t(
3683 object_t(), string(), CEPH_NOSNAP,
3684 list_context->current_pg, list_context->pool_id, string());
3685 list_context->sort_bitwise = sort_bitwise;
3686 ldout(cct, 10) << " hobject sort order changed, restarting this pg at "
3687 << list_context->pos << dendl;
3688 }
3689 if (list_context->starting_pg_num != pg_num) {
3690 if (!sort_bitwise) {
3691 // start reading from the beginning; the pgs have changed
3692 ldout(cct, 10) << " pg_num changed; restarting with " << pg_num << dendl;
3693 list_context->pos = collection_list_handle_t();
3694 }
3695 list_context->starting_pg_num = pg_num;
3696 }
3697
3698 if (list_context->pos.is_max()) {
3699 ldout(cct, 20) << __func__ << " end of pool, list "
3700 << list_context->list << dendl;
3701 if (list_context->list.empty()) {
3702 list_context->at_end_of_pool = true;
3703 }
3704 // release the listing context's budget once all
3705 // OPs (in the session) are finished
3706 put_nlist_context_budget(list_context);
3707 onfinish->complete(0);
3708 return;
3709 }
3710
3711 ObjectOperation op;
3712 op.pg_nls(list_context->max_entries, list_context->filter,
3713 list_context->pos, osdmap->get_epoch());
3714 list_context->bl.clear();
3715 C_NList *onack = new C_NList(list_context, onfinish, this);
3716 object_locator_t oloc(list_context->pool_id, list_context->nspace);
3717
3718 // note current_pg in case we don't have (or lose) SORTBITWISE
3719 list_context->current_pg = pool->raw_hash_to_pg(list_context->pos.get_hash());
3720 rl.unlock();
3721
3722 pg_read(list_context->current_pg, oloc, op,
3723 &list_context->bl, 0, onack, &onack->epoch,
3724 &list_context->ctx_budget);
3725 }
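// Driving a full listing is just repeated calls: each completion either
// leaves entries in list_context->list (consume and clear them) or sets
// at_end_of_pool. A rough caller sketch; on_batch(), consume() and the
// C_ListMore callback type are hypothetical names used only to illustrate
// the loop:
//
//   // called from C_ListMore::finish() after each batch completes
//   void on_batch(Objecter *o, NListContext *lc) {
//     consume(lc->list);          // hand the entries to the user
//     lc->list.clear();
//     if (!lc->at_end_of_pool)
//       o->list_nobjects(lc, new C_ListMore(o, lc));   // fetch next batch
//   }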
3726
3727 void Objecter::_nlist_reply(NListContext *list_context, int r,
3728 Context *final_finish, epoch_t reply_epoch)
3729 {
3730 ldout(cct, 10) << __func__ << " " << list_context << dendl;
3731
3732 bufferlist::iterator iter = list_context->bl.begin();
3733 pg_nls_response_t response;
3734 bufferlist extra_info;
3735 ::decode(response, iter);
3736 if (!iter.end()) {
3737 ::decode(extra_info, iter);
3738 }
3739
3740 // if the osd returns 1 (newer code), or handle MAX, it means we
3741 // hit the end of the pg.
3742 if ((response.handle.is_max() || r == 1) &&
3743 !list_context->sort_bitwise) {
3744 // legacy OSD and !sortbitwise, figure out the next PG on our own
3745 ++list_context->current_pg;
3746 if (list_context->current_pg == list_context->starting_pg_num) {
3747 // end of pool
3748 list_context->pos = hobject_t::get_max();
3749 } else {
3750 // next pg
3751 list_context->pos = hobject_t(object_t(), string(), CEPH_NOSNAP,
3752 list_context->current_pg,
3753 list_context->pool_id, string());
3754 }
3755 } else {
3756 list_context->pos = response.handle;
3757 }
3758
3759 int response_size = response.entries.size();
3760 ldout(cct, 20) << " response.entries.size " << response_size
3761 << ", response.entries " << response.entries
3762 << ", handle " << response.handle
3763 << ", tentative new pos " << list_context->pos << dendl;
3764 list_context->extra_info.append(extra_info);
3765 if (response_size) {
3766 list_context->list.splice(list_context->list.end(), response.entries);
3767 }
3768
3769 if (list_context->list.size() >= list_context->max_entries) {
3770 ldout(cct, 20) << " hit max, returning results so far, "
3771 << list_context->list << dendl;
3772 // release the listing context's budget once all
3773 // OPs (in the session) are finished
3774 put_nlist_context_budget(list_context);
3775 final_finish->complete(0);
3776 return;
3777 }
3778
3779 // continue!
3780 list_nobjects(list_context, final_finish);
3781 }
3782
3783 void Objecter::put_nlist_context_budget(NListContext *list_context)
3784 {
3785 if (list_context->ctx_budget >= 0) {
3786 ldout(cct, 10) << " release listing context's budget " <<
3787 list_context->ctx_budget << dendl;
3788 put_op_budget_bytes(list_context->ctx_budget);
3789 list_context->ctx_budget = -1;
3790 }
3791 }
3792
3793 // snapshots
3794
3795 int Objecter::create_pool_snap(int64_t pool, string& snap_name,
3796 Context *onfinish)
3797 {
3798 unique_lock wl(rwlock);
3799 ldout(cct, 10) << "create_pool_snap; pool: " << pool << "; snap: "
3800 << snap_name << dendl;
3801
3802 const pg_pool_t *p = osdmap->get_pg_pool(pool);
3803 if (!p)
3804 return -EINVAL;
3805 if (p->snap_exists(snap_name.c_str()))
3806 return -EEXIST;
3807
3808 PoolOp *op = new PoolOp;
3809 if (!op)
3810 return -ENOMEM;
3811 op->tid = ++last_tid;
3812 op->pool = pool;
3813 op->name = snap_name;
3814 op->onfinish = onfinish;
3815 op->pool_op = POOL_OP_CREATE_SNAP;
3816 pool_ops[op->tid] = op;
3817
3818 pool_op_submit(op);
3819
3820 return 0;
3821 }
3822
3823 struct C_SelfmanagedSnap : public Context {
3824 bufferlist bl;
3825 snapid_t *psnapid;
3826 Context *fin;
3827 C_SelfmanagedSnap(snapid_t *ps, Context *f) : psnapid(ps), fin(f) {}
3828 void finish(int r) override {
3829 if (r == 0) {
3830 bufferlist::iterator p = bl.begin();
3831 ::decode(*psnapid, p);
3832 }
3833 fin->complete(r);
3834 }
3835 };
3836
3837 int Objecter::allocate_selfmanaged_snap(int64_t pool, snapid_t *psnapid,
3838 Context *onfinish)
3839 {
3840 unique_lock wl(rwlock);
3841 ldout(cct, 10) << "allocate_selfmanaged_snap; pool: " << pool << dendl;
3842 PoolOp *op = new PoolOp;
3843 if (!op) return -ENOMEM;
3844 op->tid = ++last_tid;
3845 op->pool = pool;
3846 C_SelfmanagedSnap *fin = new C_SelfmanagedSnap(psnapid, onfinish);
3847 op->onfinish = fin;
3848 op->blp = &fin->bl;
3849 op->pool_op = POOL_OP_CREATE_UNMANAGED_SNAP;
3850 pool_ops[op->tid] = op;
3851
3852 pool_op_submit(op);
3853 return 0;
3854 }
3855
3856 int Objecter::delete_pool_snap(int64_t pool, string& snap_name,
3857 Context *onfinish)
3858 {
3859 unique_lock wl(rwlock);
3860 ldout(cct, 10) << "delete_pool_snap; pool: " << pool << "; snap: "
3861 << snap_name << dendl;
3862
3863 const pg_pool_t *p = osdmap->get_pg_pool(pool);
3864 if (!p)
3865 return -EINVAL;
3866 if (!p->snap_exists(snap_name.c_str()))
3867 return -ENOENT;
3868
3869 PoolOp *op = new PoolOp;
3870 if (!op)
3871 return -ENOMEM;
3872 op->tid = ++last_tid;
3873 op->pool = pool;
3874 op->name = snap_name;
3875 op->onfinish = onfinish;
3876 op->pool_op = POOL_OP_DELETE_SNAP;
3877 pool_ops[op->tid] = op;
3878
3879 pool_op_submit(op);
3880
3881 return 0;
3882 }
3883
3884 int Objecter::delete_selfmanaged_snap(int64_t pool, snapid_t snap,
3885 Context *onfinish)
3886 {
3887 unique_lock wl(rwlock);
3888 ldout(cct, 10) << "delete_selfmanaged_snap; pool: " << pool << "; snap: "
3889 << snap << dendl;
3890 PoolOp *op = new PoolOp;
3891 if (!op) return -ENOMEM;
3892 op->tid = ++last_tid;
3893 op->pool = pool;
3894 op->onfinish = onfinish;
3895 op->pool_op = POOL_OP_DELETE_UNMANAGED_SNAP;
3896 op->snapid = snap;
3897 pool_ops[op->tid] = op;
3898
3899 pool_op_submit(op);
3900
3901 return 0;
3902 }
3903
3904 int Objecter::create_pool(string& name, Context *onfinish, uint64_t auid,
3905 int crush_rule)
3906 {
3907 unique_lock wl(rwlock);
3908 ldout(cct, 10) << "create_pool name=" << name << dendl;
3909
3910 if (osdmap->lookup_pg_pool_name(name) >= 0)
3911 return -EEXIST;
3912
3913 PoolOp *op = new PoolOp;
3914 if (!op)
3915 return -ENOMEM;
3916 op->tid = ++last_tid;
3917 op->pool = 0;
3918 op->name = name;
3919 op->onfinish = onfinish;
3920 op->pool_op = POOL_OP_CREATE;
3921 pool_ops[op->tid] = op;
3922 op->auid = auid;
3923 op->crush_rule = crush_rule;
3924
3925 pool_op_submit(op);
3926
3927 return 0;
3928 }
3929
3930 int Objecter::delete_pool(int64_t pool, Context *onfinish)
3931 {
3932 unique_lock wl(rwlock);
3933 ldout(cct, 10) << "delete_pool " << pool << dendl;
3934
3935 if (!osdmap->have_pg_pool(pool))
3936 return -ENOENT;
3937
3938 _do_delete_pool(pool, onfinish);
3939 return 0;
3940 }
3941
3942 int Objecter::delete_pool(const string &pool_name, Context *onfinish)
3943 {
3944 unique_lock wl(rwlock);
3945 ldout(cct, 10) << "delete_pool " << pool_name << dendl;
3946
3947 int64_t pool = osdmap->lookup_pg_pool_name(pool_name);
3948 if (pool < 0)
3949 return pool;
3950
3951 _do_delete_pool(pool, onfinish);
3952 return 0;
3953 }
3954
3955 void Objecter::_do_delete_pool(int64_t pool, Context *onfinish)
3956 {
3957 PoolOp *op = new PoolOp;
3958 op->tid = ++last_tid;
3959 op->pool = pool;
3960 op->name = "delete";
3961 op->onfinish = onfinish;
3962 op->pool_op = POOL_OP_DELETE;
3963 pool_ops[op->tid] = op;
3964 pool_op_submit(op);
3965 }
3966
3967 /**
3968 * Change the auid owner of a pool by contacting the monitor.
3969 * This requires the current connection to have write permissions
3970 * on both the pool's current auid and the new (parameter) auid.
3971 * Uses the standard Context callback when done.
3972 */
3973 int Objecter::change_pool_auid(int64_t pool, Context *onfinish, uint64_t auid)
3974 {
3975 unique_lock wl(rwlock);
3976 ldout(cct, 10) << "change_pool_auid " << pool << " to " << auid << dendl;
3977 PoolOp *op = new PoolOp;
3978 if (!op) return -ENOMEM;
3979 op->tid = ++last_tid;
3980 op->pool = pool;
3981 op->name = "change_pool_auid";
3982 op->onfinish = onfinish;
3983 op->pool_op = POOL_OP_AUID_CHANGE;
3984 op->auid = auid;
3985 pool_ops[op->tid] = op;
3986
3987 logger->set(l_osdc_poolop_active, pool_ops.size());
3988
3989 pool_op_submit(op);
3990 return 0;
3991 }
3992
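// pool_op_submit() arms an optional mon_timeout that cancels the op with
// -ETIMEDOUT, then calls _pool_op_submit(), which builds the MPoolOp and
// sends it to the monitor.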
3993 void Objecter::pool_op_submit(PoolOp *op)
3994 {
3995 // rwlock is locked
3996 if (mon_timeout > timespan(0)) {
3997 op->ontimeout = timer.add_event(mon_timeout,
3998 [this, op]() {
3999 pool_op_cancel(op->tid, -ETIMEDOUT); });
4000 }
4001 _pool_op_submit(op);
4002 }
4003
4004 void Objecter::_pool_op_submit(PoolOp *op)
4005 {
4006 // rwlock is locked unique
4007
4008 ldout(cct, 10) << "pool_op_submit " << op->tid << dendl;
4009 MPoolOp *m = new MPoolOp(monc->get_fsid(), op->tid, op->pool,
4010 op->name, op->pool_op,
4011 op->auid, last_seen_osdmap_version);
4012 if (op->snapid) m->snapid = op->snapid;
4013 if (op->crush_rule) m->crush_rule = op->crush_rule;
4014 monc->send_mon_message(m);
4015 op->last_submit = ceph::mono_clock::now();
4016
4017 logger->inc(l_osdc_poolop_send);
4018 }
4019
4020 /**
4021 * Handle a reply to a PoolOp message. Check that we sent the message
4022 * and give the caller responsibility for the returned bufferlist.
4023 * Then either call the finisher or stash the PoolOp, depending on
4024 * whether we have a new enough map.
4025 * Lastly, clean up the message and PoolOp.
4026 */
4027 void Objecter::handle_pool_op_reply(MPoolOpReply *m)
4028 {
4029 FUNCTRACE();
4030 shunique_lock sul(rwlock, acquire_shared);
4031 if (!initialized) {
4032 sul.unlock();
4033 m->put();
4034 return;
4035 }
4036
4037 ldout(cct, 10) << "handle_pool_op_reply " << *m << dendl;
4038 ceph_tid_t tid = m->get_tid();
4039 map<ceph_tid_t, PoolOp *>::iterator iter = pool_ops.find(tid);
4040 if (iter != pool_ops.end()) {
4041 PoolOp *op = iter->second;
4042 ldout(cct, 10) << "have request " << tid << " at " << op << " Op: "
4043 << ceph_pool_op_name(op->pool_op) << dendl;
4044 if (op->blp)
4045 op->blp->claim(m->response_data);
4046 if (m->version > last_seen_osdmap_version)
4047 last_seen_osdmap_version = m->version;
4048 if (osdmap->get_epoch() < m->epoch) {
4049 sul.unlock();
4050 sul.lock();
4051 // recheck op existence since we have let go of rwlock
4052 // (for promotion) above.
4053 iter = pool_ops.find(tid);
4054 if (iter == pool_ops.end())
4055 goto done; // op is gone.
4056 if (osdmap->get_epoch() < m->epoch) {
4057 ldout(cct, 20) << "waiting for client to reach epoch " << m->epoch
4058 << " before calling back" << dendl;
4059 _wait_for_new_map(op->onfinish, m->epoch, m->replyCode);
4060 } else {
4061 // the map caught up while we had the lock dropped, probably
4062 // because an MOSDMap message sneaked in. Run the caller-specified
4063 // callback now, or we lose it forever.
4064 assert(op->onfinish);
4065 op->onfinish->complete(m->replyCode);
4066 }
4067 } else {
4068 assert(op->onfinish);
4069 op->onfinish->complete(m->replyCode);
4070 }
4071 op->onfinish = NULL;
4072 if (!sul.owns_lock()) {
4073 sul.unlock();
4074 sul.lock();
4075 }
4076 iter = pool_ops.find(tid);
4077 if (iter != pool_ops.end()) {
4078 _finish_pool_op(op, 0);
4079 }
4080 } else {
4081 ldout(cct, 10) << "unknown request " << tid << dendl;
4082 }
4083
4084 done:
4085 // Not strictly necessary, since we'll release it on return.
4086 sul.unlock();
4087
4088 ldout(cct, 10) << "done" << dendl;
4089 m->put();
4090 }
4091
4092 int Objecter::pool_op_cancel(ceph_tid_t tid, int r)
4093 {
4094 assert(initialized);
4095
4096 unique_lock wl(rwlock);
4097
4098 map<ceph_tid_t, PoolOp*>::iterator it = pool_ops.find(tid);
4099 if (it == pool_ops.end()) {
4100 ldout(cct, 10) << __func__ << " tid " << tid << " dne" << dendl;
4101 return -ENOENT;
4102 }
4103
4104 ldout(cct, 10) << __func__ << " tid " << tid << dendl;
4105
4106 PoolOp *op = it->second;
4107 if (op->onfinish)
4108 op->onfinish->complete(r);
4109
4110 _finish_pool_op(op, r);
4111 return 0;
4112 }
4113
4114 void Objecter::_finish_pool_op(PoolOp *op, int r)
4115 {
4116 // rwlock is locked unique
4117 pool_ops.erase(op->tid);
4118 logger->set(l_osdc_poolop_active, pool_ops.size());
4119
4120 if (op->ontimeout && r != -ETIMEDOUT) {
4121 timer.cancel_event(op->ontimeout);
4122 }
4123
4124 delete op;
4125 }
4126
4127 // pool stats
4128
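// get_pool_stats() registers a PoolStatOp keyed by tid and sends an
// MGetPoolStats to the monitor; handle_get_pool_stats_reply() later fills
// *result and completes onfinish. An optional mon_timeout cancels the op
// with -ETIMEDOUT.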
4129 void Objecter::get_pool_stats(list<string>& pools,
4130 map<string,pool_stat_t> *result,
4131 Context *onfinish)
4132 {
4133 ldout(cct, 10) << "get_pool_stats " << pools << dendl;
4134
4135 PoolStatOp *op = new PoolStatOp;
4136 op->tid = ++last_tid;
4137 op->pools = pools;
4138 op->pool_stats = result;
4139 op->onfinish = onfinish;
4140 if (mon_timeout > timespan(0)) {
4141 op->ontimeout = timer.add_event(mon_timeout,
4142 [this, op]() {
4143 pool_stat_op_cancel(op->tid,
4144 -ETIMEDOUT); });
4145 } else {
4146 op->ontimeout = 0;
4147 }
4148
4149 unique_lock wl(rwlock);
4150
4151 poolstat_ops[op->tid] = op;
4152
4153 logger->set(l_osdc_poolstat_active, poolstat_ops.size());
4154
4155 _poolstat_submit(op);
4156 }
4157
4158 void Objecter::_poolstat_submit(PoolStatOp *op)
4159 {
4160 ldout(cct, 10) << "_poolstat_submit " << op->tid << dendl;
4161 monc->send_mon_message(new MGetPoolStats(monc->get_fsid(), op->tid,
4162 op->pools,
4163 last_seen_pgmap_version));
4164 op->last_submit = ceph::mono_clock::now();
4165
4166 logger->inc(l_osdc_poolstat_send);
4167 }
4168
4169 void Objecter::handle_get_pool_stats_reply(MGetPoolStatsReply *m)
4170 {
4171 ldout(cct, 10) << "handle_get_pool_stats_reply " << *m << dendl;
4172 ceph_tid_t tid = m->get_tid();
4173
4174 unique_lock wl(rwlock);
4175 if (!initialized) {
4176 m->put();
4177 return;
4178 }
4179
4180 map<ceph_tid_t, PoolStatOp *>::iterator iter = poolstat_ops.find(tid);
4181 if (iter != poolstat_ops.end()) {
4182 PoolStatOp *op = poolstat_ops[tid];
4183 ldout(cct, 10) << "have request " << tid << " at " << op << dendl;
4184 *op->pool_stats = m->pool_stats;
4185 if (m->version > last_seen_pgmap_version) {
4186 last_seen_pgmap_version = m->version;
4187 }
4188 op->onfinish->complete(0);
4189 _finish_pool_stat_op(op, 0);
4190 } else {
4191 ldout(cct, 10) << "unknown request " << tid << dendl;
4192 }
4193 ldout(cct, 10) << "done" << dendl;
4194 m->put();
4195 }
4196
4197 int Objecter::pool_stat_op_cancel(ceph_tid_t tid, int r)
4198 {
4199 assert(initialized);
4200
4201 unique_lock wl(rwlock);
4202
4203 map<ceph_tid_t, PoolStatOp*>::iterator it = poolstat_ops.find(tid);
4204 if (it == poolstat_ops.end()) {
4205 ldout(cct, 10) << __func__ << " tid " << tid << " dne" << dendl;
4206 return -ENOENT;
4207 }
4208
4209 ldout(cct, 10) << __func__ << " tid " << tid << dendl;
4210
4211 PoolStatOp *op = it->second;
4212 if (op->onfinish)
4213 op->onfinish->complete(r);
4214 _finish_pool_stat_op(op, r);
4215 return 0;
4216 }
4217
4218 void Objecter::_finish_pool_stat_op(PoolStatOp *op, int r)
4219 {
4220 // rwlock is locked unique
4221
4222 poolstat_ops.erase(op->tid);
4223 logger->set(l_osdc_poolstat_active, poolstat_ops.size());
4224
4225 if (op->ontimeout && r != -ETIMEDOUT)
4226 timer.cancel_event(op->ontimeout);
4227
4228 delete op;
4229 }
4230
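// get_fs_stats() mirrors the pool-stat path: register a StatfsOp, send an
// MStatfs to the monitor, and complete onfinish from
// handle_fs_stats_reply() with the returned ceph_statfs.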
4231 void Objecter::get_fs_stats(ceph_statfs& result, Context *onfinish)
4232 {
4233 ldout(cct, 10) << "get_fs_stats" << dendl;
4234 unique_lock l(rwlock);
4235
4236 StatfsOp *op = new StatfsOp;
4237 op->tid = ++last_tid;
4238 op->stats = &result;
4239 op->onfinish = onfinish;
4240 if (mon_timeout > timespan(0)) {
4241 op->ontimeout = timer.add_event(mon_timeout,
4242 [this, op]() {
4243 statfs_op_cancel(op->tid,
4244 -ETIMEDOUT); });
4245 } else {
4246 op->ontimeout = 0;
4247 }
4248 statfs_ops[op->tid] = op;
4249
4250 logger->set(l_osdc_statfs_active, statfs_ops.size());
4251
4252 _fs_stats_submit(op);
4253 }
4254
4255 void Objecter::_fs_stats_submit(StatfsOp *op)
4256 {
4257 // rwlock is locked unique
4258
4259 ldout(cct, 10) << "fs_stats_submit" << op->tid << dendl;
4260 monc->send_mon_message(new MStatfs(monc->get_fsid(), op->tid,
4261 last_seen_pgmap_version));
4262 op->last_submit = ceph::mono_clock::now();
4263
4264 logger->inc(l_osdc_statfs_send);
4265 }
4266
4267 void Objecter::handle_fs_stats_reply(MStatfsReply *m)
4268 {
4269 unique_lock wl(rwlock);
4270 if (!initialized) {
4271 m->put();
4272 return;
4273 }
4274
4275 ldout(cct, 10) << "handle_fs_stats_reply " << *m << dendl;
4276 ceph_tid_t tid = m->get_tid();
4277
4278 if (statfs_ops.count(tid)) {
4279 StatfsOp *op = statfs_ops[tid];
4280 ldout(cct, 10) << "have request " << tid << " at " << op << dendl;
4281 *(op->stats) = m->h.st;
4282 if (m->h.version > last_seen_pgmap_version)
4283 last_seen_pgmap_version = m->h.version;
4284 op->onfinish->complete(0);
4285 _finish_statfs_op(op, 0);
4286 } else {
4287 ldout(cct, 10) << "unknown request " << tid << dendl;
4288 }
4289 m->put();
4290 ldout(cct, 10) << "done" << dendl;
4291 }
4292
4293 int Objecter::statfs_op_cancel(ceph_tid_t tid, int r)
4294 {
4295 assert(initialized);
4296
4297 unique_lock wl(rwlock);
4298
4299 map<ceph_tid_t, StatfsOp*>::iterator it = statfs_ops.find(tid);
4300 if (it == statfs_ops.end()) {
4301 ldout(cct, 10) << __func__ << " tid " << tid << " dne" << dendl;
4302 return -ENOENT;
4303 }
4304
4305 ldout(cct, 10) << __func__ << " tid " << tid << dendl;
4306
4307 StatfsOp *op = it->second;
4308 if (op->onfinish)
4309 op->onfinish->complete(r);
4310 _finish_statfs_op(op, r);
4311 return 0;
4312 }
4313
4314 void Objecter::_finish_statfs_op(StatfsOp *op, int r)
4315 {
4316 // rwlock is locked unique
4317
4318 statfs_ops.erase(op->tid);
4319 logger->set(l_osdc_statfs_active, statfs_ops.size());
4320
4321 if (op->ontimeout && r != -ETIMEDOUT)
4322 timer.cancel_event(op->ontimeout);
4323
4324 delete op;
4325 }
4326
4327 // scatter/gather
4328
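// _sg_read_finish() reassembles a striped read: with multiple extents the
// per-object buffers are stitched back into logical order via
// Striper::StripedReadResult; a single-extent read simply claims its one
// buffer. onfinish (if present) is completed with the byte count read.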
4329 void Objecter::_sg_read_finish(vector<ObjectExtent>& extents,
4330 vector<bufferlist>& resultbl,
4331 bufferlist *bl, Context *onfinish)
4332 {
4333 // all done
4334 ldout(cct, 15) << "_sg_read_finish" << dendl;
4335
4336 if (extents.size() > 1) {
4337 Striper::StripedReadResult r;
4338 vector<bufferlist>::iterator bit = resultbl.begin();
4339 for (vector<ObjectExtent>::iterator eit = extents.begin();
4340 eit != extents.end();
4341 ++eit, ++bit) {
4342 r.add_partial_result(cct, *bit, eit->buffer_extents);
4343 }
4344 bl->clear();
4345 r.assemble_result(cct, *bl, false);
4346 } else {
4347 ldout(cct, 15) << " only one frag" << dendl;
4348 bl->claim(resultbl[0]);
4349 }
4350
4351 // done
4352 uint64_t bytes_read = bl->length();
4353 ldout(cct, 7) << "_sg_read_finish " << bytes_read << " bytes" << dendl;
4354
4355 if (onfinish) {
4356 onfinish->complete(bytes_read);// > 0 ? bytes_read:m->get_result());
4357 }
4358 }
4359
4360
4361 void Objecter::ms_handle_connect(Connection *con)
4362 {
4363 ldout(cct, 10) << "ms_handle_connect " << con << dendl;
4364 if (!initialized)
4365 return;
4366
4367 if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON)
4368 resend_mon_ops();
4369 }
4370
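// On a reset of an OSD connection: reopen that session, requeue its ops
// and linger ops for resend, and call maybe_request_map() to pick up any
// osdmap change we may have missed.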
4371 bool Objecter::ms_handle_reset(Connection *con)
4372 {
4373 if (!initialized)
4374 return false;
4375 if (con->get_peer_type() == CEPH_ENTITY_TYPE_OSD) {
4376 OSDSession *session = static_cast<OSDSession*>(con->get_priv());
4377 if (session) {
4378 ldout(cct, 1) << "ms_handle_reset " << con << " session " << session
4379 << " osd." << session->osd << dendl;
4380 unique_lock wl(rwlock);
4381 if (!initialized) {
4382 wl.unlock();
4383 return false;
4384 }
4385 map<uint64_t, LingerOp *> lresend;
4386 OSDSession::unique_lock sl(session->lock);
4387 _reopen_session(session);
4388 _kick_requests(session, lresend);
4389 sl.unlock();
4390 _linger_ops_resend(lresend, wl);
4391 wl.unlock();
4392 maybe_request_map();
4393 session->put();
4394 }
4395 return true;
4396 }
4397 return false;
4398 }
4399
4400 void Objecter::ms_handle_remote_reset(Connection *con)
4401 {
4402 /*
4403 * treat a remote reset the same as a local reset.
4404 */
4405 ms_handle_reset(con);
4406 }
4407
4408 bool Objecter::ms_handle_refused(Connection *con)
4409 {
4410 // just log for now
4411 if (osdmap && (con->get_peer_type() == CEPH_ENTITY_TYPE_OSD)) {
4412 int osd = osdmap->identify_osd(con->get_peer_addr());
4413 if (osd >= 0) {
4414 ldout(cct, 1) << "ms_handle_refused on osd." << osd << dendl;
4415 }
4416 }
4417 return false;
4418 }
4419
4420 bool Objecter::ms_get_authorizer(int dest_type,
4421 AuthAuthorizer **authorizer,
4422 bool force_new)
4423 {
4424 if (!initialized)
4425 return false;
4426 if (dest_type == CEPH_ENTITY_TYPE_MON)
4427 return true;
4428 *authorizer = monc->build_authorizer(dest_type);
4429 return *authorizer != NULL;
4430 }
4431
4432 void Objecter::op_target_t::dump(Formatter *f) const
4433 {
4434 f->dump_stream("pg") << pgid;
4435 f->dump_int("osd", osd);
4436 f->dump_stream("object_id") << base_oid;
4437 f->dump_stream("object_locator") << base_oloc;
4438 f->dump_stream("target_object_id") << target_oid;
4439 f->dump_stream("target_object_locator") << target_oloc;
4440 f->dump_int("paused", (int)paused);
4441 f->dump_int("used_replica", (int)used_replica);
4442 f->dump_int("precalc_pgid", (int)precalc_pgid);
4443 }
4444
4445 void Objecter::_dump_active(OSDSession *s)
4446 {
4447 for (map<ceph_tid_t,Op*>::iterator p = s->ops.begin();
4448 p != s->ops.end();
4449 ++p) {
4450 Op *op = p->second;
4451 ldout(cct, 20) << op->tid << "\t" << op->target.pgid
4452 << "\tosd." << (op->session ? op->session->osd : -1)
4453 << "\t" << op->target.base_oid
4454 << "\t" << op->ops << dendl;
4455 }
4456 }
4457
4458 void Objecter::_dump_active()
4459 {
4460 ldout(cct, 20) << "dump_active .. " << num_homeless_ops << " homeless"
4461 << dendl;
4462 for (map<int, OSDSession *>::iterator siter = osd_sessions.begin();
4463 siter != osd_sessions.end(); ++siter) {
4464 OSDSession *s = siter->second;
4465 OSDSession::shared_lock sl(s->lock);
4466 _dump_active(s);
4467 sl.unlock();
4468 }
4469 _dump_active(homeless_session);
4470 }
4471
4472 void Objecter::dump_active()
4473 {
4474 shared_lock rl(rwlock);
4475 _dump_active();
4476 rl.unlock();
4477 }
4478
4479 void Objecter::dump_requests(Formatter *fmt)
4480 {
4481 // Read-lock on Objecter held here
4482 fmt->open_object_section("requests");
4483 dump_ops(fmt);
4484 dump_linger_ops(fmt);
4485 dump_pool_ops(fmt);
4486 dump_pool_stat_ops(fmt);
4487 dump_statfs_ops(fmt);
4488 dump_command_ops(fmt);
4489 fmt->close_section(); // requests object
4490 }
4491
4492 void Objecter::_dump_ops(const OSDSession *s, Formatter *fmt)
4493 {
4494 for (map<ceph_tid_t,Op*>::const_iterator p = s->ops.begin();
4495 p != s->ops.end();
4496 ++p) {
4497 Op *op = p->second;
4498 fmt->open_object_section("op");
4499 fmt->dump_unsigned("tid", op->tid);
4500 op->target.dump(fmt);
4501 fmt->dump_stream("last_sent") << op->stamp;
4502 fmt->dump_int("attempts", op->attempts);
4503 fmt->dump_stream("snapid") << op->snapid;
4504 fmt->dump_stream("snap_context") << op->snapc;
4505 fmt->dump_stream("mtime") << op->mtime;
4506
4507 fmt->open_array_section("osd_ops");
4508 for (vector<OSDOp>::const_iterator it = op->ops.begin();
4509 it != op->ops.end();
4510 ++it) {
4511 fmt->dump_stream("osd_op") << *it;
4512 }
4513 fmt->close_section(); // osd_ops array
4514
4515 fmt->close_section(); // op object
4516 }
4517 }
4518
4519 void Objecter::dump_ops(Formatter *fmt)
4520 {
4521 // Read-lock on Objecter held
4522 fmt->open_array_section("ops");
4523 for (map<int, OSDSession *>::const_iterator siter = osd_sessions.begin();
4524 siter != osd_sessions.end(); ++siter) {
4525 OSDSession *s = siter->second;
4526 OSDSession::shared_lock sl(s->lock);
4527 _dump_ops(s, fmt);
4528 sl.unlock();
4529 }
4530 _dump_ops(homeless_session, fmt);
4531 fmt->close_section(); // ops array
4532 }
4533
4534 void Objecter::_dump_linger_ops(const OSDSession *s, Formatter *fmt)
4535 {
4536 for (map<uint64_t, LingerOp*>::const_iterator p = s->linger_ops.begin();
4537 p != s->linger_ops.end();
4538 ++p) {
4539 LingerOp *op = p->second;
4540 fmt->open_object_section("linger_op");
4541 fmt->dump_unsigned("linger_id", op->linger_id);
4542 op->target.dump(fmt);
4543 fmt->dump_stream("snapid") << op->snap;
4544 fmt->dump_stream("registered") << op->registered;
4545 fmt->close_section(); // linger_op object
4546 }
4547 }
4548
4549 void Objecter::dump_linger_ops(Formatter *fmt)
4550 {
4551 // We have a read-lock on the objecter
4552 fmt->open_array_section("linger_ops");
4553 for (map<int, OSDSession *>::const_iterator siter = osd_sessions.begin();
4554 siter != osd_sessions.end(); ++siter) {
4555 OSDSession *s = siter->second;
4556 OSDSession::shared_lock sl(s->lock);
4557 _dump_linger_ops(s, fmt);
4558 sl.unlock();
4559 }
4560 _dump_linger_ops(homeless_session, fmt);
4561 fmt->close_section(); // linger_ops array
4562 }
4563
4564 void Objecter::_dump_command_ops(const OSDSession *s, Formatter *fmt)
4565 {
4566 for (map<uint64_t, CommandOp*>::const_iterator p = s->command_ops.begin();
4567 p != s->command_ops.end();
4568 ++p) {
4569 CommandOp *op = p->second;
4570 fmt->open_object_section("command_op");
4571 fmt->dump_unsigned("command_id", op->tid);
4572 fmt->dump_int("osd", op->session ? op->session->osd : -1);
4573 fmt->open_array_section("command");
4574 for (vector<string>::const_iterator q = op->cmd.begin();
4575 q != op->cmd.end(); ++q)
4576 fmt->dump_string("word", *q);
4577 fmt->close_section();
4578 if (op->target_osd >= 0)
4579 fmt->dump_int("target_osd", op->target_osd);
4580 else
4581 fmt->dump_stream("target_pg") << op->target_pg;
4582 fmt->close_section(); // command_op object
4583 }
4584 }
4585
4586 void Objecter::dump_command_ops(Formatter *fmt)
4587 {
4588 // We have a read-lock on the Objecter here
4589 fmt->open_array_section("command_ops");
4590 for (map<int, OSDSession *>::const_iterator siter = osd_sessions.begin();
4591 siter != osd_sessions.end(); ++siter) {
4592 OSDSession *s = siter->second;
4593 OSDSession::shared_lock sl(s->lock);
4594 _dump_command_ops(s, fmt);
4595 sl.unlock();
4596 }
4597 _dump_command_ops(homeless_session, fmt);
4598 fmt->close_section(); // command_ops array
4599 }
4600
4601 void Objecter::dump_pool_ops(Formatter *fmt) const
4602 {
4603 fmt->open_array_section("pool_ops");
4604 for (map<ceph_tid_t, PoolOp*>::const_iterator p = pool_ops.begin();
4605 p != pool_ops.end();
4606 ++p) {
4607 PoolOp *op = p->second;
4608 fmt->open_object_section("pool_op");
4609 fmt->dump_unsigned("tid", op->tid);
4610 fmt->dump_int("pool", op->pool);
4611 fmt->dump_string("name", op->name);
4612 fmt->dump_int("operation_type", op->pool_op);
4613 fmt->dump_unsigned("auid", op->auid);
4614 fmt->dump_unsigned("crush_rule", op->crush_rule);
4615 fmt->dump_stream("snapid") << op->snapid;
4616 fmt->dump_stream("last_sent") << op->last_submit;
4617 fmt->close_section(); // pool_op object
4618 }
4619 fmt->close_section(); // pool_ops array
4620 }
4621
4622 void Objecter::dump_pool_stat_ops(Formatter *fmt) const
4623 {
4624 fmt->open_array_section("pool_stat_ops");
4625 for (map<ceph_tid_t, PoolStatOp*>::const_iterator p = poolstat_ops.begin();
4626 p != poolstat_ops.end();
4627 ++p) {
4628 PoolStatOp *op = p->second;
4629 fmt->open_object_section("pool_stat_op");
4630 fmt->dump_unsigned("tid", op->tid);
4631 fmt->dump_stream("last_sent") << op->last_submit;
4632
4633 fmt->open_array_section("pools");
4634 for (list<string>::const_iterator it = op->pools.begin();
4635 it != op->pools.end();
4636 ++it) {
4637 fmt->dump_string("pool", *it);
4638 }
4639 fmt->close_section(); // pools array
4640
4641 fmt->close_section(); // pool_stat_op object
4642 }
4643 fmt->close_section(); // pool_stat_ops array
4644 }
4645
4646 void Objecter::dump_statfs_ops(Formatter *fmt) const
4647 {
4648 fmt->open_array_section("statfs_ops");
4649 for (map<ceph_tid_t, StatfsOp*>::const_iterator p = statfs_ops.begin();
4650 p != statfs_ops.end();
4651 ++p) {
4652 StatfsOp *op = p->second;
4653 fmt->open_object_section("statfs_op");
4654 fmt->dump_unsigned("tid", op->tid);
4655 fmt->dump_stream("last_sent") << op->last_submit;
4656 fmt->close_section(); // statfs_op object
4657 }
4658 fmt->close_section(); // statfs_ops array
4659 }
4660
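// Admin-socket hook: dumps every in-flight request (ops, linger ops, pool,
// pool-stat, statfs and command ops) through dump_requests() in the
// requested format (defaulting to json-pretty).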
4661 Objecter::RequestStateHook::RequestStateHook(Objecter *objecter) :
4662 m_objecter(objecter)
4663 {
4664 }
4665
4666 bool Objecter::RequestStateHook::call(std::string command, cmdmap_t& cmdmap,
4667 std::string format, bufferlist& out)
4668 {
4669 Formatter *f = Formatter::create(format, "json-pretty", "json-pretty");
4670 shared_lock rl(m_objecter->rwlock);
4671 m_objecter->dump_requests(f);
4672 f->flush(out);
4673 delete f;
4674 return true;
4675 }
4676
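// blacklist_self() asks the monitor to add (or remove) this client's own
// messenger address to/from the OSD blacklist. The command is assembled
// below as JSON fragments carrying the "osd blacklist" prefix, the add/rm
// blacklistop, and our address.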
4677 void Objecter::blacklist_self(bool set)
4678 {
4679 ldout(cct, 10) << "blacklist_self " << (set ? "add" : "rm") << dendl;
4680
4681 vector<string> cmd;
4682 cmd.push_back("{\"prefix\":\"osd blacklist\", ");
4683 if (set)
4684 cmd.push_back("\"blacklistop\":\"add\",");
4685 else
4686 cmd.push_back("\"blacklistop\":\"rm\",");
4687 stringstream ss;
4688 ss << messenger->get_myaddr();
4689 cmd.push_back("\"addr\":\"" + ss.str() + "\"");
4690
4691 MMonCommand *m = new MMonCommand(monc->get_fsid());
4692 m->cmd = cmd;
4693
4694 monc->send_mon_message(m);
4695 }
4696
4697 // commands
4698
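// handle_command_reply() looks the CommandOp up by tid in the per-OSD
// session, verifies the reply arrived on the connection the command was
// sent on, claims any output data into poutbl, and finishes the op with
// the returned code and status string.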
4699 void Objecter::handle_command_reply(MCommandReply *m)
4700 {
4701 unique_lock wl(rwlock);
4702 if (!initialized) {
4703 m->put();
4704 return;
4705 }
4706
4707 ConnectionRef con = m->get_connection();
4708 OSDSession *s = static_cast<OSDSession*>(con->get_priv());
4709 if (!s || s->con != con) {
4710 ldout(cct, 7) << __func__ << " no session on con " << con << dendl;
4711 m->put();
4712 if (s)
4713 s->put();
4714 return;
4715 }
4716
4717 OSDSession::shared_lock sl(s->lock);
4718 map<ceph_tid_t,CommandOp*>::iterator p = s->command_ops.find(m->get_tid());
4719 if (p == s->command_ops.end()) {
4720 ldout(cct, 10) << "handle_command_reply tid " << m->get_tid()
4721 << " not found" << dendl;
4722 m->put();
4723 sl.unlock();
4724 if (s)
4725 s->put();
4726 return;
4727 }
4728
4729 CommandOp *c = p->second;
4730 if (!c->session ||
4731 m->get_connection() != c->session->con) {
4732 ldout(cct, 10) << "handle_command_reply tid " << m->get_tid()
4733 << " got reply from wrong connection "
4734 << m->get_connection() << " " << m->get_source_inst()
4735 << dendl;
4736 m->put();
4737 sl.unlock();
4738 if (s)
4739 s->put();
4740 return;
4741 }
4742 if (c->poutbl) {
4743 c->poutbl->claim(m->get_data());
4744 }
4745
4746 sl.unlock();
4747
4748
4749 _finish_command(c, m->r, m->rs);
4750 m->put();
4751 if (s)
4752 s->put();
4753 }
4754
4755 void Objecter::submit_command(CommandOp *c, ceph_tid_t *ptid)
4756 {
4757 shunique_lock sul(rwlock, ceph::acquire_unique);
4758
4759 ceph_tid_t tid = ++last_tid;
4760 ldout(cct, 10) << "_submit_command " << tid << " " << c->cmd << dendl;
4761 c->tid = tid;
4762
4763 {
4764 OSDSession::unique_lock hs_wl(homeless_session->lock);
4765 _session_command_op_assign(homeless_session, c);
4766 }
4767
4768 _calc_command_target(c, sul);
4769 _assign_command_session(c, sul);
4770 if (osd_timeout > timespan(0)) {
4771 c->ontimeout = timer.add_event(osd_timeout,
4772 [this, c, tid]() {
4773 command_op_cancel(c->session, tid,
4774 -ETIMEDOUT); });
4775 }
4776
4777 if (!c->session->is_homeless()) {
4778 _send_command(c);
4779 } else {
4780 _maybe_request_map();
4781 }
4782 if (c->map_check_error)
4783 _send_command_map_check(c);
4784 *ptid = tid;
4785
4786 logger->inc(l_osdc_command_active);
4787 }
4788
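// _calc_command_target() resolves where a CommandOp should go: either the
// explicitly requested OSD (after checking it exists and is up) or
// whatever _calc_target() picks for the target pg. It records any
// map-check error and reports whether the op needs to be reassigned to a
// different session.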
4789 int Objecter::_calc_command_target(CommandOp *c, shunique_lock& sul)
4790 {
4791 assert(sul.owns_lock() && sul.mutex() == &rwlock);
4792
4793 c->map_check_error = 0;
4794
4795 // ignore overlays, just like we do with pg ops
4796 c->target.flags |= CEPH_OSD_FLAG_IGNORE_OVERLAY;
4797
4798 if (c->target_osd >= 0) {
4799 if (!osdmap->exists(c->target_osd)) {
4800 c->map_check_error = -ENOENT;
4801 c->map_check_error_str = "osd dne";
4802 c->target.osd = -1;
4803 return RECALC_OP_TARGET_OSD_DNE;
4804 }
4805 if (osdmap->is_down(c->target_osd)) {
4806 c->map_check_error = -ENXIO;
4807 c->map_check_error_str = "osd down";
4808 c->target.osd = -1;
4809 return RECALC_OP_TARGET_OSD_DOWN;
4810 }
4811 c->target.osd = c->target_osd;
4812 } else {
4813 int ret = _calc_target(&(c->target), nullptr, true);
4814 if (ret == RECALC_OP_TARGET_POOL_DNE) {
4815 c->map_check_error = -ENOENT;
4816 c->map_check_error_str = "pool dne";
4817 c->target.osd = -1;
4818 return ret;
4819 } else if (ret == RECALC_OP_TARGET_OSD_DOWN) {
4820 c->map_check_error = -ENXIO;
4821 c->map_check_error_str = "osd down";
4822 c->target.osd = -1;
4823 return ret;
4824 }
4825 }
4826
4827 OSDSession *s;
4828 int r = _get_session(c->target.osd, &s, sul);
4829 assert(r != -EAGAIN); /* shouldn't happen as we're holding the write lock */
4830
4831 if (c->session != s) {
4832 put_session(s);
4833 return RECALC_OP_TARGET_NEED_RESEND;
4834 }
4835
4836 put_session(s);
4837
4838 ldout(cct, 20) << "_recalc_command_target " << c->tid << " no change, "
4839 << c->session << dendl;
4840
4841 return RECALC_OP_TARGET_NO_ACTION;
4842 }
4843
4844 void Objecter::_assign_command_session(CommandOp *c,
4845 shunique_lock& sul)
4846 {
4847 assert(sul.owns_lock() && sul.mutex() == &rwlock);
4848
4849 OSDSession *s;
4850 int r = _get_session(c->target.osd, &s, sul);
4851 assert(r != -EAGAIN); /* shouldn't happen as we're holding the write lock */
4852
4853 if (c->session != s) {
4854 if (c->session) {
4855 OSDSession *cs = c->session;
4856 OSDSession::unique_lock csl(cs->lock);
4857 _session_command_op_remove(c->session, c);
4858 csl.unlock();
4859 }
4860 OSDSession::unique_lock sl(s->lock);
4861 _session_command_op_assign(s, c);
4862 }
4863
4864 put_session(s);
4865 }
4866
4867 void Objecter::_send_command(CommandOp *c)
4868 {
4869 ldout(cct, 10) << "_send_command " << c->tid << dendl;
4870 assert(c->session);
4871 assert(c->session->con);
4872 MCommand *m = new MCommand(monc->monmap.fsid);
4873 m->cmd = c->cmd;
4874 m->set_data(c->inbl);
4875 m->set_tid(c->tid);
4876 c->session->con->send_message(m);
4877 logger->inc(l_osdc_command_send);
4878 }
4879
4880 int Objecter::command_op_cancel(OSDSession *s, ceph_tid_t tid, int r)
4881 {
4882 assert(initialized);
4883
4884 unique_lock wl(rwlock);
4885
4886 map<ceph_tid_t, CommandOp*>::iterator it = s->command_ops.find(tid);
4887 if (it == s->command_ops.end()) {
4888 ldout(cct, 10) << __func__ << " tid " << tid << " dne" << dendl;
4889 return -ENOENT;
4890 }
4891
4892 ldout(cct, 10) << __func__ << " tid " << tid << dendl;
4893
4894 CommandOp *op = it->second;
4895 _command_cancel_map_check(op);
4896 _finish_command(op, r, "");
4897 return 0;
4898 }
4899
4900 void Objecter::_finish_command(CommandOp *c, int r, string rs)
4901 {
4902 // rwlock is locked unique
4903
4904 ldout(cct, 10) << "_finish_command " << c->tid << " = " << r << " "
4905 << rs << dendl;
4906 if (c->prs)
4907 *c->prs = rs;
4908 if (c->onfinish)
4909 c->onfinish->complete(r);
4910
4911 if (c->ontimeout && r != -ETIMEDOUT)
4912 timer.cancel_event(c->ontimeout);
4913
4914 OSDSession *s = c->session;
4915 OSDSession::unique_lock sl(s->lock);
4916 _session_command_op_remove(c->session, c);
4917 sl.unlock();
4918
4919 c->put();
4920
4921 logger->dec(l_osdc_command_active);
4922 }
4923
4924 Objecter::OSDSession::~OSDSession()
4925 {
4926 // Caller is responsible for re-assigning or
4927 // destroying any ops that were assigned to us
4928 assert(ops.empty());
4929 assert(linger_ops.empty());
4930 assert(command_ops.empty());
4931 }
4932
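// Destruction assumes everything was already torn down (presumably by a
// prior shutdown()): no sessions, no queued ops of any kind, and the
// admin-socket hook and perf counters already released. The asserts below
// verify exactly that.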
4933 Objecter::~Objecter()
4934 {
4935 delete osdmap;
4936
4937 assert(homeless_session->get_nref() == 1);
4938 assert(num_homeless_ops == 0);
4939 homeless_session->put();
4940
4941 assert(osd_sessions.empty());
4942 assert(poolstat_ops.empty());
4943 assert(statfs_ops.empty());
4944 assert(pool_ops.empty());
4945 assert(waiting_for_map.empty());
4946 assert(linger_ops.empty());
4947 assert(check_latest_map_lingers.empty());
4948 assert(check_latest_map_ops.empty());
4949 assert(check_latest_map_commands.empty());
4950
4951 assert(!m_request_state_hook);
4952 assert(!logger);
4953 }
4954
4955 /**
4956 * Wait until this OSD map epoch is received before
4957 * sending any more operations to OSDs. Use this
4958 * when it is known that the client can't trust
4959 * anything from before this epoch (e.g. due to
4960 * client blacklist at this epoch).
4961 */
4962 void Objecter::set_epoch_barrier(epoch_t epoch)
4963 {
4964 unique_lock wl(rwlock);
4965
4966 ldout(cct, 7) << __func__ << ": barrier " << epoch << " (was "
4967 << epoch_barrier << ") current epoch " << osdmap->get_epoch()
4968 << dendl;
4969 if (epoch > epoch_barrier) {
4970 epoch_barrier = epoch;
4971 _maybe_request_map();
4972 }
4973 }
4974
4975
4976
4977 hobject_t Objecter::enumerate_objects_begin()
4978 {
4979 return hobject_t();
4980 }
4981
4982 hobject_t Objecter::enumerate_objects_end()
4983 {
4984 return hobject_t::get_max();
4985 }
4986
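// Completion glue for enumerate_objects(): the pg_nls reply lands in bl
// and is unpacked by _enumerate_reply(), which trims entries past 'end',
// records the resume cursor in *next, and completes the caller's Context.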
4987 struct C_EnumerateReply : public Context {
4988 bufferlist bl;
4989
4990 Objecter *objecter;
4991 hobject_t *next;
4992 std::list<librados::ListObjectImpl> *result;
4993 const hobject_t end;
4994 const int64_t pool_id;
4995 Context *on_finish;
4996
4997 epoch_t epoch;
4998 int budget;
4999
5000 C_EnumerateReply(Objecter *objecter_, hobject_t *next_,
5001 std::list<librados::ListObjectImpl> *result_,
5002 const hobject_t end_, const int64_t pool_id_, Context *on_finish_) :
5003 objecter(objecter_), next(next_), result(result_),
5004 end(end_), pool_id(pool_id_), on_finish(on_finish_),
5005 epoch(0), budget(0)
5006 {}
5007
5008 void finish(int r) override {
5009 objecter->_enumerate_reply(
5010 bl, r, end, pool_id, budget, epoch, result, next, on_finish);
5011 }
5012 };
5013
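// enumerate_objects() issues a single PGNLS read starting at 'start'
// (requires the SORTBITWISE cluster flag); results and the resume cursor
// come back through C_EnumerateReply/_enumerate_reply above. A usage
// sketch (hypothetical caller, not part of this file; pool_id/ns assumed):
//
//   hobject_t cur = objecter->enumerate_objects_begin();
//   while (cur != objecter->enumerate_objects_end()) {
//     std::list<librados::ListObjectImpl> items;
//     bufferlist filter;                        // empty: no filter
//     C_SaferCond cond;
//     objecter->enumerate_objects(pool_id, ns, cur,
//                                 objecter->enumerate_objects_end(),
//                                 1024, filter, &items, &cur, &cond);
//     int r = cond.wait();                      // wait for the async reply
//     if (r < 0)
//       break;
//     // ... consume items ...
//   }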
5014 void Objecter::enumerate_objects(
5015 int64_t pool_id,
5016 const std::string &ns,
5017 const hobject_t &start,
5018 const hobject_t &end,
5019 const uint32_t max,
5020 const bufferlist &filter_bl,
5021 std::list<librados::ListObjectImpl> *result,
5022 hobject_t *next,
5023 Context *on_finish)
5024 {
5025 assert(result);
5026
5027 if (!end.is_max() && start > end) {
5028 lderr(cct) << __func__ << ": start " << start << " > end " << end << dendl;
5029 on_finish->complete(-EINVAL);
5030 return;
5031 }
5032
5033 if (max < 1) {
5034 lderr(cct) << __func__ << ": result size may not be zero" << dendl;
5035 on_finish->complete(-EINVAL);
5036 return;
5037 }
5038
5039 if (start.is_max()) {
5040 on_finish->complete(0);
5041 return;
5042 }
5043
5044 shared_lock rl(rwlock);
5045 assert(osdmap->get_epoch());
5046 if (!osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE)) {
5047 rl.unlock();
5048 lderr(cct) << __func__ << ": SORTBITWISE cluster flag not set" << dendl;
5049 on_finish->complete(-EOPNOTSUPP);
5050 return;
5051 }
5052 const pg_pool_t *p = osdmap->get_pg_pool(pool_id);
5053 if (!p) {
5054 lderr(cct) << __func__ << ": pool " << pool_id << " DNE in osd epoch "
5055 << osdmap->get_epoch() << dendl;
5056 rl.unlock();
5057 on_finish->complete(-ENOENT);
5058 return;
5059 } else {
5060 rl.unlock();
5061 }
5062
5063 ldout(cct, 20) << __func__ << ": start=" << start << " end=" << end << dendl;
5064
5065 // Stash completion state
5066 C_EnumerateReply *on_ack = new C_EnumerateReply(
5067 this, next, result, end, pool_id, on_finish);
5068
5069 ObjectOperation op;
5070 op.pg_nls(max, filter_bl, start, 0);
5071
5072 // Issue. See you later in _enumerate_reply
5073 object_locator_t oloc(pool_id, ns);
5074 pg_read(start.get_hash(), oloc, op,
5075 &on_ack->bl, 0, on_ack, &on_ack->epoch, &on_ack->budget);
5076 }
5077
5078 void Objecter::_enumerate_reply(
5079 bufferlist &bl,
5080 int r,
5081 const hobject_t &end,
5082 const int64_t pool_id,
5083 int budget,
5084 epoch_t reply_epoch,
5085 std::list<librados::ListObjectImpl> *result,
5086 hobject_t *next,
5087 Context *on_finish)
5088 {
5089 if (budget > 0) {
5090 put_op_budget_bytes(budget);
5091 }
5092
5093 if (r < 0) {
5094 ldout(cct, 4) << __func__ << ": remote error " << r << dendl;
5095 on_finish->complete(r);
5096 return;
5097 }
5098
5099 assert(next != NULL);
5100
5101 // Decode the results
5102 bufferlist::iterator iter = bl.begin();
5103 pg_nls_response_t response;
5104
5105 // XXX extra_info doesn't seem used anywhere?
5106 bufferlist extra_info;
5107 ::decode(response, iter);
5108 if (!iter.end()) {
5109 ::decode(extra_info, iter);
5110 }
5111
5112 ldout(cct, 10) << __func__ << ": got " << response.entries.size()
5113 << " handle " << response.handle
5114 << " reply_epoch " << reply_epoch << dendl;
5115 ldout(cct, 20) << __func__ << ": response.entries.size "
5116 << response.entries.size() << ", response.entries "
5117 << response.entries << dendl;
5118 if (response.handle <= end) {
5119 *next = response.handle;
5120 } else {
5121 ldout(cct, 10) << __func__ << ": adjusted next down to end " << end
5122 << dendl;
5123 *next = end;
5124
5125 // drop anything after 'end'
5126 shared_lock rl(rwlock);
5127 const pg_pool_t *pool = osdmap->get_pg_pool(pool_id);
5128 if (!pool) {
5129 // pool is gone, drop any results which are now meaningless.
5130 rl.unlock();
5131 on_finish->complete(-ENOENT);
5132 return;
5133 }
5134 while (!response.entries.empty()) {
5135 uint32_t hash = response.entries.back().locator.empty() ?
5136 pool->hash_key(response.entries.back().oid,
5137 response.entries.back().nspace) :
5138 pool->hash_key(response.entries.back().locator,
5139 response.entries.back().nspace);
5140 hobject_t last(response.entries.back().oid,
5141 response.entries.back().locator,
5142 CEPH_NOSNAP,
5143 hash,
5144 pool_id,
5145 response.entries.back().nspace);
5146 if (last < end)
5147 break;
5148 ldout(cct, 20) << __func__ << " dropping item " << last
5149 << " >= end " << end << dendl;
5150 response.entries.pop_back();
5151 }
5152 rl.unlock();
5153 }
5154 if (!response.entries.empty()) {
5155 result->merge(response.entries);
5156 }
5157
5158 // release the listing context's budget once all
5159 // OPs (in the session) are finished
5160 #if 0
5161 put_nlist_context_budget(list_context);
5162 #endif
5163 on_finish->complete(r);
5164 return;
5165 }
5166
5167 namespace {
5168 using namespace librados;
5169
5170 template <typename T>
5171 void do_decode(std::vector<T>& items, std::vector<bufferlist>& bls)
5172 {
5173 for (auto bl : bls) {
5174 auto p = bl.begin();
5175 T t;
5176 decode(t, p);
5177 items.push_back(t);
5178 }
5179 }
5180
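// Decodes a scrub_ls_result_t out of the reply buffer, records the scrub
// interval, and fans the entries out into either the inconsistent-object
// or the inconsistent-snapset vector; a decode failure is reported as
// -EIO through *rval.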
5181 struct C_ObjectOperation_scrub_ls : public Context {
5182 bufferlist bl;
5183 uint32_t *interval;
5184 std::vector<inconsistent_obj_t> *objects = nullptr;
5185 std::vector<inconsistent_snapset_t> *snapsets = nullptr;
5186 int *rval;
5187
5188 C_ObjectOperation_scrub_ls(uint32_t *interval,
5189 std::vector<inconsistent_obj_t> *objects,
5190 int *rval)
5191 : interval(interval), objects(objects), rval(rval) {}
5192 C_ObjectOperation_scrub_ls(uint32_t *interval,
5193 std::vector<inconsistent_snapset_t> *snapsets,
5194 int *rval)
5195 : interval(interval), snapsets(snapsets), rval(rval) {}
5196 void finish(int r) override {
5197 if (r < 0 && r != -EAGAIN) {
5198 if (rval)
5199 *rval = r;
5200 return;
5201 }
5202
5203 if (rval)
5204 *rval = 0;
5205
5206 try {
5207 decode();
5208 } catch (buffer::error&) {
5209 if (rval)
5210 *rval = -EIO;
5211 }
5212 }
5213 private:
5214 void decode() {
5215 scrub_ls_result_t result;
5216 auto p = bl.begin();
5217 result.decode(p);
5218 *interval = result.interval;
5219 if (objects) {
5220 do_decode(*objects, result.vals);
5221 } else {
5222 do_decode(*snapsets, result.vals);
5223 }
5224 }
5225 };
5226
5227 template <typename T>
5228 void do_scrub_ls(::ObjectOperation *op,
5229 const scrub_ls_arg_t& arg,
5230 std::vector<T> *items,
5231 uint32_t *interval,
5232 int *rval)
5233 {
5234 OSDOp& osd_op = op->add_op(CEPH_OSD_OP_SCRUBLS);
5235 op->flags |= CEPH_OSD_FLAG_PGOP;
5236 assert(interval);
5237 arg.encode(osd_op.indata);
5238 unsigned p = op->ops.size() - 1;
5239 auto *h = new C_ObjectOperation_scrub_ls{interval, items, rval};
5240 op->out_handler[p] = h;
5241 op->out_bl[p] = &h->bl;
5242 op->out_rval[p] = rval;
5243 }
5244 }
5245
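// Public scrub_ls() entry points: the first overload requests inconsistent
// objects, the second inconsistent snapsets; the 0/1 in the second field
// of scrub_ls_arg_t selects which kind the OSD should return.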
5246 void ::ObjectOperation::scrub_ls(const librados::object_id_t& start_after,
5247 uint64_t max_to_get,
5248 std::vector<librados::inconsistent_obj_t> *objects,
5249 uint32_t *interval,
5250 int *rval)
5251 {
5252 scrub_ls_arg_t arg = {*interval, 0, start_after, max_to_get};
5253 do_scrub_ls(this, arg, objects, interval, rval);
5254 }
5255
5256 void ::ObjectOperation::scrub_ls(const librados::object_id_t& start_after,
5257 uint64_t max_to_get,
5258 std::vector<librados::inconsistent_snapset_t> *snapsets,
5259 uint32_t *interval,
5260 int *rval)
5261 {
5262 scrub_ls_arg_t arg = {*interval, 1, start_after, max_to_get};
5263 do_scrub_ls(this, arg, snapsets, interval, rval);
5264 }