1 // vim: ts=8 sw=2 smarttab
2 /*
3 * Ceph - scalable distributed file system
4 *
5 * Copyright (C) 2014 Red Hat
6 *
7 * This is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License version 2.1, as published by the Free Software
10 * Foundation. See file COPYING.
11 *
12 */
13
14 #include <unistd.h>
15 #include <stdlib.h>
16 #include <sys/types.h>
17 #include <sys/stat.h>
18 #include <fcntl.h>
19
20 #include "include/cpp-btree/btree_set.h"
21
22 #include "BlueStore.h"
23 #include "os/kv.h"
24 #include "include/compat.h"
25 #include "include/intarith.h"
26 #include "include/stringify.h"
27 #include "common/errno.h"
28 #include "common/safe_io.h"
29 #include "Allocator.h"
30 #include "FreelistManager.h"
31 #include "BlueFS.h"
32 #include "BlueRocksEnv.h"
33 #include "auth/Crypto.h"
34 #include "common/EventTrace.h"
35
36 #define dout_context cct
37 #define dout_subsys ceph_subsys_bluestore
38
39 using bid_t = decltype(BlueStore::Blob::id);
40
41 // bluestore_cache_onode
42 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Onode, bluestore_onode,
43 bluestore_cache_onode);
44
45 // bluestore_cache_other
46 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Buffer, bluestore_buffer,
47 bluestore_cache_other);
48 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Extent, bluestore_extent,
49 bluestore_cache_other);
50 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Blob, bluestore_blob,
51 bluestore_cache_other);
52 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::SharedBlob, bluestore_shared_blob,
53 bluestore_cache_other);
54
55 // bluestore_txc
56 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::TransContext, bluestore_transcontext,
57 bluestore_txc);
58
59
60 // kv store prefixes
61 const string PREFIX_SUPER = "S"; // field -> value
62 const string PREFIX_STAT = "T"; // field -> value(int64 array)
63 const string PREFIX_COLL = "C"; // collection name -> cnode_t
64 const string PREFIX_OBJ = "O"; // object name -> onode_t
65 const string PREFIX_OMAP = "M"; // u64 + keyname -> value
66 const string PREFIX_DEFERRED = "L"; // id -> deferred_transaction_t
67 const string PREFIX_ALLOC = "B"; // u64 offset -> u64 length (freelist)
68 const string PREFIX_SHARED_BLOB = "X"; // u64 offset -> shared_blob_t
69
70 // write a label in the first block. always use this size. note that
71 // bluefs makes a matching assumption about the location of its
72 // superblock (always the second block of the device).
73 #define BDEV_LABEL_BLOCK_SIZE 4096
74
75 // reserve: label (4k) + bluefs super (4k), which means we start at 8k.
76 #define SUPER_RESERVED 8192
77
78 #define OBJECT_MAX_SIZE 0xffffffff // 32 bits
79
80
81 /*
82 * extent map blob encoding
83 *
84 * we use the low bits of the blobid field to indicate some common scenarios
85 * and spanning vs local ids. See ExtentMap::{encode,decode}_some().
86 */
87 #define BLOBID_FLAG_CONTIGUOUS 0x1 // this extent starts at end of previous
88 #define BLOBID_FLAG_ZEROOFFSET 0x2 // blob_offset is 0
89 #define BLOBID_FLAG_SAMELENGTH 0x4 // length matches previous extent
90 #define BLOBID_FLAG_SPANNING 0x8 // has spanning blob id
91 #define BLOBID_SHIFT_BITS 4
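// Illustrative sketch of the packing (an assumption based on the flags above,
// not taken verbatim from ExtentMap::{encode,decode}_some()): a blobid value
// is packed as (id << BLOBID_SHIFT_BITS) | flags, so a raw value of 0x1f
// would decode as spanning blob id 0x1 with all four flags set, while an
// entry without BLOBID_FLAG_CONTIGUOUS is followed by an explicit logical
// offset delta for the extent.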
92
93 /*
94 * object name key structure
95 *
96 * encoded u8: shard + 2^7 (so that it sorts properly)
97 * encoded u64: poolid + 2^63 (so that it sorts properly)
98 * encoded u32: hash (bit reversed)
99 *
100 * escaped string: namespace
101 *
102 * escaped string: key or object name
103 * 1 char: '<', '=', or '>'. if =, then object key == object name, and
104 * we are done. otherwise, we are followed by the object name.
105 * escaped string: object name (unless '=' above)
106 *
107 * encoded u64: snap
108 * encoded u64: generation
109 * 'o'
110 */
111 #define ONODE_KEY_SUFFIX 'o'
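// Illustrative example of the layout above (an assumption, not from the
// original source): an object "foo" in pool 1, shard NO_SHARD, with an empty
// namespace and no locator key would encode roughly as
//   <shard+0x80> <pool+2^63> <bit-reversed hash>   (fixed-width, big-endian)
//   "!"                                            (empty namespace + terminator)
//   "foo!" "="                                     (name + terminator, '=' => key == name)
//   <snap> <generation> 'o'
// get_key_object() below decodes exactly this layout and is used as a sanity
// check at the end of get_object_key().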
112
113 /*
114 * extent shard key
115 *
116 * object prefix key
117 * u32
118 * 'x'
119 */
120 #define EXTENT_SHARD_KEY_SUFFIX 'x'
121
122 /*
123 * string encoding in the key
124 *
125 * The key string needs to lexicographically sort the same way that
126 * ghobject_t does. We do this by escaping anything <= '#' with '#'
127 * plus a 2-digit hex string, and anything >= '~' with '~' plus the
128 * 2-digit hex string.
129 *
130 * We use ! as a terminator for strings; this works because it is < #
131 * and will get escaped if it is present in the string.
132 *
133 */
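// Worked example of the escaping (illustrative, not part of the original
// source): append_escaped("a#b", ...) produces "a#23b!" -- 'a' and 'b' pass
// through unchanged, '#' (0x23) is <= '#' and becomes "#23", and '!' is
// appended as the terminator.  decode_escaped() below reverses this,
// stopping at the '!'.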
134 template<typename S>
135 static void append_escaped(const string &in, S *out)
136 {
137 char hexbyte[in.length() * 3 + 1];
138 char* ptr = &hexbyte[0];
139 for (string::const_iterator i = in.begin(); i != in.end(); ++i) {
140 if (*i <= '#') {
141 *ptr++ = '#';
142 *ptr++ = "0123456789abcdef"[(*i >> 4) & 0x0f];
143 *ptr++ = "0123456789abcdef"[*i & 0x0f];
144 } else if (*i >= '~') {
145 *ptr++ = '~';
146 *ptr++ = "0123456789abcdef"[(*i >> 4) & 0x0f];
147 *ptr++ = "0123456789abcdef"[*i & 0x0f];
148 } else {
149 *ptr++ = *i;
150 }
151 }
152 *ptr++ = '!';
153 out->append(hexbyte, ptr - &hexbyte[0]);
154 }
155
156 inline unsigned h2i(char c)
157 {
158 if ((c >= '0') && (c <= '9')) {
159 return c - 0x30;
160 } else if ((c >= 'a') && (c <= 'f')) {
161 return c - 'a' + 10;
162 } else if ((c >= 'A') && (c <= 'F')) {
163 return c - 'A' + 10;
164 } else {
165 return 256; // make it always larger than 255
166 }
167 }
168
169 static int decode_escaped(const char *p, string *out)
170 {
171 char buff[256];
172 char* ptr = &buff[0];
173 char* max = &buff[252];
174 const char *orig_p = p;
175 while (*p && *p != '!') {
176 if (*p == '#' || *p == '~') {
177 unsigned hex = 0;
178 p++;
179 hex = h2i(*p++) << 4;
180 if (hex > 255) {
181 return -EINVAL;
182 }
183 hex |= h2i(*p++);
184 if (hex > 255) {
185 return -EINVAL;
186 }
187 *ptr++ = hex;
188 } else {
189 *ptr++ = *p++;
190 }
191 if (ptr > max) {
192 out->append(buff, ptr-buff);
193 ptr = &buff[0];
194 }
195 }
196 if (ptr != buff) {
197 out->append(buff, ptr-buff);
198 }
199 return p - orig_p;
200 }
201
202 // some things we encode in binary (as le32 or le64); print the
203 // resulting key strings nicely
204 template<typename S>
205 static string pretty_binary_string(const S& in)
206 {
207 char buf[10];
208 string out;
209 out.reserve(in.length() * 3);
210 enum { NONE, HEX, STRING } mode = NONE;
211 unsigned from = 0, i;
212 for (i=0; i < in.length(); ++i) {
213 if ((in[i] < 32 || (unsigned char)in[i] > 126) ||
214 (mode == HEX && in.length() - i >= 4 &&
215 ((in[i] < 32 || (unsigned char)in[i] > 126) ||
216 (in[i+1] < 32 || (unsigned char)in[i+1] > 126) ||
217 (in[i+2] < 32 || (unsigned char)in[i+2] > 126) ||
218 (in[i+3] < 32 || (unsigned char)in[i+3] > 126)))) {
219 if (mode == STRING) {
220 out.append(in.c_str() + from, i - from);
221 out.push_back('\'');
222 }
223 if (mode != HEX) {
224 out.append("0x");
225 mode = HEX;
226 }
227 if (in.length() - i >= 4) {
228 // print a whole u32 at once
229 snprintf(buf, sizeof(buf), "%08x",
230 (uint32_t)(((unsigned char)in[i] << 24) |
231 ((unsigned char)in[i+1] << 16) |
232 ((unsigned char)in[i+2] << 8) |
233 ((unsigned char)in[i+3] << 0)));
234 i += 3;
235 } else {
236 snprintf(buf, sizeof(buf), "%02x", (int)(unsigned char)in[i]);
237 }
238 out.append(buf);
239 } else {
240 if (mode != STRING) {
241 out.push_back('\'');
242 mode = STRING;
243 from = i;
244 }
245 }
246 }
247 if (mode == STRING) {
248 out.append(in.c_str() + from, i - from);
249 out.push_back('\'');
250 }
251 return out;
252 }
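// Illustrative example (assumption): a key containing the bytes
// 00 00 00 01 followed by "abc" would be rendered as "0x00000001'abc'" --
// unprintable runs are shown in hex, grouped into whole u32s while at least
// four bytes remain, and printable runs are quoted.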
253
254 template<typename T>
255 static void _key_encode_shard(shard_id_t shard, T *key)
256 {
257 key->push_back((char)((uint8_t)shard.id + (uint8_t)0x80));
258 }
259
260 static const char *_key_decode_shard(const char *key, shard_id_t *pshard)
261 {
262 pshard->id = (uint8_t)*key - (uint8_t)0x80;
263 return key + 1;
264 }
265
266 static void get_coll_key_range(const coll_t& cid, int bits,
267 string *temp_start, string *temp_end,
268 string *start, string *end)
269 {
270 temp_start->clear();
271 temp_end->clear();
272 start->clear();
273 end->clear();
274
275 spg_t pgid;
276 if (cid.is_pg(&pgid)) {
277 _key_encode_shard(pgid.shard, start);
278 *temp_start = *start;
279
280 _key_encode_u64(pgid.pool() + 0x8000000000000000ull, start);
281 _key_encode_u64((-2ll - pgid.pool()) + 0x8000000000000000ull, temp_start);
282
283 *end = *start;
284 *temp_end = *temp_start;
285
286 uint32_t reverse_hash = hobject_t::_reverse_bits(pgid.ps());
287 _key_encode_u32(reverse_hash, start);
288 _key_encode_u32(reverse_hash, temp_start);
289
290 uint64_t end_hash = reverse_hash + (1ull << (32 - bits));
291 if (end_hash > 0xffffffffull)
292 end_hash = 0xffffffffull;
293
294 _key_encode_u32(end_hash, end);
295 _key_encode_u32(end_hash, temp_end);
296 } else {
297 _key_encode_shard(shard_id_t::NO_SHARD, start);
298 _key_encode_u64(-1ull + 0x8000000000000000ull, start);
299 *end = *start;
300 _key_encode_u32(0, start);
301 _key_encode_u32(0xffffffff, end);
302
303 // no separate temp section
304 *temp_start = *end;
305 *temp_end = *end;
306 }
307 }
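// Worked example for the hash range above (illustrative): a PG with
// ps() == 0x5 split at bits == 4 starts at bit-reversed hash 0xa0000000;
// end_hash is 0xa0000000 + (1 << 28) = 0xb0000000, so the collection's
// objects are exactly those whose reversed hash falls in
// [0xa0000000, 0xb0000000).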
308
309 static void get_shared_blob_key(uint64_t sbid, string *key)
310 {
311 key->clear();
312 _key_encode_u64(sbid, key);
313 }
314
315 static int get_key_shared_blob(const string& key, uint64_t *sbid)
316 {
317 const char *p = key.c_str();
318 if (key.length() < sizeof(uint64_t))
319 return -1;
320 _key_decode_u64(p, sbid);
321 return 0;
322 }
323
324 template<typename S>
325 static int get_key_object(const S& key, ghobject_t *oid)
326 {
327 int r;
328 const char *p = key.c_str();
329
330 if (key.length() < 1 + 8 + 4)
331 return -1;
332 p = _key_decode_shard(p, &oid->shard_id);
333
334 uint64_t pool;
335 p = _key_decode_u64(p, &pool);
336 oid->hobj.pool = pool - 0x8000000000000000ull;
337
338 unsigned hash;
339 p = _key_decode_u32(p, &hash);
340
341 oid->hobj.set_bitwise_key_u32(hash);
342
343 r = decode_escaped(p, &oid->hobj.nspace);
344 if (r < 0)
345 return -2;
346 p += r + 1;
347
348 string k;
349 r = decode_escaped(p, &k);
350 if (r < 0)
351 return -3;
352 p += r + 1;
353 if (*p == '=') {
354 // no key
355 ++p;
356 oid->hobj.oid.name = k;
357 } else if (*p == '<' || *p == '>') {
358 // key + name
359 ++p;
360 r = decode_escaped(p, &oid->hobj.oid.name);
361 if (r < 0)
362 return -5;
363 p += r + 1;
364 oid->hobj.set_key(k);
365 } else {
366 // malformed
367 return -6;
368 }
369
370 p = _key_decode_u64(p, &oid->hobj.snap.val);
371 p = _key_decode_u64(p, &oid->generation);
372
373 if (*p != ONODE_KEY_SUFFIX) {
374 return -7;
375 }
376 p++;
377 if (*p) {
378 // if we get something other than a null terminator here,
379 // something is wrong.
380 return -8;
381 }
382
383 return 0;
384 }
385
386 template<typename S>
387 static void get_object_key(CephContext *cct, const ghobject_t& oid, S *key)
388 {
389 key->clear();
390
391 size_t max_len = 1 + 8 + 4 +
392 (oid.hobj.nspace.length() * 3 + 1) +
393 (oid.hobj.get_key().length() * 3 + 1) +
394 1 + // for '<', '=', or '>'
395 (oid.hobj.oid.name.length() * 3 + 1) +
396 8 + 8 + 1;
397 key->reserve(max_len);
398
399 _key_encode_shard(oid.shard_id, key);
400 _key_encode_u64(oid.hobj.pool + 0x8000000000000000ull, key);
401 _key_encode_u32(oid.hobj.get_bitwise_key_u32(), key);
402
403 append_escaped(oid.hobj.nspace, key);
404
405 if (oid.hobj.get_key().length()) {
406 // is a key... could be < = or >.
407 append_escaped(oid.hobj.get_key(), key);
408 // (ASCII chars < = and > sort in that order, yay)
409 int r = oid.hobj.get_key().compare(oid.hobj.oid.name);
410 if (r) {
411 key->append(r > 0 ? ">" : "<");
412 append_escaped(oid.hobj.oid.name, key);
413 } else {
414 // same as no key
415 key->append("=");
416 }
417 } else {
418 // no key
419 append_escaped(oid.hobj.oid.name, key);
420 key->append("=");
421 }
422
423 _key_encode_u64(oid.hobj.snap, key);
424 _key_encode_u64(oid.generation, key);
425
426 key->push_back(ONODE_KEY_SUFFIX);
427
428 // sanity check
429 if (true) {
430 ghobject_t t;
431 int r = get_key_object(*key, &t);
432 if (r || t != oid) {
433 derr << " r " << r << dendl;
434 derr << "key " << pretty_binary_string(*key) << dendl;
435 derr << "oid " << oid << dendl;
436 derr << " t " << t << dendl;
437 assert(r == 0 && t == oid);
438 }
439 }
440 }
441
442
443 // extent shard keys are the onode key, plus a u32, plus 'x'. the trailing
444 // char lets us quickly test whether it is a shard key without decoding any
445 // of the prefix bytes.
446 template<typename S>
447 static void get_extent_shard_key(const S& onode_key, uint32_t offset,
448 string *key)
449 {
450 key->clear();
451 key->reserve(onode_key.length() + 4 + 1);
452 key->append(onode_key.c_str(), onode_key.size());
453 _key_encode_u32(offset, key);
454 key->push_back(EXTENT_SHARD_KEY_SUFFIX);
455 }
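// Illustrative sketch (assumption): for an onode key K, the extent shard
// starting at logical offset 0x30000 lives under K + <0x00030000> + 'x'.
// Because every shard key extends the onode key, an object's shards sort
// immediately after its onode key, and is_extent_shard_key() only has to
// look at the final byte.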
456
457 static void rewrite_extent_shard_key(uint32_t offset, string *key)
458 {
459 assert(key->size() > sizeof(uint32_t) + 1);
460 assert(*key->rbegin() == EXTENT_SHARD_KEY_SUFFIX);
461 _key_encode_u32(offset, key->size() - sizeof(uint32_t) - 1, key);
462 }
463
464 template<typename S>
465 static void generate_extent_shard_key_and_apply(
466 const S& onode_key,
467 uint32_t offset,
468 string *key,
469 std::function<void(const string& final_key)> apply)
470 {
471 if (key->empty()) { // make full key
472 assert(!onode_key.empty());
473 get_extent_shard_key(onode_key, offset, key);
474 } else {
475 rewrite_extent_shard_key(offset, key);
476 }
477 apply(*key);
478 }
479
480 int get_key_extent_shard(const string& key, string *onode_key, uint32_t *offset)
481 {
482 assert(key.size() > sizeof(uint32_t) + 1);
483 assert(*key.rbegin() == EXTENT_SHARD_KEY_SUFFIX);
484 int okey_len = key.size() - sizeof(uint32_t) - 1;
485 *onode_key = key.substr(0, okey_len);
486 const char *p = key.data() + okey_len;
487 _key_decode_u32(p, offset);
488 return 0;
489 }
490
491 static bool is_extent_shard_key(const string& key)
492 {
493 return *key.rbegin() == EXTENT_SHARD_KEY_SUFFIX;
494 }
495
496 // '-' < '.' < '~'
497 static void get_omap_header(uint64_t id, string *out)
498 {
499 _key_encode_u64(id, out);
500 out->push_back('-');
501 }
502
503 // hmm, I don't think there's any need to escape the user key since we
504 // have a clean prefix.
505 static void get_omap_key(uint64_t id, const string& key, string *out)
506 {
507 _key_encode_u64(id, out);
508 out->push_back('.');
509 out->append(key);
510 }
511
512 static void rewrite_omap_key(uint64_t id, string old, string *out)
513 {
514 _key_encode_u64(id, out);
515 out->append(old.c_str() + out->length(), old.size() - out->length());
516 }
517
518 static void decode_omap_key(const string& key, string *user_key)
519 {
520 *user_key = key.substr(sizeof(uint64_t) + 1);
521 }
522
523 static void get_omap_tail(uint64_t id, string *out)
524 {
525 _key_encode_u64(id, out);
526 out->push_back('~');
527 }
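// Illustrative example of the omap key layout (assumption, not from the
// original source): for an onode with omap id 5 the keys are
//   <5> '-'              -- omap header
//   <5> '.' <user key>   -- one entry per user key
//   <5> '~'              -- tail sentinel
// and since '-' < '.' < '~' in ASCII, iterating from the header key to the
// tail key visits exactly this object's omap entries in order.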
528
529 static void get_deferred_key(uint64_t seq, string *out)
530 {
531 _key_encode_u64(seq, out);
532 }
533
534
535 // merge operators
536
537 struct Int64ArrayMergeOperator : public KeyValueDB::MergeOperator {
538 void merge_nonexistent(
539 const char *rdata, size_t rlen, std::string *new_value) override {
540 *new_value = std::string(rdata, rlen);
541 }
542 void merge(
543 const char *ldata, size_t llen,
544 const char *rdata, size_t rlen,
545 std::string *new_value) override {
546 assert(llen == rlen);
547 assert((rlen % 8) == 0);
548 new_value->resize(rlen);
549 const __le64* lv = (const __le64*)ldata;
550 const __le64* rv = (const __le64*)rdata;
551 __le64* nv = &(__le64&)new_value->at(0);
552 for (size_t i = 0; i < rlen >> 3; ++i) {
553 nv[i] = lv[i] + rv[i];
554 }
555 }
556 // We use each operator name and each prefix to construct the
557 // overall RocksDB operator name for a consistency check at open time.
558 string name() const override {
559 return "int64_array";
560 }
561 };
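// Minimal sketch of the merge semantics above (illustrative): merging an
// existing value encoding {1, 2} with an operand encoding {10, -2}, both as
// packed little-endian int64 arrays, yields {11, 0}.  This lets the
// statfs-style counters stored under PREFIX_STAT be updated with a RocksDB
// merge instead of a read-modify-write cycle.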
562
563
564 // Buffer
565
566 ostream& operator<<(ostream& out, const BlueStore::Buffer& b)
567 {
568 out << "buffer(" << &b << " space " << b.space << " 0x" << std::hex
569 << b.offset << "~" << b.length << std::dec
570 << " " << BlueStore::Buffer::get_state_name(b.state);
571 if (b.flags)
572 out << " " << BlueStore::Buffer::get_flag_name(b.flags);
573 return out << ")";
574 }
575
576 // Garbage Collector
577
578 void BlueStore::GarbageCollector::process_protrusive_extents(
579 const BlueStore::ExtentMap& extent_map,
580 uint64_t start_offset,
581 uint64_t end_offset,
582 uint64_t start_touch_offset,
583 uint64_t end_touch_offset,
584 uint64_t min_alloc_size)
585 {
586 assert(start_offset <= start_touch_offset && end_offset >= end_touch_offset);
587
588 uint64_t lookup_start_offset = P2ALIGN(start_offset, min_alloc_size);
589 uint64_t lookup_end_offset = ROUND_UP_TO(end_offset, min_alloc_size);
590
591 dout(30) << __func__ << " (hex): [" << std::hex
592 << lookup_start_offset << ", " << lookup_end_offset
593 << ")" << std::dec << dendl;
594
595 for (auto it = extent_map.seek_lextent(lookup_start_offset);
596 it != extent_map.extent_map.end() &&
597 it->logical_offset < lookup_end_offset;
598 ++it) {
599 uint64_t alloc_unit_start = it->logical_offset / min_alloc_size;
600 uint64_t alloc_unit_end = (it->logical_end() - 1) / min_alloc_size;
601
602 dout(30) << __func__ << " " << *it
603 << " alloc_units: " << alloc_unit_start << ".." << alloc_unit_end
604 << dendl;
605
606 Blob* b = it->blob.get();
607
608 if (it->logical_offset >= start_touch_offset &&
609 it->logical_end() <= end_touch_offset) {
610 // Process extents within the range affected by
611 // the current write request.
612 // Need to take into account if existing extents
613 // can be merged with them (uncompressed case)
614 if (!b->get_blob().is_compressed()) {
615 if (blob_info_counted && used_alloc_unit == alloc_unit_start) {
616 --blob_info_counted->expected_allocations; // don't need to allocate
617 // new AU for compressed
618 // data since another
619 // collocated uncompressed
620 // blob already exists
621 dout(30) << __func__ << " --expected:"
622 << alloc_unit_start << dendl;
623 }
624 used_alloc_unit = alloc_unit_end;
625 blob_info_counted = nullptr;
626 }
627 } else if (b->get_blob().is_compressed()) {
628
629 // additionally we take compressed blobs that were not impacted
630 // by the write into account too
631 BlobInfo& bi =
632 affected_blobs.emplace(
633 b, BlobInfo(b->get_referenced_bytes())).first->second;
634
635 int adjust =
636 (used_alloc_unit && used_alloc_unit == alloc_unit_start) ? 0 : 1;
637 bi.expected_allocations += alloc_unit_end - alloc_unit_start + adjust;
638 dout(30) << __func__ << " expected_allocations="
639 << bi.expected_allocations << " end_au:"
640 << alloc_unit_end << dendl;
641
642 blob_info_counted = &bi;
643 used_alloc_unit = alloc_unit_end;
644
645 assert(it->length <= bi.referenced_bytes);
646 bi.referenced_bytes -= it->length;
647 dout(30) << __func__ << " affected_blob:" << *b
648 << " unref 0x" << std::hex << it->length
649 << " referenced = 0x" << bi.referenced_bytes
650 << std::dec << dendl;
651 // NOTE: we can't move specific blob to resulting GC list here
652 // when reference counter == 0 since subsequent extents might
653 // decrement its expected_allocation.
654 // Hence need to enumerate all the extents first.
655 if (!bi.collect_candidate) {
656 bi.first_lextent = it;
657 bi.collect_candidate = true;
658 }
659 bi.last_lextent = it;
660 } else {
661 if (blob_info_counted && used_alloc_unit == alloc_unit_start) {
662 // don't need to allocate new AU for compressed data since another
663 // collocated uncompressed blob already exists
664 --blob_info_counted->expected_allocations;
665 dout(30) << __func__ << " --expected_allocations:"
666 << alloc_unit_start << dendl;
667 }
668 used_alloc_unit = alloc_unit_end;
669 blob_info_counted = nullptr;
670 }
671 }
672
673 for (auto b_it = affected_blobs.begin();
674 b_it != affected_blobs.end();
675 ++b_it) {
676 Blob* b = b_it->first;
677 BlobInfo& bi = b_it->second;
678 if (bi.referenced_bytes == 0) {
679 uint64_t len_on_disk = b_it->first->get_blob().get_ondisk_length();
680 int64_t blob_expected_for_release =
681 ROUND_UP_TO(len_on_disk, min_alloc_size) / min_alloc_size;
682
683 dout(30) << __func__ << " " << *(b_it->first)
684 << " expected4release=" << blob_expected_for_release
685 << " expected_allocations=" << bi.expected_allocations
686 << dendl;
687 int64_t benefit = blob_expected_for_release - bi.expected_allocations;
688 if (benefit >= g_conf->bluestore_gc_enable_blob_threshold) {
689 if (bi.collect_candidate) {
690 auto it = bi.first_lextent;
691 bool bExit = false;
692 do {
693 if (it->blob.get() == b) {
694 extents_to_collect.emplace_back(it->logical_offset, it->length);
695 }
696 bExit = it == bi.last_lextent;
697 ++it;
698 } while (!bExit);
699 }
700 expected_for_release += blob_expected_for_release;
701 expected_allocations += bi.expected_allocations;
702 }
703 }
704 }
705 }
706
707 int64_t BlueStore::GarbageCollector::estimate(
708 uint64_t start_offset,
709 uint64_t length,
710 const BlueStore::ExtentMap& extent_map,
711 const BlueStore::old_extent_map_t& old_extents,
712 uint64_t min_alloc_size)
713 {
714
715 affected_blobs.clear();
716 extents_to_collect.clear();
717 used_alloc_unit = boost::optional<uint64_t >();
718 blob_info_counted = nullptr;
719
720 gc_start_offset = start_offset;
721 gc_end_offset = start_offset + length;
722
723 uint64_t end_offset = start_offset + length;
724
725 for (auto it = old_extents.begin(); it != old_extents.end(); ++it) {
726 Blob* b = it->e.blob.get();
727 if (b->get_blob().is_compressed()) {
728
729 // update gc_start_offset/gc_end_offset if needed
730 gc_start_offset = min(gc_start_offset, (uint64_t)it->e.blob_start());
731 gc_end_offset = max(gc_end_offset, (uint64_t)it->e.blob_end());
732
733 auto o = it->e.logical_offset;
734 auto l = it->e.length;
735
736 uint64_t ref_bytes = b->get_referenced_bytes();
737 // micro optimization to bypass blobs that have no more references
738 if (ref_bytes != 0) {
739 dout(30) << __func__ << " affected_blob:" << *b
740 << " unref 0x" << std::hex << o << "~" << l
741 << std::dec << dendl;
742 affected_blobs.emplace(b, BlobInfo(ref_bytes));
743 }
744 }
745 }
746 dout(30) << __func__ << " gc range(hex): [" << std::hex
747 << gc_start_offset << ", " << gc_end_offset
748 << ")" << std::dec << dendl;
749
750 // enumerate preceding extents to check if they reference affected blobs
751 if (gc_start_offset < start_offset || gc_end_offset > end_offset) {
752 process_protrusive_extents(extent_map,
753 gc_start_offset,
754 gc_end_offset,
755 start_offset,
756 end_offset,
757 min_alloc_size);
758 }
759 return expected_for_release - expected_allocations;
760 }
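// Illustrative example (assumption, not from the original source): if a
// compressed blob occupies 4 allocation units on disk but rewriting its
// still-referenced data would only require 1 new unit, the per-blob benefit
// is 4 - 1 = 3; once that meets bluestore_gc_enable_blob_threshold the
// blob's extents are queued in extents_to_collect, and estimate() returns
// the accumulated expected_for_release - expected_allocations.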
761
762 // Cache
763
764 BlueStore::Cache *BlueStore::Cache::create(CephContext* cct, string type,
765 PerfCounters *logger)
766 {
767 Cache *c = nullptr;
768
769 if (type == "lru")
770 c = new LRUCache(cct);
771 else if (type == "2q")
772 c = new TwoQCache(cct);
773 else
774 assert(0 == "unrecognized cache type");
775
776 c->logger = logger;
777 return c;
778 }
779
780 void BlueStore::Cache::trim_all()
781 {
782 std::lock_guard<std::recursive_mutex> l(lock);
783 _trim(0, 0);
784 }
785
786 void BlueStore::Cache::trim(
787 uint64_t target_bytes,
788 float target_meta_ratio,
789 float target_data_ratio,
790 float bytes_per_onode)
791 {
792 std::lock_guard<std::recursive_mutex> l(lock);
793 uint64_t current_meta = _get_num_onodes() * bytes_per_onode;
794 uint64_t current_buffer = _get_buffer_bytes();
795 uint64_t current = current_meta + current_buffer;
796
797 uint64_t target_meta = target_bytes * target_meta_ratio;
798 uint64_t target_buffer = target_bytes * target_data_ratio;
799
800 // correct for overflow or float imprecision
801 target_meta = min(target_bytes, target_meta);
802 target_buffer = min(target_bytes - target_meta, target_buffer);
803
804 if (current <= target_bytes) {
805 dout(10) << __func__
806 << " shard target " << pretty_si_t(target_bytes)
807 << " meta/data ratios " << target_meta_ratio
808 << " + " << target_data_ratio << " ("
809 << pretty_si_t(target_meta) << " + "
810 << pretty_si_t(target_buffer) << "), "
811 << " current " << pretty_si_t(current) << " ("
812 << pretty_si_t(current_meta) << " + "
813 << pretty_si_t(current_buffer) << ")"
814 << dendl;
815 return;
816 }
817
818 uint64_t need_to_free = current - target_bytes;
819 uint64_t free_buffer = 0;
820 uint64_t free_meta = 0;
821 if (current_buffer > target_buffer) {
822 free_buffer = current_buffer - target_buffer;
823 if (free_buffer > need_to_free) {
824 free_buffer = need_to_free;
825 }
826 }
827 free_meta = need_to_free - free_buffer;
828
829 // start bounds at what we have now
830 uint64_t max_buffer = current_buffer - free_buffer;
831 uint64_t max_meta = current_meta - free_meta;
832 uint64_t max_onodes = max_meta / bytes_per_onode;
833
834 dout(10) << __func__
835 << " shard target " << pretty_si_t(target_bytes)
836 << " ratio " << target_meta_ratio << " ("
837 << pretty_si_t(target_meta) << " + "
838 << pretty_si_t(target_buffer) << "), "
839 << " current " << pretty_si_t(current) << " ("
840 << pretty_si_t(current_meta) << " + "
841 << pretty_si_t(current_buffer) << "),"
842 << " need_to_free " << pretty_si_t(need_to_free) << " ("
843 << pretty_si_t(free_meta) << " + "
844 << pretty_si_t(free_buffer) << ")"
845 << " -> max " << max_onodes << " onodes + "
846 << max_buffer << " buffer"
847 << dendl;
848 _trim(max_onodes, max_buffer);
849 }
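// Worked example of the trim targets above (illustrative, with assumed
// numbers): with target_bytes = 1 GiB, a meta ratio of 0.25 and a data ratio
// of 0.75, target_meta is 256 MiB and target_buffer is 768 MiB.  If the
// shard currently holds 300 MiB of onode metadata and 800 MiB of buffers,
// need_to_free is 76 MiB: 32 MiB comes from buffers (their excess over
// target_buffer) and the remaining 44 MiB from onodes.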
850
851
852 // LRUCache
853 #undef dout_prefix
854 #define dout_prefix *_dout << "bluestore.LRUCache(" << this << ") "
855
856 void BlueStore::LRUCache::_touch_onode(OnodeRef& o)
857 {
858 auto p = onode_lru.iterator_to(*o);
859 onode_lru.erase(p);
860 onode_lru.push_front(*o);
861 }
862
863 void BlueStore::LRUCache::_trim(uint64_t onode_max, uint64_t buffer_max)
864 {
865 dout(20) << __func__ << " onodes " << onode_lru.size() << " / " << onode_max
866 << " buffers " << buffer_size << " / " << buffer_max
867 << dendl;
868
869 _audit("trim start");
870
871 // buffers
872 while (buffer_size > buffer_max) {
873 auto i = buffer_lru.rbegin();
874 if (i == buffer_lru.rend()) {
875 // stop if buffer_lru is now empty
876 break;
877 }
878
879 Buffer *b = &*i;
880 assert(b->is_clean());
881 dout(20) << __func__ << " rm " << *b << dendl;
882 b->space->_rm_buffer(this, b);
883 }
884
885 // onodes
886 int num = onode_lru.size() - onode_max;
887 if (num <= 0)
888 return; // don't even try
889
890 auto p = onode_lru.end();
891 assert(p != onode_lru.begin());
892 --p;
893 int skipped = 0;
894 int max_skipped = g_conf->bluestore_cache_trim_max_skip_pinned;
895 while (num > 0) {
896 Onode *o = &*p;
897 int refs = o->nref.load();
898 if (refs > 1) {
899 dout(20) << __func__ << " " << o->oid << " has " << refs
900 << " refs, skipping" << dendl;
901 if (++skipped >= max_skipped) {
902 dout(20) << __func__ << " maximum skip pinned reached; stopping with "
903 << num << " left to trim" << dendl;
904 break;
905 }
906
907 if (p == onode_lru.begin()) {
908 break;
909 } else {
910 p--;
911 num--;
912 continue;
913 }
914 }
915 dout(30) << __func__ << " rm " << o->oid << dendl;
916 if (p != onode_lru.begin()) {
917 onode_lru.erase(p--);
918 } else {
919 onode_lru.erase(p);
920 assert(num == 1);
921 }
922 o->get(); // paranoia
923 o->c->onode_map.remove(o->oid);
924 o->put();
925 --num;
926 }
927 }
928
929 #ifdef DEBUG_CACHE
930 void BlueStore::LRUCache::_audit(const char *when)
931 {
932 dout(10) << __func__ << " " << when << " start" << dendl;
933 uint64_t s = 0;
934 for (auto i = buffer_lru.begin(); i != buffer_lru.end(); ++i) {
935 s += i->length;
936 }
937 if (s != buffer_size) {
938 derr << __func__ << " buffer_size " << buffer_size << " actual " << s
939 << dendl;
940 for (auto i = buffer_lru.begin(); i != buffer_lru.end(); ++i) {
941 derr << __func__ << " " << *i << dendl;
942 }
943 assert(s == buffer_size);
944 }
945 dout(20) << __func__ << " " << when << " buffer_size " << buffer_size
946 << " ok" << dendl;
947 }
948 #endif
949
950 // TwoQCache
951 #undef dout_prefix
952 #define dout_prefix *_dout << "bluestore.2QCache(" << this << ") "
953
954
955 void BlueStore::TwoQCache::_touch_onode(OnodeRef& o)
956 {
957 auto p = onode_lru.iterator_to(*o);
958 onode_lru.erase(p);
959 onode_lru.push_front(*o);
960 }
961
962 void BlueStore::TwoQCache::_add_buffer(Buffer *b, int level, Buffer *near)
963 {
964 dout(20) << __func__ << " level " << level << " near " << near
965 << " on " << *b
966 << " which has cache_private " << b->cache_private << dendl;
967 if (near) {
968 b->cache_private = near->cache_private;
969 switch (b->cache_private) {
970 case BUFFER_WARM_IN:
971 buffer_warm_in.insert(buffer_warm_in.iterator_to(*near), *b);
972 break;
973 case BUFFER_WARM_OUT:
974 assert(b->is_empty());
975 buffer_warm_out.insert(buffer_warm_out.iterator_to(*near), *b);
976 break;
977 case BUFFER_HOT:
978 buffer_hot.insert(buffer_hot.iterator_to(*near), *b);
979 break;
980 default:
981 assert(0 == "bad cache_private");
982 }
983 } else if (b->cache_private == BUFFER_NEW) {
984 b->cache_private = BUFFER_WARM_IN;
985 if (level > 0) {
986 buffer_warm_in.push_front(*b);
987 } else {
988 // take caller hint to start at the back of the warm queue
989 buffer_warm_in.push_back(*b);
990 }
991 } else {
992 // we got a hint from discard
993 switch (b->cache_private) {
994 case BUFFER_WARM_IN:
995 // stay in warm_in. move to front, even though 2Q doesn't actually
996 // do this.
997 dout(20) << __func__ << " move to front of warm " << *b << dendl;
998 buffer_warm_in.push_front(*b);
999 break;
1000 case BUFFER_WARM_OUT:
1001 b->cache_private = BUFFER_HOT;
1002 // move to hot. fall-thru
1003 case BUFFER_HOT:
1004 dout(20) << __func__ << " move to front of hot " << *b << dendl;
1005 buffer_hot.push_front(*b);
1006 break;
1007 default:
1008 assert(0 == "bad cache_private");
1009 }
1010 }
1011 if (!b->is_empty()) {
1012 buffer_bytes += b->length;
1013 buffer_list_bytes[b->cache_private] += b->length;
1014 }
1015 }
1016
1017 void BlueStore::TwoQCache::_rm_buffer(Buffer *b)
1018 {
1019 dout(20) << __func__ << " " << *b << dendl;
1020 if (!b->is_empty()) {
1021 assert(buffer_bytes >= b->length);
1022 buffer_bytes -= b->length;
1023 assert(buffer_list_bytes[b->cache_private] >= b->length);
1024 buffer_list_bytes[b->cache_private] -= b->length;
1025 }
1026 switch (b->cache_private) {
1027 case BUFFER_WARM_IN:
1028 buffer_warm_in.erase(buffer_warm_in.iterator_to(*b));
1029 break;
1030 case BUFFER_WARM_OUT:
1031 buffer_warm_out.erase(buffer_warm_out.iterator_to(*b));
1032 break;
1033 case BUFFER_HOT:
1034 buffer_hot.erase(buffer_hot.iterator_to(*b));
1035 break;
1036 default:
1037 assert(0 == "bad cache_private");
1038 }
1039 }
1040
1041 void BlueStore::TwoQCache::_move_buffer(Cache *srcc, Buffer *b)
1042 {
1043 TwoQCache *src = static_cast<TwoQCache*>(srcc);
1044 src->_rm_buffer(b);
1045
1046 // preserve which list we're on (even if we can't preserve the order!)
1047 switch (b->cache_private) {
1048 case BUFFER_WARM_IN:
1049 assert(!b->is_empty());
1050 buffer_warm_in.push_back(*b);
1051 break;
1052 case BUFFER_WARM_OUT:
1053 assert(b->is_empty());
1054 buffer_warm_out.push_back(*b);
1055 break;
1056 case BUFFER_HOT:
1057 assert(!b->is_empty());
1058 buffer_hot.push_back(*b);
1059 break;
1060 default:
1061 assert(0 == "bad cache_private");
1062 }
1063 if (!b->is_empty()) {
1064 buffer_bytes += b->length;
1065 buffer_list_bytes[b->cache_private] += b->length;
1066 }
1067 }
1068
1069 void BlueStore::TwoQCache::_adjust_buffer_size(Buffer *b, int64_t delta)
1070 {
1071 dout(20) << __func__ << " delta " << delta << " on " << *b << dendl;
1072 if (!b->is_empty()) {
1073 assert((int64_t)buffer_bytes + delta >= 0);
1074 buffer_bytes += delta;
1075 assert((int64_t)buffer_list_bytes[b->cache_private] + delta >= 0);
1076 buffer_list_bytes[b->cache_private] += delta;
1077 }
1078 }
1079
1080 void BlueStore::TwoQCache::_trim(uint64_t onode_max, uint64_t buffer_max)
1081 {
1082 dout(20) << __func__ << " onodes " << onode_lru.size() << " / " << onode_max
1083 << " buffers " << buffer_bytes << " / " << buffer_max
1084 << dendl;
1085
1086 _audit("trim start");
1087
1088 // buffers
1089 if (buffer_bytes > buffer_max) {
1090 uint64_t kin = buffer_max * cct->_conf->bluestore_2q_cache_kin_ratio;
1091 uint64_t khot = buffer_max - kin;
1092
1093 // pre-calculate kout based on the current average buffer size, which is
1094 // typically representative (the warm_in and hot lists may change later)
1095 uint64_t kout = 0;
1096 uint64_t buffer_num = buffer_hot.size() + buffer_warm_in.size();
1097 if (buffer_num) {
1098 uint64_t buffer_avg_size = buffer_bytes / buffer_num;
1099 assert(buffer_avg_size);
1100 uint64_t calculated_buffer_num = buffer_max / buffer_avg_size;
1101 kout = calculated_buffer_num * cct->_conf->bluestore_2q_cache_kout_ratio;
1102 }
1103
1104 if (buffer_list_bytes[BUFFER_HOT] < khot) {
1105 // hot is small, give slack to warm_in
1106 kin += khot - buffer_list_bytes[BUFFER_HOT];
1107 } else if (buffer_list_bytes[BUFFER_WARM_IN] < kin) {
1108 // warm_in is small, give slack to hot
1109 khot += kin - buffer_list_bytes[BUFFER_WARM_IN];
1110 }
1111
1112 // adjust warm_in list
1113 int64_t to_evict_bytes = buffer_list_bytes[BUFFER_WARM_IN] - kin;
1114 uint64_t evicted = 0;
1115
1116 while (to_evict_bytes > 0) {
1117 auto p = buffer_warm_in.rbegin();
1118 if (p == buffer_warm_in.rend()) {
1119 // stop if warm_in list is now empty
1120 break;
1121 }
1122
1123 Buffer *b = &*p;
1124 assert(b->is_clean());
1125 dout(20) << __func__ << " buffer_warm_in -> out " << *b << dendl;
1126 assert(buffer_bytes >= b->length);
1127 buffer_bytes -= b->length;
1128 assert(buffer_list_bytes[BUFFER_WARM_IN] >= b->length);
1129 buffer_list_bytes[BUFFER_WARM_IN] -= b->length;
1130 to_evict_bytes -= b->length;
1131 evicted += b->length;
1132 b->state = Buffer::STATE_EMPTY;
1133 b->data.clear();
1134 buffer_warm_in.erase(buffer_warm_in.iterator_to(*b));
1135 buffer_warm_out.push_front(*b);
1136 b->cache_private = BUFFER_WARM_OUT;
1137 }
1138
1139 if (evicted > 0) {
1140 dout(20) << __func__ << " evicted " << prettybyte_t(evicted)
1141 << " from warm_in list, done evicting warm_in buffers"
1142 << dendl;
1143 }
1144
1145 // adjust hot list
1146 to_evict_bytes = buffer_list_bytes[BUFFER_HOT] - khot;
1147 evicted = 0;
1148
1149 while (to_evict_bytes > 0) {
1150 auto p = buffer_hot.rbegin();
1151 if (p == buffer_hot.rend()) {
1152 // stop if hot list is now empty
1153 break;
1154 }
1155
1156 Buffer *b = &*p;
1157 dout(20) << __func__ << " buffer_hot rm " << *b << dendl;
1158 assert(b->is_clean());
1159 // adjust evict size before buffer goes invalid
1160 to_evict_bytes -= b->length;
1161 evicted += b->length;
1162 b->space->_rm_buffer(this, b);
1163 }
1164
1165 if (evicted > 0) {
1166 dout(20) << __func__ << " evicted " << prettybyte_t(evicted)
1167 << " from hot list, done evicting hot buffers"
1168 << dendl;
1169 }
1170
1171 // adjust warm out list too, if necessary
1172 int64_t num = buffer_warm_out.size() - kout;
1173 while (num-- > 0) {
1174 Buffer *b = &*buffer_warm_out.rbegin();
1175 assert(b->is_empty());
1176 dout(20) << __func__ << " buffer_warm_out rm " << *b << dendl;
1177 b->space->_rm_buffer(this, b);
1178 }
1179 }
1180
1181 // onodes
1182 int num = onode_lru.size() - onode_max;
1183 if (num <= 0)
1184 return; // don't even try
1185
1186 auto p = onode_lru.end();
1187 assert(p != onode_lru.begin());
1188 --p;
1189 int skipped = 0;
1190 int max_skipped = g_conf->bluestore_cache_trim_max_skip_pinned;
1191 while (num > 0) {
1192 Onode *o = &*p;
1193 dout(20) << __func__ << " considering " << o << dendl;
1194 int refs = o->nref.load();
1195 if (refs > 1) {
1196 dout(20) << __func__ << " " << o->oid << " has " << refs
1197 << " refs; skipping" << dendl;
1198 if (++skipped >= max_skipped) {
1199 dout(20) << __func__ << " maximum skip pinned reached; stopping with "
1200 << num << " left to trim" << dendl;
1201 break;
1202 }
1203
1204 if (p == onode_lru.begin()) {
1205 break;
1206 } else {
1207 p--;
1208 num--;
1209 continue;
1210 }
1211 }
1212 dout(30) << __func__ << " " << o->oid << " num=" << num << " lru size=" << onode_lru.size() << dendl;
1213 if (p != onode_lru.begin()) {
1214 onode_lru.erase(p--);
1215 } else {
1216 onode_lru.erase(p);
1217 assert(num == 1);
1218 }
1219 o->get(); // paranoia
1220 o->c->onode_map.remove(o->oid);
1221 o->put();
1222 --num;
1223 }
1224 }
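// Worked example of the 2Q sizing above (illustrative, with assumed config
// values): with buffer_max = 100 MiB and bluestore_2q_cache_kin_ratio = 0.5,
// kin and khot start at 50 MiB each; if the hot list holds only 10 MiB, the
// unused 40 MiB of slack is added to kin, so warm_in data is not evicted
// merely because the hot list happens to be underpopulated.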
1225
1226 #ifdef DEBUG_CACHE
1227 void BlueStore::TwoQCache::_audit(const char *when)
1228 {
1229 dout(10) << __func__ << " " << when << " start" << dendl;
1230 uint64_t s = 0;
1231 for (auto i = buffer_hot.begin(); i != buffer_hot.end(); ++i) {
1232 s += i->length;
1233 }
1234
1235 uint64_t hot_bytes = s;
1236 if (hot_bytes != buffer_list_bytes[BUFFER_HOT]) {
1237 derr << __func__ << " hot_list_bytes "
1238 << buffer_list_bytes[BUFFER_HOT]
1239 << " != actual " << hot_bytes
1240 << dendl;
1241 assert(hot_bytes == buffer_list_bytes[BUFFER_HOT]);
1242 }
1243
1244 for (auto i = buffer_warm_in.begin(); i != buffer_warm_in.end(); ++i) {
1245 s += i->length;
1246 }
1247
1248 uint64_t warm_in_bytes = s - hot_bytes;
1249 if (warm_in_bytes != buffer_list_bytes[BUFFER_WARM_IN]) {
1250 derr << __func__ << " warm_in_list_bytes "
1251 << buffer_list_bytes[BUFFER_WARM_IN]
1252 << " != actual " << warm_in_bytes
1253 << dendl;
1254 assert(warm_in_bytes == buffer_list_bytes[BUFFER_WARM_IN]);
1255 }
1256
1257 if (s != buffer_bytes) {
1258 derr << __func__ << " buffer_bytes " << buffer_bytes << " actual " << s
1259 << dendl;
1260 assert(s == buffer_bytes);
1261 }
1262
1263 dout(20) << __func__ << " " << when << " buffer_bytes " << buffer_bytes
1264 << " ok" << dendl;
1265 }
1266 #endif
1267
1268
1269 // BufferSpace
1270
1271 #undef dout_prefix
1272 #define dout_prefix *_dout << "bluestore.BufferSpace(" << this << " in " << cache << ") "
1273
1274 void BlueStore::BufferSpace::_clear(Cache* cache)
1275 {
1276 // note: we already hold cache->lock
1277 ldout(cache->cct, 20) << __func__ << dendl;
1278 while (!buffer_map.empty()) {
1279 _rm_buffer(cache, buffer_map.begin());
1280 }
1281 }
1282
1283 int BlueStore::BufferSpace::_discard(Cache* cache, uint32_t offset, uint32_t length)
1284 {
1285 // note: we already hold cache->lock
1286 ldout(cache->cct, 20) << __func__ << std::hex << " 0x" << offset << "~" << length
1287 << std::dec << dendl;
1288 int cache_private = 0;
1289 cache->_audit("discard start");
1290 auto i = _data_lower_bound(offset);
1291 uint32_t end = offset + length;
1292 while (i != buffer_map.end()) {
1293 Buffer *b = i->second.get();
1294 if (b->offset >= end) {
1295 break;
1296 }
1297 if (b->cache_private > cache_private) {
1298 cache_private = b->cache_private;
1299 }
1300 if (b->offset < offset) {
1301 int64_t front = offset - b->offset;
1302 if (b->end() > end) {
1303 // drop middle (split)
1304 uint32_t tail = b->end() - end;
1305 if (b->data.length()) {
1306 bufferlist bl;
1307 bl.substr_of(b->data, b->length - tail, tail);
1308 Buffer *nb = new Buffer(this, b->state, b->seq, end, bl);
1309 nb->maybe_rebuild();
1310 _add_buffer(cache, nb, 0, b);
1311 } else {
1312 _add_buffer(cache, new Buffer(this, b->state, b->seq, end, tail),
1313 0, b);
1314 }
1315 if (!b->is_writing()) {
1316 cache->_adjust_buffer_size(b, front - (int64_t)b->length);
1317 }
1318 b->truncate(front);
1319 b->maybe_rebuild();
1320 cache->_audit("discard end 1");
1321 break;
1322 } else {
1323 // drop tail
1324 if (!b->is_writing()) {
1325 cache->_adjust_buffer_size(b, front - (int64_t)b->length);
1326 }
1327 b->truncate(front);
1328 b->maybe_rebuild();
1329 ++i;
1330 continue;
1331 }
1332 }
1333 if (b->end() <= end) {
1334 // drop entire buffer
1335 _rm_buffer(cache, i++);
1336 continue;
1337 }
1338 // drop front
1339 uint32_t keep = b->end() - end;
1340 if (b->data.length()) {
1341 bufferlist bl;
1342 bl.substr_of(b->data, b->length - keep, keep);
1343 Buffer *nb = new Buffer(this, b->state, b->seq, end, bl);
1344 nb->maybe_rebuild();
1345 _add_buffer(cache, nb, 0, b);
1346 } else {
1347 _add_buffer(cache, new Buffer(this, b->state, b->seq, end, keep), 0, b);
1348 }
1349 _rm_buffer(cache, i);
1350 cache->_audit("discard end 2");
1351 break;
1352 }
1353 return cache_private;
1354 }
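// Illustrative example (assumption): discarding 0x1000~0x1000 from a clean
// buffer at 0x0~0x3000 truncates the original down to the front 0x1000,
// re-adds the surviving tail as a new buffer at 0x2000~0x1000, and returns
// the highest cache_private value touched so the caller can re-insert any
// replacement data into the same 2Q list.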
1355
1356 void BlueStore::BufferSpace::read(
1357 Cache* cache,
1358 uint32_t offset,
1359 uint32_t length,
1360 BlueStore::ready_regions_t& res,
1361 interval_set<uint32_t>& res_intervals)
1362 {
1363 res.clear();
1364 res_intervals.clear();
1365 uint32_t want_bytes = length;
1366 uint32_t end = offset + length;
1367
1368 {
1369 std::lock_guard<std::recursive_mutex> l(cache->lock);
1370 for (auto i = _data_lower_bound(offset);
1371 i != buffer_map.end() && offset < end && i->first < end;
1372 ++i) {
1373 Buffer *b = i->second.get();
1374 assert(b->end() > offset);
1375 if (b->is_writing() || b->is_clean()) {
1376 if (b->offset < offset) {
1377 uint32_t skip = offset - b->offset;
1378 uint32_t l = MIN(length, b->length - skip);
1379 res[offset].substr_of(b->data, skip, l);
1380 res_intervals.insert(offset, l);
1381 offset += l;
1382 length -= l;
1383 if (!b->is_writing()) {
1384 cache->_touch_buffer(b);
1385 }
1386 continue;
1387 }
1388 if (b->offset > offset) {
1389 uint32_t gap = b->offset - offset;
1390 if (length <= gap) {
1391 break;
1392 }
1393 offset += gap;
1394 length -= gap;
1395 }
1396 if (!b->is_writing()) {
1397 cache->_touch_buffer(b);
1398 }
1399 if (b->length > length) {
1400 res[offset].substr_of(b->data, 0, length);
1401 res_intervals.insert(offset, length);
1402 break;
1403 } else {
1404 res[offset].append(b->data);
1405 res_intervals.insert(offset, b->length);
1406 if (b->length == length)
1407 break;
1408 offset += b->length;
1409 length -= b->length;
1410 }
1411 }
1412 }
1413 }
1414
1415 uint64_t hit_bytes = res_intervals.size();
1416 assert(hit_bytes <= want_bytes);
1417 uint64_t miss_bytes = want_bytes - hit_bytes;
1418 cache->logger->inc(l_bluestore_buffer_hit_bytes, hit_bytes);
1419 cache->logger->inc(l_bluestore_buffer_miss_bytes, miss_bytes);
1420 }
1421
1422 void BlueStore::BufferSpace::finish_write(Cache* cache, uint64_t seq)
1423 {
1424 std::lock_guard<std::recursive_mutex> l(cache->lock);
1425
1426 auto i = writing.begin();
1427 while (i != writing.end()) {
1428 if (i->seq > seq) {
1429 break;
1430 }
1431 if (i->seq < seq) {
1432 ++i;
1433 continue;
1434 }
1435
1436 Buffer *b = &*i;
1437 assert(b->is_writing());
1438
1439 if (b->flags & Buffer::FLAG_NOCACHE) {
1440 writing.erase(i++);
1441 ldout(cache->cct, 20) << __func__ << " discard " << *b << dendl;
1442 buffer_map.erase(b->offset);
1443 } else {
1444 b->state = Buffer::STATE_CLEAN;
1445 writing.erase(i++);
1446 b->maybe_rebuild();
1447 b->data.reassign_to_mempool(mempool::mempool_bluestore_cache_data);
1448 cache->_add_buffer(b, 1, nullptr);
1449 ldout(cache->cct, 20) << __func__ << " added " << *b << dendl;
1450 }
1451 }
1452
1453 cache->_audit("finish_write end");
1454 }
1455
1456 void BlueStore::BufferSpace::split(Cache* cache, size_t pos, BlueStore::BufferSpace &r)
1457 {
1458 std::lock_guard<std::recursive_mutex> lk(cache->lock);
1459 if (buffer_map.empty())
1460 return;
1461
1462 auto p = --buffer_map.end();
1463 while (true) {
1464 if (p->second->end() <= pos)
1465 break;
1466
1467 if (p->second->offset < pos) {
1468 ldout(cache->cct, 30) << __func__ << " cut " << *p->second << dendl;
1469 size_t left = pos - p->second->offset;
1470 size_t right = p->second->length - left;
1471 if (p->second->data.length()) {
1472 bufferlist bl;
1473 bl.substr_of(p->second->data, left, right);
1474 r._add_buffer(cache, new Buffer(&r, p->second->state, p->second->seq, 0, bl),
1475 0, p->second.get());
1476 } else {
1477 r._add_buffer(cache, new Buffer(&r, p->second->state, p->second->seq, 0, right),
1478 0, p->second.get());
1479 }
1480 cache->_adjust_buffer_size(p->second.get(), -right);
1481 p->second->truncate(left);
1482 break;
1483 }
1484
1485 assert(p->second->end() > pos);
1486 ldout(cache->cct, 30) << __func__ << " move " << *p->second << dendl;
1487 if (p->second->data.length()) {
1488 r._add_buffer(cache, new Buffer(&r, p->second->state, p->second->seq,
1489 p->second->offset - pos, p->second->data),
1490 0, p->second.get());
1491 } else {
1492 r._add_buffer(cache, new Buffer(&r, p->second->state, p->second->seq,
1493 p->second->offset - pos, p->second->length),
1494 0, p->second.get());
1495 }
1496 if (p == buffer_map.begin()) {
1497 _rm_buffer(cache, p);
1498 break;
1499 } else {
1500 _rm_buffer(cache, p--);
1501 }
1502 }
1503 assert(writing.empty());
1504 }
1505
1506 // OnodeSpace
1507
1508 #undef dout_prefix
1509 #define dout_prefix *_dout << "bluestore.OnodeSpace(" << this << " in " << cache << ") "
1510
1511 BlueStore::OnodeRef BlueStore::OnodeSpace::add(const ghobject_t& oid, OnodeRef o)
1512 {
1513 std::lock_guard<std::recursive_mutex> l(cache->lock);
1514 auto p = onode_map.find(oid);
1515 if (p != onode_map.end()) {
1516 ldout(cache->cct, 30) << __func__ << " " << oid << " " << o
1517 << " raced, returning existing " << p->second
1518 << dendl;
1519 return p->second;
1520 }
1521 ldout(cache->cct, 30) << __func__ << " " << oid << " " << o << dendl;
1522 onode_map[oid] = o;
1523 cache->_add_onode(o, 1);
1524 return o;
1525 }
1526
1527 BlueStore::OnodeRef BlueStore::OnodeSpace::lookup(const ghobject_t& oid)
1528 {
1529 ldout(cache->cct, 30) << __func__ << dendl;
1530 OnodeRef o;
1531 bool hit = false;
1532
1533 {
1534 std::lock_guard<std::recursive_mutex> l(cache->lock);
1535 ceph::unordered_map<ghobject_t,OnodeRef>::iterator p = onode_map.find(oid);
1536 if (p == onode_map.end()) {
1537 ldout(cache->cct, 30) << __func__ << " " << oid << " miss" << dendl;
1538 } else {
1539 ldout(cache->cct, 30) << __func__ << " " << oid << " hit " << p->second
1540 << dendl;
1541 cache->_touch_onode(p->second);
1542 hit = true;
1543 o = p->second;
1544 }
1545 }
1546
1547 if (hit) {
1548 cache->logger->inc(l_bluestore_onode_hits);
1549 } else {
1550 cache->logger->inc(l_bluestore_onode_misses);
1551 }
1552 return o;
1553 }
1554
1555 void BlueStore::OnodeSpace::clear()
1556 {
1557 std::lock_guard<std::recursive_mutex> l(cache->lock);
1558 ldout(cache->cct, 10) << __func__ << dendl;
1559 for (auto &p : onode_map) {
1560 cache->_rm_onode(p.second);
1561 }
1562 onode_map.clear();
1563 }
1564
1565 bool BlueStore::OnodeSpace::empty()
1566 {
1567 std::lock_guard<std::recursive_mutex> l(cache->lock);
1568 return onode_map.empty();
1569 }
1570
1571 void BlueStore::OnodeSpace::rename(
1572 OnodeRef& oldo,
1573 const ghobject_t& old_oid,
1574 const ghobject_t& new_oid,
1575 const mempool::bluestore_cache_other::string& new_okey)
1576 {
1577 std::lock_guard<std::recursive_mutex> l(cache->lock);
1578 ldout(cache->cct, 30) << __func__ << " " << old_oid << " -> " << new_oid
1579 << dendl;
1580 ceph::unordered_map<ghobject_t,OnodeRef>::iterator po, pn;
1581 po = onode_map.find(old_oid);
1582 pn = onode_map.find(new_oid);
1583 assert(po != pn);
1584
1585 assert(po != onode_map.end());
1586 if (pn != onode_map.end()) {
1587 ldout(cache->cct, 30) << __func__ << " removing target " << pn->second
1588 << dendl;
1589 cache->_rm_onode(pn->second);
1590 onode_map.erase(pn);
1591 }
1592 OnodeRef o = po->second;
1593
1594 // install a non-existent onode at old location
1595 oldo.reset(new Onode(o->c, old_oid, o->key));
1596 po->second = oldo;
1597 cache->_add_onode(po->second, 1);
1598
1599 // add at new position and fix oid, key
1600 onode_map.insert(make_pair(new_oid, o));
1601 cache->_touch_onode(o);
1602 o->oid = new_oid;
1603 o->key = new_okey;
1604 }
1605
1606 bool BlueStore::OnodeSpace::map_any(std::function<bool(OnodeRef)> f)
1607 {
1608 std::lock_guard<std::recursive_mutex> l(cache->lock);
1609 ldout(cache->cct, 20) << __func__ << dendl;
1610 for (auto& i : onode_map) {
1611 if (f(i.second)) {
1612 return true;
1613 }
1614 }
1615 return false;
1616 }
1617
1618
1619 // SharedBlob
1620
1621 #undef dout_prefix
1622 #define dout_prefix *_dout << "bluestore.sharedblob(" << this << ") "
1623
1624 ostream& operator<<(ostream& out, const BlueStore::SharedBlob& sb)
1625 {
1626 out << "SharedBlob(" << &sb;
1627
1628 if (sb.loaded) {
1629 out << " loaded " << *sb.persistent;
1630 } else {
1631 out << " sbid 0x" << std::hex << sb.sbid_unloaded << std::dec;
1632 }
1633 return out << ")";
1634 }
1635
1636 BlueStore::SharedBlob::SharedBlob(uint64_t i, Collection *_coll)
1637 : coll(_coll), sbid_unloaded(i)
1638 {
1639 assert(sbid_unloaded > 0);
1640 if (get_cache()) {
1641 get_cache()->add_blob();
1642 }
1643 }
1644
1645 BlueStore::SharedBlob::~SharedBlob()
1646 {
1647 if (get_cache()) { // the dummy instances have a nullptr
1648 std::lock_guard<std::recursive_mutex> l(get_cache()->lock);
1649 bc._clear(get_cache());
1650 get_cache()->rm_blob();
1651 }
1652 if (loaded && persistent) {
1653 delete persistent;
1654 }
1655 }
1656
1657 void BlueStore::SharedBlob::put()
1658 {
1659 if (--nref == 0) {
1660 ldout(coll->store->cct, 20) << __func__ << " " << this
1661 << " removing self from set " << get_parent()
1662 << dendl;
1663 if (get_parent()) {
1664 if (get_parent()->remove(this)) {
1665 delete this;
1666 } else {
1667 ldout(coll->store->cct, 20)
1668 << __func__ << " " << this << " lost race to remove myself from set"
1669 << dendl;
1670 }
1671 } else {
1672 delete this;
1673 }
1674 }
1675 }
1676
1677 void BlueStore::SharedBlob::get_ref(uint64_t offset, uint32_t length)
1678 {
1679 assert(persistent);
1680 persistent->ref_map.get(offset, length);
1681 }
1682
1683 void BlueStore::SharedBlob::put_ref(uint64_t offset, uint32_t length,
1684 PExtentVector *r,
1685 set<SharedBlob*> *maybe_unshared)
1686 {
1687 assert(persistent);
1688 bool maybe = false;
1689 persistent->ref_map.put(offset, length, r, maybe_unshared ? &maybe : nullptr);
1690 if (maybe_unshared && maybe) {
1691 maybe_unshared->insert(this);
1692 }
1693 }
1694
1695 // Blob
1696
1697 #undef dout_prefix
1698 #define dout_prefix *_dout << "bluestore.blob(" << this << ") "
1699
1700 ostream& operator<<(ostream& out, const BlueStore::Blob& b)
1701 {
1702 out << "Blob(" << &b;
1703 if (b.is_spanning()) {
1704 out << " spanning " << b.id;
1705 }
1706 out << " " << b.get_blob() << " " << b.get_blob_use_tracker();
1707 if (b.shared_blob) {
1708 out << " " << *b.shared_blob;
1709 } else {
1710 out << " (shared_blob=NULL)";
1711 }
1712 out << ")";
1713 return out;
1714 }
1715
1716 void BlueStore::Blob::discard_unallocated(Collection *coll)
1717 {
1718 if (get_blob().is_shared()) {
1719 return;
1720 }
1721 if (get_blob().is_compressed()) {
1722 bool discard = false;
1723 bool all_invalid = true;
1724 for (auto e : get_blob().get_extents()) {
1725 if (!e.is_valid()) {
1726 discard = true;
1727 } else {
1728 all_invalid = false;
1729 }
1730 }
1731 assert(discard == all_invalid); // for a compressed blob either all
1732 // or none of the pextents are invalid.
1733 if (discard) {
1734 shared_blob->bc.discard(shared_blob->get_cache(), 0,
1735 get_blob().get_logical_length());
1736 }
1737 } else {
1738 size_t pos = 0;
1739 for (auto e : get_blob().get_extents()) {
1740 if (!e.is_valid()) {
1741 ldout(coll->store->cct, 20) << __func__ << " 0x" << std::hex << pos
1742 << "~" << e.length
1743 << std::dec << dendl;
1744 shared_blob->bc.discard(shared_blob->get_cache(), pos, e.length);
1745 }
1746 pos += e.length;
1747 }
1748 if (get_blob().can_prune_tail()) {
1749 dirty_blob().prune_tail();
1750 used_in_blob.prune_tail(get_blob().get_ondisk_length());
1751 auto cct = coll->store->cct; //used by dout
1752 dout(20) << __func__ << " pruned tail, now " << get_blob() << dendl;
1753 }
1754 }
1755 }
1756
1757 void BlueStore::Blob::get_ref(
1758 Collection *coll,
1759 uint32_t offset,
1760 uint32_t length)
1761 {
1762 // The caller has to initialize the Blob's logical length prior to incrementing
1763 // references. Otherwise it is impossible to determine the required
1764 // number of counters for per-au tracking, or to obtain min_release_size
1765 // for single-counter mode.
1766 assert(get_blob().get_logical_length() != 0);
1767 auto cct = coll->store->cct;
1768 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
1769 << std::dec << " " << *this << dendl;
1770
1771 if (used_in_blob.is_empty()) {
1772 uint32_t min_release_size =
1773 get_blob().get_release_size(coll->store->min_alloc_size);
1774 uint64_t l = get_blob().get_logical_length();
1775 dout(20) << __func__ << " init 0x" << std::hex << l << ", "
1776 << min_release_size << std::dec << dendl;
1777 used_in_blob.init(l, min_release_size);
1778 }
1779 used_in_blob.get(
1780 offset,
1781 length);
1782 }
1783
1784 bool BlueStore::Blob::put_ref(
1785 Collection *coll,
1786 uint32_t offset,
1787 uint32_t length,
1788 PExtentVector *r)
1789 {
1790 PExtentVector logical;
1791
1792 auto cct = coll->store->cct;
1793 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
1794 << std::dec << " " << *this << dendl;
1795
1796 bool empty = used_in_blob.put(
1797 offset,
1798 length,
1799 &logical);
1800 r->clear();
1801 // nothing to release
1802 if (!empty && logical.empty()) {
1803 return false;
1804 }
1805
1806 bluestore_blob_t& b = dirty_blob();
1807 return b.release_extents(empty, logical, r);
1808 }
1809
1810 bool BlueStore::Blob::can_reuse_blob(uint32_t min_alloc_size,
1811 uint32_t target_blob_size,
1812 uint32_t b_offset,
1813 uint32_t *length0) {
1814 assert(min_alloc_size);
1815 assert(target_blob_size);
1816 if (!get_blob().is_mutable()) {
1817 return false;
1818 }
1819
1820 uint32_t length = *length0;
1821 uint32_t end = b_offset + length;
1822
1823 // Currently for the sake of simplicity we omit blob reuse if data is
1824 // unaligned with csum chunk. Later we can perform padding if needed.
1825 if (get_blob().has_csum() &&
1826 ((b_offset % get_blob().get_csum_chunk_size()) != 0 ||
1827 (end % get_blob().get_csum_chunk_size()) != 0)) {
1828 return false;
1829 }
1830
1831 auto blen = get_blob().get_logical_length();
1832 uint32_t new_blen = blen;
1833
1834 // make sure target_blob_size isn't less than current blob len
1835 target_blob_size = MAX(blen, target_blob_size);
1836
1837 if (b_offset >= blen) {
1838 // new data totally stands out of the existing blob
1839 new_blen = end;
1840 } else {
1841 // new data overlaps with the existing blob
1842 new_blen = MAX(blen, end);
1843
1844 uint32_t overlap = 0;
1845 if (new_blen > blen) {
1846 overlap = blen - b_offset;
1847 } else {
1848 overlap = length;
1849 }
1850
1851 if (!get_blob().is_unallocated(b_offset, overlap)) {
1852 // abort if any piece of the overlap has already been allocated
1853 return false;
1854 }
1855 }
1856
1857 if (new_blen > blen) {
1858 int64_t overflow = int64_t(new_blen) - target_blob_size;
1859 // Unable to decrease the provided length enough to fit into target_blob_size
1860 if (overflow >= length) {
1861 return false;
1862 }
1863
1864 // FIXME: in some cases we could reduce unused resolution
1865 if (get_blob().has_unused()) {
1866 return false;
1867 }
1868
1869 if (overflow > 0) {
1870 new_blen -= overflow;
1871 length -= overflow;
1872 *length0 = length;
1873 }
1874
1875 if (new_blen > blen) {
1876 dirty_blob().add_tail(new_blen);
1877 used_in_blob.add_tail(new_blen,
1878 get_blob().get_release_size(min_alloc_size));
1879 }
1880 }
1881 return true;
1882 }
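// Illustrative example (assumption, ignoring csum and unused-tracking
// limitations): with min_alloc_size = 0x1000, target_blob_size = 0x10000 and
// an existing 0xc000 blob, a 0x8000 write at b_offset 0xc000 pushes new_blen
// to 0x14000; the 0x4000 overflow is trimmed, *length0 is reduced to 0x4000,
// and the blob tail grows so the blob ends exactly at target_blob_size.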
1883
1884 void BlueStore::Blob::split(Collection *coll, uint32_t blob_offset, Blob *r)
1885 {
1886 auto cct = coll->store->cct; //used by dout
1887 dout(10) << __func__ << " 0x" << std::hex << blob_offset << std::dec
1888 << " start " << *this << dendl;
1889 assert(blob.can_split());
1890 assert(used_in_blob.can_split());
1891 bluestore_blob_t &lb = dirty_blob();
1892 bluestore_blob_t &rb = r->dirty_blob();
1893
1894 used_in_blob.split(
1895 blob_offset,
1896 &(r->used_in_blob));
1897
1898 lb.split(blob_offset, rb);
1899 shared_blob->bc.split(shared_blob->get_cache(), blob_offset, r->shared_blob->bc);
1900
1901 dout(10) << __func__ << " 0x" << std::hex << blob_offset << std::dec
1902 << " finish " << *this << dendl;
1903 dout(10) << __func__ << " 0x" << std::hex << blob_offset << std::dec
1904 << " and " << *r << dendl;
1905 }
1906
1907 #ifndef CACHE_BLOB_BL
1908 void BlueStore::Blob::decode(
1909 Collection *coll,
1910 bufferptr::iterator& p,
1911 uint64_t struct_v,
1912 uint64_t* sbid,
1913 bool include_ref_map)
1914 {
1915 denc(blob, p, struct_v);
1916 if (blob.is_shared()) {
1917 denc(*sbid, p);
1918 }
1919 if (include_ref_map) {
1920 if (struct_v > 1) {
1921 used_in_blob.decode(p);
1922 } else {
1923 used_in_blob.clear();
1924 bluestore_extent_ref_map_t legacy_ref_map;
1925 legacy_ref_map.decode(p);
1926 for (auto r : legacy_ref_map.ref_map) {
1927 get_ref(
1928 coll,
1929 r.first,
1930 r.second.refs * r.second.length);
1931 }
1932 }
1933 }
1934 }
1935 #endif
1936
1937 // Extent
1938
1939 ostream& operator<<(ostream& out, const BlueStore::Extent& e)
1940 {
1941 return out << std::hex << "0x" << e.logical_offset << "~" << e.length
1942 << ": 0x" << e.blob_offset << "~" << e.length << std::dec
1943 << " " << *e.blob;
1944 }
1945
1946 // OldExtent
1947 BlueStore::OldExtent* BlueStore::OldExtent::create(CollectionRef c,
1948 uint32_t lo,
1949 uint32_t o,
1950 uint32_t l,
1951 BlobRef& b) {
1952 OldExtent* oe = new OldExtent(lo, o, l, b);
1953 b->put_ref(c.get(), o, l, &(oe->r));
1954 oe->blob_empty = b->get_referenced_bytes() == 0;
1955 return oe;
1956 }
1957
1958 // ExtentMap
1959
1960 #undef dout_prefix
1961 #define dout_prefix *_dout << "bluestore.extentmap(" << this << ") "
1962
1963 BlueStore::ExtentMap::ExtentMap(Onode *o)
1964 : onode(o),
1965 inline_bl(
1966 o->c->store->cct->_conf->bluestore_extent_map_inline_shard_prealloc_size) {
1967 }
1968
1969 void BlueStore::ExtentMap::update(KeyValueDB::Transaction t,
1970 bool force)
1971 {
1972 auto cct = onode->c->store->cct; //used by dout
1973 dout(20) << __func__ << " " << onode->oid << (force ? " force" : "") << dendl;
1974 if (onode->onode.extent_map_shards.empty()) {
1975 if (inline_bl.length() == 0) {
1976 unsigned n;
1977 // we need to encode inline_bl to measure encoded length
1978 bool never_happen = encode_some(0, OBJECT_MAX_SIZE, inline_bl, &n);
1979 assert(!never_happen);
1980 size_t len = inline_bl.length();
1981 dout(20) << __func__ << " inline shard " << len << " bytes from " << n
1982 << " extents" << dendl;
1983 if (!force && len > cct->_conf->bluestore_extent_map_shard_max_size) {
1984 request_reshard(0, OBJECT_MAX_SIZE);
1985 return;
1986 }
1987 }
1988 // will persist in the onode key.
1989 } else {
1990 // pending shard update
1991 struct dirty_shard_t {
1992 Shard *shard;
1993 bufferlist bl;
1994 dirty_shard_t(Shard *s) : shard(s) {}
1995 };
1996 vector<dirty_shard_t> encoded_shards;
1997 // allocate slots for all shards in a single call instead of
1998 // doing multiple allocations - one per dirty shard
1999 encoded_shards.reserve(shards.size());
2000
2001 auto p = shards.begin();
2002 auto prev_p = p;
2003 while (p != shards.end()) {
2004 assert(p->shard_info->offset >= prev_p->shard_info->offset);
2005 auto n = p;
2006 ++n;
2007 if (p->dirty) {
2008 uint32_t endoff;
2009 if (n == shards.end()) {
2010 endoff = OBJECT_MAX_SIZE;
2011 } else {
2012 endoff = n->shard_info->offset;
2013 }
2014 encoded_shards.emplace_back(dirty_shard_t(&(*p)));
2015 bufferlist& bl = encoded_shards.back().bl;
2016 if (encode_some(p->shard_info->offset, endoff - p->shard_info->offset,
2017 bl, &p->extents)) {
2018 if (force) {
2019 derr << __func__ << " encode_some needs reshard" << dendl;
2020 assert(!force);
2021 }
2022 }
2023 size_t len = bl.length();
2024
2025 dout(20) << __func__ << " shard 0x" << std::hex
2026 << p->shard_info->offset << std::dec << " is " << len
2027 << " bytes (was " << p->shard_info->bytes << ") from "
2028 << p->extents << " extents" << dendl;
2029
2030 if (!force) {
2031 if (len > cct->_conf->bluestore_extent_map_shard_max_size) {
2032 // we are big; reshard ourselves
2033 request_reshard(p->shard_info->offset, endoff);
2034 }
2035 // avoid resharding the trailing shard, even if it is small
2036 else if (n != shards.end() &&
2037 len < g_conf->bluestore_extent_map_shard_min_size) {
2038 assert(endoff != OBJECT_MAX_SIZE);
2039 if (p == shards.begin()) {
2040 // we are the first shard, combine with next shard
2041 request_reshard(p->shard_info->offset, endoff + 1);
2042 } else {
2043 // combine either with the previous shard or the next,
2044 // whichever is smaller
2045 if (prev_p->shard_info->bytes > n->shard_info->bytes) {
2046 request_reshard(p->shard_info->offset, endoff + 1);
2047 } else {
2048 request_reshard(prev_p->shard_info->offset, endoff);
2049 }
2050 }
2051 }
2052 }
2053 }
2054 prev_p = p;
2055 p = n;
2056 }
2057 if (needs_reshard()) {
2058 return;
2059 }
2060
2061 // schedule DB update for dirty shards
2062 string key;
2063 for (auto& it : encoded_shards) {
2064 it.shard->dirty = false;
2065 it.shard->shard_info->bytes = it.bl.length();
2066 generate_extent_shard_key_and_apply(
2067 onode->key,
2068 it.shard->shard_info->offset,
2069 &key,
2070 [&](const string& final_key) {
2071 t->set(PREFIX_OBJ, final_key, it.bl);
2072 }
2073 );
2074 }
2075 }
2076 }
2077
2078 bid_t BlueStore::ExtentMap::allocate_spanning_blob_id()
2079 {
2080 if (spanning_blob_map.empty())
2081 return 0;
2082 bid_t bid = spanning_blob_map.rbegin()->first + 1;
2083 // if bid did not wrap to a negative value, it is unused and still valid.
2084 if (bid >= 0)
2085 return bid;
2086 // otherwise find the next unused bid.
2087 bid = rand() % (numeric_limits<bid_t>::max() + 1);
2088 const auto begin_bid = bid;
2089 do {
2090 if (!spanning_blob_map.count(bid))
2091 return bid;
2092 else {
2093 bid++;
2094 if (bid < 0) bid = 0;
2095 }
2096 } while (bid != begin_bid);
2097 assert(0 == "no available blob id");
2098 }
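// Sketch of the allocation strategy above (editorial summary, not from the
// original sources): the common case is max-existing-id + 1. Because bid_t
// is a signed type, a wrap to a negative value triggers the fallback path,
// which starts from a random id and probes linearly (wrapping past the top
// of the range back to 0) until an unused id is found, asserting only if
// every id is already taken.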
2099
2100 void BlueStore::ExtentMap::reshard(
2101 KeyValueDB *db,
2102 KeyValueDB::Transaction t)
2103 {
2104 auto cct = onode->c->store->cct; // used by dout
2105
2106 dout(10) << __func__ << " 0x[" << std::hex << needs_reshard_begin << ","
2107 << needs_reshard_end << ")" << std::dec
2108 << " of " << onode->onode.extent_map_shards.size()
2109 << " shards on " << onode->oid << dendl;
2110 for (auto& p : spanning_blob_map) {
2111 dout(20) << __func__ << " spanning blob " << p.first << " " << *p.second
2112 << dendl;
2113 }
2114 // determine shard index range
2115 unsigned si_begin = 0, si_end = 0;
2116 if (!shards.empty()) {
2117 while (si_begin + 1 < shards.size() &&
2118 shards[si_begin + 1].shard_info->offset <= needs_reshard_begin) {
2119 ++si_begin;
2120 }
2121 needs_reshard_begin = shards[si_begin].shard_info->offset;
2122 for (si_end = si_begin; si_end < shards.size(); ++si_end) {
2123 if (shards[si_end].shard_info->offset >= needs_reshard_end) {
2124 needs_reshard_end = shards[si_end].shard_info->offset;
2125 break;
2126 }
2127 }
2128 if (si_end == shards.size()) {
2129 needs_reshard_end = OBJECT_MAX_SIZE;
2130 }
2131 dout(20) << __func__ << " shards [" << si_begin << "," << si_end << ")"
2132 << " over 0x[" << std::hex << needs_reshard_begin << ","
2133 << needs_reshard_end << ")" << std::dec << dendl;
2134 }
2135
2136 fault_range(db, needs_reshard_begin, needs_reshard_end);
2137
2138 // we may need to fault in a larger interval later; we must have all
2139 // referring extents for spanning blobs loaded in order to have
2140 // accurate use_tracker values.
2141 uint32_t spanning_scan_begin = needs_reshard_begin;
2142 uint32_t spanning_scan_end = needs_reshard_end;
2143
2144 // remove old keys
2145 string key;
2146 for (unsigned i = si_begin; i < si_end; ++i) {
2147 generate_extent_shard_key_and_apply(
2148 onode->key, shards[i].shard_info->offset, &key,
2149 [&](const string& final_key) {
2150 t->rmkey(PREFIX_OBJ, final_key);
2151 }
2152 );
2153 }
2154
2155 // calculate average extent size
2156 unsigned bytes = 0;
2157 unsigned extents = 0;
2158 if (onode->onode.extent_map_shards.empty()) {
2159 bytes = inline_bl.length();
2160 extents = extent_map.size();
2161 } else {
2162 for (unsigned i = si_begin; i < si_end; ++i) {
2163 bytes += shards[i].shard_info->bytes;
2164 extents += shards[i].extents;
2165 }
2166 }
2167 unsigned target = cct->_conf->bluestore_extent_map_shard_target_size;
2168 unsigned slop = target *
2169 cct->_conf->bluestore_extent_map_shard_target_size_slop;
2170 unsigned extent_avg = bytes / MAX(1, extents);
2171 dout(20) << __func__ << " extent_avg " << extent_avg << ", target " << target
2172 << ", slop " << slop << dendl;
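// Illustrative numbers (hypothetical, not from the original sources): with
// target = 1200 bytes, slop = 240 and extent_avg = 100, a new shard boundary
// is emitted roughly every 12 extents; when the candidate boundary would land
// inside a blob (would_span below), the extra slop lets the shard grow to
// roughly 14 extents before it is cut.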
2173
2174 // reshard
2175 unsigned estimate = 0;
2176 unsigned offset = needs_reshard_begin;
2177 vector<bluestore_onode_t::shard_info> new_shard_info;
2178 unsigned max_blob_end = 0;
2179 Extent dummy(needs_reshard_begin);
2180 for (auto e = extent_map.lower_bound(dummy);
2181 e != extent_map.end();
2182 ++e) {
2183 if (e->logical_offset >= needs_reshard_end) {
2184 break;
2185 }
2186 dout(30) << " extent " << *e << dendl;
2187
2188 // disfavor shard boundaries that span a blob
2189 bool would_span = (e->logical_offset < max_blob_end) || e->blob_offset;
2190 if (estimate &&
2191 estimate + extent_avg > target + (would_span ? slop : 0)) {
2192 // new shard
2193 if (offset == needs_reshard_begin) {
2194 new_shard_info.emplace_back(bluestore_onode_t::shard_info());
2195 new_shard_info.back().offset = offset;
2196 dout(20) << __func__ << " new shard 0x" << std::hex << offset
2197 << std::dec << dendl;
2198 }
2199 offset = e->logical_offset;
2200 new_shard_info.emplace_back(bluestore_onode_t::shard_info());
2201 new_shard_info.back().offset = offset;
2202 dout(20) << __func__ << " new shard 0x" << std::hex << offset
2203 << std::dec << dendl;
2204 estimate = 0;
2205 }
2206 estimate += extent_avg;
2207 unsigned bs = e->blob_start();
2208 if (bs < spanning_scan_begin) {
2209 spanning_scan_begin = bs;
2210 }
2211 uint32_t be = e->blob_end();
2212 if (be > max_blob_end) {
2213 max_blob_end = be;
2214 }
2215 if (be > spanning_scan_end) {
2216 spanning_scan_end = be;
2217 }
2218 }
2219 if (new_shard_info.empty() && (si_begin > 0 ||
2220 si_end < shards.size())) {
2221 // we resharded a partial range; we must produce at least one output
2222 // shard
2223 new_shard_info.emplace_back(bluestore_onode_t::shard_info());
2224 new_shard_info.back().offset = needs_reshard_begin;
2225 dout(20) << __func__ << " new shard 0x" << std::hex << needs_reshard_begin
2226 << std::dec << " (singleton degenerate case)" << dendl;
2227 }
2228
2229 auto& sv = onode->onode.extent_map_shards;
2230 dout(20) << __func__ << " new " << new_shard_info << dendl;
2231 dout(20) << __func__ << " old " << sv << dendl;
2232 if (sv.empty()) {
2233 // no old shards to keep
2234 sv.swap(new_shard_info);
2235 init_shards(true, true);
2236 } else {
2237 // splice in new shards
2238 sv.erase(sv.begin() + si_begin, sv.begin() + si_end);
2239 shards.erase(shards.begin() + si_begin, shards.begin() + si_end);
2240 sv.insert(
2241 sv.begin() + si_begin,
2242 new_shard_info.begin(),
2243 new_shard_info.end());
2244 shards.insert(shards.begin() + si_begin, new_shard_info.size(), Shard());
2245 si_end = si_begin + new_shard_info.size();
2246
2247 assert(sv.size() == shards.size());
2248
2249 // note that we need to update every shard_info pointer in shards here,
2250 // as sv might have been totally re-allocated above
2251 for (unsigned i = 0; i < shards.size(); i++) {
2252 shards[i].shard_info = &sv[i];
2253 }
2254
2255 // mark newly added shards as dirty
2256 for (unsigned i = si_begin; i < si_end; ++i) {
2257 shards[i].loaded = true;
2258 shards[i].dirty = true;
2259 }
2260 }
2261 dout(20) << __func__ << " fin " << sv << dendl;
2262 inline_bl.clear();
2263
2264 if (sv.empty()) {
2265 // no more shards; unspan all previously spanning blobs
2266 auto p = spanning_blob_map.begin();
2267 while (p != spanning_blob_map.end()) {
2268 p->second->id = -1;
2269 dout(30) << __func__ << " un-spanning " << *p->second << dendl;
2270 p = spanning_blob_map.erase(p);
2271 }
2272 } else {
2273 // identify new spanning blobs
2274 dout(20) << __func__ << " checking spanning blobs 0x[" << std::hex
2275 << spanning_scan_begin << "," << spanning_scan_end << ")" << dendl;
2276 if (spanning_scan_begin < needs_reshard_begin) {
2277 fault_range(db, spanning_scan_begin,
2278 needs_reshard_begin - spanning_scan_begin);
2279 }
2280 if (spanning_scan_end > needs_reshard_end) {
2281 fault_range(db, needs_reshard_end,
2282 spanning_scan_end - needs_reshard_end);
2283 }
2284 auto sp = sv.begin() + si_begin;
2285 auto esp = sv.end();
2286 unsigned shard_start = sp->offset;
2287 unsigned shard_end;
2288 ++sp;
2289 if (sp == esp) {
2290 shard_end = OBJECT_MAX_SIZE;
2291 } else {
2292 shard_end = sp->offset;
2293 }
2294 Extent dummy(needs_reshard_begin);
2295 for (auto e = extent_map.lower_bound(dummy); e != extent_map.end(); ++e) {
2296 if (e->logical_offset >= needs_reshard_end) {
2297 break;
2298 }
2299 dout(30) << " extent " << *e << dendl;
2300 while (e->logical_offset >= shard_end) {
2301 shard_start = shard_end;
2302 assert(sp != esp);
2303 ++sp;
2304 if (sp == esp) {
2305 shard_end = OBJECT_MAX_SIZE;
2306 } else {
2307 shard_end = sp->offset;
2308 }
2309 dout(30) << __func__ << " shard 0x" << std::hex << shard_start
2310 << " to 0x" << shard_end << std::dec << dendl;
2311 }
2312 if (e->blob_escapes_range(shard_start, shard_end - shard_start)) {
2313 if (!e->blob->is_spanning()) {
2314 // We have two options: (1) split the blob into pieces at the
2315 // shard boundaries (and adjust extents accordingly), or (2)
2316 // mark it spanning. We prefer to cut the blob if we can. Note that
2317 // we may have to split it multiple times--potentially at every
2318 // shard boundary.
2319 bool must_span = false;
2320 BlobRef b = e->blob;
2321 if (b->can_split()) {
2322 uint32_t bstart = e->blob_start();
2323 uint32_t bend = e->blob_end();
2324 for (const auto& sh : shards) {
2325 if (bstart < sh.shard_info->offset &&
2326 bend > sh.shard_info->offset) {
2327 uint32_t blob_offset = sh.shard_info->offset - bstart;
2328 if (b->can_split_at(blob_offset)) {
2329 dout(20) << __func__ << " splitting blob, bstart 0x"
2330 << std::hex << bstart << " blob_offset 0x"
2331 << blob_offset << std::dec << " " << *b << dendl;
2332 b = split_blob(b, blob_offset, sh.shard_info->offset);
2333 // switch b to the new right-hand side, in case it
2334 // *also* has to get split.
2335 bstart += blob_offset;
2336 onode->c->store->logger->inc(l_bluestore_blob_split);
2337 } else {
2338 must_span = true;
2339 break;
2340 }
2341 }
2342 }
2343 } else {
2344 must_span = true;
2345 }
2346 if (must_span) {
2347 auto bid = allocate_spanning_blob_id();
2348 b->id = bid;
2349 spanning_blob_map[b->id] = b;
2350 dout(20) << __func__ << " adding spanning " << *b << dendl;
2351 }
2352 }
2353 } else {
2354 if (e->blob->is_spanning()) {
2355 spanning_blob_map.erase(e->blob->id);
2356 e->blob->id = -1;
2357 dout(30) << __func__ << " un-spanning " << *e->blob << dendl;
2358 }
2359 }
2360 }
2361 }
2362
2363 clear_needs_reshard();
2364 }
2365
2366 bool BlueStore::ExtentMap::encode_some(
2367 uint32_t offset,
2368 uint32_t length,
2369 bufferlist& bl,
2370 unsigned *pn)
2371 {
2372 auto cct = onode->c->store->cct; //used by dout
2373 Extent dummy(offset);
2374 auto start = extent_map.lower_bound(dummy);
2375 uint32_t end = offset + length;
2376
2377 __u8 struct_v = 2; // Version 2 differs from v1 in blob's ref_map
2378 // serialization only. Hence there is no specific
2379 // handling at ExtentMap level.
2380
2381 unsigned n = 0;
2382 size_t bound = 0;
2383 bool must_reshard = false;
2384 for (auto p = start;
2385 p != extent_map.end() && p->logical_offset < end;
2386 ++p, ++n) {
2387 assert(p->logical_offset >= offset);
2388 p->blob->last_encoded_id = -1;
2389 if (!p->blob->is_spanning() && p->blob_escapes_range(offset, length)) {
2390 dout(30) << __func__ << " 0x" << std::hex << offset << "~" << length
2391 << std::dec << " hit new spanning blob " << *p << dendl;
2392 request_reshard(p->blob_start(), p->blob_end());
2393 must_reshard = true;
2394 }
2395 if (!must_reshard) {
2396 denc_varint(0, bound); // blobid
2397 denc_varint(0, bound); // logical_offset
2398 denc_varint(0, bound); // len
2399 denc_varint(0, bound); // blob_offset
2400
2401 p->blob->bound_encode(
2402 bound,
2403 struct_v,
2404 p->blob->shared_blob->get_sbid(),
2405 false);
2406 }
2407 }
2408 if (must_reshard) {
2409 return true;
2410 }
2411
2412 denc(struct_v, bound);
2413 denc_varint(0, bound); // number of extents
2414
2415 {
2416 auto app = bl.get_contiguous_appender(bound);
2417 denc(struct_v, app);
2418 denc_varint(n, app);
2419 if (pn) {
2420 *pn = n;
2421 }
2422
2423 n = 0;
2424 uint64_t pos = 0;
2425 uint64_t prev_len = 0;
2426 for (auto p = start;
2427 p != extent_map.end() && p->logical_offset < end;
2428 ++p, ++n) {
2429 unsigned blobid;
2430 bool include_blob = false;
2431 if (p->blob->is_spanning()) {
2432 blobid = p->blob->id << BLOBID_SHIFT_BITS;
2433 blobid |= BLOBID_FLAG_SPANNING;
2434 } else if (p->blob->last_encoded_id < 0) {
2435 p->blob->last_encoded_id = n + 1; // so it is always non-zero
2436 include_blob = true;
2437 blobid = 0; // the decoder will infer the id from n
2438 } else {
2439 blobid = p->blob->last_encoded_id << BLOBID_SHIFT_BITS;
2440 }
2441 if (p->logical_offset == pos) {
2442 blobid |= BLOBID_FLAG_CONTIGUOUS;
2443 }
2444 if (p->blob_offset == 0) {
2445 blobid |= BLOBID_FLAG_ZEROOFFSET;
2446 }
2447 if (p->length == prev_len) {
2448 blobid |= BLOBID_FLAG_SAMELENGTH;
2449 } else {
2450 prev_len = p->length;
2451 }
2452 denc_varint(blobid, app);
2453 if ((blobid & BLOBID_FLAG_CONTIGUOUS) == 0) {
2454 denc_varint_lowz(p->logical_offset - pos, app);
2455 }
2456 if ((blobid & BLOBID_FLAG_ZEROOFFSET) == 0) {
2457 denc_varint_lowz(p->blob_offset, app);
2458 }
2459 if ((blobid & BLOBID_FLAG_SAMELENGTH) == 0) {
2460 denc_varint_lowz(p->length, app);
2461 }
2462 pos = p->logical_end();
2463 if (include_blob) {
2464 p->blob->encode(app, struct_v, p->blob->shared_blob->get_sbid(), false);
2465 }
2466 }
2467 }
2468 /*derr << __func__ << bl << dendl;
2469 derr << __func__ << ":";
2470 bl.hexdump(*_dout);
2471 *_dout << dendl;
2472 */
2473 return false;
2474 }
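// Illustrative encoding (hypothetical extents, not from the original
// sources). For two extents that share one new, non-spanning blob:
//   extent 1: logical 0x0~0x1000, blob_offset 0x0. It starts at pos and has
//   a zero blob_offset, so blobid = BLOBID_FLAG_CONTIGUOUS |
//   BLOBID_FLAG_ZEROOFFSET = 0x3; only the length varint (0x1000) follows,
//   then the blob body, and last_encoded_id is set to 1.
//   extent 2: logical 0x1000~0x1000, blob_offset 0x1000. Same blob, same
//   length, contiguous, so blobid = (1 << BLOBID_SHIFT_BITS) |
//   BLOBID_FLAG_CONTIGUOUS | BLOBID_FLAG_SAMELENGTH = 0x15, followed only by
//   the blob_offset varint (0x1000); the blob body is not repeated.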
2475
2476 unsigned BlueStore::ExtentMap::decode_some(bufferlist& bl)
2477 {
2478 auto cct = onode->c->store->cct; //used by dout
2479 /*
2480 derr << __func__ << ":";
2481 bl.hexdump(*_dout);
2482 *_dout << dendl;
2483 */
2484
2485 assert(bl.get_num_buffers() <= 1);
2486 auto p = bl.front().begin_deep();
2487 __u8 struct_v;
2488 denc(struct_v, p);
2489 // Version 2 differs from v1 in blob's ref_map
2490 // serialization only. Hence there is no specific
2491 // handling at ExtentMap level below.
2492 assert(struct_v == 1 || struct_v == 2);
2493
2494 uint32_t num;
2495 denc_varint(num, p);
2496 vector<BlobRef> blobs(num);
2497 uint64_t pos = 0;
2498 uint64_t prev_len = 0;
2499 unsigned n = 0;
2500
2501 while (!p.end()) {
2502 Extent *le = new Extent();
2503 uint64_t blobid;
2504 denc_varint(blobid, p);
2505 if ((blobid & BLOBID_FLAG_CONTIGUOUS) == 0) {
2506 uint64_t gap;
2507 denc_varint_lowz(gap, p);
2508 pos += gap;
2509 }
2510 le->logical_offset = pos;
2511 if ((blobid & BLOBID_FLAG_ZEROOFFSET) == 0) {
2512 denc_varint_lowz(le->blob_offset, p);
2513 } else {
2514 le->blob_offset = 0;
2515 }
2516 if ((blobid & BLOBID_FLAG_SAMELENGTH) == 0) {
2517 denc_varint_lowz(prev_len, p);
2518 }
2519 le->length = prev_len;
2520
2521 if (blobid & BLOBID_FLAG_SPANNING) {
2522 dout(30) << __func__ << " getting spanning blob "
2523 << (blobid >> BLOBID_SHIFT_BITS) << dendl;
2524 le->assign_blob(get_spanning_blob(blobid >> BLOBID_SHIFT_BITS));
2525 } else {
2526 blobid >>= BLOBID_SHIFT_BITS;
2527 if (blobid) {
2528 le->assign_blob(blobs[blobid - 1]);
2529 assert(le->blob);
2530 } else {
2531 Blob *b = new Blob();
2532 uint64_t sbid = 0;
2533 b->decode(onode->c, p, struct_v, &sbid, false);
2534 blobs[n] = b;
2535 onode->c->open_shared_blob(sbid, b);
2536 le->assign_blob(b);
2537 }
2538 // we build ref_map dynamically for non-spanning blobs
2539 le->blob->get_ref(
2540 onode->c,
2541 le->blob_offset,
2542 le->length);
2543 }
2544 pos += prev_len;
2545 ++n;
2546 extent_map.insert(*le);
2547 }
2548
2549 assert(n == num);
2550 return num;
2551 }
2552
2553 void BlueStore::ExtentMap::bound_encode_spanning_blobs(size_t& p)
2554 {
2555 // Version 2 differs from v1 in blob's ref_map
2556 // serialization only. Hence there is no specific
2557 // handling at ExtentMap level.
2558 __u8 struct_v = 2;
2559
2560 denc(struct_v, p);
2561 denc_varint((uint32_t)0, p);
2562 size_t key_size = 0;
2563 denc_varint((uint32_t)0, key_size);
2564 p += spanning_blob_map.size() * key_size;
2565 for (const auto& i : spanning_blob_map) {
2566 i.second->bound_encode(p, struct_v, i.second->shared_blob->get_sbid(), true);
2567 }
2568 }
2569
2570 void BlueStore::ExtentMap::encode_spanning_blobs(
2571 bufferlist::contiguous_appender& p)
2572 {
2573 // Version 2 differs from v1 in blob's ref_map
2574 // serialization only. Hence there is no specific
2575 // handling at ExtentMap level.
2576 __u8 struct_v = 2;
2577
2578 denc(struct_v, p);
2579 denc_varint(spanning_blob_map.size(), p);
2580 for (auto& i : spanning_blob_map) {
2581 denc_varint(i.second->id, p);
2582 i.second->encode(p, struct_v, i.second->shared_blob->get_sbid(), true);
2583 }
2584 }
2585
2586 void BlueStore::ExtentMap::decode_spanning_blobs(
2587 bufferptr::iterator& p)
2588 {
2589 __u8 struct_v;
2590 denc(struct_v, p);
2591 // Version 2 differs from v1 in blob's ref_map
2592 // serialization only. Hence there is no specific
2593 // handling at ExtentMap level.
2594 assert(struct_v == 1 || struct_v == 2);
2595
2596 unsigned n;
2597 denc_varint(n, p);
2598 while (n--) {
2599 BlobRef b(new Blob());
2600 denc_varint(b->id, p);
2601 spanning_blob_map[b->id] = b;
2602 uint64_t sbid = 0;
2603 b->decode(onode->c, p, struct_v, &sbid, true);
2604 onode->c->open_shared_blob(sbid, b);
2605 }
2606 }
2607
2608 void BlueStore::ExtentMap::init_shards(bool loaded, bool dirty)
2609 {
2610 shards.resize(onode->onode.extent_map_shards.size());
2611 unsigned i = 0;
2612 for (auto &s : onode->onode.extent_map_shards) {
2613 shards[i].shard_info = &s;
2614 shards[i].loaded = loaded;
2615 shards[i].dirty = dirty;
2616 ++i;
2617 }
2618 }
2619
2620 void BlueStore::ExtentMap::fault_range(
2621 KeyValueDB *db,
2622 uint32_t offset,
2623 uint32_t length)
2624 {
2625 auto cct = onode->c->store->cct; //used by dout
2626 dout(30) << __func__ << " 0x" << std::hex << offset << "~" << length
2627 << std::dec << dendl;
2628 auto start = seek_shard(offset);
2629 auto last = seek_shard(offset + length);
2630
2631 if (start < 0)
2632 return;
2633
2634 assert(last >= start);
2635 string key;
2636 while (start <= last) {
2637 assert((size_t)start < shards.size());
2638 auto p = &shards[start];
2639 if (!p->loaded) {
2640 dout(30) << __func__ << " opening shard 0x" << std::hex
2641 << p->shard_info->offset << std::dec << dendl;
2642 bufferlist v;
2643 generate_extent_shard_key_and_apply(
2644 onode->key, p->shard_info->offset, &key,
2645 [&](const string& final_key) {
2646 int r = db->get(PREFIX_OBJ, final_key, &v);
2647 if (r < 0) {
2648 derr << __func__ << " missing shard 0x" << std::hex
2649 << p->shard_info->offset << std::dec << " for " << onode->oid
2650 << dendl;
2651 assert(r >= 0);
2652 }
2653 }
2654 );
2655 p->extents = decode_some(v);
2656 p->loaded = true;
2657 dout(20) << __func__ << " open shard 0x" << std::hex
2658 << p->shard_info->offset << std::dec
2659 << " (" << v.length() << " bytes)" << dendl;
2660 assert(p->dirty == false);
2661 assert(v.length() == p->shard_info->bytes);
2662 onode->c->store->logger->inc(l_bluestore_onode_shard_misses);
2663 } else {
2664 onode->c->store->logger->inc(l_bluestore_onode_shard_hits);
2665 }
2666 ++start;
2667 }
2668 }
2669
2670 void BlueStore::ExtentMap::dirty_range(
2671 uint32_t offset,
2672 uint32_t length)
2673 {
2674 auto cct = onode->c->store->cct; //used by dout
2675 dout(30) << __func__ << " 0x" << std::hex << offset << "~" << length
2676 << std::dec << dendl;
2677 if (shards.empty()) {
2678 dout(20) << __func__ << " mark inline shard dirty" << dendl;
2679 inline_bl.clear();
2680 return;
2681 }
2682 auto start = seek_shard(offset);
2683 auto last = seek_shard(offset + length);
2684 if (start < 0)
2685 return;
2686
2687 assert(last >= start);
2688 while (start <= last) {
2689 assert((size_t)start < shards.size());
2690 auto p = &shards[start];
2691 if (!p->loaded) {
2692 dout(20) << __func__ << " shard 0x" << std::hex << p->shard_info->offset
2693 << std::dec << " is not loaded, can't mark dirty" << dendl;
2694 assert(0 == "can't mark unloaded shard dirty");
2695 }
2696 if (!p->dirty) {
2697 dout(20) << __func__ << " mark shard 0x" << std::hex
2698 << p->shard_info->offset << std::dec << " dirty" << dendl;
2699 p->dirty = true;
2700 }
2701 ++start;
2702 }
2703 }
2704
2705 BlueStore::extent_map_t::iterator BlueStore::ExtentMap::find(
2706 uint64_t offset)
2707 {
2708 Extent dummy(offset);
2709 return extent_map.find(dummy);
2710 }
2711
2712 BlueStore::extent_map_t::iterator BlueStore::ExtentMap::seek_lextent(
2713 uint64_t offset)
2714 {
2715 Extent dummy(offset);
2716 auto fp = extent_map.lower_bound(dummy);
2717 if (fp != extent_map.begin()) {
2718 --fp;
2719 if (fp->logical_end() <= offset) {
2720 ++fp;
2721 }
2722 }
2723 return fp;
2724 }
2725
2726 BlueStore::extent_map_t::const_iterator BlueStore::ExtentMap::seek_lextent(
2727 uint64_t offset) const
2728 {
2729 Extent dummy(offset);
2730 auto fp = extent_map.lower_bound(dummy);
2731 if (fp != extent_map.begin()) {
2732 --fp;
2733 if (fp->logical_end() <= offset) {
2734 ++fp;
2735 }
2736 }
2737 return fp;
2738 }
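// Illustrative behaviour of seek_lextent (hypothetical extents, not from the
// original sources). With extents 0x0~0x1000 and 0x2000~0x1000:
// seek_lextent(0x800) returns the 0x0 extent (it contains the offset),
// seek_lextent(0x1800) returns the 0x2000 extent (the first extent past the
// hole), and seek_lextent(0x2800) returns the 0x2000 extent as well.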
2739
2740 bool BlueStore::ExtentMap::has_any_lextents(uint64_t offset, uint64_t length)
2741 {
2742 auto fp = seek_lextent(offset);
2743 if (fp == extent_map.end() || fp->logical_offset >= offset + length) {
2744 return false;
2745 }
2746 return true;
2747 }
2748
2749 int BlueStore::ExtentMap::compress_extent_map(
2750 uint64_t offset,
2751 uint64_t length)
2752 {
2753 auto cct = onode->c->store->cct; //used by dout
2754 if (extent_map.empty())
2755 return 0;
2756 int removed = 0;
2757 auto p = seek_lextent(offset);
2758 if (p != extent_map.begin()) {
2759 --p; // start to the left of offset
2760 }
2761 // the caller should have just written to this region
2762 assert(p != extent_map.end());
2763
2764 // identify the *next* shard
2765 auto pshard = shards.begin();
2766 while (pshard != shards.end() &&
2767 p->logical_offset >= pshard->shard_info->offset) {
2768 ++pshard;
2769 }
2770 uint64_t shard_end;
2771 if (pshard != shards.end()) {
2772 shard_end = pshard->shard_info->offset;
2773 } else {
2774 shard_end = OBJECT_MAX_SIZE;
2775 }
2776
2777 auto n = p;
2778 for (++n; n != extent_map.end(); p = n++) {
2779 if (n->logical_offset > offset + length) {
2780 break; // stop after end
2781 }
2782 while (n != extent_map.end() &&
2783 p->logical_end() == n->logical_offset &&
2784 p->blob == n->blob &&
2785 p->blob_offset + p->length == n->blob_offset &&
2786 n->logical_offset < shard_end) {
2787 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
2788 << " next shard 0x" << shard_end << std::dec
2789 << " merging " << *p << " and " << *n << dendl;
2790 p->length += n->length;
2791 rm(n++);
2792 ++removed;
2793 }
2794 if (n == extent_map.end()) {
2795 break;
2796 }
2797 if (n->logical_offset >= shard_end) {
2798 assert(pshard != shards.end());
2799 ++pshard;
2800 if (pshard != shards.end()) {
2801 shard_end = pshard->shard_info->offset;
2802 } else {
2803 shard_end = OBJECT_MAX_SIZE;
2804 }
2805 }
2806 }
2807 if (removed && onode) {
2808 onode->c->store->logger->inc(l_bluestore_extent_compress, removed);
2809 }
2810 return removed;
2811 }
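// Illustrative merge (hypothetical extents, not from the original sources).
// After a write that produced 0x0~0x1000 -> blob A @0x0 and
// 0x1000~0x1000 -> blob A @0x1000, the two extents are logically and
// physically contiguous and fall within one shard, so compress_extent_map
// folds them into a single 0x0~0x2000 extent and returns 1.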
2812
2813 void BlueStore::ExtentMap::punch_hole(
2814 CollectionRef &c,
2815 uint64_t offset,
2816 uint64_t length,
2817 old_extent_map_t *old_extents)
2818 {
2819 auto p = seek_lextent(offset);
2820 uint64_t end = offset + length;
2821 while (p != extent_map.end()) {
2822 if (p->logical_offset >= end) {
2823 break;
2824 }
2825 if (p->logical_offset < offset) {
2826 if (p->logical_end() > end) {
2827 // split and deref middle
2828 uint64_t front = offset - p->logical_offset;
2829 OldExtent* oe = OldExtent::create(c, offset, p->blob_offset + front,
2830 length, p->blob);
2831 old_extents->push_back(*oe);
2832 add(end,
2833 p->blob_offset + front + length,
2834 p->length - front - length,
2835 p->blob);
2836 p->length = front;
2837 break;
2838 } else {
2839 // deref tail
2840 assert(p->logical_end() > offset); // else seek_lextent bug
2841 uint64_t keep = offset - p->logical_offset;
2842 OldExtent* oe = OldExtent::create(c, offset, p->blob_offset + keep,
2843 p->length - keep, p->blob);
2844 old_extents->push_back(*oe);
2845 p->length = keep;
2846 ++p;
2847 continue;
2848 }
2849 }
2850 if (p->logical_offset + p->length <= end) {
2851 // deref whole lextent
2852 OldExtent* oe = OldExtent::create(c, p->logical_offset, p->blob_offset,
2853 p->length, p->blob);
2854 old_extents->push_back(*oe);
2855 rm(p++);
2856 continue;
2857 }
2858 // deref head
2859 uint64_t keep = p->logical_end() - end;
2860 OldExtent* oe = OldExtent::create(c, p->logical_offset, p->blob_offset,
2861 p->length - keep, p->blob);
2862 old_extents->push_back(*oe);
2863
2864 add(end, p->blob_offset + p->length - keep, keep, p->blob);
2865 rm(p);
2866 break;
2867 }
2868 }
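// Illustrative example of the split-middle case above (hypothetical extent,
// not from the original sources). Starting from a single extent
// 0x0~0x4000 -> blob @0x0, punch_hole(0x1000, 0x1000) trims the existing
// extent to 0x0~0x1000, adds a new extent 0x2000~0x2000 with blob_offset
// 0x2000, and pushes the punched 0x1000~0x1000 piece onto old_extents. The
// other branches handle punches that clip only the tail, only the head, or
// drop a whole extent.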
2869
2870 BlueStore::Extent *BlueStore::ExtentMap::set_lextent(
2871 CollectionRef &c,
2872 uint64_t logical_offset,
2873 uint64_t blob_offset, uint64_t length, BlobRef b,
2874 old_extent_map_t *old_extents)
2875 {
2876 // We need a completely initialized Blob to increment its ref counters.
2877 assert(b->get_blob().get_logical_length() != 0);
2878
2879 // Do get_ref prior to punch_hole to prevent putting a reused blob into
2880 // the old_extents list when we overwrite the blob completely.
2881 // This might happen during WAL overwrite.
2882 b->get_ref(onode->c, blob_offset, length);
2883
2884 if (old_extents) {
2885 punch_hole(c, logical_offset, length, old_extents);
2886 }
2887
2888 Extent *le = new Extent(logical_offset, blob_offset, length, b);
2889 extent_map.insert(*le);
2890 if (spans_shard(logical_offset, length)) {
2891 request_reshard(logical_offset, logical_offset + length);
2892 }
2893 return le;
2894 }
2895
2896 BlueStore::BlobRef BlueStore::ExtentMap::split_blob(
2897 BlobRef lb,
2898 uint32_t blob_offset,
2899 uint32_t pos)
2900 {
2901 auto cct = onode->c->store->cct; //used by dout
2902
2903 uint32_t end_pos = pos + lb->get_blob().get_logical_length() - blob_offset;
2904 dout(20) << __func__ << " 0x" << std::hex << pos << " end 0x" << end_pos
2905 << " blob_offset 0x" << blob_offset << std::dec << " " << *lb
2906 << dendl;
2907 BlobRef rb = onode->c->new_blob();
2908 lb->split(onode->c, blob_offset, rb.get());
2909
2910 for (auto ep = seek_lextent(pos);
2911 ep != extent_map.end() && ep->logical_offset < end_pos;
2912 ++ep) {
2913 if (ep->blob != lb) {
2914 continue;
2915 }
2916 if (ep->logical_offset < pos) {
2917 // split extent
2918 size_t left = pos - ep->logical_offset;
2919 Extent *ne = new Extent(pos, 0, ep->length - left, rb);
2920 extent_map.insert(*ne);
2921 ep->length = left;
2922 dout(30) << __func__ << " split " << *ep << dendl;
2923 dout(30) << __func__ << " to " << *ne << dendl;
2924 } else {
2925 // switch blob
2926 assert(ep->blob_offset >= blob_offset);
2927
2928 ep->blob = rb;
2929 ep->blob_offset -= blob_offset;
2930 dout(30) << __func__ << " adjusted " << *ep << dendl;
2931 }
2932 }
2933 return rb;
2934 }
2935
2936 // Onode
2937
2938 #undef dout_prefix
2939 #define dout_prefix *_dout << "bluestore.onode(" << this << ")." << __func__ << " "
2940
2941 void BlueStore::Onode::flush()
2942 {
2943 if (flushing_count.load()) {
2944 ldout(c->store->cct, 20) << __func__ << " cnt:" << flushing_count << dendl;
2945 std::unique_lock<std::mutex> l(flush_lock);
2946 while (flushing_count.load()) {
2947 flush_cond.wait(l);
2948 }
2949 }
2950 ldout(c->store->cct, 20) << __func__ << " done" << dendl;
2951 }
2952
2953 // =======================================================
2954 // WriteContext
2955
2956 /// Checks for writes to the same pextent within a blob
2957 bool BlueStore::WriteContext::has_conflict(
2958 BlobRef b,
2959 uint64_t loffs,
2960 uint64_t loffs_end,
2961 uint64_t min_alloc_size)
2962 {
2963 assert((loffs % min_alloc_size) == 0);
2964 assert((loffs_end % min_alloc_size) == 0);
2965 for (auto w : writes) {
2966 if (b == w.b) {
2967 auto loffs2 = P2ALIGN(w.logical_offset, min_alloc_size);
2968 auto loffs2_end = P2ROUNDUP(w.logical_offset + w.length0, min_alloc_size);
2969 if ((loffs <= loffs2 && loffs_end > loffs2) ||
2970 (loffs >= loffs2 && loffs < loffs2_end)) {
2971 return true;
2972 }
2973 }
2974 }
2975 return false;
2976 }
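// Illustrative example (hypothetical values, not from the original sources).
// With min_alloc_size = 0x1000, a queued write to blob b at logical_offset
// 0x1800, length0 0x400 rounds out to the allocation unit [0x1000, 0x2000).
// A later write to the same blob over [0x1000, 0x2000) therefore conflicts,
// while one over [0x0, 0x1000) does not.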
2977
2978 // =======================================================
2979
2980 // DeferredBatch
2981 #undef dout_prefix
2982 #define dout_prefix *_dout << "bluestore.DeferredBatch(" << this << ") "
2983
2984 void BlueStore::DeferredBatch::prepare_write(
2985 CephContext *cct,
2986 uint64_t seq, uint64_t offset, uint64_t length,
2987 bufferlist::const_iterator& blp)
2988 {
2989 _discard(cct, offset, length);
2990 auto i = iomap.insert(make_pair(offset, deferred_io()));
2991 assert(i.second); // this should be a new insertion
2992 i.first->second.seq = seq;
2993 blp.copy(length, i.first->second.bl);
2994 i.first->second.bl.reassign_to_mempool(
2995 mempool::mempool_bluestore_writing_deferred);
2996 dout(20) << __func__ << " seq " << seq
2997 << " 0x" << std::hex << offset << "~" << length
2998 << " crc " << i.first->second.bl.crc32c(-1)
2999 << std::dec << dendl;
3000 seq_bytes[seq] += length;
3001 #ifdef DEBUG_DEFERRED
3002 _audit(cct);
3003 #endif
3004 }
3005
3006 void BlueStore::DeferredBatch::_discard(
3007 CephContext *cct, uint64_t offset, uint64_t length)
3008 {
3009 generic_dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
3010 << std::dec << dendl;
3011 auto p = iomap.lower_bound(offset);
3012 if (p != iomap.begin()) {
3013 --p;
3014 auto end = p->first + p->second.bl.length();
3015 if (end > offset) {
3016 bufferlist head;
3017 head.substr_of(p->second.bl, 0, offset - p->first);
3018 dout(20) << __func__ << " keep head " << p->second.seq
3019 << " 0x" << std::hex << p->first << "~" << p->second.bl.length()
3020 << " -> 0x" << head.length() << std::dec << dendl;
3021 auto i = seq_bytes.find(p->second.seq);
3022 assert(i != seq_bytes.end());
3023 if (end > offset + length) {
3024 bufferlist tail;
3025 tail.substr_of(p->second.bl, offset + length - p->first,
3026 end - (offset + length));
3027 dout(20) << __func__ << " keep tail " << p->second.seq
3028 << " 0x" << std::hex << p->first << "~" << p->second.bl.length()
3029 << " -> 0x" << tail.length() << std::dec << dendl;
3030 auto &n = iomap[offset + length];
3031 n.bl.swap(tail);
3032 n.seq = p->second.seq;
3033 i->second -= length;
3034 } else {
3035 i->second -= end - offset;
3036 }
3037 assert(i->second >= 0);
3038 p->second.bl.swap(head);
3039 }
3040 ++p;
3041 }
3042 while (p != iomap.end()) {
3043 if (p->first >= offset + length) {
3044 break;
3045 }
3046 auto i = seq_bytes.find(p->second.seq);
3047 assert(i != seq_bytes.end());
3048 auto end = p->first + p->second.bl.length();
3049 if (end > offset + length) {
3050 unsigned drop_front = offset + length - p->first;
3051 unsigned keep_tail = end - (offset + length);
3052 dout(20) << __func__ << " truncate front " << p->second.seq
3053 << " 0x" << std::hex << p->first << "~" << p->second.bl.length()
3054 << " drop_front 0x" << drop_front << " keep_tail 0x" << keep_tail
3055 << " to 0x" << (offset + length) << "~" << keep_tail
3056 << std::dec << dendl;
3057 auto &s = iomap[offset + length];
3058 s.seq = p->second.seq;
3059 s.bl.substr_of(p->second.bl, drop_front, keep_tail);
3060 i->second -= drop_front;
3061 } else {
3062 dout(20) << __func__ << " drop " << p->second.seq
3063 << " 0x" << std::hex << p->first << "~" << p->second.bl.length()
3064 << std::dec << dendl;
3065 i->second -= p->second.bl.length();
3066 }
3067 assert(i->second >= 0);
3068 p = iomap.erase(p);
3069 }
3070 }
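// Illustrative example (hypothetical values, not from the original sources).
// Given a queued io at offset 0x1000 with a 0x3000-byte buffer under seq 5,
// _discard(0x2000, 0x1000) keeps a 0x1000-byte head at 0x1000, re-inserts a
// 0x1000-byte tail at 0x3000 under the same seq, and decrements seq_bytes[5]
// by 0x1000.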
3071
3072 void BlueStore::DeferredBatch::_audit(CephContext *cct)
3073 {
3074 map<uint64_t,int> sb;
3075 for (auto p : seq_bytes) {
3076 sb[p.first] = 0; // make sure we have the same set of keys
3077 }
3078 uint64_t pos = 0;
3079 for (auto& p : iomap) {
3080 assert(p.first >= pos);
3081 sb[p.second.seq] += p.second.bl.length();
3082 pos = p.first + p.second.bl.length();
3083 }
3084 assert(sb == seq_bytes);
3085 }
3086
3087
3088 // Collection
3089
3090 #undef dout_prefix
3091 #define dout_prefix *_dout << "bluestore(" << store->path << ").collection(" << cid << " " << this << ") "
3092
3093 BlueStore::Collection::Collection(BlueStore *ns, Cache *c, coll_t cid)
3094 : store(ns),
3095 cache(c),
3096 cid(cid),
3097 lock("BlueStore::Collection::lock", true, false),
3098 exists(true),
3099 onode_map(c)
3100 {
3101 }
3102
3103 void BlueStore::Collection::open_shared_blob(uint64_t sbid, BlobRef b)
3104 {
3105 assert(!b->shared_blob);
3106 const bluestore_blob_t& blob = b->get_blob();
3107 if (!blob.is_shared()) {
3108 b->shared_blob = new SharedBlob(this);
3109 return;
3110 }
3111
3112 b->shared_blob = shared_blob_set.lookup(sbid);
3113 if (b->shared_blob) {
3114 ldout(store->cct, 10) << __func__ << " sbid 0x" << std::hex << sbid
3115 << std::dec << " had " << *b->shared_blob << dendl;
3116 } else {
3117 b->shared_blob = new SharedBlob(sbid, this);
3118 shared_blob_set.add(this, b->shared_blob.get());
3119 ldout(store->cct, 10) << __func__ << " sbid 0x" << std::hex << sbid
3120 << std::dec << " opened " << *b->shared_blob
3121 << dendl;
3122 }
3123 }
3124
3125 void BlueStore::Collection::load_shared_blob(SharedBlobRef sb)
3126 {
3127 if (!sb->is_loaded()) {
3128
3129 bufferlist v;
3130 string key;
3131 auto sbid = sb->get_sbid();
3132 get_shared_blob_key(sbid, &key);
3133 int r = store->db->get(PREFIX_SHARED_BLOB, key, &v);
3134 if (r < 0) {
3135 lderr(store->cct) << __func__ << " sbid 0x" << std::hex << sbid
3136 << std::dec << " not found at key "
3137 << pretty_binary_string(key) << dendl;
3138 assert(0 == "uh oh, missing shared_blob");
3139 }
3140
3141 sb->loaded = true;
3142 sb->persistent = new bluestore_shared_blob_t(sbid);
3143 bufferlist::iterator p = v.begin();
3144 ::decode(*(sb->persistent), p);
3145 ldout(store->cct, 10) << __func__ << " sbid 0x" << std::hex << sbid
3146 << std::dec << " loaded shared_blob " << *sb << dendl;
3147 }
3148 }
3149
3150 void BlueStore::Collection::make_blob_shared(uint64_t sbid, BlobRef b)
3151 {
3152 ldout(store->cct, 10) << __func__ << " " << *b << dendl;
3153 assert(!b->shared_blob->is_loaded());
3154
3155 // update blob
3156 bluestore_blob_t& blob = b->dirty_blob();
3157 blob.set_flag(bluestore_blob_t::FLAG_SHARED);
3158
3159 // update shared blob
3160 b->shared_blob->loaded = true;
3161 b->shared_blob->persistent = new bluestore_shared_blob_t(sbid);
3162 shared_blob_set.add(this, b->shared_blob.get());
3163 for (auto p : blob.get_extents()) {
3164 if (p.is_valid()) {
3165 b->shared_blob->get_ref(
3166 p.offset,
3167 p.length);
3168 }
3169 }
3170 ldout(store->cct, 20) << __func__ << " now " << *b << dendl;
3171 }
3172
3173 uint64_t BlueStore::Collection::make_blob_unshared(SharedBlob *sb)
3174 {
3175 ldout(store->cct, 10) << __func__ << " " << *sb << dendl;
3176 assert(sb->is_loaded());
3177
3178 uint64_t sbid = sb->get_sbid();
3179 shared_blob_set.remove(sb);
3180 sb->loaded = false;
3181 delete sb->persistent;
3182 sb->sbid_unloaded = 0;
3183 ldout(store->cct, 20) << __func__ << " now " << *sb << dendl;
3184 return sbid;
3185 }
3186
3187 BlueStore::OnodeRef BlueStore::Collection::get_onode(
3188 const ghobject_t& oid,
3189 bool create)
3190 {
3191 assert(create ? lock.is_wlocked() : lock.is_locked());
3192
3193 spg_t pgid;
3194 if (cid.is_pg(&pgid)) {
3195 if (!oid.match(cnode.bits, pgid.ps())) {
3196 lderr(store->cct) << __func__ << " oid " << oid << " not part of "
3197 << pgid << " bits " << cnode.bits << dendl;
3198 ceph_abort();
3199 }
3200 }
3201
3202 OnodeRef o = onode_map.lookup(oid);
3203 if (o)
3204 return o;
3205
3206 mempool::bluestore_cache_other::string key;
3207 get_object_key(store->cct, oid, &key);
3208
3209 ldout(store->cct, 20) << __func__ << " oid " << oid << " key "
3210 << pretty_binary_string(key) << dendl;
3211
3212 bufferlist v;
3213 int r = store->db->get(PREFIX_OBJ, key.c_str(), key.size(), &v);
3214 ldout(store->cct, 20) << " r " << r << " v.len " << v.length() << dendl;
3215 Onode *on;
3216 if (v.length() == 0) {
3217 assert(r == -ENOENT);
3218 if (!store->cct->_conf->bluestore_debug_misc &&
3219 !create)
3220 return OnodeRef();
3221
3222 // new object, new onode
3223 on = new Onode(this, oid, key);
3224 } else {
3225 // loaded
3226 assert(r >= 0);
3227 on = new Onode(this, oid, key);
3228 on->exists = true;
3229 bufferptr::iterator p = v.front().begin_deep();
3230 on->onode.decode(p);
3231
3232 // initialize extent_map
3233 on->extent_map.decode_spanning_blobs(p);
3234 if (on->onode.extent_map_shards.empty()) {
3235 denc(on->extent_map.inline_bl, p);
3236 on->extent_map.decode_some(on->extent_map.inline_bl);
3237 } else {
3238 on->extent_map.init_shards(false, false);
3239 }
3240 }
3241 o.reset(on);
3242 return onode_map.add(oid, o);
3243 }
3244
3245 void BlueStore::Collection::split_cache(
3246 Collection *dest)
3247 {
3248 ldout(store->cct, 10) << __func__ << " to " << dest << dendl;
3249
3250 // lock (one or both) cache shards
3251 std::lock(cache->lock, dest->cache->lock);
3252 std::lock_guard<std::recursive_mutex> l(cache->lock, std::adopt_lock);
3253 std::lock_guard<std::recursive_mutex> l2(dest->cache->lock, std::adopt_lock);
3254
3255 int destbits = dest->cnode.bits;
3256 spg_t destpg;
3257 bool is_pg = dest->cid.is_pg(&destpg);
3258 assert(is_pg);
3259
3260 auto p = onode_map.onode_map.begin();
3261 while (p != onode_map.onode_map.end()) {
3262 if (!p->second->oid.match(destbits, destpg.pgid.ps())) {
3263 // onode does not belong to this child
3264 ++p;
3265 } else {
3266 OnodeRef o = p->second;
3267 ldout(store->cct, 20) << __func__ << " moving " << o << " " << o->oid
3268 << dendl;
3269
3270 cache->_rm_onode(p->second);
3271 p = onode_map.onode_map.erase(p);
3272
3273 o->c = dest;
3274 dest->cache->_add_onode(o, 1);
3275 dest->onode_map.onode_map[o->oid] = o;
3276 dest->onode_map.cache = dest->cache;
3277
3278 // move over shared blobs and buffers. cover shared blobs from
3279 // both extent map and spanning blob map (the full extent map
3280 // may not be faulted in)
3281 vector<SharedBlob*> sbvec;
3282 for (auto& e : o->extent_map.extent_map) {
3283 sbvec.push_back(e.blob->shared_blob.get());
3284 }
3285 for (auto& b : o->extent_map.spanning_blob_map) {
3286 sbvec.push_back(b.second->shared_blob.get());
3287 }
3288 for (auto sb : sbvec) {
3289 if (sb->coll == dest) {
3290 ldout(store->cct, 20) << __func__ << " already moved " << *sb
3291 << dendl;
3292 continue;
3293 }
3294 ldout(store->cct, 20) << __func__ << " moving " << *sb << dendl;
3295 sb->coll = dest;
3296 if (sb->get_sbid()) {
3297 ldout(store->cct, 20) << __func__
3298 << " moving registration " << *sb << dendl;
3299 shared_blob_set.remove(sb);
3300 dest->shared_blob_set.add(dest, sb);
3301 }
3302 if (dest->cache != cache) {
3303 for (auto& i : sb->bc.buffer_map) {
3304 if (!i.second->is_writing()) {
3305 ldout(store->cct, 20) << __func__ << " moving " << *i.second
3306 << dendl;
3307 dest->cache->_move_buffer(cache, i.second.get());
3308 }
3309 }
3310 }
3311 }
3312 }
3313 }
3314 }
3315
3316 // =======================================================
3317
3318 void *BlueStore::MempoolThread::entry()
3319 {
3320 Mutex::Locker l(lock);
3321 while (!stop) {
3322 uint64_t meta_bytes =
3323 mempool::bluestore_cache_other::allocated_bytes() +
3324 mempool::bluestore_cache_onode::allocated_bytes();
3325 uint64_t onode_num =
3326 mempool::bluestore_cache_onode::allocated_items();
3327
3328 if (onode_num < 2) {
3329 onode_num = 2;
3330 }
3331
3332 float bytes_per_onode = (float)meta_bytes / (float)onode_num;
3333 size_t num_shards = store->cache_shards.size();
3334 float target_ratio = store->cache_meta_ratio + store->cache_data_ratio;
3335 // A little sloppy but should be close enough
3336 uint64_t shard_target = target_ratio * (store->cache_size / num_shards);
3337
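// Illustrative numbers (hypothetical, not from the original sources): with
// cache_size = 1 GiB split over 4 shards and
// cache_meta_ratio + cache_data_ratio = 0.8, shard_target is roughly
// 0.8 * 256 MiB ~= 204 MiB; each shard is trimmed toward that budget on
// every bluestore_cache_trim_interval pass.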
3338 for (auto i : store->cache_shards) {
3339 i->trim(shard_target,
3340 store->cache_meta_ratio,
3341 store->cache_data_ratio,
3342 bytes_per_onode);
3343 }
3344
3345 store->_update_cache_logger();
3346
3347 utime_t wait;
3348 wait += store->cct->_conf->bluestore_cache_trim_interval;
3349 cond.WaitInterval(lock, wait);
3350 }
3351 stop = false;
3352 return NULL;
3353 }
3354
3355 // =======================================================
3356
3357 // OmapIteratorImpl
3358
3359 #undef dout_prefix
3360 #define dout_prefix *_dout << "bluestore.OmapIteratorImpl(" << this << ") "
3361
3362 BlueStore::OmapIteratorImpl::OmapIteratorImpl(
3363 CollectionRef c, OnodeRef o, KeyValueDB::Iterator it)
3364 : c(c), o(o), it(it)
3365 {
3366 RWLock::RLocker l(c->lock);
3367 if (o->onode.has_omap()) {
3368 get_omap_key(o->onode.nid, string(), &head);
3369 get_omap_tail(o->onode.nid, &tail);
3370 it->lower_bound(head);
3371 }
3372 }
3373
3374 int BlueStore::OmapIteratorImpl::seek_to_first()
3375 {
3376 RWLock::RLocker l(c->lock);
3377 if (o->onode.has_omap()) {
3378 it->lower_bound(head);
3379 } else {
3380 it = KeyValueDB::Iterator();
3381 }
3382 return 0;
3383 }
3384
3385 int BlueStore::OmapIteratorImpl::upper_bound(const string& after)
3386 {
3387 RWLock::RLocker l(c->lock);
3388 if (o->onode.has_omap()) {
3389 string key;
3390 get_omap_key(o->onode.nid, after, &key);
3391 ldout(c->store->cct,20) << __func__ << " after " << after << " key "
3392 << pretty_binary_string(key) << dendl;
3393 it->upper_bound(key);
3394 } else {
3395 it = KeyValueDB::Iterator();
3396 }
3397 return 0;
3398 }
3399
3400 int BlueStore::OmapIteratorImpl::lower_bound(const string& to)
3401 {
3402 RWLock::RLocker l(c->lock);
3403 if (o->onode.has_omap()) {
3404 string key;
3405 get_omap_key(o->onode.nid, to, &key);
3406 ldout(c->store->cct,20) << __func__ << " to " << to << " key "
3407 << pretty_binary_string(key) << dendl;
3408 it->lower_bound(key);
3409 } else {
3410 it = KeyValueDB::Iterator();
3411 }
3412 return 0;
3413 }
3414
3415 bool BlueStore::OmapIteratorImpl::valid()
3416 {
3417 RWLock::RLocker l(c->lock);
3418 bool r = o->onode.has_omap() && it && it->valid() &&
3419 it->raw_key().second <= tail;
3420 if (it && it->valid()) {
3421 ldout(c->store->cct,20) << __func__ << " is at "
3422 << pretty_binary_string(it->raw_key().second)
3423 << dendl;
3424 }
3425 return r;
3426 }
3427
3428 int BlueStore::OmapIteratorImpl::next(bool validate)
3429 {
3430 RWLock::RLocker l(c->lock);
3431 if (o->onode.has_omap()) {
3432 it->next();
3433 return 0;
3434 } else {
3435 return -1;
3436 }
3437 }
3438
3439 string BlueStore::OmapIteratorImpl::key()
3440 {
3441 RWLock::RLocker l(c->lock);
3442 assert(it->valid());
3443 string db_key = it->raw_key().second;
3444 string user_key;
3445 decode_omap_key(db_key, &user_key);
3446 return user_key;
3447 }
3448
3449 bufferlist BlueStore::OmapIteratorImpl::value()
3450 {
3451 RWLock::RLocker l(c->lock);
3452 assert(it->valid());
3453 return it->value();
3454 }
3455
3456
3457 // =====================================
3458
3459 #undef dout_prefix
3460 #define dout_prefix *_dout << "bluestore(" << path << ") "
3461
3462
3463 static void aio_cb(void *priv, void *priv2)
3464 {
3465 BlueStore *store = static_cast<BlueStore*>(priv);
3466 BlueStore::AioContext *c = static_cast<BlueStore::AioContext*>(priv2);
3467 c->aio_finish(store);
3468 }
3469
3470 BlueStore::BlueStore(CephContext *cct, const string& path)
3471 : ObjectStore(cct, path),
3472 throttle_bytes(cct, "bluestore_throttle_bytes",
3473 cct->_conf->bluestore_throttle_bytes),
3474 throttle_deferred_bytes(cct, "bluestore_throttle_deferred_bytes",
3475 cct->_conf->bluestore_throttle_bytes +
3476 cct->_conf->bluestore_throttle_deferred_bytes),
3477 kv_sync_thread(this),
3478 kv_finalize_thread(this),
3479 mempool_thread(this)
3480 {
3481 _init_logger();
3482 cct->_conf->add_observer(this);
3483 set_cache_shards(1);
3484 }
3485
3486 BlueStore::BlueStore(CephContext *cct,
3487 const string& path,
3488 uint64_t _min_alloc_size)
3489 : ObjectStore(cct, path),
3490 throttle_bytes(cct, "bluestore_throttle_bytes",
3491 cct->_conf->bluestore_throttle_bytes),
3492 throttle_deferred_bytes(cct, "bluestore_throttle_deferred_bytes",
3493 cct->_conf->bluestore_throttle_bytes +
3494 cct->_conf->bluestore_throttle_deferred_bytes),
3495 kv_sync_thread(this),
3496 kv_finalize_thread(this),
3497 min_alloc_size(_min_alloc_size),
3498 min_alloc_size_order(ctz(_min_alloc_size)),
3499 mempool_thread(this)
3500 {
3501 _init_logger();
3502 cct->_conf->add_observer(this);
3503 set_cache_shards(1);
3504 }
3505
3506 BlueStore::~BlueStore()
3507 {
3508 for (auto f : finishers) {
3509 delete f;
3510 }
3511 finishers.clear();
3512
3513 cct->_conf->remove_observer(this);
3514 _shutdown_logger();
3515 assert(!mounted);
3516 assert(db == NULL);
3517 assert(bluefs == NULL);
3518 assert(fsid_fd < 0);
3519 assert(path_fd < 0);
3520 for (auto i : cache_shards) {
3521 delete i;
3522 }
3523 cache_shards.clear();
3524 }
3525
3526 const char **BlueStore::get_tracked_conf_keys() const
3527 {
3528 static const char* KEYS[] = {
3529 "bluestore_csum_type",
3530 "bluestore_compression_mode",
3531 "bluestore_compression_algorithm",
3532 "bluestore_compression_min_blob_size",
3533 "bluestore_compression_min_blob_size_ssd",
3534 "bluestore_compression_min_blob_size_hdd",
3535 "bluestore_compression_max_blob_size",
3536 "bluestore_compression_max_blob_size_ssd",
3537 "bluestore_compression_max_blob_size_hdd",
3538 "bluestore_compression_required_ratio",
3539 "bluestore_max_alloc_size",
3540 "bluestore_prefer_deferred_size",
3541 "bluestore_deferred_batch_ops",
3542 "bluestore_deferred_batch_ops_hdd",
3543 "bluestore_deferred_batch_ops_ssd",
3544 "bluestore_throttle_bytes",
3545 "bluestore_throttle_deferred_bytes",
3546 "bluestore_throttle_cost_per_io_hdd",
3547 "bluestore_throttle_cost_per_io_ssd",
3548 "bluestore_throttle_cost_per_io",
3549 "bluestore_max_blob_size",
3550 "bluestore_max_blob_size_ssd",
3551 "bluestore_max_blob_size_hdd",
3552 NULL
3553 };
3554 return KEYS;
3555 }
3556
3557 void BlueStore::handle_conf_change(const struct md_config_t *conf,
3558 const std::set<std::string> &changed)
3559 {
3560 if (changed.count("bluestore_csum_type")) {
3561 _set_csum();
3562 }
3563 if (changed.count("bluestore_compression_mode") ||
3564 changed.count("bluestore_compression_algorithm") ||
3565 changed.count("bluestore_compression_min_blob_size") ||
3566 changed.count("bluestore_compression_max_blob_size")) {
3567 if (bdev) {
3568 _set_compression();
3569 }
3570 }
3571 if (changed.count("bluestore_max_blob_size") ||
3572 changed.count("bluestore_max_blob_size_ssd") ||
3573 changed.count("bluestore_max_blob_size_hdd")) {
3574 if (bdev) {
3575 // only after startup
3576 _set_blob_size();
3577 }
3578 }
3579 if (changed.count("bluestore_prefer_deferred_size") ||
3580 changed.count("bluestore_max_alloc_size") ||
3581 changed.count("bluestore_deferred_batch_ops") ||
3582 changed.count("bluestore_deferred_batch_ops_hdd") ||
3583 changed.count("bluestore_deferred_batch_ops_ssd")) {
3584 if (bdev) {
3585 // only after startup
3586 _set_alloc_sizes();
3587 }
3588 }
3589 if (changed.count("bluestore_throttle_cost_per_io") ||
3590 changed.count("bluestore_throttle_cost_per_io_hdd") ||
3591 changed.count("bluestore_throttle_cost_per_io_ssd")) {
3592 if (bdev) {
3593 _set_throttle_params();
3594 }
3595 }
3596 if (changed.count("bluestore_throttle_bytes")) {
3597 throttle_bytes.reset_max(conf->bluestore_throttle_bytes);
3598 throttle_deferred_bytes.reset_max(
3599 conf->bluestore_throttle_bytes + conf->bluestore_throttle_deferred_bytes);
3600 }
3601 if (changed.count("bluestore_throttle_deferred_bytes")) {
3602 throttle_deferred_bytes.reset_max(
3603 conf->bluestore_throttle_bytes + conf->bluestore_throttle_deferred_bytes);
3604 }
3605 }
3606
3607 void BlueStore::_set_compression()
3608 {
3609 auto m = Compressor::get_comp_mode_type(cct->_conf->bluestore_compression_mode);
3610 if (m) {
3611 comp_mode = *m;
3612 } else {
3613 derr << __func__ << " unrecognized value '"
3614 << cct->_conf->bluestore_compression_mode
3615 << "' for bluestore_compression_mode, reverting to 'none'"
3616 << dendl;
3617 comp_mode = Compressor::COMP_NONE;
3618 }
3619
3620 compressor = nullptr;
3621
3622 if (comp_mode == Compressor::COMP_NONE) {
3623 dout(10) << __func__ << " compression mode set to 'none', "
3624 << "ignore other compression settings" << dendl;
3625 return;
3626 }
3627
3628 if (cct->_conf->bluestore_compression_min_blob_size) {
3629 comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size;
3630 } else {
3631 assert(bdev);
3632 if (bdev->is_rotational()) {
3633 comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size_hdd;
3634 } else {
3635 comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size_ssd;
3636 }
3637 }
3638
3639 if (cct->_conf->bluestore_compression_max_blob_size) {
3640 comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size;
3641 } else {
3642 assert(bdev);
3643 if (bdev->is_rotational()) {
3644 comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size_hdd;
3645 } else {
3646 comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size_ssd;
3647 }
3648 }
3649
3650 auto& alg_name = cct->_conf->bluestore_compression_algorithm;
3651 if (!alg_name.empty()) {
3652 compressor = Compressor::create(cct, alg_name);
3653 if (!compressor) {
3654 derr << __func__ << " unable to initialize " << alg_name.c_str() << " compressor"
3655 << dendl;
3656 }
3657 }
3658
3659 dout(10) << __func__ << " mode " << Compressor::get_comp_mode_name(comp_mode)
3660 << " alg " << (compressor ? compressor->get_type_name() : "(none)")
3661 << dendl;
3662 }
3663
3664 void BlueStore::_set_csum()
3665 {
3666 csum_type = Checksummer::CSUM_NONE;
3667 int t = Checksummer::get_csum_string_type(cct->_conf->bluestore_csum_type);
3668 if (t > Checksummer::CSUM_NONE)
3669 csum_type = t;
3670
3671 dout(10) << __func__ << " csum_type "
3672 << Checksummer::get_csum_type_string(csum_type)
3673 << dendl;
3674 }
3675
3676 void BlueStore::_set_throttle_params()
3677 {
3678 if (cct->_conf->bluestore_throttle_cost_per_io) {
3679 throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io;
3680 } else {
3681 assert(bdev);
3682 if (bdev->is_rotational()) {
3683 throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io_hdd;
3684 } else {
3685 throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io_ssd;
3686 }
3687 }
3688
3689 dout(10) << __func__ << " throttle_cost_per_io " << throttle_cost_per_io
3690 << dendl;
3691 }
3692 void BlueStore::_set_blob_size()
3693 {
3694 if (cct->_conf->bluestore_max_blob_size) {
3695 max_blob_size = cct->_conf->bluestore_max_blob_size;
3696 } else {
3697 assert(bdev);
3698 if (bdev->is_rotational()) {
3699 max_blob_size = cct->_conf->bluestore_max_blob_size_hdd;
3700 } else {
3701 max_blob_size = cct->_conf->bluestore_max_blob_size_ssd;
3702 }
3703 }
3704 dout(10) << __func__ << " max_blob_size 0x" << std::hex << max_blob_size
3705 << std::dec << dendl;
3706 }
3707
3708 int BlueStore::_set_cache_sizes()
3709 {
3710 assert(bdev);
3711 if (cct->_conf->bluestore_cache_size) {
3712 cache_size = cct->_conf->bluestore_cache_size;
3713 } else {
3714 // choose global cache size based on backend type
3715 if (bdev->is_rotational()) {
3716 cache_size = cct->_conf->bluestore_cache_size_hdd;
3717 } else {
3718 cache_size = cct->_conf->bluestore_cache_size_ssd;
3719 }
3720 }
3721 cache_meta_ratio = cct->_conf->bluestore_cache_meta_ratio;
3722 cache_kv_ratio = cct->_conf->bluestore_cache_kv_ratio;
3723
3724 double cache_kv_max = cct->_conf->bluestore_cache_kv_max;
3725 double cache_kv_max_ratio = 0;
3726
3727 // if cache_kv_max is negative, disable it
3728 if (cache_size > 0 && cache_kv_max >= 0) {
3729 cache_kv_max_ratio = (double) cache_kv_max / (double) cache_size;
3730 if (cache_kv_max_ratio < 1.0 && cache_kv_max_ratio < cache_kv_ratio) {
3731 dout(1) << __func__ << " max " << cache_kv_max_ratio
3732 << " < ratio " << cache_kv_ratio
3733 << dendl;
3734 cache_meta_ratio = cache_meta_ratio + cache_kv_ratio - cache_kv_max_ratio;
3735 cache_kv_ratio = cache_kv_max_ratio;
3736 }
3737 }
3738
3739 cache_data_ratio =
3740 (double)1.0 - (double)cache_meta_ratio - (double)cache_kv_ratio;
3741
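// Illustrative example of the cache_kv_max clamp above (hypothetical values,
// not from the original sources). With cache_size = 4 GiB,
// cache_meta_ratio = 0.1, cache_kv_ratio = 0.8 and cache_kv_max = 512 MiB:
// cache_kv_max_ratio = 0.125 < 0.8, so the ratios are rebalanced to
// meta 0.775 and kv 0.125, leaving data = 0.1.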
3742 if (cache_meta_ratio < 0 || cache_meta_ratio > 1.0) {
3743 derr << __func__ << " bluestore_cache_meta_ratio (" << cache_meta_ratio
3744 << ") must be in range [0,1.0]" << dendl;
3745 return -EINVAL;
3746 }
3747 if (cache_kv_ratio < 0 || cache_kv_ratio > 1.0) {
3748 derr << __func__ << " bluestore_cache_kv_ratio (" << cache_kv_ratio
3749 << ") must be in range [0,1.0]" << dendl;
3750 return -EINVAL;
3751 }
3752 if (cache_meta_ratio + cache_kv_ratio > 1.0) {
3753 derr << __func__ << " bluestore_cache_meta_ratio (" << cache_meta_ratio
3754 << ") + bluestore_cache_kv_ratio (" << cache_kv_ratio
3755 << ") = " << cache_meta_ratio + cache_kv_ratio << "; must be <= 1.0"
3756 << dendl;
3757 return -EINVAL;
3758 }
3759 if (cache_data_ratio < 0) {
3760 // deal with floating point imprecision
3761 cache_data_ratio = 0;
3762 }
3763 dout(1) << __func__ << " cache_size " << cache_size
3764 << " meta " << cache_meta_ratio
3765 << " kv " << cache_kv_ratio
3766 << " data " << cache_data_ratio
3767 << dendl;
3768 return 0;
3769 }
3770
3771 void BlueStore::_init_logger()
3772 {
3773 PerfCountersBuilder b(cct, "bluestore",
3774 l_bluestore_first, l_bluestore_last);
3775 b.add_time_avg(l_bluestore_kv_flush_lat, "kv_flush_lat",
3776 "Average kv_thread flush latency",
3777 "fl_l", PerfCountersBuilder::PRIO_INTERESTING);
3778 b.add_time_avg(l_bluestore_kv_commit_lat, "kv_commit_lat",
3779 "Average kv_thread commit latency");
3780 b.add_time_avg(l_bluestore_kv_lat, "kv_lat",
3781 "Average kv_thread sync latency",
3782 "k_l", PerfCountersBuilder::PRIO_INTERESTING);
3783 b.add_time_avg(l_bluestore_state_prepare_lat, "state_prepare_lat",
3784 "Average prepare state latency");
3785 b.add_time_avg(l_bluestore_state_aio_wait_lat, "state_aio_wait_lat",
3786 "Average aio_wait state latency",
3787 "io_l", PerfCountersBuilder::PRIO_INTERESTING);
3788 b.add_time_avg(l_bluestore_state_io_done_lat, "state_io_done_lat",
3789 "Average io_done state latency");
3790 b.add_time_avg(l_bluestore_state_kv_queued_lat, "state_kv_queued_lat",
3791 "Average kv_queued state latency");
3792 b.add_time_avg(l_bluestore_state_kv_committing_lat, "state_kv_commiting_lat",
3793 "Average kv_commiting state latency");
3794 b.add_time_avg(l_bluestore_state_kv_done_lat, "state_kv_done_lat",
3795 "Average kv_done state latency");
3796 b.add_time_avg(l_bluestore_state_deferred_queued_lat, "state_deferred_queued_lat",
3797 "Average deferred_queued state latency");
3798 b.add_time_avg(l_bluestore_state_deferred_aio_wait_lat, "state_deferred_aio_wait_lat",
3799 "Average aio_wait state latency");
3800 b.add_time_avg(l_bluestore_state_deferred_cleanup_lat, "state_deferred_cleanup_lat",
3801 "Average cleanup state latency");
3802 b.add_time_avg(l_bluestore_state_finishing_lat, "state_finishing_lat",
3803 "Average finishing state latency");
3804 b.add_time_avg(l_bluestore_state_done_lat, "state_done_lat",
3805 "Average done state latency");
3806 b.add_time_avg(l_bluestore_throttle_lat, "throttle_lat",
3807 "Average submit throttle latency",
3808 "th_l", PerfCountersBuilder::PRIO_CRITICAL);
3809 b.add_time_avg(l_bluestore_submit_lat, "submit_lat",
3810 "Average submit latency",
3811 "s_l", PerfCountersBuilder::PRIO_CRITICAL);
3812 b.add_time_avg(l_bluestore_commit_lat, "commit_lat",
3813 "Average commit latency",
3814 "c_l", PerfCountersBuilder::PRIO_CRITICAL);
3815 b.add_time_avg(l_bluestore_read_lat, "read_lat",
3816 "Average read latency",
3817 "r_l", PerfCountersBuilder::PRIO_CRITICAL);
3818 b.add_time_avg(l_bluestore_read_onode_meta_lat, "read_onode_meta_lat",
3819 "Average read onode metadata latency");
3820 b.add_time_avg(l_bluestore_read_wait_aio_lat, "read_wait_aio_lat",
3821 "Average read latency");
3822 b.add_time_avg(l_bluestore_compress_lat, "compress_lat",
3823 "Average compress latency");
3824 b.add_time_avg(l_bluestore_decompress_lat, "decompress_lat",
3825 "Average decompress latency");
3826 b.add_time_avg(l_bluestore_csum_lat, "csum_lat",
3827 "Average checksum latency");
3828 b.add_u64_counter(l_bluestore_compress_success_count, "compress_success_count",
3829 "Sum for beneficial compress ops");
3830 b.add_u64_counter(l_bluestore_compress_rejected_count, "compress_rejected_count",
3831 "Sum for compress ops rejected due to low net gain of space");
3832 b.add_u64_counter(l_bluestore_write_pad_bytes, "write_pad_bytes",
3833 "Sum for write-op padded bytes");
3834 b.add_u64_counter(l_bluestore_deferred_write_ops, "deferred_write_ops",
3835 "Sum for deferred write op");
3836 b.add_u64_counter(l_bluestore_deferred_write_bytes, "deferred_write_bytes",
3837 "Sum for deferred write bytes", "def");
3838 b.add_u64_counter(l_bluestore_write_penalty_read_ops, "write_penalty_read_ops",
3839 "Sum for write penalty read ops");
3840 b.add_u64(l_bluestore_allocated, "bluestore_allocated",
3841 "Sum for allocated bytes");
3842 b.add_u64(l_bluestore_stored, "bluestore_stored",
3843 "Sum for stored bytes");
3844 b.add_u64(l_bluestore_compressed, "bluestore_compressed",
3845 "Sum for stored compressed bytes");
3846 b.add_u64(l_bluestore_compressed_allocated, "bluestore_compressed_allocated",
3847 "Sum for bytes allocated for compressed data");
3848 b.add_u64(l_bluestore_compressed_original, "bluestore_compressed_original",
3849 "Sum for original bytes that were compressed");
3850
3851 b.add_u64(l_bluestore_onodes, "bluestore_onodes",
3852 "Number of onodes in cache");
3853 b.add_u64_counter(l_bluestore_onode_hits, "bluestore_onode_hits",
3854 "Sum for onode-lookups hit in the cache");
3855 b.add_u64_counter(l_bluestore_onode_misses, "bluestore_onode_misses",
3856 "Sum for onode-lookups missed in the cache");
3857 b.add_u64_counter(l_bluestore_onode_shard_hits, "bluestore_onode_shard_hits",
3858 "Sum for onode-shard lookups hit in the cache");
3859 b.add_u64_counter(l_bluestore_onode_shard_misses,
3860 "bluestore_onode_shard_misses",
3861 "Sum for onode-shard lookups missed in the cache");
3862 b.add_u64(l_bluestore_extents, "bluestore_extents",
3863 "Number of extents in cache");
3864 b.add_u64(l_bluestore_blobs, "bluestore_blobs",
3865 "Number of blobs in cache");
3866 b.add_u64(l_bluestore_buffers, "bluestore_buffers",
3867 "Number of buffers in cache");
3868 b.add_u64(l_bluestore_buffer_bytes, "bluestore_buffer_bytes",
3869 "Number of buffer bytes in cache");
3870 b.add_u64(l_bluestore_buffer_hit_bytes, "bluestore_buffer_hit_bytes",
3871 "Sum for bytes of read hit in the cache");
3872 b.add_u64(l_bluestore_buffer_miss_bytes, "bluestore_buffer_miss_bytes",
3873 "Sum for bytes of read missed in the cache");
3874
3875 b.add_u64_counter(l_bluestore_write_big, "bluestore_write_big",
3876 "Large aligned writes into fresh blobs");
3877 b.add_u64_counter(l_bluestore_write_big_bytes, "bluestore_write_big_bytes",
3878 "Large aligned writes into fresh blobs (bytes)");
3879 b.add_u64_counter(l_bluestore_write_big_blobs, "bluestore_write_big_blobs",
3880 "Large aligned writes into fresh blobs (blobs)");
3881 b.add_u64_counter(l_bluestore_write_small, "bluestore_write_small",
3882 "Small writes into existing or sparse small blobs");
3883 b.add_u64_counter(l_bluestore_write_small_bytes, "bluestore_write_small_bytes",
3884 "Small writes into existing or sparse small blobs (bytes)");
3885 b.add_u64_counter(l_bluestore_write_small_unused,
3886 "bluestore_write_small_unused",
3887 "Small writes into unused portion of existing blob");
3888 b.add_u64_counter(l_bluestore_write_small_deferred,
3889 "bluestore_write_small_deferred",
3890 "Small overwrites using deferred");
3891 b.add_u64_counter(l_bluestore_write_small_pre_read,
3892 "bluestore_write_small_pre_read",
3893 "Small writes that required we read some data (possibly "
3894 "cached) to fill out the block");
3895 b.add_u64_counter(l_bluestore_write_small_new, "bluestore_write_small_new",
3896 "Small write into new (sparse) blob");
3897
3898 b.add_u64_counter(l_bluestore_txc, "bluestore_txc", "Transactions committed");
3899 b.add_u64_counter(l_bluestore_onode_reshard, "bluestore_onode_reshard",
3900 "Onode extent map reshard events");
3901 b.add_u64_counter(l_bluestore_blob_split, "bluestore_blob_split",
3902 "Sum for blob splitting due to resharding");
3903 b.add_u64_counter(l_bluestore_extent_compress, "bluestore_extent_compress",
3904 "Sum for extents that have been removed due to compression");
3905 b.add_u64_counter(l_bluestore_gc_merged, "bluestore_gc_merged",
3906 "Sum for extents that have been merged due to garbage "
3907 "collection");
3908 logger = b.create_perf_counters();
3909 cct->get_perfcounters_collection()->add(logger);
3910 }
3911
3912 int BlueStore::_reload_logger()
3913 {
3914 struct store_statfs_t store_statfs;
3915
3916 int r = statfs(&store_statfs);
3917 if(r >= 0) {
3918 logger->set(l_bluestore_allocated, store_statfs.allocated);
3919 logger->set(l_bluestore_stored, store_statfs.stored);
3920 logger->set(l_bluestore_compressed, store_statfs.compressed);
3921 logger->set(l_bluestore_compressed_allocated, store_statfs.compressed_allocated);
3922 logger->set(l_bluestore_compressed_original, store_statfs.compressed_original);
3923 }
3924 return r;
3925 }
3926
3927 void BlueStore::_shutdown_logger()
3928 {
3929 cct->get_perfcounters_collection()->remove(logger);
3930 delete logger;
3931 }
3932
3933 int BlueStore::get_block_device_fsid(CephContext* cct, const string& path,
3934 uuid_d *fsid)
3935 {
3936 bluestore_bdev_label_t label;
3937 int r = _read_bdev_label(cct, path, &label);
3938 if (r < 0)
3939 return r;
3940 *fsid = label.osd_uuid;
3941 return 0;
3942 }
3943
3944 int BlueStore::_open_path()
3945 {
3946 assert(path_fd < 0);
3947 path_fd = TEMP_FAILURE_RETRY(::open(path.c_str(), O_DIRECTORY));
3948 if (path_fd < 0) {
3949 int r = -errno;
3950 derr << __func__ << " unable to open " << path << ": " << cpp_strerror(r)
3951 << dendl;
3952 return r;
3953 }
3954 return 0;
3955 }
3956
3957 void BlueStore::_close_path()
3958 {
3959 VOID_TEMP_FAILURE_RETRY(::close(path_fd));
3960 path_fd = -1;
3961 }
3962
3963 int BlueStore::_write_bdev_label(string path, bluestore_bdev_label_t label)
3964 {
3965 dout(10) << __func__ << " path " << path << " label " << label << dendl;
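// On-disk layout written below: the encoded bluestore_bdev_label_t is
// followed by a crc32c of those encoded bytes, and the buffer is then
// zero-padded out to BDEV_LABEL_BLOCK_SIZE so the label always occupies
// exactly the first 4 KiB block of the device.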
3966 bufferlist bl;
3967 ::encode(label, bl);
3968 uint32_t crc = bl.crc32c(-1);
3969 ::encode(crc, bl);
3970 assert(bl.length() <= BDEV_LABEL_BLOCK_SIZE);
3971 bufferptr z(BDEV_LABEL_BLOCK_SIZE - bl.length());
3972 z.zero();
3973 bl.append(std::move(z));
3974
3975 int fd = TEMP_FAILURE_RETRY(::open(path.c_str(), O_WRONLY));
3976 if (fd < 0) {
3977 fd = -errno;
3978 derr << __func__ << " failed to open " << path << ": " << cpp_strerror(fd)
3979 << dendl;
3980 return fd;
3981 }
3982 int r = bl.write_fd(fd);
3983 if (r < 0) {
3984 derr << __func__ << " failed to write to " << path
3985 << ": " << cpp_strerror(r) << dendl;
3986 }
3987 VOID_TEMP_FAILURE_RETRY(::close(fd));
3988 return r;
3989 }
3990
3991 int BlueStore::_read_bdev_label(CephContext* cct, string path,
3992 bluestore_bdev_label_t *label)
3993 {
3994 dout(10) << __func__ << dendl;
3995 int fd = TEMP_FAILURE_RETRY(::open(path.c_str(), O_RDONLY));
3996 if (fd < 0) {
3997 fd = -errno;
3998 derr << __func__ << " failed to open " << path << ": " << cpp_strerror(fd)
3999 << dendl;
4000 return fd;
4001 }
4002 bufferlist bl;
4003 int r = bl.read_fd(fd, BDEV_LABEL_BLOCK_SIZE);
4004 VOID_TEMP_FAILURE_RETRY(::close(fd));
4005 if (r < 0) {
4006 derr << __func__ << " failed to read from " << path
4007 << ": " << cpp_strerror(r) << dendl;
4008 return r;
4009 }
4010
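// Verification mirrors _write_bdev_label: decode the label, recompute
// crc32c over exactly the bytes consumed by the decode (substr_of up to
// the iterator offset), then decode the stored crc and compare the two.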
4011 uint32_t crc, expected_crc;
4012 bufferlist::iterator p = bl.begin();
4013 try {
4014 ::decode(*label, p);
4015 bufferlist t;
4016 t.substr_of(bl, 0, p.get_off());
4017 crc = t.crc32c(-1);
4018 ::decode(expected_crc, p);
4019 }
4020 catch (buffer::error& e) {
4021 derr << __func__ << " unable to decode label at offset " << p.get_off()
4022 << ": " << e.what()
4023 << dendl;
4024 return -EINVAL;
4025 }
4026 if (crc != expected_crc) {
4027 derr << __func__ << " bad crc on label, expected " << expected_crc
4028 << " != actual " << crc << dendl;
4029 return -EIO;
4030 }
4031 dout(10) << __func__ << " got " << *label << dendl;
4032 return 0;
4033 }
4034
4035 int BlueStore::_check_or_set_bdev_label(
4036 string path, uint64_t size, string desc, bool create)
4037 {
4038 bluestore_bdev_label_t label;
4039 if (create) {
4040 label.osd_uuid = fsid;
4041 label.size = size;
4042 label.btime = ceph_clock_now();
4043 label.description = desc;
4044 int r = _write_bdev_label(path, label);
4045 if (r < 0)
4046 return r;
4047 } else {
4048 int r = _read_bdev_label(cct, path, &label);
4049 if (r < 0)
4050 return r;
4051 if (cct->_conf->bluestore_debug_permit_any_bdev_label) {
4052 dout(20) << __func__ << " bdev " << path << " fsid " << label.osd_uuid
4053 << " and fsid " << fsid << " check bypassed" << dendl;
4054 }
4055 else if (label.osd_uuid != fsid) {
4056 derr << __func__ << " bdev " << path << " fsid " << label.osd_uuid
4057 << " does not match our fsid " << fsid << dendl;
4058 return -EIO;
4059 }
4060 }
4061 return 0;
4062 }
4063
4064 void BlueStore::_set_alloc_sizes(void)
4065 {
4066 max_alloc_size = cct->_conf->bluestore_max_alloc_size;
4067
4068 if (cct->_conf->bluestore_prefer_deferred_size) {
4069 prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size;
4070 } else {
4071 assert(bdev);
4072 if (bdev->is_rotational()) {
4073 prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size_hdd;
4074 } else {
4075 prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size_ssd;
4076 }
4077 }
4078
4079 if (cct->_conf->bluestore_deferred_batch_ops) {
4080 deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops;
4081 } else {
4082 assert(bdev);
4083 if (bdev->is_rotational()) {
4084 deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops_hdd;
4085 } else {
4086 deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops_ssd;
4087 }
4088 }
4089
4090 dout(10) << __func__ << " min_alloc_size 0x" << std::hex << min_alloc_size
4091 << std::dec << " order " << min_alloc_size_order
4092 << " max_alloc_size 0x" << std::hex << max_alloc_size
4093 << " prefer_deferred_size 0x" << prefer_deferred_size
4094 << std::dec
4095 << " deferred_batch_ops " << deferred_batch_ops
4096 << dendl;
4097 }
4098
4099 int BlueStore::_open_bdev(bool create)
4100 {
4101 assert(bdev == NULL);
4102 string p = path + "/block";
4103 bdev = BlockDevice::create(cct, p, aio_cb, static_cast<void*>(this));
4104 int r = bdev->open(p);
4105 if (r < 0)
4106 goto fail;
4107
4108 if (bdev->supported_bdev_label()) {
4109 r = _check_or_set_bdev_label(p, bdev->get_size(), "main", create);
4110 if (r < 0)
4111 goto fail_close;
4112 }
4113
4114 // initialize global block parameters
4115 block_size = bdev->get_block_size();
4116 block_mask = ~(block_size - 1);
4117 block_size_order = ctz(block_size);
4118 assert(block_size == 1u << block_size_order);
4119 // and set cache_size based on device type
4120 r = _set_cache_sizes();
4121 if (r < 0) {
4122 goto fail_close;
4123 }
4124 return 0;
4125
4126 fail_close:
4127 bdev->close();
4128 fail:
4129 delete bdev;
4130 bdev = NULL;
4131 return r;
4132 }
4133
4134 void BlueStore::_close_bdev()
4135 {
4136 assert(bdev);
4137 bdev->close();
4138 delete bdev;
4139 bdev = NULL;
4140 }
4141
4142 int BlueStore::_open_fm(bool create)
4143 {
4144 assert(fm == NULL);
4145 fm = FreelistManager::create(cct, freelist_type, db, PREFIX_ALLOC);
4146
4147 if (create) {
4148 // initialize freespace
4149 dout(20) << __func__ << " initializing freespace" << dendl;
4150 KeyValueDB::Transaction t = db->get_transaction();
4151 {
4152 bufferlist bl;
4153 bl.append(freelist_type);
4154 t->set(PREFIX_SUPER, "freelist_type", bl);
4155 }
4156 fm->create(bdev->get_size(), t);
4157
4158 // allocate superblock reserved space. note that we do not mark
4159 // bluefs space as allocated in the freelist; we instead rely on
4160 // bluefs_extents.
4161 fm->allocate(0, SUPER_RESERVED, t);
4162
4163 uint64_t reserved = 0;
4164 if (cct->_conf->bluestore_bluefs) {
4165 assert(bluefs_extents.num_intervals() == 1);
4166 interval_set<uint64_t>::iterator p = bluefs_extents.begin();
4167 reserved = p.get_start() + p.get_len();
4168 dout(20) << __func__ << " reserved 0x" << std::hex << reserved << std::dec
4169 << " for bluefs" << dendl;
4170 bufferlist bl;
4171 ::encode(bluefs_extents, bl);
4172 t->set(PREFIX_SUPER, "bluefs_extents", bl);
4173 dout(20) << __func__ << " bluefs_extents 0x" << std::hex << bluefs_extents
4174 << std::dec << dendl;
4175 } else {
4176 reserved = SUPER_RESERVED;
4177 }
4178
4179 if (cct->_conf->bluestore_debug_prefill > 0) {
4180 uint64_t end = bdev->get_size() - reserved;
4181 dout(1) << __func__ << " pre-fragmenting freespace, using "
4182 << cct->_conf->bluestore_debug_prefill << " with max free extent "
4183 << cct->_conf->bluestore_debug_prefragment_max << dendl;
4184 uint64_t start = P2ROUNDUP(reserved, min_alloc_size);
4185 uint64_t max_b = cct->_conf->bluestore_debug_prefragment_max / min_alloc_size;
4186 float r = cct->_conf->bluestore_debug_prefill;
4187 r /= 1.0 - r;
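// With a prefill fraction f, r = f / (1 - f), so each free extent of
// length l is followed by roughly r * l bytes marked as used and the
// overall used fraction converges on f. Illustrative numbers only:
// f = 0.2 gives r = 0.25, so a 1 MiB free extent is followed by about
// 256 KiB of allocated space.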
4188 bool stop = false;
4189
4190 while (!stop && start < end) {
4191 uint64_t l = (rand() % max_b + 1) * min_alloc_size;
4192 if (start + l > end) {
4193 l = end - start;
4194 l = P2ALIGN(l, min_alloc_size);
4195 }
4196 assert(start + l <= end);
4197
4198 uint64_t u = 1 + (uint64_t)(r * (double)l);
4199 u = P2ROUNDUP(u, min_alloc_size);
4200 if (start + l + u > end) {
4201 u = end - (start + l);
4202 // trim to align so we don't overflow again
4203 u = P2ALIGN(u, min_alloc_size);
4204 stop = true;
4205 }
4206 assert(start + l + u <= end);
4207
4208 dout(20) << " free 0x" << std::hex << start << "~" << l
4209 << " use 0x" << u << std::dec << dendl;
4210
4211 if (u == 0) {
4212 // break if u has been trimmed to nothing
4213 break;
4214 }
4215
4216 fm->allocate(start + l, u, t);
4217 start += l + u;
4218 }
4219 }
4220 db->submit_transaction_sync(t);
4221 }
4222
4223 int r = fm->init();
4224 if (r < 0) {
4225 derr << __func__ << " freelist init failed: " << cpp_strerror(r) << dendl;
4226 delete fm;
4227 fm = NULL;
4228 return r;
4229 }
4230 return 0;
4231 }
4232
4233 void BlueStore::_close_fm()
4234 {
4235 dout(10) << __func__ << dendl;
4236 assert(fm);
4237 fm->shutdown();
4238 delete fm;
4239 fm = NULL;
4240 }
4241
4242 int BlueStore::_open_alloc()
4243 {
4244 assert(alloc == NULL);
4245 assert(bdev->get_size());
4246 alloc = Allocator::create(cct, cct->_conf->bluestore_allocator,
4247 bdev->get_size(),
4248 min_alloc_size);
4249 if (!alloc) {
4250 lderr(cct) << __func__ << " Allocator::unknown alloc type "
4251 << cct->_conf->bluestore_allocator
4252 << dendl;
4253 return -EINVAL;
4254 }
4255
4256 uint64_t num = 0, bytes = 0;
4257
4258 dout(1) << __func__ << " opening allocation metadata" << dendl;
4259 // initialize from freelist
4260 fm->enumerate_reset();
4261 uint64_t offset, length;
4262 while (fm->enumerate_next(&offset, &length)) {
4263 alloc->init_add_free(offset, length);
4264 ++num;
4265 bytes += length;
4266 }
4267 fm->enumerate_reset();
4268 dout(1) << __func__ << " loaded " << pretty_si_t(bytes)
4269 << " in " << num << " extents"
4270 << dendl;
4271
4272 // also mark bluefs space as allocated
4273 for (auto e = bluefs_extents.begin(); e != bluefs_extents.end(); ++e) {
4274 alloc->init_rm_free(e.get_start(), e.get_len());
4275 }
4276 dout(10) << __func__ << " marked bluefs_extents 0x" << std::hex
4277 << bluefs_extents << std::dec << " as allocated" << dendl;
4278
4279 return 0;
4280 }
4281
4282 void BlueStore::_close_alloc()
4283 {
4284 assert(alloc);
4285 alloc->shutdown();
4286 delete alloc;
4287 alloc = NULL;
4288 }
4289
4290 int BlueStore::_open_fsid(bool create)
4291 {
4292 assert(fsid_fd < 0);
4293 int flags = O_RDWR;
4294 if (create)
4295 flags |= O_CREAT;
4296 fsid_fd = ::openat(path_fd, "fsid", flags, 0644);
4297 if (fsid_fd < 0) {
4298 int err = -errno;
4299 derr << __func__ << " " << cpp_strerror(err) << dendl;
4300 return err;
4301 }
4302 return 0;
4303 }
4304
4305 int BlueStore::_read_fsid(uuid_d *uuid)
4306 {
4307 char fsid_str[40];
4308 memset(fsid_str, 0, sizeof(fsid_str));
4309 int ret = safe_read(fsid_fd, fsid_str, sizeof(fsid_str));
4310 if (ret < 0) {
4311 derr << __func__ << " failed: " << cpp_strerror(ret) << dendl;
4312 return ret;
4313 }
4314 if (ret > 36)
4315 fsid_str[36] = 0;
4316 else
4317 fsid_str[ret] = 0;
4318 if (!uuid->parse(fsid_str)) {
4319 derr << __func__ << " unparsable uuid " << fsid_str << dendl;
4320 return -EINVAL;
4321 }
4322 return 0;
4323 }
4324
4325 int BlueStore::_write_fsid()
4326 {
4327 int r = ::ftruncate(fsid_fd, 0);
4328 if (r < 0) {
4329 r = -errno;
4330 derr << __func__ << " fsid truncate failed: " << cpp_strerror(r) << dendl;
4331 return r;
4332 }
4333 string str = stringify(fsid) + "\n";
4334 r = safe_write(fsid_fd, str.c_str(), str.length());
4335 if (r < 0) {
4336 derr << __func__ << " fsid write failed: " << cpp_strerror(r) << dendl;
4337 return r;
4338 }
4339 r = ::fsync(fsid_fd);
4340 if (r < 0) {
4341 r = -errno;
4342 derr << __func__ << " fsid fsync failed: " << cpp_strerror(r) << dendl;
4343 return r;
4344 }
4345 return 0;
4346 }
4347
4348 void BlueStore::_close_fsid()
4349 {
4350 VOID_TEMP_FAILURE_RETRY(::close(fsid_fd));
4351 fsid_fd = -1;
4352 }
4353
4354 int BlueStore::_lock_fsid()
4355 {
4356 struct flock l;
4357 memset(&l, 0, sizeof(l));
4358 l.l_type = F_WRLCK;
4359 l.l_whence = SEEK_SET;
4360 int r = ::fcntl(fsid_fd, F_SETLK, &l);
4361 if (r < 0) {
4362 int err = errno;
4363 derr << __func__ << " failed to lock " << path << "/fsid"
4364 << " (is another ceph-osd still running?)"
4365 << cpp_strerror(err) << dendl;
4366 return -err;
4367 }
4368 return 0;
4369 }
4370
4371 bool BlueStore::is_rotational()
4372 {
4373 if (bdev) {
4374 return bdev->is_rotational();
4375 }
4376
4377 bool rotational = true;
4378 int r = _open_path();
4379 if (r < 0)
4380 goto out;
4381 r = _open_fsid(false);
4382 if (r < 0)
4383 goto out_path;
4384 r = _read_fsid(&fsid);
4385 if (r < 0)
4386 goto out_fsid;
4387 r = _lock_fsid();
4388 if (r < 0)
4389 goto out_fsid;
4390 r = _open_bdev(false);
4391 if (r < 0)
4392 goto out_fsid;
4393 rotational = bdev->is_rotational();
4394 _close_bdev();
4395 out_fsid:
4396 _close_fsid();
4397 out_path:
4398 _close_path();
4399 out:
4400 return rotational;
4401 }
4402
4403 bool BlueStore::is_journal_rotational()
4404 {
4405 if (!bluefs) {
4406 dout(5) << __func__ << " bluefs disabled, defaulting to store media type"
4407 << dendl;
4408 return is_rotational();
4409 }
4410 dout(10) << __func__ << " " << (int)bluefs->wal_is_rotational() << dendl;
4411 return bluefs->wal_is_rotational();
4412 }
4413
4414 bool BlueStore::test_mount_in_use()
4415 {
4416 // most error conditions mean the mount is not in use (e.g., because
4417 // it doesn't exist). only if we fail to lock do we conclude it is
4418 // in use.
4419 bool ret = false;
4420 int r = _open_path();
4421 if (r < 0)
4422 return false;
4423 r = _open_fsid(false);
4424 if (r < 0)
4425 goto out_path;
4426 r = _lock_fsid();
4427 if (r < 0)
4428 ret = true; // if we can't lock, it is in use
4429 _close_fsid();
4430 out_path:
4431 _close_path();
4432 return ret;
4433 }
4434
4435 int BlueStore::_open_db(bool create)
4436 {
4437 int r;
4438 assert(!db);
4439 string fn = path + "/db";
4440 string options;
4441 stringstream err;
4442 ceph::shared_ptr<Int64ArrayMergeOperator> merge_op(new Int64ArrayMergeOperator);
4443
4444 string kv_backend;
4445 if (create) {
4446 kv_backend = cct->_conf->bluestore_kvbackend;
4447 } else {
4448 r = read_meta("kv_backend", &kv_backend);
4449 if (r < 0) {
4450 derr << __func__ << " unable to read 'kv_backend' meta" << dendl;
4451 return -EIO;
4452 }
4453 }
4454 dout(10) << __func__ << " kv_backend = " << kv_backend << dendl;
4455
4456 bool do_bluefs;
4457 if (create) {
4458 do_bluefs = cct->_conf->bluestore_bluefs;
4459 } else {
4460 string s;
4461 r = read_meta("bluefs", &s);
4462 if (r < 0) {
4463 derr << __func__ << " unable to read 'bluefs' meta" << dendl;
4464 return -EIO;
4465 }
4466 if (s == "1") {
4467 do_bluefs = true;
4468 } else if (s == "0") {
4469 do_bluefs = false;
4470 } else {
4471 derr << __func__ << " bluefs = " << s << " : not 0 or 1, aborting"
4472 << dendl;
4473 return -EIO;
4474 }
4475 }
4476 dout(10) << __func__ << " do_bluefs = " << do_bluefs << dendl;
4477
4478 rocksdb::Env *env = NULL;
4479 if (do_bluefs) {
4480 dout(10) << __func__ << " initializing bluefs" << dendl;
4481 if (kv_backend != "rocksdb") {
4482 derr << " backend must be rocksdb to use bluefs" << dendl;
4483 return -EINVAL;
4484 }
4485 bluefs = new BlueFS(cct);
4486
4487 string bfn;
4488 struct stat st;
4489
4490 bfn = path + "/block.db";
4491 if (::stat(bfn.c_str(), &st) == 0) {
4492 r = bluefs->add_block_device(BlueFS::BDEV_DB, bfn);
4493 if (r < 0) {
4494 derr << __func__ << " add block device(" << bfn << ") returned: "
4495 << cpp_strerror(r) << dendl;
4496 goto free_bluefs;
4497 }
4498
4499 if (bluefs->bdev_support_label(BlueFS::BDEV_DB)) {
4500 r = _check_or_set_bdev_label(
4501 bfn,
4502 bluefs->get_block_device_size(BlueFS::BDEV_DB),
4503 "bluefs db", create);
4504 if (r < 0) {
4505 derr << __func__
4506 << " check block device(" << bfn << ") label returned: "
4507 << cpp_strerror(r) << dendl;
4508 goto free_bluefs;
4509 }
4510 }
4511 if (create) {
4512 bluefs->add_block_extent(
4513 BlueFS::BDEV_DB,
4514 SUPER_RESERVED,
4515 bluefs->get_block_device_size(BlueFS::BDEV_DB) - SUPER_RESERVED);
4516 }
4517 bluefs_shared_bdev = BlueFS::BDEV_SLOW;
4518 bluefs_single_shared_device = false;
4519 } else if (::lstat(bfn.c_str(), &st) == -1) {
4520 bluefs_shared_bdev = BlueFS::BDEV_DB;
4521 } else {
4522 // the symlink exists but its target does not; treat this as an error
4523 derr << __func__ << " " << bfn << " link target doesn't exist" << dendl;
4524 r = -errno;
4525 goto free_bluefs;
4526 }
4527
4528 // shared device
4529 bfn = path + "/block";
4530 r = bluefs->add_block_device(bluefs_shared_bdev, bfn);
4531 if (r < 0) {
4532 derr << __func__ << " add block device(" << bfn << ") returned: "
4533 << cpp_strerror(r) << dendl;
4534 goto free_bluefs;
4535 }
4536 if (create) {
4537 // note: we always leave the first SUPER_RESERVED (8k) of the device unused
4538 uint64_t initial =
4539 bdev->get_size() * (cct->_conf->bluestore_bluefs_min_ratio +
4540 cct->_conf->bluestore_bluefs_gift_ratio);
4541 initial = MAX(initial, cct->_conf->bluestore_bluefs_min);
4542 // align to bluefs's alloc_size
4543 initial = P2ROUNDUP(initial, cct->_conf->bluefs_alloc_size);
4544 // put bluefs in the middle of the device in case it is an HDD
4545 uint64_t start = P2ALIGN((bdev->get_size() - initial) / 2,
4546 cct->_conf->bluefs_alloc_size);
4547 bluefs->add_block_extent(bluefs_shared_bdev, start, initial);
4548 bluefs_extents.insert(start, initial);
4549 }
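// Sizing note for the create path above: bluefs initially gets
// size * (bluestore_bluefs_min_ratio + bluestore_bluefs_gift_ratio) of the
// shared device, floored at bluestore_bluefs_min, rounded up to
// bluefs_alloc_size, and placed in the middle of the device (see the
// comment above regarding HDDs).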
4550
4551 bfn = path + "/block.wal";
4552 if (::stat(bfn.c_str(), &st) == 0) {
4553 r = bluefs->add_block_device(BlueFS::BDEV_WAL, bfn);
4554 if (r < 0) {
4555 derr << __func__ << " add block device(" << bfn << ") returned: "
4556 << cpp_strerror(r) << dendl;
4557 goto free_bluefs;
4558 }
4559
4560 if (bluefs->bdev_support_label(BlueFS::BDEV_WAL)) {
4561 r = _check_or_set_bdev_label(
4562 bfn,
4563 bluefs->get_block_device_size(BlueFS::BDEV_WAL),
4564 "bluefs wal", create);
4565 if (r < 0) {
4566 derr << __func__ << " check block device(" << bfn
4567 << ") label returned: " << cpp_strerror(r) << dendl;
4568 goto free_bluefs;
4569 }
4570 }
4571
4572 if (create) {
4573 bluefs->add_block_extent(
4574 BlueFS::BDEV_WAL, BDEV_LABEL_BLOCK_SIZE,
4575 bluefs->get_block_device_size(BlueFS::BDEV_WAL) -
4576 BDEV_LABEL_BLOCK_SIZE);
4577 }
4578 cct->_conf->set_val("rocksdb_separate_wal_dir", "true");
4579 bluefs_single_shared_device = false;
4580 } else if (::lstat(bfn.c_str(), &st) == -1) {
4581 cct->_conf->set_val("rocksdb_separate_wal_dir", "false");
4582 } else {
4583 // the symlink exists but its target does not; treat this as an error
4584 derr << __func__ << " " << bfn << " link target doesn't exist" << dendl;
4585 r = -errno;
4586 goto free_bluefs;
4587 }
4588
4589 if (create) {
4590 bluefs->mkfs(fsid);
4591 }
4592 r = bluefs->mount();
4593 if (r < 0) {
4594 derr << __func__ << " failed bluefs mount: " << cpp_strerror(r) << dendl;
4595 goto free_bluefs;
4596 }
4597 if (cct->_conf->bluestore_bluefs_env_mirror) {
4598 rocksdb::Env *a = new BlueRocksEnv(bluefs);
4599 rocksdb::Env *b = rocksdb::Env::Default();
4600 if (create) {
4601 string cmd = "rm -rf " + path + "/db " +
4602 path + "/db.slow " +
4603 path + "/db.wal";
4604 int r = system(cmd.c_str());
4605 (void)r;
4606 }
4607 env = new rocksdb::EnvMirror(b, a, false, true);
4608 } else {
4609 env = new BlueRocksEnv(bluefs);
4610
4611 // simplify the dir names, too, as "seen" by rocksdb
4612 fn = "db";
4613 }
4614
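// When a separate block.db device is present, rocksdb is told about both
// tiers via rocksdb_db_paths in "path,size path,size" form, with each
// size set to roughly 95% of the corresponding bluefs device to leave a
// little headroom.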
4615 if (bluefs_shared_bdev == BlueFS::BDEV_SLOW) {
4616 // we have both block.db and block; tell rocksdb!
4617 // note: the second (last) size value doesn't really matter
4618 ostringstream db_paths;
4619 uint64_t db_size = bluefs->get_block_device_size(BlueFS::BDEV_DB);
4620 uint64_t slow_size = bluefs->get_block_device_size(BlueFS::BDEV_SLOW);
4621 db_paths << fn << ","
4622 << (uint64_t)(db_size * 95 / 100) << " "
4623 << fn + ".slow" << ","
4624 << (uint64_t)(slow_size * 95 / 100);
4625 cct->_conf->set_val("rocksdb_db_paths", db_paths.str(), false);
4626 dout(10) << __func__ << " set rocksdb_db_paths to "
4627 << cct->_conf->get_val<std::string>("rocksdb_db_paths") << dendl;
4628 }
4629
4630 if (create) {
4631 env->CreateDir(fn);
4632 if (cct->_conf->rocksdb_separate_wal_dir)
4633 env->CreateDir(fn + ".wal");
4634 if (cct->_conf->get_val<std::string>("rocksdb_db_paths").length())
4635 env->CreateDir(fn + ".slow");
4636 }
4637 } else if (create) {
4638 int r = ::mkdir(fn.c_str(), 0755);
4639 if (r < 0)
4640 r = -errno;
4641 if (r < 0 && r != -EEXIST) {
4642 derr << __func__ << " failed to create " << fn << ": " << cpp_strerror(r)
4643 << dendl;
4644 return r;
4645 }
4646
4647 // wal_dir, too!
4648 if (cct->_conf->rocksdb_separate_wal_dir) {
4649 string walfn = path + "/db.wal";
4650 r = ::mkdir(walfn.c_str(), 0755);
4651 if (r < 0)
4652 r = -errno;
4653 if (r < 0 && r != -EEXIST) {
4654 derr << __func__ << " failed to create " << walfn
4655 << ": " << cpp_strerror(r)
4656 << dendl;
4657 return r;
4658 }
4659 }
4660 }
4661
4662 db = KeyValueDB::create(cct,
4663 kv_backend,
4664 fn,
4665 static_cast<void*>(env));
4666 if (!db) {
4667 derr << __func__ << " error creating db" << dendl;
4668 if (bluefs) {
4669 bluefs->umount();
4670 delete bluefs;
4671 bluefs = NULL;
4672 }
4673 // delete env manually here since we can't depend on db to do this
4674 // under this case
4675 delete env;
4676 env = NULL;
4677 return -EIO;
4678 }
4679
4680 FreelistManager::setup_merge_operators(db);
4681 db->set_merge_operator(PREFIX_STAT, merge_op);
4682
4683 db->set_cache_size(cache_size * cache_kv_ratio);
4684
4685 if (kv_backend == "rocksdb")
4686 options = cct->_conf->bluestore_rocksdb_options;
4687 db->init(options);
4688 if (create)
4689 r = db->create_and_open(err);
4690 else
4691 r = db->open(err);
4692 if (r) {
4693 derr << __func__ << " error opening db: " << err.str() << dendl;
4694 if (bluefs) {
4695 bluefs->umount();
4696 delete bluefs;
4697 bluefs = NULL;
4698 }
4699 delete db;
4700 db = NULL;
4701 return -EIO;
4702 }
4703 dout(1) << __func__ << " opened " << kv_backend
4704 << " path " << fn << " options " << options << dendl;
4705 return 0;
4706
4707 free_bluefs:
4708 assert(bluefs);
4709 delete bluefs;
4710 bluefs = NULL;
4711 return r;
4712 }
4713
4714 void BlueStore::_close_db()
4715 {
4716 assert(db);
4717 delete db;
4718 db = NULL;
4719 if (bluefs) {
4720 bluefs->umount();
4721 delete bluefs;
4722 bluefs = NULL;
4723 }
4724 }
4725
4726 int BlueStore::_reconcile_bluefs_freespace()
4727 {
4728 dout(10) << __func__ << dendl;
4729 interval_set<uint64_t> bset;
4730 int r = bluefs->get_block_extents(bluefs_shared_bdev, &bset);
4731 assert(r == 0);
4732 if (bset == bluefs_extents) {
4733 dout(10) << __func__ << " we agree bluefs has 0x" << std::hex << bset
4734 << std::dec << dendl;
4735 return 0;
4736 }
4737 dout(10) << __func__ << " bluefs says 0x" << std::hex << bset << std::dec
4738 << dendl;
4739 dout(10) << __func__ << " super says 0x" << std::hex << bluefs_extents
4740 << std::dec << dendl;
4741
4742 interval_set<uint64_t> overlap;
4743 overlap.intersection_of(bset, bluefs_extents);
4744
4745 bset.subtract(overlap);
4746 if (!bset.empty()) {
4747 derr << __func__ << " bluefs extra 0x" << std::hex << bset << std::dec
4748 << dendl;
4749 return -EIO;
4750 }
4751
4752 interval_set<uint64_t> super_extra;
4753 super_extra = bluefs_extents;
4754 super_extra.subtract(overlap);
4755 if (!super_extra.empty()) {
4756 // This is normal: it can happen if we commit to give extents to
4757 // bluefs and we crash before bluefs commits that it owns them.
4758 dout(10) << __func__ << " super extra " << super_extra << dendl;
4759 for (interval_set<uint64_t>::iterator p = super_extra.begin();
4760 p != super_extra.end();
4761 ++p) {
4762 bluefs->add_block_extent(bluefs_shared_bdev, p.get_start(), p.get_len());
4763 }
4764 }
4765
4766 return 0;
4767 }
4768
4769 int BlueStore::_balance_bluefs_freespace(PExtentVector *extents)
4770 {
4771 int ret = 0;
4772 assert(bluefs);
4773
4774 vector<pair<uint64_t,uint64_t>> bluefs_usage; // <free, total> ...
4775 bluefs->get_usage(&bluefs_usage);
4776 assert(bluefs_usage.size() > bluefs_shared_bdev);
4777
4778 // fixme: look at primary bdev only for now
4779 uint64_t bluefs_free = bluefs_usage[bluefs_shared_bdev].first;
4780 uint64_t bluefs_total = bluefs_usage[bluefs_shared_bdev].second;
4781 float bluefs_free_ratio = (float)bluefs_free / (float)bluefs_total;
4782
4783 uint64_t my_free = alloc->get_free();
4784 uint64_t total = bdev->get_size();
4785 float my_free_ratio = (float)my_free / (float)total;
4786
4787 uint64_t total_free = bluefs_free + my_free;
4788
4789 float bluefs_ratio = (float)bluefs_free / (float)total_free;
4790
4791 dout(10) << __func__
4792 << " bluefs " << pretty_si_t(bluefs_free)
4793 << " free (" << bluefs_free_ratio
4794 << ") bluestore " << pretty_si_t(my_free)
4795 << " free (" << my_free_ratio
4796 << "), bluefs_ratio " << bluefs_ratio
4797 << dendl;
4798
4799 uint64_t gift = 0;
4800 uint64_t reclaim = 0;
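// Rebalancing policy implemented below: if bluefs holds less than
// bluestore_bluefs_min_ratio of the combined free space, it is gifted
// bluestore_bluefs_gift_ratio * total_free; if it holds more than
// bluestore_bluefs_max_ratio, bluestore_bluefs_reclaim_ratio * total_free
// is reclaimed, but never so much that bluefs drops below
// bluestore_bluefs_min. A gift needed to reach bluestore_bluefs_min
// (when attainable) overrides any pending reclaim.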
4801 if (bluefs_ratio < cct->_conf->bluestore_bluefs_min_ratio) {
4802 gift = cct->_conf->bluestore_bluefs_gift_ratio * total_free;
4803 dout(10) << __func__ << " bluefs_ratio " << bluefs_ratio
4804 << " < min_ratio " << cct->_conf->bluestore_bluefs_min_ratio
4805 << ", should gift " << pretty_si_t(gift) << dendl;
4806 } else if (bluefs_ratio > cct->_conf->bluestore_bluefs_max_ratio) {
4807 reclaim = cct->_conf->bluestore_bluefs_reclaim_ratio * total_free;
4808 if (bluefs_total - reclaim < cct->_conf->bluestore_bluefs_min)
4809 reclaim = bluefs_total - cct->_conf->bluestore_bluefs_min;
4810 dout(10) << __func__ << " bluefs_ratio " << bluefs_ratio
4811 << " > max_ratio " << cct->_conf->bluestore_bluefs_max_ratio
4812 << ", should reclaim " << pretty_si_t(reclaim) << dendl;
4813 }
4814 if (bluefs_total < cct->_conf->bluestore_bluefs_min &&
4815 cct->_conf->bluestore_bluefs_min <
4816 (uint64_t)(cct->_conf->bluestore_bluefs_max_ratio * total_free)) {
4817 uint64_t g = cct->_conf->bluestore_bluefs_min - bluefs_total;
4818 dout(10) << __func__ << " bluefs_total " << bluefs_total
4819 << " < min " << cct->_conf->bluestore_bluefs_min
4820 << ", should gift " << pretty_si_t(g) << dendl;
4821 if (g > gift)
4822 gift = g;
4823 reclaim = 0;
4824 }
4825
4826 if (gift) {
4827 // round up to alloc size
4828 gift = P2ROUNDUP(gift, cct->_conf->bluefs_alloc_size);
4829
4830 // hard cap to fit into 32 bits
4831 gift = MIN(gift, 1ull<<31);
4832 dout(10) << __func__ << " gifting " << gift
4833 << " (" << pretty_si_t(gift) << ")" << dendl;
4834
4835 // fixme: just do one allocation to start...
4836 int r = alloc->reserve(gift);
4837 assert(r == 0);
4838
4839 AllocExtentVector exts;
4840 int64_t alloc_len = alloc->allocate(gift, cct->_conf->bluefs_alloc_size,
4841 0, 0, &exts);
4842
4843 if (alloc_len < (int64_t)gift) {
4844 derr << __func__ << " allocate failed on 0x" << std::hex << gift
4845 << " min_alloc_size 0x" << min_alloc_size << std::dec << dendl;
4846 alloc->dump();
4847 assert(0 == "allocate failed, wtf");
4848 return -ENOSPC;
4849 }
4850 for (auto& p : exts) {
4851 bluestore_pextent_t e = bluestore_pextent_t(p);
4852 dout(1) << __func__ << " gifting " << e << " to bluefs" << dendl;
4853 extents->push_back(e);
4854 }
4855 gift = 0;
4856
4857 ret = 1;
4858 }
4859
4860 // reclaim from bluefs?
4861 if (reclaim) {
4862 // round up to alloc size
4863 reclaim = P2ROUNDUP(reclaim, cct->_conf->bluefs_alloc_size);
4864
4865 // hard cap to fit into 32 bits
4866 reclaim = MIN(reclaim, 1ull<<31);
4867 dout(10) << __func__ << " reclaiming " << reclaim
4868 << " (" << pretty_si_t(reclaim) << ")" << dendl;
4869
4870 while (reclaim > 0) {
4871 // NOTE: this will block and do IO.
4872 AllocExtentVector extents;
4873 int r = bluefs->reclaim_blocks(bluefs_shared_bdev, reclaim,
4874 &extents);
4875 if (r < 0) {
4876 derr << __func__ << " failed to reclaim space from bluefs"
4877 << dendl;
4878 break;
4879 }
4880 for (auto e : extents) {
4881 bluefs_extents.erase(e.offset, e.length);
4882 bluefs_extents_reclaiming.insert(e.offset, e.length);
4883 reclaim -= e.length;
4884 }
4885 }
4886
4887 ret = 1;
4888 }
4889
4890 return ret;
4891 }
4892
4893 void BlueStore::_commit_bluefs_freespace(
4894 const PExtentVector& bluefs_gift_extents)
4895 {
4896 dout(10) << __func__ << dendl;
4897 for (auto& p : bluefs_gift_extents) {
4898 bluefs->add_block_extent(bluefs_shared_bdev, p.offset, p.length);
4899 }
4900 }
4901
4902 int BlueStore::_open_collections(int *errors)
4903 {
4904 assert(coll_map.empty());
4905 KeyValueDB::Iterator it = db->get_iterator(PREFIX_COLL);
4906 for (it->upper_bound(string());
4907 it->valid();
4908 it->next()) {
4909 coll_t cid;
4910 if (cid.parse(it->key())) {
4911 CollectionRef c(
4912 new Collection(
4913 this,
4914 cache_shards[cid.hash_to_shard(cache_shards.size())],
4915 cid));
4916 bufferlist bl = it->value();
4917 bufferlist::iterator p = bl.begin();
4918 try {
4919 ::decode(c->cnode, p);
4920 } catch (buffer::error& e) {
4921 derr << __func__ << " failed to decode cnode, key:"
4922 << pretty_binary_string(it->key()) << dendl;
4923 return -EIO;
4924 }
4925 dout(20) << __func__ << " opened " << cid << " " << c << dendl;
4926 coll_map[cid] = c;
4927 } else {
4928 derr << __func__ << " unrecognized collection " << it->key() << dendl;
4929 if (errors)
4930 (*errors)++;
4931 }
4932 }
4933 return 0;
4934 }
4935
4936 void BlueStore::_open_statfs()
4937 {
4938 bufferlist bl;
4939 int r = db->get(PREFIX_STAT, "bluestore_statfs", &bl);
4940 if (r >= 0) {
4941 if (size_t(bl.length()) >= sizeof(vstatfs.values)) {
4942 auto it = bl.begin();
4943 vstatfs.decode(it);
4944 } else {
4945 dout(10) << __func__ << " store_statfs is corrupt, using empty" << dendl;
4946 }
4947 }
4948 else {
4949 dout(10) << __func__ << " store_statfs missing, using empty" << dendl;
4950 }
4951 }
4952
4953 int BlueStore::_setup_block_symlink_or_file(
4954 string name,
4955 string epath,
4956 uint64_t size,
4957 bool create)
4958 {
4959 dout(20) << __func__ << " name " << name << " path " << epath
4960 << " size " << size << " create=" << (int)create << dendl;
4961 int r = 0;
4962 int flags = O_RDWR;
4963 if (create)
4964 flags |= O_CREAT;
4965 if (epath.length()) {
4966 r = ::symlinkat(epath.c_str(), path_fd, name.c_str());
4967 if (r < 0) {
4968 r = -errno;
4969 derr << __func__ << " failed to create " << name << " symlink to "
4970 << epath << ": " << cpp_strerror(r) << dendl;
4971 return r;
4972 }
4973
4974 if (!epath.compare(0, strlen(SPDK_PREFIX), SPDK_PREFIX)) {
4975 int fd = ::openat(path_fd, epath.c_str(), flags, 0644);
4976 if (fd < 0) {
4977 r = -errno;
4978 derr << __func__ << " failed to open " << epath << " file: "
4979 << cpp_strerror(r) << dendl;
4980 return r;
4981 }
4982 string serial_number = epath.substr(strlen(SPDK_PREFIX));
4983 r = ::write(fd, serial_number.c_str(), serial_number.size());
4984 assert(r == (int)serial_number.size());
4985 dout(1) << __func__ << " created " << name << " symlink to "
4986 << epath << dendl;
4987 VOID_TEMP_FAILURE_RETRY(::close(fd));
4988 }
4989 }
4990 if (size) {
4991 int fd = ::openat(path_fd, name.c_str(), flags, 0644);
4992 if (fd >= 0) {
4993 // block file is present
4994 struct stat st;
4995 int r = ::fstat(fd, &st);
4996 if (r == 0 &&
4997 S_ISREG(st.st_mode) && // if it is a regular file
4998 st.st_size == 0) { // and is 0 bytes
4999 r = ::ftruncate(fd, size);
5000 if (r < 0) {
5001 r = -errno;
5002 derr << __func__ << " failed to resize " << name << " file to "
5003 << size << ": " << cpp_strerror(r) << dendl;
5004 VOID_TEMP_FAILURE_RETRY(::close(fd));
5005 return r;
5006 }
5007
5008 if (cct->_conf->bluestore_block_preallocate_file) {
5009 #ifdef HAVE_POSIX_FALLOCATE
5010 r = ::posix_fallocate(fd, 0, size);
5011 if (r) {
5012 derr << __func__ << " failed to preallocate " << name << " file to "
5013 << size << ": " << cpp_strerror(r) << dendl;
5014 VOID_TEMP_FAILURE_RETRY(::close(fd));
5015 return -r;
5016 }
5017 #else
5018 char data[1024*128];
5019 for (uint64_t off = 0; off < size; off += sizeof(data)) {
5020 if (off + sizeof(data) > size)
5021 r = ::write(fd, data, size - off);
5022 else
5023 r = ::write(fd, data, sizeof(data));
5024 if (r < 0) {
5025 r = -errno;
5026 derr << __func__ << " failed to preallocate (via write) " << name << " file to "
5027 << size << ": " << cpp_strerror(r) << dendl;
5028 VOID_TEMP_FAILURE_RETRY(::close(fd));
5029 return r;
5030 }
5031 }
5032 #endif
5033 }
5034 dout(1) << __func__ << " resized " << name << " file to "
5035 << pretty_si_t(size) << "B" << dendl;
5036 }
5037 VOID_TEMP_FAILURE_RETRY(::close(fd));
5038 } else {
5039 int r = -errno;
5040 if (r != -ENOENT) {
5041 derr << __func__ << " failed to open " << name << " file: "
5042 << cpp_strerror(r) << dendl;
5043 return r;
5044 }
5045 }
5046 }
5047 return 0;
5048 }
5049
5050 int BlueStore::mkfs()
5051 {
5052 dout(1) << __func__ << " path " << path << dendl;
5053 int r;
5054 uuid_d old_fsid;
5055
5056 {
5057 string done;
5058 r = read_meta("mkfs_done", &done);
5059 if (r == 0) {
5060 dout(1) << __func__ << " already created" << dendl;
5061 if (cct->_conf->bluestore_fsck_on_mkfs) {
5062 r = fsck(cct->_conf->bluestore_fsck_on_mkfs_deep);
5063 if (r < 0) {
5064 derr << __func__ << " fsck found fatal error: " << cpp_strerror(r)
5065 << dendl;
5066 return r;
5067 }
5068 if (r > 0) {
5069 derr << __func__ << " fsck found " << r << " errors" << dendl;
5070 r = -EIO;
5071 }
5072 }
5073 return r; // idempotent
5074 }
5075 }
5076
5077 {
5078 string type;
5079 r = read_meta("type", &type);
5080 if (r == 0) {
5081 if (type != "bluestore") {
5082 derr << __func__ << " expected bluestore, but type is " << type << dendl;
5083 return -EIO;
5084 }
5085 } else {
5086 r = write_meta("type", "bluestore");
5087 if (r < 0)
5088 return r;
5089 }
5090 }
5091
5092 freelist_type = "bitmap";
5093
5094 r = _open_path();
5095 if (r < 0)
5096 return r;
5097
5098 r = _open_fsid(true);
5099 if (r < 0)
5100 goto out_path_fd;
5101
5102 r = _lock_fsid();
5103 if (r < 0)
5104 goto out_close_fsid;
5105
5106 r = _read_fsid(&old_fsid);
5107 if (r < 0 || old_fsid.is_zero()) {
5108 if (fsid.is_zero()) {
5109 fsid.generate_random();
5110 dout(1) << __func__ << " generated fsid " << fsid << dendl;
5111 } else {
5112 dout(1) << __func__ << " using provided fsid " << fsid << dendl;
5113 }
5114 // we'll write it later.
5115 } else {
5116 if (!fsid.is_zero() && fsid != old_fsid) {
5117 derr << __func__ << " on-disk fsid " << old_fsid
5118 << " != provided " << fsid << dendl;
5119 r = -EINVAL;
5120 goto out_close_fsid;
5121 }
5122 fsid = old_fsid;
5123 }
5124
5125 r = _setup_block_symlink_or_file("block", cct->_conf->bluestore_block_path,
5126 cct->_conf->bluestore_block_size,
5127 cct->_conf->bluestore_block_create);
5128 if (r < 0)
5129 goto out_close_fsid;
5130 if (cct->_conf->bluestore_bluefs) {
5131 r = _setup_block_symlink_or_file("block.wal", cct->_conf->bluestore_block_wal_path,
5132 cct->_conf->bluestore_block_wal_size,
5133 cct->_conf->bluestore_block_wal_create);
5134 if (r < 0)
5135 goto out_close_fsid;
5136 r = _setup_block_symlink_or_file("block.db", cct->_conf->bluestore_block_db_path,
5137 cct->_conf->bluestore_block_db_size,
5138 cct->_conf->bluestore_block_db_create);
5139 if (r < 0)
5140 goto out_close_fsid;
5141 }
5142
5143 r = _open_bdev(true);
5144 if (r < 0)
5145 goto out_close_fsid;
5146
5147 r = _open_db(true);
5148 if (r < 0)
5149 goto out_close_bdev;
5150
5151 r = _open_fm(true);
5152 if (r < 0)
5153 goto out_close_db;
5154
5155 {
5156 KeyValueDB::Transaction t = db->get_transaction();
5157 {
5158 bufferlist bl;
5159 ::encode((uint64_t)0, bl);
5160 t->set(PREFIX_SUPER, "nid_max", bl);
5161 t->set(PREFIX_SUPER, "blobid_max", bl);
5162 }
5163
5164 // choose min_alloc_size
5165 if (cct->_conf->bluestore_min_alloc_size) {
5166 min_alloc_size = cct->_conf->bluestore_min_alloc_size;
5167 } else {
5168 assert(bdev);
5169 if (bdev->is_rotational()) {
5170 min_alloc_size = cct->_conf->bluestore_min_alloc_size_hdd;
5171 } else {
5172 min_alloc_size = cct->_conf->bluestore_min_alloc_size_ssd;
5173 }
5174 }
5175
5176 // make sure min_alloc_size is power of 2 aligned.
5177 if (!ISP2(min_alloc_size)) {
5178 derr << __func__ << " min_alloc_size 0x"
5179 << std::hex << min_alloc_size << std::dec
5180 << " is not power of 2 aligned!"
5181 << dendl;
5182 r = -EINVAL;
5183 goto out_close_fm;
5184 }
5185
5186 {
5187 bufferlist bl;
5188 ::encode((uint64_t)min_alloc_size, bl);
5189 t->set(PREFIX_SUPER, "min_alloc_size", bl);
5190 }
5191
5192 ondisk_format = latest_ondisk_format;
5193 _prepare_ondisk_format_super(t);
5194 db->submit_transaction_sync(t);
5195 }
5196
5197
5198 r = write_meta("kv_backend", cct->_conf->bluestore_kvbackend);
5199 if (r < 0)
5200 goto out_close_fm;
5201
5202 r = write_meta("bluefs", stringify((int)cct->_conf->bluestore_bluefs));
5203 if (r < 0)
5204 goto out_close_fm;
5205
5206 if (fsid != old_fsid) {
5207 r = _write_fsid();
5208 if (r < 0) {
5209 derr << __func__ << " error writing fsid: " << cpp_strerror(r) << dendl;
5210 goto out_close_fm;
5211 }
5212 }
5213
5214 out_close_fm:
5215 _close_fm();
5216 out_close_db:
5217 _close_db();
5218 out_close_bdev:
5219 _close_bdev();
5220 out_close_fsid:
5221 _close_fsid();
5222 out_path_fd:
5223 _close_path();
5224
5225 if (r == 0 &&
5226 cct->_conf->bluestore_fsck_on_mkfs) {
5227 int rc = fsck(cct->_conf->bluestore_fsck_on_mkfs_deep);
5228 if (rc < 0)
5229 return rc;
5230 if (rc > 0) {
5231 derr << __func__ << " fsck found " << rc << " errors" << dendl;
5232 r = -EIO;
5233 }
5234 }
5235
5236 if (r == 0) {
5237 // indicate success by writing the 'mkfs_done' file
5238 r = write_meta("mkfs_done", "yes");
5239 }
5240
5241 if (r < 0) {
5242 derr << __func__ << " failed, " << cpp_strerror(r) << dendl;
5243 } else {
5244 dout(0) << __func__ << " success" << dendl;
5245 }
5246 return r;
5247 }
5248
5249 void BlueStore::set_cache_shards(unsigned num)
5250 {
5251 dout(10) << __func__ << " " << num << dendl;
5252 size_t old = cache_shards.size();
5253 assert(num >= old);
5254 cache_shards.resize(num);
5255 for (unsigned i = old; i < num; ++i) {
5256 cache_shards[i] = Cache::create(cct, cct->_conf->bluestore_cache_type,
5257 logger);
5258 }
5259 }
5260
5261 int BlueStore::_mount(bool kv_only)
5262 {
5263 dout(1) << __func__ << " path " << path << dendl;
5264
5265 {
5266 string type;
5267 int r = read_meta("type", &type);
5268 if (r < 0) {
5269 derr << __func__ << " failed to load os-type: " << cpp_strerror(r)
5270 << dendl;
5271 return r;
5272 }
5273
5274 if (type != "bluestore") {
5275 derr << __func__ << " expected bluestore, but type is " << type << dendl;
5276 return -EIO;
5277 }
5278 }
5279
5280 if (cct->_conf->bluestore_fsck_on_mount) {
5281 int rc = fsck(cct->_conf->bluestore_fsck_on_mount_deep);
5282 if (rc < 0)
5283 return rc;
5284 if (rc > 0) {
5285 derr << __func__ << " fsck found " << rc << " errors" << dendl;
5286 return -EIO;
5287 }
5288 }
5289
5290 int r = _open_path();
5291 if (r < 0)
5292 return r;
5293 r = _open_fsid(false);
5294 if (r < 0)
5295 goto out_path;
5296
5297 r = _read_fsid(&fsid);
5298 if (r < 0)
5299 goto out_fsid;
5300
5301 r = _lock_fsid();
5302 if (r < 0)
5303 goto out_fsid;
5304
5305 r = _open_bdev(false);
5306 if (r < 0)
5307 goto out_fsid;
5308
5309 r = _open_db(false);
5310 if (r < 0)
5311 goto out_bdev;
5312
5313 if (kv_only)
5314 return 0;
5315
5316 r = _open_super_meta();
5317 if (r < 0)
5318 goto out_db;
5319
5320 r = _open_fm(false);
5321 if (r < 0)
5322 goto out_db;
5323
5324 r = _open_alloc();
5325 if (r < 0)
5326 goto out_fm;
5327
5328 r = _open_collections();
5329 if (r < 0)
5330 goto out_alloc;
5331
5332 r = _reload_logger();
5333 if (r < 0)
5334 goto out_coll;
5335
5336 if (bluefs) {
5337 r = _reconcile_bluefs_freespace();
5338 if (r < 0)
5339 goto out_coll;
5340 }
5341
5342 _kv_start();
5343
5344 r = _deferred_replay();
5345 if (r < 0)
5346 goto out_stop;
5347
5348 mempool_thread.init();
5349
5350
5351 mounted = true;
5352 return 0;
5353
5354 out_stop:
5355 _kv_stop();
5356 out_coll:
5357 _flush_cache();
5358 out_alloc:
5359 _close_alloc();
5360 out_fm:
5361 _close_fm();
5362 out_db:
5363 _close_db();
5364 out_bdev:
5365 _close_bdev();
5366 out_fsid:
5367 _close_fsid();
5368 out_path:
5369 _close_path();
5370 return r;
5371 }
5372
5373 int BlueStore::umount()
5374 {
5375 assert(mounted);
5376 dout(1) << __func__ << dendl;
5377
5378 _osr_drain_all();
5379 _osr_unregister_all();
5380
5381 mempool_thread.shutdown();
5382
5383 dout(20) << __func__ << " stopping kv thread" << dendl;
5384 _kv_stop();
5385 _reap_collections();
5386 _flush_cache();
5387 dout(20) << __func__ << " closing" << dendl;
5388
5389 mounted = false;
5390 _close_alloc();
5391 _close_fm();
5392 _close_db();
5393 _close_bdev();
5394 _close_fsid();
5395 _close_path();
5396
5397 if (cct->_conf->bluestore_fsck_on_umount) {
5398 int rc = fsck(cct->_conf->bluestore_fsck_on_umount_deep);
5399 if (rc < 0)
5400 return rc;
5401 if (rc > 0) {
5402 derr << __func__ << " fsck found " << rc << " errors" << dendl;
5403 return -EIO;
5404 }
5405 }
5406 return 0;
5407 }
5408
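// Helper used by fsck below: walks [off, off + len) in granularity-sized
// steps and applies f to the corresponding bit position in the bitset, so
// callers can mark or test an entire block range with a single call.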
5409 static void apply(uint64_t off,
5410 uint64_t len,
5411 uint64_t granularity,
5412 BlueStore::mempool_dynamic_bitset &bitset,
5413 const char *what,
5414 std::function<void(uint64_t,
5415 BlueStore::mempool_dynamic_bitset &)> f) {
5416 auto end = ROUND_UP_TO(off + len, granularity);
5417 while (off < end) {
5418 uint64_t pos = off / granularity;
5419 f(pos, bitset);
5420 off += granularity;
5421 }
5422 }
5423
5424 int BlueStore::_fsck_check_extents(
5425 const ghobject_t& oid,
5426 const PExtentVector& extents,
5427 bool compressed,
5428 mempool_dynamic_bitset &used_blocks,
5429 store_statfs_t& expected_statfs)
5430 {
5431 dout(30) << __func__ << " oid " << oid << " extents " << extents << dendl;
5432 int errors = 0;
5433 for (auto e : extents) {
5434 if (!e.is_valid())
5435 continue;
5436 expected_statfs.allocated += e.length;
5437 if (compressed) {
5438 expected_statfs.compressed_allocated += e.length;
5439 }
5440 bool already = false;
5441 apply(
5442 e.offset, e.length, block_size, used_blocks, __func__,
5443 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5444 if (bs.test(pos))
5445 already = true;
5446 else
5447 bs.set(pos);
5448 });
5449 if (already) {
5450 derr << " " << oid << " extent " << e
5451 << " or a subset is already allocated" << dendl;
5452 ++errors;
5453 }
5454 if (e.end() > bdev->get_size()) {
5455 derr << " " << oid << " extent " << e
5456 << " past end of block device" << dendl;
5457 ++errors;
5458 }
5459 }
5460 return errors;
5461 }
5462
5463 int BlueStore::fsck(bool deep)
5464 {
5465 dout(1) << __func__ << (deep ? " (deep)" : " (shallow)") << " start" << dendl;
5466 int errors = 0;
5467
5468 typedef btree::btree_set<
5469 uint64_t,std::less<uint64_t>,
5470 mempool::bluestore_fsck::pool_allocator<uint64_t>> uint64_t_btree_t;
5471 uint64_t_btree_t used_nids;
5472 uint64_t_btree_t used_omap_head;
5473 uint64_t_btree_t used_sbids;
5474
5475 mempool_dynamic_bitset used_blocks;
5476 KeyValueDB::Iterator it;
5477 store_statfs_t expected_statfs, actual_statfs;
5478 struct sb_info_t {
5479 list<ghobject_t> oids;
5480 SharedBlobRef sb;
5481 bluestore_extent_ref_map_t ref_map;
5482 bool compressed;
5483 };
5484 mempool::bluestore_fsck::map<uint64_t,sb_info_t> sb_info;
5485
5486 uint64_t num_objects = 0;
5487 uint64_t num_extents = 0;
5488 uint64_t num_blobs = 0;
5489 uint64_t num_spanning_blobs = 0;
5490 uint64_t num_shared_blobs = 0;
5491 uint64_t num_sharded_objects = 0;
5492 uint64_t num_object_shards = 0;
5493
5494 utime_t start = ceph_clock_now();
5495
5496 int r = _open_path();
5497 if (r < 0)
5498 return r;
5499 r = _open_fsid(false);
5500 if (r < 0)
5501 goto out_path;
5502
5503 r = _read_fsid(&fsid);
5504 if (r < 0)
5505 goto out_fsid;
5506
5507 r = _lock_fsid();
5508 if (r < 0)
5509 goto out_fsid;
5510
5511 r = _open_bdev(false);
5512 if (r < 0)
5513 goto out_fsid;
5514
5515 r = _open_db(false);
5516 if (r < 0)
5517 goto out_bdev;
5518
5519 r = _open_super_meta();
5520 if (r < 0)
5521 goto out_db;
5522
5523 r = _open_fm(false);
5524 if (r < 0)
5525 goto out_db;
5526
5527 r = _open_alloc();
5528 if (r < 0)
5529 goto out_fm;
5530
5531 r = _open_collections(&errors);
5532 if (r < 0)
5533 goto out_alloc;
5534
5535 mempool_thread.init();
5536
5537 // we need finishers and kv_{sync,finalize}_thread *just* for replay
5538 _kv_start();
5539 r = _deferred_replay();
5540 _kv_stop();
5541 if (r < 0)
5542 goto out_scan;
5543
5544 used_blocks.resize(bdev->get_size() / block_size);
5545 apply(
5546 0, SUPER_RESERVED, block_size, used_blocks, "0~SUPER_RESERVED",
5547 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5548 bs.set(pos);
5549 }
5550 );
5551
5552 if (bluefs) {
5553 for (auto e = bluefs_extents.begin(); e != bluefs_extents.end(); ++e) {
5554 apply(
5555 e.get_start(), e.get_len(), block_size, used_blocks, "bluefs",
5556 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5557 bs.set(pos);
5558 }
5559 );
5560 }
5561 r = bluefs->fsck();
5562 if (r < 0) {
5563 goto out_scan;
5564 }
5565 if (r > 0)
5566 errors += r;
5567 }
5568
5569 // get expected statfs; fill unaffected fields to be able to compare
5570 // structs
5571 statfs(&actual_statfs);
5572 expected_statfs.total = actual_statfs.total;
5573 expected_statfs.available = actual_statfs.available;
5574
5575 // walk PREFIX_OBJ
5576 dout(1) << __func__ << " walking object keyspace" << dendl;
5577 it = db->get_iterator(PREFIX_OBJ);
5578 if (it) {
5579 CollectionRef c;
5580 spg_t pgid;
5581 mempool::bluestore_fsck::list<string> expecting_shards;
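// fsck relies on the PREFIX_OBJ key ordering: when an onode is visited,
// the keys of its extent-map shards are pushed onto expecting_shards, and
// the shard keys encountered later in the iteration are matched against
// that list so that both missing and stray shard keys can be reported.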
5582 for (it->lower_bound(string()); it->valid(); it->next()) {
5583 if (g_conf->bluestore_debug_fsck_abort) {
5584 goto out_scan;
5585 }
5586 dout(30) << " key " << pretty_binary_string(it->key()) << dendl;
5587 if (is_extent_shard_key(it->key())) {
5588 while (!expecting_shards.empty() &&
5589 expecting_shards.front() < it->key()) {
5590 derr << __func__ << " error: missing shard key "
5591 << pretty_binary_string(expecting_shards.front())
5592 << dendl;
5593 ++errors;
5594 expecting_shards.pop_front();
5595 }
5596 if (!expecting_shards.empty() &&
5597 expecting_shards.front() == it->key()) {
5598 // all good
5599 expecting_shards.pop_front();
5600 continue;
5601 }
5602
5603 uint32_t offset;
5604 string okey;
5605 get_key_extent_shard(it->key(), &okey, &offset);
5606 derr << __func__ << " error: stray shard 0x" << std::hex << offset
5607 << std::dec << dendl;
5608 if (expecting_shards.empty()) {
5609 derr << __func__ << " error: " << pretty_binary_string(it->key())
5610 << " is unexpected" << dendl;
5611 ++errors;
5612 continue;
5613 }
5614 while (expecting_shards.front() > it->key()) {
5615 derr << __func__ << " error: saw " << pretty_binary_string(it->key())
5616 << dendl;
5617 derr << __func__ << " error: exp "
5618 << pretty_binary_string(expecting_shards.front()) << dendl;
5619 ++errors;
5620 expecting_shards.pop_front();
5621 if (expecting_shards.empty()) {
5622 break;
5623 }
5624 }
5625 continue;
5626 }
5627
5628 ghobject_t oid;
5629 int r = get_key_object(it->key(), &oid);
5630 if (r < 0) {
5631 derr << __func__ << " error: bad object key "
5632 << pretty_binary_string(it->key()) << dendl;
5633 ++errors;
5634 continue;
5635 }
5636 if (!c ||
5637 oid.shard_id != pgid.shard ||
5638 oid.hobj.pool != (int64_t)pgid.pool() ||
5639 !c->contains(oid)) {
5640 c = nullptr;
5641 for (ceph::unordered_map<coll_t, CollectionRef>::iterator p =
5642 coll_map.begin();
5643 p != coll_map.end();
5644 ++p) {
5645 if (p->second->contains(oid)) {
5646 c = p->second;
5647 break;
5648 }
5649 }
5650 if (!c) {
5651 derr << __func__ << " error: stray object " << oid
5652 << " not owned by any collection" << dendl;
5653 ++errors;
5654 continue;
5655 }
5656 c->cid.is_pg(&pgid);
5657 dout(20) << __func__ << " collection " << c->cid << dendl;
5658 }
5659
5660 if (!expecting_shards.empty()) {
5661 for (auto &k : expecting_shards) {
5662 derr << __func__ << " error: missing shard key "
5663 << pretty_binary_string(k) << dendl;
5664 }
5665 ++errors;
5666 expecting_shards.clear();
5667 }
5668
5669 dout(10) << __func__ << " " << oid << dendl;
5670 RWLock::RLocker l(c->lock);
5671 OnodeRef o = c->get_onode(oid, false);
5672 if (o->onode.nid) {
5673 if (o->onode.nid > nid_max) {
5674 derr << __func__ << " error: " << oid << " nid " << o->onode.nid
5675 << " > nid_max " << nid_max << dendl;
5676 ++errors;
5677 }
5678 if (used_nids.count(o->onode.nid)) {
5679 derr << __func__ << " error: " << oid << " nid " << o->onode.nid
5680 << " already in use" << dendl;
5681 ++errors;
5682 continue; // go for next object
5683 }
5684 used_nids.insert(o->onode.nid);
5685 }
5686 ++num_objects;
5687 num_spanning_blobs += o->extent_map.spanning_blob_map.size();
5688 o->extent_map.fault_range(db, 0, OBJECT_MAX_SIZE);
5689 _dump_onode(o, 30);
5690 // shards
5691 if (!o->extent_map.shards.empty()) {
5692 ++num_sharded_objects;
5693 num_object_shards += o->extent_map.shards.size();
5694 }
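// queue the shard keys we expect to see later in this keyspace walk;
// missing or stray shard keys are reported by the is_extent_shard_key
// branch near the top of the loop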
5695 for (auto& s : o->extent_map.shards) {
5696 dout(20) << __func__ << " shard " << *s.shard_info << dendl;
5697 expecting_shards.push_back(string());
5698 get_extent_shard_key(o->key, s.shard_info->offset,
5699 &expecting_shards.back());
5700 if (s.shard_info->offset >= o->onode.size) {
5701 derr << __func__ << " error: " << oid << " shard 0x" << std::hex
5702 << s.shard_info->offset << " past EOF at 0x" << o->onode.size
5703 << std::dec << dendl;
5704 ++errors;
5705 }
5706 }
5707 // lextents
5708 map<BlobRef,bluestore_blob_t::unused_t> referenced;
5709 uint64_t pos = 0;
5710 mempool::bluestore_fsck::map<BlobRef,
5711 bluestore_blob_use_tracker_t> ref_map;
5712 for (auto& l : o->extent_map.extent_map) {
5713 dout(20) << __func__ << " " << l << dendl;
5714 if (l.logical_offset < pos) {
5715 derr << __func__ << " error: " << oid << " lextent at 0x"
5716 << std::hex << l.logical_offset
5717 << " overlaps with the previous, which ends at 0x" << pos
5718 << std::dec << dendl;
5719 ++errors;
5720 }
5721 if (o->extent_map.spans_shard(l.logical_offset, l.length)) {
5722 derr << __func__ << " error: " << oid << " lextent at 0x"
5723 << std::hex << l.logical_offset << "~" << l.length
5724 << " spans a shard boundary"
5725 << std::dec << dendl;
5726 ++errors;
5727 }
5728 pos = l.logical_offset + l.length;
5729 expected_statfs.stored += l.length;
5730 assert(l.blob);
5731 const bluestore_blob_t& blob = l.blob->get_blob();
5732
5733 auto& ref = ref_map[l.blob];
5734 if (ref.is_empty()) {
5735 uint32_t min_release_size = blob.get_release_size(min_alloc_size);
5736 uint32_t l = blob.get_logical_length();
5737 ref.init(l, min_release_size);
5738 }
5739 ref.get(
5740 l.blob_offset,
5741 l.length);
5742 ++num_extents;
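// if the blob tracks an 'unused' bitmap, remember which chunks this
// lextent actually references; the bits are checked against blob.unused
// once all lextents have been seen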
5743 if (blob.has_unused()) {
5744 auto p = referenced.find(l.blob);
5745 bluestore_blob_t::unused_t *pu;
5746 if (p == referenced.end()) {
5747 pu = &referenced[l.blob];
5748 } else {
5749 pu = &p->second;
5750 }
5751 uint64_t blob_len = blob.get_logical_length();
5752 assert((blob_len % (sizeof(*pu)*8)) == 0);
5753 assert(l.blob_offset + l.length <= blob_len);
5754 uint64_t chunk_size = blob_len / (sizeof(*pu)*8);
5755 uint64_t start = l.blob_offset / chunk_size;
5756 uint64_t end =
5757 ROUND_UP_TO(l.blob_offset + l.length, chunk_size) / chunk_size;
5758 for (auto i = start; i < end; ++i) {
5759 (*pu) |= (1u << i);
5760 }
5761 }
5762 }
5763 for (auto &i : referenced) {
5764 dout(20) << __func__ << " referenced 0x" << std::hex << i.second
5765 << std::dec << " for " << *i.first << dendl;
5766 const bluestore_blob_t& blob = i.first->get_blob();
5767 if (i.second & blob.unused) {
5768 derr << __func__ << " error: " << oid << " blob claims unused 0x"
5769 << std::hex << blob.unused
5770 << " but extents reference 0x" << i.second
5771 << " on blob " << *i.first << dendl;
5772 ++errors;
5773 }
5774 if (blob.has_csum()) {
5775 uint64_t blob_len = blob.get_logical_length();
5776 uint64_t unused_chunk_size = blob_len / (sizeof(blob.unused)*8);
5777 unsigned csum_count = blob.get_csum_count();
5778 unsigned csum_chunk_size = blob.get_csum_chunk_size();
5779 for (unsigned p = 0; p < csum_count; ++p) {
5780 unsigned pos = p * csum_chunk_size;
5781 unsigned firstbit = pos / unused_chunk_size; // [firstbit,lastbit]
5782 unsigned lastbit = (pos + csum_chunk_size - 1) / unused_chunk_size;
5783 unsigned mask = 1u << firstbit;
5784 for (unsigned b = firstbit + 1; b <= lastbit; ++b) {
5785 mask |= 1u << b;
5786 }
5787 if ((blob.unused & mask) == mask) {
5788 // this csum chunk region is marked unused
5789 if (blob.get_csum_item(p) != 0) {
5790 derr << __func__ << " error: " << oid
5791 << " blob claims csum chunk 0x" << std::hex << pos
5792 << "~" << csum_chunk_size
5793 << " is unused (mask 0x" << mask << " of unused 0x"
5794 << blob.unused << ") but csum is non-zero 0x"
5795 << blob.get_csum_item(p) << std::dec << " on blob "
5796 << *i.first << dendl;
5797 ++errors;
5798 }
5799 }
5800 }
5801 }
5802 }
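// per-blob checks: compare the use tracker with what the lextents
// referenced, account compressed data in expected_statfs, and record
// shared blobs for the PREFIX_SHARED_BLOB pass below (extents of
// non-shared blobs are checked immediately)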
5803 for (auto &i : ref_map) {
5804 ++num_blobs;
5805 const bluestore_blob_t& blob = i.first->get_blob();
5806 bool equal = i.first->get_blob_use_tracker().equal(i.second);
5807 if (!equal) {
5808 derr << __func__ << " error: " << oid << " blob " << *i.first
5809 << " doesn't match expected ref_map " << i.second << dendl;
5810 ++errors;
5811 }
5812 if (blob.is_compressed()) {
5813 expected_statfs.compressed += blob.get_compressed_payload_length();
5814 expected_statfs.compressed_original +=
5815 i.first->get_referenced_bytes();
5816 }
5817 if (blob.is_shared()) {
5818 if (i.first->shared_blob->get_sbid() > blobid_max) {
5819 derr << __func__ << " error: " << oid << " blob " << blob
5820 << " sbid " << i.first->shared_blob->get_sbid() << " > blobid_max "
5821 << blobid_max << dendl;
5822 ++errors;
5823 } else if (i.first->shared_blob->get_sbid() == 0) {
5824 derr << __func__ << " error: " << oid << " blob " << blob
5825 << " marked as shared but has uninitialized sbid"
5826 << dendl;
5827 ++errors;
5828 }
5829 sb_info_t& sbi = sb_info[i.first->shared_blob->get_sbid()];
5830 sbi.sb = i.first->shared_blob;
5831 sbi.oids.push_back(oid);
5832 sbi.compressed = blob.is_compressed();
5833 for (auto e : blob.get_extents()) {
5834 if (e.is_valid()) {
5835 sbi.ref_map.get(e.offset, e.length);
5836 }
5837 }
5838 } else {
5839 errors += _fsck_check_extents(oid, blob.get_extents(),
5840 blob.is_compressed(),
5841 used_blocks,
5842 expected_statfs);
5843 }
5844 }
5845 if (deep) {
5846 bufferlist bl;
5847 int r = _do_read(c.get(), o, 0, o->onode.size, bl, 0);
5848 if (r < 0) {
5849 ++errors;
5850 derr << __func__ << " error: " << oid << " error during read: "
5851 << cpp_strerror(r) << dendl;
5852 }
5853 }
5854 // omap
5855 if (o->onode.has_omap()) {
5856 if (used_omap_head.count(o->onode.nid)) {
5857 derr << __func__ << " error: " << oid << " omap_head " << o->onode.nid
5858 << " already in use" << dendl;
5859 ++errors;
5860 } else {
5861 used_omap_head.insert(o->onode.nid);
5862 }
5863 }
5864 }
5865 }
5866 dout(1) << __func__ << " checking shared_blobs" << dendl;
5867 it = db->get_iterator(PREFIX_SHARED_BLOB);
5868 if (it) {
5869 for (it->lower_bound(string()); it->valid(); it->next()) {
5870 string key = it->key();
5871 uint64_t sbid;
5872 if (get_key_shared_blob(key, &sbid)) {
5873 derr << __func__ << " error: bad key '" << key
5874 << "' in shared blob namespace" << dendl;
5875 ++errors;
5876 continue;
5877 }
5878 auto p = sb_info.find(sbid);
5879 if (p == sb_info.end()) {
5880 derr << __func__ << " error: found stray shared blob data for sbid 0x"
5881 << std::hex << sbid << std::dec << dendl;
5882 ++errors;
5883 } else {
5884 ++num_shared_blobs;
5885 sb_info_t& sbi = p->second;
5886 bluestore_shared_blob_t shared_blob(sbid);
5887 bufferlist bl = it->value();
5888 bufferlist::iterator blp = bl.begin();
5889 ::decode(shared_blob, blp);
5890 dout(20) << __func__ << " " << *sbi.sb << " " << shared_blob << dendl;
5891 if (shared_blob.ref_map != sbi.ref_map) {
5892 derr << __func__ << " error: shared blob 0x" << std::hex << sbid
5893 << std::dec << " ref_map " << shared_blob.ref_map
5894 << " != expected " << sbi.ref_map << dendl;
5895 ++errors;
5896 }
5897 PExtentVector extents;
5898 for (auto &r : shared_blob.ref_map.ref_map) {
5899 extents.emplace_back(bluestore_pextent_t(r.first, r.second.length));
5900 }
5901 errors += _fsck_check_extents(p->second.oids.front(),
5902 extents,
5903 p->second.compressed,
5904 used_blocks, expected_statfs);
5905 sb_info.erase(p);
5906 }
5907 }
5908 }
5909 for (auto &p : sb_info) {
5910 derr << __func__ << " error: shared_blob 0x" << p.first
5911 << " key is missing (" << *p.second.sb << ")" << dendl;
5912 ++errors;
5913 }
5914 if (!(actual_statfs == expected_statfs)) {
5915 derr << __func__ << " error: actual " << actual_statfs
5916 << " != expected " << expected_statfs << dendl;
5917 ++errors;
5918 }
5919
5920 dout(1) << __func__ << " checking for stray omap data" << dendl;
5921 it = db->get_iterator(PREFIX_OMAP);
5922 if (it) {
5923 for (it->lower_bound(string()); it->valid(); it->next()) {
5924 uint64_t omap_head;
5925 _key_decode_u64(it->key().c_str(), &omap_head);
5926 if (used_omap_head.count(omap_head) == 0) {
5927 derr << __func__ << " error: found stray omap data on omap_head "
5928 << omap_head << dendl;
5929 ++errors;
5930 }
5931 }
5932 }
5933
5934 dout(1) << __func__ << " checking deferred events" << dendl;
5935 it = db->get_iterator(PREFIX_DEFERRED);
5936 if (it) {
5937 for (it->lower_bound(string()); it->valid(); it->next()) {
5938 bufferlist bl = it->value();
5939 bufferlist::iterator p = bl.begin();
5940 bluestore_deferred_transaction_t wt;
5941 try {
5942 ::decode(wt, p);
5943 } catch (buffer::error& e) {
5944 derr << __func__ << " error: failed to decode deferred txn "
5945 << pretty_binary_string(it->key()) << dendl;
5946 r = -EIO;
5947 goto out_scan;
5948 }
5949 dout(20) << __func__ << " deferred " << wt.seq
5950 << " ops " << wt.ops.size()
5951 << " released 0x" << std::hex << wt.released << std::dec << dendl;
5952 for (auto e = wt.released.begin(); e != wt.released.end(); ++e) {
5953 apply(
5954 e.get_start(), e.get_len(), block_size, used_blocks, "deferred",
5955 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5956 bs.set(pos);
5957 }
5958 );
5959 }
5960 }
5961 }
5962
5963 dout(1) << __func__ << " checking freelist vs allocated" << dendl;
5964 {
5965 // remove bluefs_extents from used set since the freelist doesn't
5966 // know they are allocated.
5967 for (auto e = bluefs_extents.begin(); e != bluefs_extents.end(); ++e) {
5968 apply(
5969 e.get_start(), e.get_len(), block_size, used_blocks, "bluefs_extents",
5970 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5971 bs.reset(pos);
5972 }
5973 );
5974 }
5975 fm->enumerate_reset();
5976 uint64_t offset, length;
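// each extent the freelist reports as free must not overlap blocks we
// marked used above; set the bits as we go so space that is neither
// allocated nor free (i.e. leaked) is detected afterwards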
5977 while (fm->enumerate_next(&offset, &length)) {
5978 bool intersects = false;
5979 apply(
5980 offset, length, block_size, used_blocks, "free",
5981 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5982 if (bs.test(pos)) {
5983 intersects = true;
5984 } else {
5985 bs.set(pos);
5986 }
5987 }
5988 );
5989 if (intersects) {
5990 derr << __func__ << " error: free extent 0x" << std::hex << offset
5991 << "~" << length << std::dec
5992 << " intersects allocated blocks" << dendl;
5993 ++errors;
5994 }
5995 }
5996 fm->enumerate_reset();
5997 size_t count = used_blocks.count();
5998 if (used_blocks.size() == count + 1) {
5999 // this is due to http://tracker.ceph.com/issues/21089
6000 bufferlist fm_bpb_bl, fm_blocks_bl, fm_bpk_bl;
6001 db->get(PREFIX_ALLOC, "bytes_per_block", &fm_bpb_bl);
6002 db->get(PREFIX_ALLOC, "blocks", &fm_blocks_bl);
6003 db->get(PREFIX_ALLOC, "blocks_per_key", &fm_bpk_bl);
6004 uint64_t fm_blocks = 0;
6005 uint64_t fm_bsize = 1;
6006 uint64_t fm_blocks_per_key = 1;
6007 try {
6008 auto p = fm_blocks_bl.begin();
6009 ::decode(fm_blocks, p);
6010 auto q = fm_bpb_bl.begin();
6011 ::decode(fm_bsize, q);
6012 auto r = fm_bpk_bl.begin();
6013 ::decode(fm_blocks_per_key, r);
6014 } catch (buffer::error& e) {
6015 }
6016 uint64_t dev_bsize = bdev->get_block_size();
6017 uint64_t bad_size = bdev->get_size() & ~fm_bsize;
6018 if (used_blocks.test(bad_size / dev_bsize) == 0) {
6019 // this is the last block of the device that we previously
6020 // (incorrectly) truncated off of the effective device size. this
6021 // prevented BitmapFreelistManager from marking it as used along with
6022 // the other "past-eof" blocks in the last key slot. mark it used
6023 // now.
6024 derr << __func__ << " warning: fixing leaked block 0x" << std::hex
6025 << bad_size << "~" << fm_bsize << std::dec << " due to old bug"
6026 << dendl;
6027 KeyValueDB::Transaction t = db->get_transaction();
6028 // fix freelistmanager metadata (the internal 'blocks' count is
6029 // rounded up to include the trailing key, past eof)
6030 uint64_t new_blocks = bdev->get_size() / fm_bsize;
6031 if (new_blocks / fm_blocks_per_key * fm_blocks_per_key != new_blocks) {
6032 new_blocks = (new_blocks / fm_blocks_per_key + 1) *
6033 fm_blocks_per_key;
6034 }
6035 if (new_blocks != fm_blocks) {
6036 // the fm block count increased
6037 derr << __func__ << " freelist block and key count changed, fixing 0x"
6038 << std::hex << bdev->get_size() << "~"
6039 << ((new_blocks * fm_bsize) - bdev->get_size()) << std::dec
6040 << dendl;
6041 bufferlist bl;
6042 ::encode(new_blocks, bl);
6043 t->set(PREFIX_ALLOC, "blocks", bl);
6044 fm->allocate(bdev->get_size(),
6045 (new_blocks * fm_bsize) - bdev->get_size(),
6046 t);
6047 } else {
6048 // block count is the same, but size changed; fix just the size
6049 derr << __func__ << " fixing just the stray block at 0x"
6050 << std::hex << bad_size << "~" << fm_bsize << std::dec << dendl;
6051 fm->allocate(bad_size, fm_bsize, t);
6052 }
6053 bufferlist sizebl;
6054 ::encode(bdev->get_size(), sizebl);
6055 t->set(PREFIX_ALLOC, "size", sizebl);
6056 int r = db->submit_transaction_sync(t);
6057 assert(r == 0);
6058
6059 used_blocks.set(bad_size / dev_bsize);
6060 ++count;
6061 }
6062 }
6063 if (used_blocks.size() != count) {
6064 assert(used_blocks.size() > count);
6065 ++errors;
6066 used_blocks.flip();
6067 size_t start = used_blocks.find_first();
6068 while (start != decltype(used_blocks)::npos) {
6069 size_t cur = start;
6070 while (true) {
6071 size_t next = used_blocks.find_next(cur);
6072 if (next != cur + 1) {
6073 derr << __func__ << " error: leaked extent 0x" << std::hex
6074 << ((uint64_t)start * block_size) << "~"
6075 << ((cur + 1 - start) * block_size) << std::dec
6076 << dendl;
6077 start = next;
6078 break;
6079 }
6080 cur = next;
6081 }
6082 }
6083 used_blocks.flip();
6084 }
6085 }
6086
6087 out_scan:
6088 mempool_thread.shutdown();
6089 _flush_cache();
6090 out_alloc:
6091 _close_alloc();
6092 out_fm:
6093 _close_fm();
6094 out_db:
6095 it.reset(); // before db is closed
6096 _close_db();
6097 out_bdev:
6098 _close_bdev();
6099 out_fsid:
6100 _close_fsid();
6101 out_path:
6102 _close_path();
6103
6104 // fatal errors take precedence
6105 if (r < 0)
6106 return r;
6107
6108 dout(2) << __func__ << " " << num_objects << " objects, "
6109 << num_sharded_objects << " of them sharded. "
6110 << dendl;
6111 dout(2) << __func__ << " " << num_extents << " extents to "
6112 << num_blobs << " blobs, "
6113 << num_spanning_blobs << " spanning, "
6114 << num_shared_blobs << " shared."
6115 << dendl;
6116
6117 utime_t duration = ceph_clock_now() - start;
6118 dout(1) << __func__ << " finish with " << errors << " errors in "
6119 << duration << " seconds" << dendl;
6120 return errors;
6121 }
6122
6123 void BlueStore::collect_metadata(map<string,string> *pm)
6124 {
6125 dout(10) << __func__ << dendl;
6126 bdev->collect_metadata("bluestore_bdev_", pm);
6127 if (bluefs) {
6128 (*pm)["bluefs"] = "1";
6129 (*pm)["bluefs_single_shared_device"] = stringify((int)bluefs_single_shared_device);
6130 bluefs->collect_metadata(pm);
6131 } else {
6132 (*pm)["bluefs"] = "0";
6133 }
6134 }
6135
6136 int BlueStore::statfs(struct store_statfs_t *buf)
6137 {
6138 buf->reset();
6139 buf->total = bdev->get_size();
6140 buf->available = alloc->get_free();
6141
6142 if (bluefs) {
6143 // part of our shared device is "free" according to BlueFS
6144 // Don't include bluestore_bluefs_min because that space can't
6145 // be used for any other purpose.
6146 buf->available += bluefs->get_free(bluefs_shared_bdev) - cct->_conf->bluestore_bluefs_min;
6147
6148 // include dedicated db, too, if that isn't the shared device.
6149 if (bluefs_shared_bdev != BlueFS::BDEV_DB) {
6150 buf->total += bluefs->get_total(BlueFS::BDEV_DB);
6151 }
6152 }
6153
6154 {
6155 std::lock_guard<std::mutex> l(vstatfs_lock);
6156
6157 buf->allocated = vstatfs.allocated();
6158 buf->stored = vstatfs.stored();
6159 buf->compressed = vstatfs.compressed();
6160 buf->compressed_original = vstatfs.compressed_original();
6161 buf->compressed_allocated = vstatfs.compressed_allocated();
6162 }
6163
6164 dout(20) << __func__ << " " << *buf << dendl;
6165 return 0;
6166 }
6167
6168 // ---------------
6169 // cache
6170
6171 BlueStore::CollectionRef BlueStore::_get_collection(const coll_t& cid)
6172 {
6173 RWLock::RLocker l(coll_lock);
6174 ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
6175 if (cp == coll_map.end())
6176 return CollectionRef();
6177 return cp->second;
6178 }
6179
6180 void BlueStore::_queue_reap_collection(CollectionRef& c)
6181 {
6182 dout(10) << __func__ << " " << c << " " << c->cid << dendl;
6183 std::lock_guard<std::mutex> l(reap_lock);
6184 removed_collections.push_back(c);
6185 }
6186
6187 void BlueStore::_reap_collections()
6188 {
6189 list<CollectionRef> removed_colls;
6190 {
6191 std::lock_guard<std::mutex> l(reap_lock);
6192 removed_colls.swap(removed_collections);
6193 }
6194
6195 bool all_reaped = true;
6196
6197 for (list<CollectionRef>::iterator p = removed_colls.begin();
6198 p != removed_colls.end();
6199 ++p) {
6200 CollectionRef c = *p;
6201 dout(10) << __func__ << " " << c << " " << c->cid << dendl;
6202 if (c->onode_map.map_any([&](OnodeRef o) {
6203 assert(!o->exists);
6204 if (o->flushing_count.load()) {
6205 dout(10) << __func__ << " " << c << " " << c->cid << " " << o->oid
6206 << " flush_txns " << o->flushing_count << dendl;
6207 return false;
6208 }
6209 return true;
6210 })) {
6211 all_reaped = false;
6212 continue;
6213 }
6214 c->onode_map.clear();
6215 dout(10) << __func__ << " " << c << " " << c->cid << " done" << dendl;
6216 }
6217
6218 if (all_reaped) {
6219 dout(10) << __func__ << " all reaped" << dendl;
6220 }
6221 }
6222
6223 void BlueStore::_update_cache_logger()
6224 {
6225 uint64_t num_onodes = 0;
6226 uint64_t num_extents = 0;
6227 uint64_t num_blobs = 0;
6228 uint64_t num_buffers = 0;
6229 uint64_t num_buffer_bytes = 0;
6230 for (auto c : cache_shards) {
6231 c->add_stats(&num_onodes, &num_extents, &num_blobs,
6232 &num_buffers, &num_buffer_bytes);
6233 }
6234 logger->set(l_bluestore_onodes, num_onodes);
6235 logger->set(l_bluestore_extents, num_extents);
6236 logger->set(l_bluestore_blobs, num_blobs);
6237 logger->set(l_bluestore_buffers, num_buffers);
6238 logger->set(l_bluestore_buffer_bytes, num_buffer_bytes);
6239 }
6240
6241 // ---------------
6242 // read operations
6243
6244 ObjectStore::CollectionHandle BlueStore::open_collection(const coll_t& cid)
6245 {
6246 return _get_collection(cid);
6247 }
6248
6249 bool BlueStore::exists(const coll_t& cid, const ghobject_t& oid)
6250 {
6251 CollectionHandle c = _get_collection(cid);
6252 if (!c)
6253 return false;
6254 return exists(c, oid);
6255 }
6256
6257 bool BlueStore::exists(CollectionHandle &c_, const ghobject_t& oid)
6258 {
6259 Collection *c = static_cast<Collection *>(c_.get());
6260 dout(10) << __func__ << " " << c->cid << " " << oid << dendl;
6261 if (!c->exists)
6262 return false;
6263
6264 bool r = true;
6265
6266 {
6267 RWLock::RLocker l(c->lock);
6268 OnodeRef o = c->get_onode(oid, false);
6269 if (!o || !o->exists)
6270 r = false;
6271 }
6272
6273 return r;
6274 }
6275
6276 int BlueStore::stat(
6277 const coll_t& cid,
6278 const ghobject_t& oid,
6279 struct stat *st,
6280 bool allow_eio)
6281 {
6282 CollectionHandle c = _get_collection(cid);
6283 if (!c)
6284 return -ENOENT;
6285 return stat(c, oid, st, allow_eio);
6286 }
6287
6288 int BlueStore::stat(
6289 CollectionHandle &c_,
6290 const ghobject_t& oid,
6291 struct stat *st,
6292 bool allow_eio)
6293 {
6294 Collection *c = static_cast<Collection *>(c_.get());
6295 if (!c->exists)
6296 return -ENOENT;
6297 dout(10) << __func__ << " " << c->get_cid() << " " << oid << dendl;
6298
6299 {
6300 RWLock::RLocker l(c->lock);
6301 OnodeRef o = c->get_onode(oid, false);
6302 if (!o || !o->exists)
6303 return -ENOENT;
6304 st->st_size = o->onode.size;
6305 st->st_blksize = 4096;
6306 st->st_blocks = (st->st_size + st->st_blksize - 1) / st->st_blksize;
6307 st->st_nlink = 1;
6308 }
6309
6310 int r = 0;
6311 if (_debug_mdata_eio(oid)) {
6312 r = -EIO;
6313 derr << __func__ << " " << c->cid << " " << oid << " INJECT EIO" << dendl;
6314 }
6315 return r;
6316 }
6317 int BlueStore::set_collection_opts(
6318 const coll_t& cid,
6319 const pool_opts_t& opts)
6320 {
6321 CollectionHandle ch = _get_collection(cid);
6322 if (!ch)
6323 return -ENOENT;
6324 Collection *c = static_cast<Collection *>(ch.get());
6325 dout(15) << __func__ << " " << cid << " options " << opts << dendl;
6326 if (!c->exists)
6327 return -ENOENT;
6328 RWLock::WLocker l(c->lock);
6329 c->pool_opts = opts;
6330 return 0;
6331 }
6332
6333 int BlueStore::read(
6334 const coll_t& cid,
6335 const ghobject_t& oid,
6336 uint64_t offset,
6337 size_t length,
6338 bufferlist& bl,
6339 uint32_t op_flags)
6340 {
6341 CollectionHandle c = _get_collection(cid);
6342 if (!c)
6343 return -ENOENT;
6344 return read(c, oid, offset, length, bl, op_flags);
6345 }
6346
6347 int BlueStore::read(
6348 CollectionHandle &c_,
6349 const ghobject_t& oid,
6350 uint64_t offset,
6351 size_t length,
6352 bufferlist& bl,
6353 uint32_t op_flags)
6354 {
6355 utime_t start = ceph_clock_now();
6356 Collection *c = static_cast<Collection *>(c_.get());
6357 const coll_t &cid = c->get_cid();
6358 dout(15) << __func__ << " " << cid << " " << oid
6359 << " 0x" << std::hex << offset << "~" << length << std::dec
6360 << dendl;
6361 if (!c->exists)
6362 return -ENOENT;
6363
6364 bl.clear();
6365 int r;
6366 {
6367 RWLock::RLocker l(c->lock);
6368 utime_t start1 = ceph_clock_now();
6369 OnodeRef o = c->get_onode(oid, false);
6370 logger->tinc(l_bluestore_read_onode_meta_lat, ceph_clock_now() - start1);
6371 if (!o || !o->exists) {
6372 r = -ENOENT;
6373 goto out;
6374 }
6375
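// an offset~length of 0~0 means read the whole object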
6376 if (offset == length && offset == 0)
6377 length = o->onode.size;
6378
6379 r = _do_read(c, o, offset, length, bl, op_flags);
6380 }
6381
6382 out:
6383 if (r == 0 && _debug_data_eio(oid)) {
6384 r = -EIO;
6385 derr << __func__ << " " << c->cid << " " << oid << " INJECT EIO" << dendl;
6386 } else if (cct->_conf->bluestore_debug_random_read_err &&
6387 (rand() % (int)(cct->_conf->bluestore_debug_random_read_err * 100.0)) == 0) {
6388 dout(0) << __func__ << ": inject random EIO" << dendl;
6389 r = -EIO;
6390 }
6391 dout(10) << __func__ << " " << cid << " " << oid
6392 << " 0x" << std::hex << offset << "~" << length << std::dec
6393 << " = " << r << dendl;
6394 logger->tinc(l_bluestore_read_lat, ceph_clock_now() - start);
6395 return r;
6396 }
6397
6398 // --------------------------------------------------------
6399 // intermediate data structures used while reading
6400 struct region_t {
6401 uint64_t logical_offset;
6402 uint64_t blob_xoffset; //region offset within the blob
6403 uint64_t length;
6404 bufferlist bl;
6405
6406 // used later in read process
6407 uint64_t front = 0;
6408 uint64_t r_off = 0;
6409
6410 region_t(uint64_t offset, uint64_t b_offs, uint64_t len)
6411 : logical_offset(offset),
6412 blob_xoffset(b_offs),
6413 length(len){}
6414 region_t(const region_t& from)
6415 : logical_offset(from.logical_offset),
6416 blob_xoffset(from.blob_xoffset),
6417 length(from.length){}
6418
6419 friend ostream& operator<<(ostream& out, const region_t& r) {
6420 return out << "0x" << std::hex << r.logical_offset << ":"
6421 << r.blob_xoffset << "~" << r.length << std::dec;
6422 }
6423 };
6424
6425 typedef list<region_t> regions2read_t;
6426 typedef map<BlueStore::BlobRef, regions2read_t> blobs2read_t;
6427
6428 int BlueStore::_do_read(
6429 Collection *c,
6430 OnodeRef o,
6431 uint64_t offset,
6432 size_t length,
6433 bufferlist& bl,
6434 uint32_t op_flags)
6435 {
6436 FUNCTRACE();
6437 int r = 0;
6438
6439 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
6440 << " size 0x" << o->onode.size << " (" << std::dec
6441 << o->onode.size << ")" << dendl;
6442 bl.clear();
6443
6444 if (offset >= o->onode.size) {
6445 return r;
6446 }
6447
6448 // generally, don't buffer anything, unless the client explicitly requests
6449 // it.
6450 bool buffered = false;
6451 if (op_flags & CEPH_OSD_OP_FLAG_FADVISE_WILLNEED) {
6452 dout(20) << __func__ << " will do buffered read" << dendl;
6453 buffered = true;
6454 } else if (cct->_conf->bluestore_default_buffered_read &&
6455 (op_flags & (CEPH_OSD_OP_FLAG_FADVISE_DONTNEED |
6456 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE)) == 0) {
6457 dout(20) << __func__ << " defaulting to buffered read" << dendl;
6458 buffered = true;
6459 }
6460
6461 if (offset + length > o->onode.size) {
6462 length = o->onode.size - offset;
6463 }
6464
6465 utime_t start = ceph_clock_now();
6466 o->extent_map.fault_range(db, offset, length);
6467 logger->tinc(l_bluestore_read_onode_meta_lat, ceph_clock_now() - start);
6468 _dump_onode(o);
6469
6470 ready_regions_t ready_regions;
6471
6472 // build a blob-wise list of stuff to read (that isn't cached)
6473 blobs2read_t blobs2read;
6474 unsigned left = length;
6475 uint64_t pos = offset;
6476 unsigned num_regions = 0;
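// walk the lextents covering [offset, offset+length): satisfy what we can
// from the buffer cache and queue the rest, grouped by blob, for the
// (possibly aio) reads below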
6477 auto lp = o->extent_map.seek_lextent(offset);
6478 while (left > 0 && lp != o->extent_map.extent_map.end()) {
6479 if (pos < lp->logical_offset) {
6480 unsigned hole = lp->logical_offset - pos;
6481 if (hole >= left) {
6482 break;
6483 }
6484 dout(30) << __func__ << " hole 0x" << std::hex << pos << "~" << hole
6485 << std::dec << dendl;
6486 pos += hole;
6487 left -= hole;
6488 }
6489 BlobRef bptr = lp->blob;
6490 unsigned l_off = pos - lp->logical_offset;
6491 unsigned b_off = l_off + lp->blob_offset;
6492 unsigned b_len = std::min(left, lp->length - l_off);
6493
6494 ready_regions_t cache_res;
6495 interval_set<uint32_t> cache_interval;
6496 bptr->shared_blob->bc.read(
6497 bptr->shared_blob->get_cache(), b_off, b_len, cache_res, cache_interval);
6498 dout(20) << __func__ << " blob " << *bptr << std::hex
6499 << " need 0x" << b_off << "~" << b_len
6500 << " cache has 0x" << cache_interval
6501 << std::dec << dendl;
6502
6503 auto pc = cache_res.begin();
6504 while (b_len > 0) {
6505 unsigned l;
6506 if (pc != cache_res.end() &&
6507 pc->first == b_off) {
6508 l = pc->second.length();
6509 ready_regions[pos].claim(pc->second);
6510 dout(30) << __func__ << " use cache 0x" << std::hex << pos << ": 0x"
6511 << b_off << "~" << l << std::dec << dendl;
6512 ++pc;
6513 } else {
6514 l = b_len;
6515 if (pc != cache_res.end()) {
6516 assert(pc->first > b_off);
6517 l = pc->first - b_off;
6518 }
6519 dout(30) << __func__ << " will read 0x" << std::hex << pos << ": 0x"
6520 << b_off << "~" << l << std::dec << dendl;
6521 blobs2read[bptr].emplace_back(region_t(pos, b_off, l));
6522 ++num_regions;
6523 }
6524 pos += l;
6525 b_off += l;
6526 left -= l;
6527 b_len -= l;
6528 }
6529 ++lp;
6530 }
6531
6532 // read raw blob data. use aio if we have >1 blobs to read.
6533 start = ceph_clock_now(); // for simplicity, time the whole
6534 // block below as a single unit;
6535 // the measurement error is small
6536 vector<bufferlist> compressed_blob_bls;
6537 IOContext ioc(cct, NULL);
6538 for (auto& p : blobs2read) {
6539 BlobRef bptr = p.first;
6540 dout(20) << __func__ << " blob " << *bptr << std::hex
6541 << " need " << p.second << std::dec << dendl;
6542 if (bptr->get_blob().is_compressed()) {
6543 // read the whole thing
6544 if (compressed_blob_bls.empty()) {
6545 // ensure we avoid any reallocation on subsequent blobs
6546 compressed_blob_bls.reserve(blobs2read.size());
6547 }
6548 compressed_blob_bls.push_back(bufferlist());
6549 bufferlist& bl = compressed_blob_bls.back();
6550 r = bptr->get_blob().map(
6551 0, bptr->get_blob().get_ondisk_length(),
6552 [&](uint64_t offset, uint64_t length) {
6553 int r;
6554 // use aio if there are more regions to read than those in this blob
6555 if (num_regions > p.second.size()) {
6556 r = bdev->aio_read(offset, length, &bl, &ioc);
6557 } else {
6558 r = bdev->read(offset, length, &bl, &ioc, false);
6559 }
6560 if (r < 0)
6561 return r;
6562 return 0;
6563 });
6564 assert(r == 0);
6565 } else {
6566 // read the pieces
6567 for (auto& reg : p.second) {
6568 // determine how much of the blob to read
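// expand the region to the blob's chunk granularity: round r_off down
// and r_len up so only whole chunks are read; reg.front records the
// leading pad to strip once the data is back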
6569 uint64_t chunk_size = bptr->get_blob().get_chunk_size(block_size);
6570 reg.r_off = reg.blob_xoffset;
6571 uint64_t r_len = reg.length;
6572 reg.front = reg.r_off % chunk_size;
6573 if (reg.front) {
6574 reg.r_off -= reg.front;
6575 r_len += reg.front;
6576 }
6577 unsigned tail = r_len % chunk_size;
6578 if (tail) {
6579 r_len += chunk_size - tail;
6580 }
6581 dout(20) << __func__ << " region 0x" << std::hex
6582 << reg.logical_offset
6583 << ": 0x" << reg.blob_xoffset << "~" << reg.length
6584 << " reading 0x" << reg.r_off << "~" << r_len << std::dec
6585 << dendl;
6586
6587 // read it
6588 r = bptr->get_blob().map(
6589 reg.r_off, r_len,
6590 [&](uint64_t offset, uint64_t length) {
6591 int r;
6592 // use aio if there is more than one region to read
6593 if (num_regions > 1) {
6594 r = bdev->aio_read(offset, length, &reg.bl, &ioc);
6595 } else {
6596 r = bdev->read(offset, length, &reg.bl, &ioc, false);
6597 }
6598 if (r < 0)
6599 return r;
6600 return 0;
6601 });
6602 assert(r == 0);
6603 assert(reg.bl.length() == r_len);
6604 }
6605 }
6606 }
6607 if (ioc.has_pending_aios()) {
6608 bdev->aio_submit(&ioc);
6609 dout(20) << __func__ << " waiting for aio" << dendl;
6610 ioc.aio_wait();
6611 }
6612 logger->tinc(l_bluestore_read_wait_aio_lat, ceph_clock_now() - start);
6613
6614 // enumerate and decompress desired blobs
6615 auto p = compressed_blob_bls.begin();
6616 blobs2read_t::iterator b2r_it = blobs2read.begin();
6617 while (b2r_it != blobs2read.end()) {
6618 BlobRef bptr = b2r_it->first;
6619 dout(20) << __func__ << " blob " << *bptr << std::hex
6620 << " need 0x" << b2r_it->second << std::dec << dendl;
6621 if (bptr->get_blob().is_compressed()) {
6622 assert(p != compressed_blob_bls.end());
6623 bufferlist& compressed_bl = *p++;
6624 if (_verify_csum(o, &bptr->get_blob(), 0, compressed_bl,
6625 b2r_it->second.front().logical_offset) < 0) {
6626 return -EIO;
6627 }
6628 bufferlist raw_bl;
6629 r = _decompress(compressed_bl, &raw_bl);
6630 if (r < 0)
6631 return r;
6632 if (buffered) {
6633 bptr->shared_blob->bc.did_read(bptr->shared_blob->get_cache(), 0,
6634 raw_bl);
6635 }
6636 for (auto& i : b2r_it->second) {
6637 ready_regions[i.logical_offset].substr_of(
6638 raw_bl, i.blob_xoffset, i.length);
6639 }
6640 } else {
6641 for (auto& reg : b2r_it->second) {
6642 if (_verify_csum(o, &bptr->get_blob(), reg.r_off, reg.bl,
6643 reg.logical_offset) < 0) {
6644 return -EIO;
6645 }
6646 if (buffered) {
6647 bptr->shared_blob->bc.did_read(bptr->shared_blob->get_cache(),
6648 reg.r_off, reg.bl);
6649 }
6650
6651 // prune and keep result
6652 ready_regions[reg.logical_offset].substr_of(
6653 reg.bl, reg.front, reg.length);
6654 }
6655 }
6656 ++b2r_it;
6657 }
6658
6659 // generate a resulting buffer
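// stitch the cached and freshly read regions together in logical order,
// zero-filling any holes in between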
6660 auto pr = ready_regions.begin();
6661 auto pr_end = ready_regions.end();
6662 pos = 0;
6663 while (pos < length) {
6664 if (pr != pr_end && pr->first == pos + offset) {
6665 dout(30) << __func__ << " assemble 0x" << std::hex << pos
6666 << ": data from 0x" << pr->first << "~" << pr->second.length()
6667 << std::dec << dendl;
6668 pos += pr->second.length();
6669 bl.claim_append(pr->second);
6670 ++pr;
6671 } else {
6672 uint64_t l = length - pos;
6673 if (pr != pr_end) {
6674 assert(pr->first > pos + offset);
6675 l = pr->first - (pos + offset);
6676 }
6677 dout(30) << __func__ << " assemble 0x" << std::hex << pos
6678 << ": zeros for 0x" << (pos + offset) << "~" << l
6679 << std::dec << dendl;
6680 bl.append_zero(l);
6681 pos += l;
6682 }
6683 }
6684 assert(bl.length() == length);
6685 assert(pos == length);
6686 assert(pr == pr_end);
6687 r = bl.length();
6688 return r;
6689 }
6690
6691 int BlueStore::_verify_csum(OnodeRef& o,
6692 const bluestore_blob_t* blob, uint64_t blob_xoffset,
6693 const bufferlist& bl,
6694 uint64_t logical_offset) const
6695 {
6696 int bad;
6697 uint64_t bad_csum;
6698 utime_t start = ceph_clock_now();
6699 int r = blob->verify_csum(blob_xoffset, bl, &bad, &bad_csum);
6700 if (r < 0) {
6701 if (r == -1) {
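// r == -1 means an actual checksum mismatch at blob offset 'bad';
// map the offending csum chunk back to its device extents so the
// error message can name the physical location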
6702 PExtentVector pex;
6703 blob->map(
6704 bad,
6705 blob->get_csum_chunk_size(),
6706 [&](uint64_t offset, uint64_t length) {
6707 pex.emplace_back(bluestore_pextent_t(offset, length));
6708 return 0;
6709 });
6710 derr << __func__ << " bad "
6711 << Checksummer::get_csum_type_string(blob->csum_type)
6712 << "/0x" << std::hex << blob->get_csum_chunk_size()
6713 << " checksum at blob offset 0x" << bad
6714 << ", got 0x" << bad_csum << ", expected 0x"
6715 << blob->get_csum_item(bad / blob->get_csum_chunk_size()) << std::dec
6716 << ", device location " << pex
6717 << ", logical extent 0x" << std::hex
6718 << (logical_offset + bad - blob_xoffset) << "~"
6719 << blob->get_csum_chunk_size() << std::dec
6720 << ", object " << o->oid
6721 << dendl;
6722 } else {
6723 derr << __func__ << " failed with error: " << cpp_strerror(r) << dendl;
6724 }
6725 }
6726 logger->tinc(l_bluestore_csum_lat, ceph_clock_now() - start);
6727 return r;
6728 }
6729
6730 int BlueStore::_decompress(bufferlist& source, bufferlist* result)
6731 {
6732 int r = 0;
6733 utime_t start = ceph_clock_now();
6734 bufferlist::iterator i = source.begin();
6735 bluestore_compression_header_t chdr;
6736 ::decode(chdr, i);
6737 int alg = int(chdr.type);
6738 CompressorRef cp = compressor;
6739 if (!cp || (int)cp->get_type() != alg) {
6740 cp = Compressor::create(cct, alg);
6741 }
6742
6743 if (!cp.get()) {
6744 // if the decompressor isn't available we cannot return the
6745 // decompressed data, so fail with EIO
6746 derr << __func__ << " can't load decompressor " << alg << dendl;
6747 r = -EIO;
6748 } else {
6749 r = cp->decompress(i, chdr.length, *result);
6750 if (r < 0) {
6751 derr << __func__ << " decompression failed with error code " << r << dendl;
6752 r = -EIO;
6753 }
6754 }
6755 logger->tinc(l_bluestore_decompress_lat, ceph_clock_now() - start);
6756 return r;
6757 }
6758
6759 // this variant stores the fiemap result into an interval_set; the other
6760 // fiemap() overloads use it internally
6761 int BlueStore::_fiemap(
6762 CollectionHandle &c_,
6763 const ghobject_t& oid,
6764 uint64_t offset,
6765 size_t length,
6766 interval_set<uint64_t>& destset)
6767 {
6768 Collection *c = static_cast<Collection *>(c_.get());
6769 if (!c->exists)
6770 return -ENOENT;
6771 {
6772 RWLock::RLocker l(c->lock);
6773
6774 OnodeRef o = c->get_onode(oid, false);
6775 if (!o || !o->exists) {
6776 return -ENOENT;
6777 }
6778 _dump_onode(o);
6779
6780 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
6781 << " size 0x" << o->onode.size << std::dec << dendl;
6782
6783 boost::intrusive::set<Extent>::iterator ep, eend;
6784 if (offset >= o->onode.size)
6785 goto out;
6786
6787 if (offset + length > o->onode.size) {
6788 length = o->onode.size - offset;
6789 }
6790
6791 o->extent_map.fault_range(db, offset, length);
6792 eend = o->extent_map.extent_map.end();
6793 ep = o->extent_map.seek_lextent(offset);
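// walk lextents starting at 'offset': mapped ranges are inserted into
// destset, while holes simply advance offset/length without an insert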
6794 while (length > 0) {
6795 dout(20) << __func__ << " offset " << offset << dendl;
6796 if (ep != eend && ep->logical_offset + ep->length <= offset) {
6797 ++ep;
6798 continue;
6799 }
6800
6801 uint64_t x_len = length;
6802 if (ep != eend && ep->logical_offset <= offset) {
6803 uint64_t x_off = offset - ep->logical_offset;
6804 x_len = MIN(x_len, ep->length - x_off);
6805 dout(30) << __func__ << " lextent 0x" << std::hex << offset << "~"
6806 << x_len << std::dec << " blob " << ep->blob << dendl;
6807 destset.insert(offset, x_len);
6808 length -= x_len;
6809 offset += x_len;
6810 if (x_off + x_len == ep->length)
6811 ++ep;
6812 continue;
6813 }
6814 if (ep != eend &&
6815 ep->logical_offset > offset &&
6816 ep->logical_offset - offset < x_len) {
6817 x_len = ep->logical_offset - offset;
6818 }
6819 offset += x_len;
6820 length -= x_len;
6821 }
6822 }
6823
6824 out:
6825 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
6826 << " size = 0x(" << destset << ")" << std::dec << dendl;
6827 return 0;
6828 }
6829
6830 int BlueStore::fiemap(
6831 const coll_t& cid,
6832 const ghobject_t& oid,
6833 uint64_t offset,
6834 size_t len,
6835 bufferlist& bl)
6836 {
6837 CollectionHandle c = _get_collection(cid);
6838 if (!c)
6839 return -ENOENT;
6840 return fiemap(c, oid, offset, len, bl);
6841 }
6842
6843 int BlueStore::fiemap(
6844 CollectionHandle &c_,
6845 const ghobject_t& oid,
6846 uint64_t offset,
6847 size_t length,
6848 bufferlist& bl)
6849 {
6850 interval_set<uint64_t> m;
6851 int r = _fiemap(c_, oid, offset, length, m);
6852 if (r >= 0) {
6853 ::encode(m, bl);
6854 }
6855 return r;
6856 }
6857
6858 int BlueStore::fiemap(
6859 const coll_t& cid,
6860 const ghobject_t& oid,
6861 uint64_t offset,
6862 size_t len,
6863 map<uint64_t, uint64_t>& destmap)
6864 {
6865 CollectionHandle c = _get_collection(cid);
6866 if (!c)
6867 return -ENOENT;
6868 return fiemap(c, oid, offset, len, destmap);
6869 }
6870
6871 int BlueStore::fiemap(
6872 CollectionHandle &c_,
6873 const ghobject_t& oid,
6874 uint64_t offset,
6875 size_t length,
6876 map<uint64_t, uint64_t>& destmap)
6877 {
6878 interval_set<uint64_t> m;
6879 int r = _fiemap(c_, oid, offset, length, m);
6880 if (r >= 0) {
6881 m.move_into(destmap);
6882 }
6883 return r;
6884 }
6885
6886 int BlueStore::getattr(
6887 const coll_t& cid,
6888 const ghobject_t& oid,
6889 const char *name,
6890 bufferptr& value)
6891 {
6892 CollectionHandle c = _get_collection(cid);
6893 if (!c)
6894 return -ENOENT;
6895 return getattr(c, oid, name, value);
6896 }
6897
6898 int BlueStore::getattr(
6899 CollectionHandle &c_,
6900 const ghobject_t& oid,
6901 const char *name,
6902 bufferptr& value)
6903 {
6904 Collection *c = static_cast<Collection *>(c_.get());
6905 dout(15) << __func__ << " " << c->cid << " " << oid << " " << name << dendl;
6906 if (!c->exists)
6907 return -ENOENT;
6908
6909 int r;
6910 {
6911 RWLock::RLocker l(c->lock);
6912 mempool::bluestore_cache_other::string k(name);
6913
6914 OnodeRef o = c->get_onode(oid, false);
6915 if (!o || !o->exists) {
6916 r = -ENOENT;
6917 goto out;
6918 }
6919
6920 if (!o->onode.attrs.count(k)) {
6921 r = -ENODATA;
6922 goto out;
6923 }
6924 value = o->onode.attrs[k];
6925 r = 0;
6926 }
6927 out:
6928 if (r == 0 && _debug_mdata_eio(oid)) {
6929 r = -EIO;
6930 derr << __func__ << " " << c->cid << " " << oid << " INJECT EIO" << dendl;
6931 }
6932 dout(10) << __func__ << " " << c->cid << " " << oid << " " << name
6933 << " = " << r << dendl;
6934 return r;
6935 }
6936
6937
6938 int BlueStore::getattrs(
6939 const coll_t& cid,
6940 const ghobject_t& oid,
6941 map<string,bufferptr>& aset)
6942 {
6943 CollectionHandle c = _get_collection(cid);
6944 if (!c)
6945 return -ENOENT;
6946 return getattrs(c, oid, aset);
6947 }
6948
6949 int BlueStore::getattrs(
6950 CollectionHandle &c_,
6951 const ghobject_t& oid,
6952 map<string,bufferptr>& aset)
6953 {
6954 Collection *c = static_cast<Collection *>(c_.get());
6955 dout(15) << __func__ << " " << c->cid << " " << oid << dendl;
6956 if (!c->exists)
6957 return -ENOENT;
6958
6959 int r;
6960 {
6961 RWLock::RLocker l(c->lock);
6962
6963 OnodeRef o = c->get_onode(oid, false);
6964 if (!o || !o->exists) {
6965 r = -ENOENT;
6966 goto out;
6967 }
6968 for (auto& i : o->onode.attrs) {
6969 aset.emplace(i.first.c_str(), i.second);
6970 }
6971 r = 0;
6972 }
6973
6974 out:
6975 if (r == 0 && _debug_mdata_eio(oid)) {
6976 r = -EIO;
6977 derr << __func__ << " " << c->cid << " " << oid << " INJECT EIO" << dendl;
6978 }
6979 dout(10) << __func__ << " " << c->cid << " " << oid
6980 << " = " << r << dendl;
6981 return r;
6982 }
6983
6984 int BlueStore::list_collections(vector<coll_t>& ls)
6985 {
6986 RWLock::RLocker l(coll_lock);
6987 for (ceph::unordered_map<coll_t, CollectionRef>::iterator p = coll_map.begin();
6988 p != coll_map.end();
6989 ++p)
6990 ls.push_back(p->first);
6991 return 0;
6992 }
6993
6994 bool BlueStore::collection_exists(const coll_t& c)
6995 {
6996 RWLock::RLocker l(coll_lock);
6997 return coll_map.count(c);
6998 }
6999
7000 int BlueStore::collection_empty(const coll_t& cid, bool *empty)
7001 {
7002 dout(15) << __func__ << " " << cid << dendl;
7003 vector<ghobject_t> ls;
7004 ghobject_t next;
7005 int r = collection_list(cid, ghobject_t(), ghobject_t::get_max(), 1,
7006 &ls, &next);
7007 if (r < 0) {
7008 derr << __func__ << " collection_list returned: " << cpp_strerror(r)
7009 << dendl;
7010 return r;
7011 }
7012 *empty = ls.empty();
7013 dout(10) << __func__ << " " << cid << " = " << (int)(*empty) << dendl;
7014 return 0;
7015 }
7016
7017 int BlueStore::collection_bits(const coll_t& cid)
7018 {
7019 dout(15) << __func__ << " " << cid << dendl;
7020 CollectionRef c = _get_collection(cid);
7021 if (!c)
7022 return -ENOENT;
7023 RWLock::RLocker l(c->lock);
7024 dout(10) << __func__ << " " << cid << " = " << c->cnode.bits << dendl;
7025 return c->cnode.bits;
7026 }
7027
7028 int BlueStore::collection_list(
7029 const coll_t& cid, const ghobject_t& start, const ghobject_t& end, int max,
7030 vector<ghobject_t> *ls, ghobject_t *pnext)
7031 {
7032 CollectionHandle c = _get_collection(cid);
7033 if (!c)
7034 return -ENOENT;
7035 return collection_list(c, start, end, max, ls, pnext);
7036 }
7037
7038 int BlueStore::collection_list(
7039 CollectionHandle &c_, const ghobject_t& start, const ghobject_t& end, int max,
7040 vector<ghobject_t> *ls, ghobject_t *pnext)
7041 {
7042 Collection *c = static_cast<Collection *>(c_.get());
7043 dout(15) << __func__ << " " << c->cid
7044 << " start " << start << " end " << end << " max " << max << dendl;
7045 int r;
7046 {
7047 RWLock::RLocker l(c->lock);
7048 r = _collection_list(c, start, end, max, ls, pnext);
7049 }
7050
7051 dout(10) << __func__ << " " << c->cid
7052 << " start " << start << " end " << end << " max " << max
7053 << " = " << r << ", ls.size() = " << ls->size()
7054 << ", next = " << (pnext ? *pnext : ghobject_t()) << dendl;
7055 return r;
7056 }
7057
7058 int BlueStore::_collection_list(
7059 Collection *c, const ghobject_t& start, const ghobject_t& end, int max,
7060 vector<ghobject_t> *ls, ghobject_t *pnext)
7061 {
7062
7063 if (!c->exists)
7064 return -ENOENT;
7065
7066 int r = 0;
7067 ghobject_t static_next;
7068 KeyValueDB::Iterator it;
7069 string temp_start_key, temp_end_key;
7070 string start_key, end_key;
7071 bool set_next = false;
7072 string pend;
7073 bool temp;
7074
7075 if (!pnext)
7076 pnext = &static_next;
7077
7078 if (start == ghobject_t::get_max() ||
7079 start.hobj.is_max()) {
7080 goto out;
7081 }
7082 get_coll_key_range(c->cid, c->cnode.bits, &temp_start_key, &temp_end_key,
7083 &start_key, &end_key);
7084 dout(20) << __func__
7085 << " range " << pretty_binary_string(temp_start_key)
7086 << " to " << pretty_binary_string(temp_end_key)
7087 << " and " << pretty_binary_string(start_key)
7088 << " to " << pretty_binary_string(end_key)
7089 << " start " << start << dendl;
7090 it = db->get_iterator(PREFIX_OBJ);
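// object keys live in two ranges: the temp namespace followed by the
// normal one; we may start in either and fall through from temp to
// normal in the loop below when the temp range is exhausted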
7091 if (start == ghobject_t() ||
7092 start.hobj == hobject_t() ||
7093 start == c->cid.get_min_hobj()) {
7094 it->upper_bound(temp_start_key);
7095 temp = true;
7096 } else {
7097 string k;
7098 get_object_key(cct, start, &k);
7099 if (start.hobj.is_temp()) {
7100 temp = true;
7101 assert(k >= temp_start_key && k < temp_end_key);
7102 } else {
7103 temp = false;
7104 assert(k >= start_key && k < end_key);
7105 }
7106 dout(20) << " start from " << pretty_binary_string(k)
7107 << " temp=" << (int)temp << dendl;
7108 it->lower_bound(k);
7109 }
7110 if (end.hobj.is_max()) {
7111 pend = temp ? temp_end_key : end_key;
7112 } else {
7113 get_object_key(cct, end, &end_key);
7114 if (end.hobj.is_temp()) {
7115 if (temp)
7116 pend = end_key;
7117 else
7118 goto out;
7119 } else {
7120 pend = temp ? temp_end_key : end_key;
7121 }
7122 }
7123 dout(20) << __func__ << " pend " << pretty_binary_string(pend) << dendl;
7124 while (true) {
7125 if (!it->valid() || it->key() >= pend) {
7126 if (!it->valid())
7127 dout(20) << __func__ << " iterator not valid (end of db?)" << dendl;
7128 else
7129 dout(20) << __func__ << " key " << pretty_binary_string(it->key())
7130 << " >= " << end << dendl;
7131 if (temp) {
7132 if (end.hobj.is_temp()) {
7133 break;
7134 }
7135 dout(30) << __func__ << " switch to non-temp namespace" << dendl;
7136 temp = false;
7137 it->upper_bound(start_key);
7138 pend = end_key;
7139 dout(30) << __func__ << " pend " << pretty_binary_string(pend) << dendl;
7140 continue;
7141 }
7142 break;
7143 }
7144 dout(30) << __func__ << " key " << pretty_binary_string(it->key()) << dendl;
7145 if (is_extent_shard_key(it->key())) {
7146 it->next();
7147 continue;
7148 }
7149 ghobject_t oid;
7150 int r = get_key_object(it->key(), &oid);
7151 assert(r == 0);
7152 dout(20) << __func__ << " oid " << oid << " end " << end << dendl;
7153 if (ls->size() >= (unsigned)max) {
7154 dout(20) << __func__ << " reached max " << max << dendl;
7155 *pnext = oid;
7156 set_next = true;
7157 break;
7158 }
7159 ls->push_back(oid);
7160 it->next();
7161 }
7162 out:
7163 if (!set_next) {
7164 *pnext = ghobject_t::get_max();
7165 }
7166
7167 return r;
7168 }
7169
7170 int BlueStore::omap_get(
7171 const coll_t& cid, ///< [in] Collection containing oid
7172 const ghobject_t &oid, ///< [in] Object containing omap
7173 bufferlist *header, ///< [out] omap header
7174 map<string, bufferlist> *out ///< [out] Key to value map
7175 )
7176 {
7177 CollectionHandle c = _get_collection(cid);
7178 if (!c)
7179 return -ENOENT;
7180 return omap_get(c, oid, header, out);
7181 }
7182
7183 int BlueStore::omap_get(
7184 CollectionHandle &c_, ///< [in] Collection containing oid
7185 const ghobject_t &oid, ///< [in] Object containing omap
7186 bufferlist *header, ///< [out] omap header
7187 map<string, bufferlist> *out ///< [out] Key to value map
7188 )
7189 {
7190 Collection *c = static_cast<Collection *>(c_.get());
7191 dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
7192 if (!c->exists)
7193 return -ENOENT;
7194 RWLock::RLocker l(c->lock);
7195 int r = 0;
7196 OnodeRef o = c->get_onode(oid, false);
7197 if (!o || !o->exists) {
7198 r = -ENOENT;
7199 goto out;
7200 }
7201 if (!o->onode.has_omap())
7202 goto out;
7203 o->flush();
7204 {
7205 KeyValueDB::Iterator it = db->get_iterator(PREFIX_OMAP);
7206 string head, tail;
7207 get_omap_header(o->onode.nid, &head);
7208 get_omap_tail(o->onode.nid, &tail);
7209 it->lower_bound(head);
7210 while (it->valid()) {
7211 if (it->key() == head) {
7212 dout(30) << __func__ << " got header" << dendl;
7213 *header = it->value();
7214 } else if (it->key() >= tail) {
7215 dout(30) << __func__ << " reached tail" << dendl;
7216 break;
7217 } else {
7218 string user_key;
7219 decode_omap_key(it->key(), &user_key);
7220 dout(30) << __func__ << " got " << pretty_binary_string(it->key())
7221 << " -> " << user_key << dendl;
7222 (*out)[user_key] = it->value();
7223 }
7224 it->next();
7225 }
7226 }
7227 out:
7228 dout(10) << __func__ << " " << c->get_cid() << " oid " << oid << " = " << r
7229 << dendl;
7230 return r;
7231 }
7232
7233 int BlueStore::omap_get_header(
7234 const coll_t& cid, ///< [in] Collection containing oid
7235 const ghobject_t &oid, ///< [in] Object containing omap
7236 bufferlist *header, ///< [out] omap header
7237 bool allow_eio ///< [in] don't assert on eio
7238 )
7239 {
7240 CollectionHandle c = _get_collection(cid);
7241 if (!c)
7242 return -ENOENT;
7243 return omap_get_header(c, oid, header, allow_eio);
7244 }
7245
7246 int BlueStore::omap_get_header(
7247 CollectionHandle &c_, ///< [in] Collection containing oid
7248 const ghobject_t &oid, ///< [in] Object containing omap
7249 bufferlist *header, ///< [out] omap header
7250 bool allow_eio ///< [in] don't assert on eio
7251 )
7252 {
7253 Collection *c = static_cast<Collection *>(c_.get());
7254 dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
7255 if (!c->exists)
7256 return -ENOENT;
7257 RWLock::RLocker l(c->lock);
7258 int r = 0;
7259 OnodeRef o = c->get_onode(oid, false);
7260 if (!o || !o->exists) {
7261 r = -ENOENT;
7262 goto out;
7263 }
7264 if (!o->onode.has_omap())
7265 goto out;
7266 o->flush();
7267 {
7268 string head;
7269 get_omap_header(o->onode.nid, &head);
7270 if (db->get(PREFIX_OMAP, head, header) >= 0) {
7271 dout(30) << __func__ << " got header" << dendl;
7272 } else {
7273 dout(30) << __func__ << " no header" << dendl;
7274 }
7275 }
7276 out:
7277 dout(10) << __func__ << " " << c->get_cid() << " oid " << oid << " = " << r
7278 << dendl;
7279 return r;
7280 }
7281
7282 int BlueStore::omap_get_keys(
7283 const coll_t& cid, ///< [in] Collection containing oid
7284 const ghobject_t &oid, ///< [in] Object containing omap
7285 set<string> *keys ///< [out] Keys defined on oid
7286 )
7287 {
7288 CollectionHandle c = _get_collection(cid);
7289 if (!c)
7290 return -ENOENT;
7291 return omap_get_keys(c, oid, keys);
7292 }
7293
7294 int BlueStore::omap_get_keys(
7295 CollectionHandle &c_, ///< [in] Collection containing oid
7296 const ghobject_t &oid, ///< [in] Object containing omap
7297 set<string> *keys ///< [out] Keys defined on oid
7298 )
7299 {
7300 Collection *c = static_cast<Collection *>(c_.get());
7301 dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
7302 if (!c->exists)
7303 return -ENOENT;
7304 RWLock::RLocker l(c->lock);
7305 int r = 0;
7306 OnodeRef o = c->get_onode(oid, false);
7307 if (!o || !o->exists) {
7308 r = -ENOENT;
7309 goto out;
7310 }
7311 if (!o->onode.has_omap())
7312 goto out;
7313 o->flush();
7314 {
7315 KeyValueDB::Iterator it = db->get_iterator(PREFIX_OMAP);
7316 string head, tail;
7317 get_omap_key(o->onode.nid, string(), &head);
7318 get_omap_tail(o->onode.nid, &tail);
7319 it->lower_bound(head);
7320 while (it->valid()) {
7321 if (it->key() >= tail) {
7322 dout(30) << __func__ << " reached tail" << dendl;
7323 break;
7324 }
7325 string user_key;
7326 decode_omap_key(it->key(), &user_key);
7327 dout(30) << __func__ << " got " << pretty_binary_string(it->key())
7328 << " -> " << user_key << dendl;
7329 keys->insert(user_key);
7330 it->next();
7331 }
7332 }
7333 out:
7334 dout(10) << __func__ << " " << c->get_cid() << " oid " << oid << " = " << r
7335 << dendl;
7336 return r;
7337 }
7338
7339 int BlueStore::omap_get_values(
7340 const coll_t& cid, ///< [in] Collection containing oid
7341 const ghobject_t &oid, ///< [in] Object containing omap
7342 const set<string> &keys, ///< [in] Keys to get
7343 map<string, bufferlist> *out ///< [out] Returned keys and values
7344 )
7345 {
7346 CollectionHandle c = _get_collection(cid);
7347 if (!c)
7348 return -ENOENT;
7349 return omap_get_values(c, oid, keys, out);
7350 }
7351
7352 int BlueStore::omap_get_values(
7353 CollectionHandle &c_, ///< [in] Collection containing oid
7354 const ghobject_t &oid, ///< [in] Object containing omap
7355 const set<string> &keys, ///< [in] Keys to get
7356 map<string, bufferlist> *out ///< [out] Returned keys and values
7357 )
7358 {
7359 Collection *c = static_cast<Collection *>(c_.get());
7360 dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
7361 if (!c->exists)
7362 return -ENOENT;
7363 RWLock::RLocker l(c->lock);
7364 int r = 0;
7365 string final_key;
7366 OnodeRef o = c->get_onode(oid, false);
7367 if (!o || !o->exists) {
7368 r = -ENOENT;
7369 goto out;
7370 }
7371 if (!o->onode.has_omap())
7372 goto out;
7373 o->flush();
7374 _key_encode_u64(o->onode.nid, &final_key);
7375 final_key.push_back('.');
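// final_key is now the 8-byte encoded nid plus '.', i.e. 9 bytes; the
// resize(9) below trims it back to this prefix before appending each
// requested key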
7376 for (set<string>::const_iterator p = keys.begin(); p != keys.end(); ++p) {
7377 final_key.resize(9); // keep prefix
7378 final_key += *p;
7379 bufferlist val;
7380 if (db->get(PREFIX_OMAP, final_key, &val) >= 0) {
7381 dout(30) << __func__ << " got " << pretty_binary_string(final_key)
7382 << " -> " << *p << dendl;
7383 out->insert(make_pair(*p, val));
7384 }
7385 }
7386 out:
7387 dout(10) << __func__ << " " << c->get_cid() << " oid " << oid << " = " << r
7388 << dendl;
7389 return r;
7390 }
7391
7392 int BlueStore::omap_check_keys(
7393 const coll_t& cid, ///< [in] Collection containing oid
7394 const ghobject_t &oid, ///< [in] Object containing omap
7395 const set<string> &keys, ///< [in] Keys to check
7396 set<string> *out ///< [out] Subset of keys defined on oid
7397 )
7398 {
7399 CollectionHandle c = _get_collection(cid);
7400 if (!c)
7401 return -ENOENT;
7402 return omap_check_keys(c, oid, keys, out);
7403 }
7404
7405 int BlueStore::omap_check_keys(
7406 CollectionHandle &c_, ///< [in] Collection containing oid
7407 const ghobject_t &oid, ///< [in] Object containing omap
7408 const set<string> &keys, ///< [in] Keys to check
7409 set<string> *out ///< [out] Subset of keys defined on oid
7410 )
7411 {
7412 Collection *c = static_cast<Collection *>(c_.get());
7413 dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
7414 if (!c->exists)
7415 return -ENOENT;
7416 RWLock::RLocker l(c->lock);
7417 int r = 0;
7418 string final_key;
7419 OnodeRef o = c->get_onode(oid, false);
7420 if (!o || !o->exists) {
7421 r = -ENOENT;
7422 goto out;
7423 }
7424 if (!o->onode.has_omap())
7425 goto out;
7426 o->flush();
7427 _key_encode_u64(o->onode.nid, &final_key);
7428 final_key.push_back('.');
7429 for (set<string>::const_iterator p = keys.begin(); p != keys.end(); ++p) {
7430 final_key.resize(9); // keep prefix
7431 final_key += *p;
7432 bufferlist val;
7433 if (db->get(PREFIX_OMAP, final_key, &val) >= 0) {
7434 dout(30) << __func__ << " have " << pretty_binary_string(final_key)
7435 << " -> " << *p << dendl;
7436 out->insert(*p);
7437 } else {
7438 dout(30) << __func__ << " miss " << pretty_binary_string(final_key)
7439 << " -> " << *p << dendl;
7440 }
7441 }
7442 out:
7443 dout(10) << __func__ << " " << c->get_cid() << " oid " << oid << " = " << r
7444 << dendl;
7445 return r;
7446 }
7447
7448 ObjectMap::ObjectMapIterator BlueStore::get_omap_iterator(
7449 const coll_t& cid, ///< [in] collection
7450 const ghobject_t &oid ///< [in] object
7451 )
7452 {
7453 CollectionHandle c = _get_collection(cid);
7454 if (!c) {
7455 dout(10) << __func__ << " " << cid << " doesn't exist" << dendl;
7456 return ObjectMap::ObjectMapIterator();
7457 }
7458 return get_omap_iterator(c, oid);
7459 }
7460
7461 ObjectMap::ObjectMapIterator BlueStore::get_omap_iterator(
7462 CollectionHandle &c_, ///< [in] collection
7463 const ghobject_t &oid ///< [in] object
7464 )
7465 {
7466 Collection *c = static_cast<Collection *>(c_.get());
7467 dout(10) << __func__ << " " << c->get_cid() << " " << oid << dendl;
7468 if (!c->exists) {
7469 return ObjectMap::ObjectMapIterator();
7470 }
7471 RWLock::RLocker l(c->lock);
7472 OnodeRef o = c->get_onode(oid, false);
7473 if (!o || !o->exists) {
7474 dout(10) << __func__ << " " << oid << " doesn't exist" << dendl;
7475 return ObjectMap::ObjectMapIterator();
7476 }
7477 o->flush();
7478 dout(10) << __func__ << " has_omap = " << (int)o->onode.has_omap() << dendl;
7479 KeyValueDB::Iterator it = db->get_iterator(PREFIX_OMAP);
7480 return ObjectMap::ObjectMapIterator(new OmapIteratorImpl(c, o, it));
7481 }
7482
7483 // -----------------
7484 // write helpers
7485
7486 void BlueStore::_prepare_ondisk_format_super(KeyValueDB::Transaction& t)
7487 {
7488 dout(10) << __func__ << " ondisk_format " << ondisk_format
7489 << " min_compat_ondisk_format " << min_compat_ondisk_format
7490 << dendl;
7491 assert(ondisk_format == latest_ondisk_format);
7492 {
7493 bufferlist bl;
7494 ::encode(ondisk_format, bl);
7495 t->set(PREFIX_SUPER, "ondisk_format", bl);
7496 }
7497 {
7498 bufferlist bl;
7499 ::encode(min_compat_ondisk_format, bl);
7500 t->set(PREFIX_SUPER, "min_compat_ondisk_format", bl);
7501 }
7502 }
7503
7504 int BlueStore::_open_super_meta()
7505 {
7506 // nid
7507 {
7508 nid_max = 0;
7509 bufferlist bl;
7510 db->get(PREFIX_SUPER, "nid_max", &bl);
7511 bufferlist::iterator p = bl.begin();
7512 try {
7513 uint64_t v;
7514 ::decode(v, p);
7515 nid_max = v;
7516 } catch (buffer::error& e) {
7517 derr << __func__ << " unable to read nid_max" << dendl;
7518 return -EIO;
7519 }
7520 dout(10) << __func__ << " old nid_max " << nid_max << dendl;
7521 nid_last = nid_max.load();
7522 }
7523
7524 // blobid
7525 {
7526 blobid_max = 0;
7527 bufferlist bl;
7528 db->get(PREFIX_SUPER, "blobid_max", &bl);
7529 bufferlist::iterator p = bl.begin();
7530 try {
7531 uint64_t v;
7532 ::decode(v, p);
7533 blobid_max = v;
7534 } catch (buffer::error& e) {
7535 derr << __func__ << " unable to read blobid_max" << dendl;
7536 return -EIO;
7537 }
7538 dout(10) << __func__ << " old blobid_max " << blobid_max << dendl;
7539 blobid_last = blobid_max.load();
7540 }
7541
7542 // freelist
7543 {
7544 bufferlist bl;
7545 db->get(PREFIX_SUPER, "freelist_type", &bl);
7546 if (bl.length()) {
7547 freelist_type = std::string(bl.c_str(), bl.length());
7548 dout(10) << __func__ << " freelist_type " << freelist_type << dendl;
7549 } else {
7550 assert("extent freelist manager is not supported" == 0);
7551 }
7552 }
7553
7554 // bluefs alloc
7555 if (cct->_conf->bluestore_bluefs) {
7556 bluefs_extents.clear();
7557 bufferlist bl;
7558 db->get(PREFIX_SUPER, "bluefs_extents", &bl);
7559 bufferlist::iterator p = bl.begin();
7560 try {
7561 ::decode(bluefs_extents, p);
7562 }
7563 catch (buffer::error& e) {
7564 derr << __func__ << " unable to read bluefs_extents" << dendl;
7565 return -EIO;
7566 }
7567 dout(10) << __func__ << " bluefs_extents 0x" << std::hex << bluefs_extents
7568 << std::dec << dendl;
7569 }
7570
7571 // ondisk format
7572 int32_t compat_ondisk_format = 0;
7573 {
7574 bufferlist bl;
7575 int r = db->get(PREFIX_SUPER, "ondisk_format", &bl);
7576 if (r < 0) {
7577 // base case: kraken bluestore is v1 and readable by v1
7578 dout(20) << __func__ << " missing ondisk_format; assuming kraken"
7579 << dendl;
7580 ondisk_format = 1;
7581 compat_ondisk_format = 1;
7582 } else {
7583 auto p = bl.begin();
7584 try {
7585 ::decode(ondisk_format, p);
7586 } catch (buffer::error& e) {
7587 derr << __func__ << " unable to read ondisk_format" << dendl;
7588 return -EIO;
7589 }
7590 bl.clear();
7591 {
7592 r = db->get(PREFIX_SUPER, "min_compat_ondisk_format", &bl);
7593 assert(!r);
7594 auto p = bl.begin();
7595 try {
7596 ::decode(compat_ondisk_format, p);
7597 } catch (buffer::error& e) {
7598 derr << __func__ << " unable to read compat_ondisk_format" << dendl;
7599 return -EIO;
7600 }
7601 }
7602 }
7603 dout(10) << __func__ << " ondisk_format " << ondisk_format
7604 << " compat_ondisk_format " << compat_ondisk_format
7605 << dendl;
7606 }
7607
7608 if (latest_ondisk_format < compat_ondisk_format) {
7609 derr << __func__ << " compat_ondisk_format is "
7610 << compat_ondisk_format << " but we only understand version "
7611 << latest_ondisk_format << dendl;
7612 return -EPERM;
7613 }
7614 if (ondisk_format < latest_ondisk_format) {
7615 int r = _upgrade_super();
7616 if (r < 0) {
7617 return r;
7618 }
7619 }
7620
7621 {
7622 bufferlist bl;
7623 db->get(PREFIX_SUPER, "min_alloc_size", &bl);
7624 auto p = bl.begin();
7625 try {
7626 uint64_t val;
7627 ::decode(val, p);
7628 min_alloc_size = val;
7629 min_alloc_size_order = ctz(val);
7630 assert(min_alloc_size == 1u << min_alloc_size_order);
7631 } catch (buffer::error& e) {
7632 derr << __func__ << " unable to read min_alloc_size" << dendl;
7633 return -EIO;
7634 }
7635 dout(10) << __func__ << " min_alloc_size 0x" << std::hex << min_alloc_size
7636 << std::dec << dendl;
7637 }
7638 _open_statfs();
7639 _set_alloc_sizes();
7640 _set_throttle_params();
7641
7642 _set_csum();
7643 _set_compression();
7644 _set_blob_size();
7645
7646 return 0;
7647 }
7648
7649 int BlueStore::_upgrade_super()
7650 {
7651 dout(1) << __func__ << " from " << ondisk_format << ", latest "
7652 << latest_ondisk_format << dendl;
7653 assert(ondisk_format > 0);
7654 assert(ondisk_format < latest_ondisk_format);
7655
7656 if (ondisk_format == 1) {
7657 // changes:
7658 // - super: added ondisk_format
7659 // - super: added min_readable_ondisk_format
7660 // - super: added min_compat_ondisk_format
7661 // - super: added min_alloc_size
7662 // - super: removed min_min_alloc_size
7663 KeyValueDB::Transaction t = db->get_transaction();
7664 {
7665 bufferlist bl;
7666 db->get(PREFIX_SUPER, "min_min_alloc_size", &bl);
7667 auto p = bl.begin();
7668 try {
7669 uint64_t val;
7670 ::decode(val, p);
7671 min_alloc_size = val;
7672 } catch (buffer::error& e) {
7673 derr << __func__ << " failed to read min_min_alloc_size" << dendl;
7674 return -EIO;
7675 }
7676 t->set(PREFIX_SUPER, "min_alloc_size", bl);
7677 t->rmkey(PREFIX_SUPER, "min_min_alloc_size");
7678 }
7679 ondisk_format = 2;
7680 _prepare_ondisk_format_super(t);
7681 int r = db->submit_transaction_sync(t);
7682 assert(r == 0);
7683 }
7684
7685 // done
7686 dout(1) << __func__ << " done" << dendl;
7687 return 0;
7688 }
7689
7690 void BlueStore::_assign_nid(TransContext *txc, OnodeRef o)
7691 {
7692 if (o->onode.nid) {
7693 assert(o->exists);
7694 return;
7695 }
7696 uint64_t nid = ++nid_last;
7697 dout(20) << __func__ << " " << nid << dendl;
7698 o->onode.nid = nid;
7699 txc->last_nid = nid;
7700 o->exists = true;
7701 }
7702
7703 uint64_t BlueStore::_assign_blobid(TransContext *txc)
7704 {
7705 uint64_t bid = ++blobid_last;
7706 dout(20) << __func__ << " " << bid << dendl;
7707 txc->last_blobid = bid;
7708 return bid;
7709 }
7710
7711 void BlueStore::get_db_statistics(Formatter *f)
7712 {
7713 db->get_statistics(f);
7714 }
7715
7716 BlueStore::TransContext *BlueStore::_txc_create(OpSequencer *osr)
7717 {
7718 TransContext *txc = new TransContext(cct, osr);
7719 txc->t = db->get_transaction();
7720 osr->queue_new(txc);
7721 dout(20) << __func__ << " osr " << osr << " = " << txc
7722 << " seq " << txc->seq << dendl;
7723 return txc;
7724 }
7725
7726 void BlueStore::_txc_calc_cost(TransContext *txc)
7727 {
7728 // this is about the simplest model for transaction cost you can
7729 // imagine. there is a fixed overhead in that we always count a
7730 // minimum of one "io", plus a configurable cost per "io" (with
7731 // different hdd and ssd defaults), and we add the transaction's
7732 // byte count on top of that.
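// Illustrative example (hypothetical numbers, not from this code): a txc
// with two pending aios of four iovecs each gives ios = 1 + 8 = 9; with a
// per-io cost of 670000 and txc->bytes = 65536,
// cost = 9 * 670000 + 65536 = 6095536.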
7733 int ios = 1; // one "io" for the kv commit
7734 for (auto& p : txc->ioc.pending_aios) {
7735 ios += p.iov.size();
7736 }
7737 auto cost = throttle_cost_per_io.load();
7738 txc->cost = ios * cost + txc->bytes;
7739 dout(10) << __func__ << " " << txc << " cost " << txc->cost << " ("
7740 << ios << " ios * " << cost << " + " << txc->bytes
7741 << " bytes)" << dendl;
7742 }
7743
7744 void BlueStore::_txc_update_store_statfs(TransContext *txc)
7745 {
7746 if (txc->statfs_delta.is_empty())
7747 return;
7748
7749 logger->inc(l_bluestore_allocated, txc->statfs_delta.allocated());
7750 logger->inc(l_bluestore_stored, txc->statfs_delta.stored());
7751 logger->inc(l_bluestore_compressed, txc->statfs_delta.compressed());
7752 logger->inc(l_bluestore_compressed_allocated, txc->statfs_delta.compressed_allocated());
7753 logger->inc(l_bluestore_compressed_original, txc->statfs_delta.compressed_original());
7754
7755 {
7756 std::lock_guard<std::mutex> l(vstatfs_lock);
7757 vstatfs += txc->statfs_delta;
7758 }
7759
7760 bufferlist bl;
7761 txc->statfs_delta.encode(bl);
7762
7763 txc->t->merge(PREFIX_STAT, "bluestore_statfs", bl);
7764 txc->statfs_delta.reset();
7765 }
7766
7767 void BlueStore::_txc_state_proc(TransContext *txc)
7768 {
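// Rough lifecycle driven by this state machine (see the cases below):
//   PREPARE -> AIO_WAIT -> IO_DONE -> KV_QUEUED -> KV_SUBMITTED
//   -> KV_DONE -> FINISHING -> DONE,
// with DEFERRED_QUEUED / DEFERRED_CLEANUP spliced in after KV_DONE when the
// txc carries a deferred_txn.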
7769 while (true) {
7770 dout(10) << __func__ << " txc " << txc
7771 << " " << txc->get_state_name() << dendl;
7772 switch (txc->state) {
7773 case TransContext::STATE_PREPARE:
7774 txc->log_state_latency(logger, l_bluestore_state_prepare_lat);
7775 if (txc->ioc.has_pending_aios()) {
7776 txc->state = TransContext::STATE_AIO_WAIT;
7777 txc->had_ios = true;
7778 _txc_aio_submit(txc);
7779 return;
7780 }
7781 // ** fall-thru **
7782
7783 case TransContext::STATE_AIO_WAIT:
7784 txc->log_state_latency(logger, l_bluestore_state_aio_wait_lat);
7785 _txc_finish_io(txc); // may trigger blocked txc's too
7786 return;
7787
7788 case TransContext::STATE_IO_DONE:
7789 //assert(txc->osr->qlock.is_locked()); // see _txc_finish_io
7790 if (txc->had_ios) {
7791 ++txc->osr->txc_with_unstable_io;
7792 }
7793 txc->log_state_latency(logger, l_bluestore_state_io_done_lat);
7794 txc->state = TransContext::STATE_KV_QUEUED;
7795 if (cct->_conf->bluestore_sync_submit_transaction) {
7796 if (txc->last_nid >= nid_max ||
7797 txc->last_blobid >= blobid_max) {
7798 dout(20) << __func__
7799 << " last_{nid,blobid} exceeds max, submit via kv thread"
7800 << dendl;
7801 } else if (txc->osr->kv_committing_serially) {
7802 dout(20) << __func__ << " prior txc submitted via kv thread, us too"
7803 << dendl;
7804 // note: this is starvation-prone. once we have a txc in a busy
7805 // sequencer that is committing serially it is possible to keep
7806 // submitting new transactions fast enough that we get stuck doing
7807 // so. the alternative is to block here... fixme?
7808 } else if (txc->osr->txc_with_unstable_io) {
7809 dout(20) << __func__ << " prior txc(s) with unstable ios "
7810 << txc->osr->txc_with_unstable_io.load() << dendl;
7811 } else if (cct->_conf->bluestore_debug_randomize_serial_transaction &&
7812 rand() % cct->_conf->bluestore_debug_randomize_serial_transaction
7813 == 0) {
7814 dout(20) << __func__ << " DEBUG randomly forcing submit via kv thread"
7815 << dendl;
7816 } else {
7817 txc->state = TransContext::STATE_KV_SUBMITTED;
7818 int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction(txc->t);
7819 assert(r == 0);
7820 _txc_applied_kv(txc);
7821 }
7822 }
7823 {
7824 std::lock_guard<std::mutex> l(kv_lock);
7825 kv_queue.push_back(txc);
7826 kv_cond.notify_one();
7827 if (txc->state != TransContext::STATE_KV_SUBMITTED) {
7828 kv_queue_unsubmitted.push_back(txc);
7829 ++txc->osr->kv_committing_serially;
7830 }
7831 if (txc->had_ios)
7832 kv_ios++;
7833 kv_throttle_costs += txc->cost;
7834 }
7835 return;
7836 case TransContext::STATE_KV_SUBMITTED:
7837 txc->log_state_latency(logger, l_bluestore_state_kv_committing_lat);
7838 txc->state = TransContext::STATE_KV_DONE;
7839 _txc_committed_kv(txc);
7840 // ** fall-thru **
7841
7842 case TransContext::STATE_KV_DONE:
7843 txc->log_state_latency(logger, l_bluestore_state_kv_done_lat);
7844 if (txc->deferred_txn) {
7845 txc->state = TransContext::STATE_DEFERRED_QUEUED;
7846 _deferred_queue(txc);
7847 return;
7848 }
7849 txc->state = TransContext::STATE_FINISHING;
7850 break;
7851
7852 case TransContext::STATE_DEFERRED_CLEANUP:
7853 txc->log_state_latency(logger, l_bluestore_state_deferred_cleanup_lat);
7854 txc->state = TransContext::STATE_FINISHING;
7855 // ** fall-thru **
7856
7857 case TransContext::STATE_FINISHING:
7858 txc->log_state_latency(logger, l_bluestore_state_finishing_lat);
7859 _txc_finish(txc);
7860 return;
7861
7862 default:
7863 derr << __func__ << " unexpected txc " << txc
7864 << " state " << txc->get_state_name() << dendl;
7865 assert(0 == "unexpected txc state");
7866 return;
7867 }
7868 }
7869 }
7870
7871 void BlueStore::_txc_finish_io(TransContext *txc)
7872 {
7873 dout(20) << __func__ << " " << txc << dendl;
7874
7875 /*
7876 * we need to preserve the order of kv transactions,
7877 * even though aio will complete in any order.
7878 */
7879
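// For example (illustrative), with osr->q = [t1: KV_SUBMITTED, t2: AIO_WAIT,
// t3] and t3's aio just completing, the backward scan below finds t2 still
// below IO_DONE and returns, leaving t3 queued; t3 only advances once t2
// reaches IO_DONE, so kv submission order matches queue order.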
7880 OpSequencer *osr = txc->osr.get();
7881 std::lock_guard<std::mutex> l(osr->qlock);
7882 txc->state = TransContext::STATE_IO_DONE;
7883
7884 // release aio contexts (including pinned buffers).
7885 txc->ioc.running_aios.clear();
7886
7887 OpSequencer::q_list_t::iterator p = osr->q.iterator_to(*txc);
7888 while (p != osr->q.begin()) {
7889 --p;
7890 if (p->state < TransContext::STATE_IO_DONE) {
7891 dout(20) << __func__ << " " << txc << " blocked by " << &*p << " "
7892 << p->get_state_name() << dendl;
7893 return;
7894 }
7895 if (p->state > TransContext::STATE_IO_DONE) {
7896 ++p;
7897 break;
7898 }
7899 }
7900 do {
7901 _txc_state_proc(&*p++);
7902 } while (p != osr->q.end() &&
7903 p->state == TransContext::STATE_IO_DONE);
7904
7905 if (osr->kv_submitted_waiters &&
7906 osr->_is_all_kv_submitted()) {
7907 osr->qcond.notify_all();
7908 }
7909 }
7910
7911 void BlueStore::_txc_write_nodes(TransContext *txc, KeyValueDB::Transaction t)
7912 {
7913 dout(20) << __func__ << " txc " << txc
7914 << " onodes " << txc->onodes
7915 << " shared_blobs " << txc->shared_blobs
7916 << dendl;
7917
7918 // finalize onodes
7919 for (auto o : txc->onodes) {
7920 // finalize extent_map shards
7921 o->extent_map.update(t, false);
7922 if (o->extent_map.needs_reshard()) {
7923 o->extent_map.reshard(db, t);
7924 o->extent_map.update(t, true);
7925 if (o->extent_map.needs_reshard()) {
7926 dout(20) << __func__ << " warning: still wants reshard, check options?"
7927 << dendl;
7928 o->extent_map.clear_needs_reshard();
7929 }
7930 logger->inc(l_bluestore_onode_reshard);
7931 }
7932
7933 // bound encode
7934 size_t bound = 0;
7935 denc(o->onode, bound);
7936 o->extent_map.bound_encode_spanning_blobs(bound);
7937 if (o->onode.extent_map_shards.empty()) {
7938 denc(o->extent_map.inline_bl, bound);
7939 }
7940
7941 // encode
7942 bufferlist bl;
7943 unsigned onode_part, blob_part, extent_part;
7944 {
7945 auto p = bl.get_contiguous_appender(bound, true);
7946 denc(o->onode, p);
7947 onode_part = p.get_logical_offset();
7948 o->extent_map.encode_spanning_blobs(p);
7949 blob_part = p.get_logical_offset() - onode_part;
7950 if (o->onode.extent_map_shards.empty()) {
7951 denc(o->extent_map.inline_bl, p);
7952 }
7953 extent_part = p.get_logical_offset() - onode_part - blob_part;
7954 }
7955
7956 dout(20) << " onode " << o->oid << " is " << bl.length()
7957 << " (" << onode_part << " bytes onode + "
7958 << blob_part << " bytes spanning blobs + "
7959 << extent_part << " bytes inline extents)"
7960 << dendl;
7961 t->set(PREFIX_OBJ, o->key.c_str(), o->key.size(), bl);
7962 o->flushing_count++;
7963 }
7964
7965 // objects we modified whose onode we did not need to rewrite
7966 auto p = txc->modified_objects.begin();
7967 while (p != txc->modified_objects.end()) {
7968 if (txc->onodes.count(*p) == 0) {
7969 (*p)->flushing_count++;
7970 ++p;
7971 } else {
7972 // remove duplicates shared with the onodes list to avoid problems in _txc_finish
7973 p = txc->modified_objects.erase(p);
7974 }
7975 }
7976
7977 // finalize shared_blobs
7978 for (auto sb : txc->shared_blobs) {
7979 string key;
7980 auto sbid = sb->get_sbid();
7981 get_shared_blob_key(sbid, &key);
7982 if (sb->persistent->empty()) {
7983 dout(20) << " shared_blob 0x" << std::hex << sbid << std::dec
7984 << " is empty" << dendl;
7985 t->rmkey(PREFIX_SHARED_BLOB, key);
7986 } else {
7987 bufferlist bl;
7988 ::encode(*(sb->persistent), bl);
7989 dout(20) << " shared_blob 0x" << std::hex << sbid << std::dec
7990 << " is " << bl.length() << " " << *sb << dendl;
7991 t->set(PREFIX_SHARED_BLOB, key, bl);
7992 }
7993 }
7994 }
7995
7996 void BlueStore::BSPerfTracker::update_from_perfcounters(
7997 PerfCounters &logger)
7998 {
7999 os_commit_latency.consume_next(
8000 logger.get_tavg_ms(
8001 l_bluestore_commit_lat));
8002 os_apply_latency.consume_next(
8003 logger.get_tavg_ms(
8004 l_bluestore_commit_lat));
8005 }
8006
8007 void BlueStore::_txc_finalize_kv(TransContext *txc, KeyValueDB::Transaction t)
8008 {
8009 dout(20) << __func__ << " txc " << txc << std::hex
8010 << " allocated 0x" << txc->allocated
8011 << " released 0x" << txc->released
8012 << std::dec << dendl;
8013
8014 // We have to handle the case where we allocate *and* deallocate the
8015 // same region in this transaction. The freelist doesn't like that.
8016 // (Actually, the only thing that cares is the BitmapFreelistManager
8017 // debug check. But that's important.)
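// Illustrative example (hypothetical extents): allocated = {0x1000~0x3000}
// and released = {0x2000~0x2000} overlap in {0x2000~0x2000}; we then hand
// the freelist allocated = {0x1000~0x1000} and released = {}, so no region
// is both allocated and released within the same transaction.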
8018 interval_set<uint64_t> tmp_allocated, tmp_released;
8019 interval_set<uint64_t> *pallocated = &txc->allocated;
8020 interval_set<uint64_t> *preleased = &txc->released;
8021 if (!txc->allocated.empty() && !txc->released.empty()) {
8022 interval_set<uint64_t> overlap;
8023 overlap.intersection_of(txc->allocated, txc->released);
8024 if (!overlap.empty()) {
8025 tmp_allocated = txc->allocated;
8026 tmp_allocated.subtract(overlap);
8027 tmp_released = txc->released;
8028 tmp_released.subtract(overlap);
8029 dout(20) << __func__ << " overlap 0x" << std::hex << overlap
8030 << ", new allocated 0x" << tmp_allocated
8031 << " released 0x" << tmp_released << std::dec
8032 << dendl;
8033 pallocated = &tmp_allocated;
8034 preleased = &tmp_released;
8035 }
8036 }
8037
8038 // update freelist with non-overlap sets
8039 for (interval_set<uint64_t>::iterator p = pallocated->begin();
8040 p != pallocated->end();
8041 ++p) {
8042 fm->allocate(p.get_start(), p.get_len(), t);
8043 }
8044 for (interval_set<uint64_t>::iterator p = preleased->begin();
8045 p != preleased->end();
8046 ++p) {
8047 dout(20) << __func__ << " release 0x" << std::hex << p.get_start()
8048 << "~" << p.get_len() << std::dec << dendl;
8049 fm->release(p.get_start(), p.get_len(), t);
8050 }
8051
8052 _txc_update_store_statfs(txc);
8053 }
8054
8055 void BlueStore::_txc_applied_kv(TransContext *txc)
8056 {
8057 for (auto ls : { &txc->onodes, &txc->modified_objects }) {
8058 for (auto& o : *ls) {
8059 dout(20) << __func__ << " onode " << o << " had " << o->flushing_count
8060 << dendl;
8061 if (--o->flushing_count == 0) {
8062 std::lock_guard<std::mutex> l(o->flush_lock);
8063 o->flush_cond.notify_all();
8064 }
8065 }
8066 }
8067 }
8068
8069 void BlueStore::_txc_committed_kv(TransContext *txc)
8070 {
8071 dout(20) << __func__ << " txc " << txc << dendl;
8072
8073 // warning: we're calling onreadable_sync inside the sequencer lock
8074 if (txc->onreadable_sync) {
8075 txc->onreadable_sync->complete(0);
8076 txc->onreadable_sync = NULL;
8077 }
8078 unsigned n = txc->osr->parent->shard_hint.hash_to_shard(m_finisher_num);
8079 if (txc->oncommit) {
8080 logger->tinc(l_bluestore_commit_lat, ceph_clock_now() - txc->start);
8081 finishers[n]->queue(txc->oncommit);
8082 txc->oncommit = NULL;
8083 }
8084 if (txc->onreadable) {
8085 finishers[n]->queue(txc->onreadable);
8086 txc->onreadable = NULL;
8087 }
8088
8089 if (!txc->oncommits.empty()) {
8090 finishers[n]->queue(txc->oncommits);
8091 }
8092 }
8093
8094 void BlueStore::_txc_finish(TransContext *txc)
8095 {
8096 dout(20) << __func__ << " " << txc << " onodes " << txc->onodes << dendl;
8097 assert(txc->state == TransContext::STATE_FINISHING);
8098
8099 for (auto& sb : txc->shared_blobs_written) {
8100 sb->bc.finish_write(sb->get_cache(), txc->seq);
8101 }
8102 txc->shared_blobs_written.clear();
8103
8104 while (!txc->removed_collections.empty()) {
8105 _queue_reap_collection(txc->removed_collections.front());
8106 txc->removed_collections.pop_front();
8107 }
8108
8109 OpSequencerRef osr = txc->osr;
8110 bool empty = false;
8111 bool submit_deferred = false;
8112 OpSequencer::q_list_t releasing_txc;
8113 {
8114 std::lock_guard<std::mutex> l(osr->qlock);
8115 txc->state = TransContext::STATE_DONE;
8116 bool notify = false;
8117 while (!osr->q.empty()) {
8118 TransContext *txc = &osr->q.front();
8119 dout(20) << __func__ << " txc " << txc << " " << txc->get_state_name()
8120 << dendl;
8121 if (txc->state != TransContext::STATE_DONE) {
8122 if (txc->state == TransContext::STATE_PREPARE &&
8123 deferred_aggressive) {
8124 // for _osr_drain_preceding()
8125 notify = true;
8126 }
8127 if (txc->state == TransContext::STATE_DEFERRED_QUEUED &&
8128 osr->q.size() > g_conf->bluestore_max_deferred_txc) {
8129 submit_deferred = true;
8130 }
8131 break;
8132 }
8133
8134 osr->q.pop_front();
8135 releasing_txc.push_back(*txc);
8136 notify = true;
8137 }
8138 if (notify) {
8139 osr->qcond.notify_all();
8140 }
8141 if (osr->q.empty()) {
8142 dout(20) << __func__ << " osr " << osr << " q now empty" << dendl;
8143 empty = true;
8144 }
8145 }
8146 while (!releasing_txc.empty()) {
8147 // release to allocator only after all preceding txc's have also
8148 // finished any deferred writes that potentially land in these
8149 // blocks
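// e.g. (illustrative): if an earlier txc still has a deferred write in
// flight to blocks this txc released, returning them to the allocator now
// could let a new allocation land on, and race with, that write.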
8150 auto txc = &releasing_txc.front();
8151 _txc_release_alloc(txc);
8152 releasing_txc.pop_front();
8153 txc->log_state_latency(logger, l_bluestore_state_done_lat);
8154 delete txc;
8155 }
8156
8157 if (submit_deferred) {
8158 // we're pinning memory; flush! we could be more fine-grained here but
8159 // i'm not sure it's worth the bother.
8160 deferred_try_submit();
8161 }
8162
8163 if (empty && osr->zombie) {
8164 dout(10) << __func__ << " reaping empty zombie osr " << osr << dendl;
8165 osr->_unregister();
8166 }
8167 }
8168
8169 void BlueStore::_txc_release_alloc(TransContext *txc)
8170 {
8171 // update allocator with full released set
8172 if (!cct->_conf->bluestore_debug_no_reuse_blocks) {
8173 dout(10) << __func__ << " " << txc << " " << txc->released << dendl;
8174 for (interval_set<uint64_t>::iterator p = txc->released.begin();
8175 p != txc->released.end();
8176 ++p) {
8177 alloc->release(p.get_start(), p.get_len());
8178 }
8179 }
8180
8181 txc->allocated.clear();
8182 txc->released.clear();
8183 }
8184
8185 void BlueStore::_osr_drain_preceding(TransContext *txc)
8186 {
8187 OpSequencer *osr = txc->osr.get();
8188 dout(10) << __func__ << " " << txc << " osr " << osr << dendl;
8189 ++deferred_aggressive; // FIXME: maybe osr-local aggressive flag?
8190 {
8191 // submit anything pending
8192 deferred_lock.lock();
8193 if (osr->deferred_pending) {
8194 _deferred_submit_unlock(osr);
8195 } else {
8196 deferred_lock.unlock();
8197 }
8198 }
8199 {
8200 // wake up any previously finished deferred events
8201 std::lock_guard<std::mutex> l(kv_lock);
8202 kv_cond.notify_one();
8203 }
8204 osr->drain_preceding(txc);
8205 --deferred_aggressive;
8206 dout(10) << __func__ << " " << osr << " done" << dendl;
8207 }
8208
8209 void BlueStore::_osr_drain_all()
8210 {
8211 dout(10) << __func__ << dendl;
8212
8213 set<OpSequencerRef> s;
8214 {
8215 std::lock_guard<std::mutex> l(osr_lock);
8216 s = osr_set;
8217 }
8218 dout(20) << __func__ << " osr_set " << s << dendl;
8219
8220 ++deferred_aggressive;
8221 {
8222 // submit anything pending
8223 deferred_try_submit();
8224 }
8225 {
8226 // wake up any previously finished deferred events
8227 std::lock_guard<std::mutex> l(kv_lock);
8228 kv_cond.notify_one();
8229 }
8230 {
8231 std::lock_guard<std::mutex> l(kv_finalize_lock);
8232 kv_finalize_cond.notify_one();
8233 }
8234 for (auto osr : s) {
8235 dout(20) << __func__ << " drain " << osr << dendl;
8236 osr->drain();
8237 }
8238 --deferred_aggressive;
8239
8240 dout(10) << __func__ << " done" << dendl;
8241 }
8242
8243 void BlueStore::_osr_unregister_all()
8244 {
8245 set<OpSequencerRef> s;
8246 {
8247 std::lock_guard<std::mutex> l(osr_lock);
8248 s = osr_set;
8249 }
8250 dout(10) << __func__ << " " << s << dendl;
8251 for (auto osr : s) {
8252 osr->_unregister();
8253
8254 if (!osr->zombie) {
8255 // break link from Sequencer to us so that this OpSequencer
8256 // instance can die with this mount/umount cycle. note that
8257 // we assume umount() will not race against ~Sequencer.
8258 assert(osr->parent);
8259 osr->parent->p.reset();
8260 }
8261 }
8262 // nobody should be creating sequencers during umount either.
8263 {
8264 std::lock_guard<std::mutex> l(osr_lock);
8265 assert(osr_set.empty());
8266 }
8267 }
8268
8269 void BlueStore::_kv_start()
8270 {
8271 dout(10) << __func__ << dendl;
8272
8273 if (cct->_conf->bluestore_shard_finishers) {
8274 if (cct->_conf->osd_op_num_shards) {
8275 m_finisher_num = cct->_conf->osd_op_num_shards;
8276 } else {
8277 assert(bdev);
8278 if (bdev->is_rotational()) {
8279 m_finisher_num = cct->_conf->osd_op_num_shards_hdd;
8280 } else {
8281 m_finisher_num = cct->_conf->osd_op_num_shards_ssd;
8282 }
8283 }
8284 }
8285
8286 assert(m_finisher_num != 0);
8287
8288 for (int i = 0; i < m_finisher_num; ++i) {
8289 ostringstream oss;
8290 oss << "finisher-" << i;
8291 Finisher *f = new Finisher(cct, oss.str(), "finisher");
8292 finishers.push_back(f);
8293 }
8294
8295 for (auto f : finishers) {
8296 f->start();
8297 }
8298 kv_sync_thread.create("bstore_kv_sync");
8299 kv_finalize_thread.create("bstore_kv_final");
8300 }
8301
8302 void BlueStore::_kv_stop()
8303 {
8304 dout(10) << __func__ << dendl;
8305 {
8306 std::unique_lock<std::mutex> l(kv_lock);
8307 while (!kv_sync_started) {
8308 kv_cond.wait(l);
8309 }
8310 kv_stop = true;
8311 kv_cond.notify_all();
8312 }
8313 {
8314 std::unique_lock<std::mutex> l(kv_finalize_lock);
8315 while (!kv_finalize_started) {
8316 kv_finalize_cond.wait(l);
8317 }
8318 kv_finalize_stop = true;
8319 kv_finalize_cond.notify_all();
8320 }
8321 kv_sync_thread.join();
8322 kv_finalize_thread.join();
8323 {
8324 std::lock_guard<std::mutex> l(kv_lock);
8325 kv_stop = false;
8326 }
8327 {
8328 std::lock_guard<std::mutex> l(kv_finalize_lock);
8329 kv_finalize_stop = false;
8330 }
8331 dout(10) << __func__ << " stopping finishers" << dendl;
8332 for (auto f : finishers) {
8333 f->wait_for_empty();
8334 f->stop();
8335 }
8336 dout(10) << __func__ << " stopped" << dendl;
8337 }
8338
8339 void BlueStore::_kv_sync_thread()
8340 {
8341 dout(10) << __func__ << " start" << dendl;
8342 std::unique_lock<std::mutex> l(kv_lock);
8343 assert(!kv_sync_started);
8344 kv_sync_started = true;
8345 kv_cond.notify_all();
8346 while (true) {
8347 assert(kv_committing.empty());
8348 if (kv_queue.empty() &&
8349 ((deferred_done_queue.empty() && deferred_stable_queue.empty()) ||
8350 !deferred_aggressive)) {
8351 if (kv_stop)
8352 break;
8353 dout(20) << __func__ << " sleep" << dendl;
8354 kv_cond.wait(l);
8355 dout(20) << __func__ << " wake" << dendl;
8356 } else {
8357 deque<TransContext*> kv_submitting;
8358 deque<DeferredBatch*> deferred_done, deferred_stable;
8359 uint64_t aios = 0, costs = 0;
8360
8361 dout(20) << __func__ << " committing " << kv_queue.size()
8362 << " submitting " << kv_queue_unsubmitted.size()
8363 << " deferred done " << deferred_done_queue.size()
8364 << " stable " << deferred_stable_queue.size()
8365 << dendl;
8366 kv_committing.swap(kv_queue);
8367 kv_submitting.swap(kv_queue_unsubmitted);
8368 deferred_done.swap(deferred_done_queue);
8369 deferred_stable.swap(deferred_stable_queue);
8370 aios = kv_ios;
8371 costs = kv_throttle_costs;
8372 kv_ios = 0;
8373 kv_throttle_costs = 0;
8374 utime_t start = ceph_clock_now();
8375 l.unlock();
8376
8377 dout(30) << __func__ << " committing " << kv_committing << dendl;
8378 dout(30) << __func__ << " submitting " << kv_submitting << dendl;
8379 dout(30) << __func__ << " deferred_done " << deferred_done << dendl;
8380 dout(30) << __func__ << " deferred_stable " << deferred_stable << dendl;
8381
8382 bool force_flush = false;
8383 // if bluefs is sharing the same (single) device as the data, we can
8384 // rely on the bluefs commit to flush the device and make deferred
8385 // aios stable. that means that if we do have completed deferred txcs
8386 // AND we are not on a single shared device, we need to force a flush.
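// Roughly, per the branches below: no bluefs, or a separate db/wal device
// -> always flush; a single shared device -> flush only when we issued aios
// this cycle, when there is nothing else to commit (so no kv commit would
// flush for us), or when deferred_aggressive is draining.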
8387 if (bluefs_single_shared_device && bluefs) {
8388 if (aios) {
8389 force_flush = true;
8390 } else if (kv_committing.empty() && kv_submitting.empty() &&
8391 deferred_stable.empty()) {
8392 force_flush = true; // there's nothing else to commit!
8393 } else if (deferred_aggressive) {
8394 force_flush = true;
8395 }
8396 } else
8397 force_flush = true;
8398
8399 if (force_flush) {
8400 dout(20) << __func__ << " num_aios=" << aios
8401 << " force_flush=" << (int)force_flush
8402 << ", flushing, deferred done->stable" << dendl;
8403 // flush/barrier on block device
8404 bdev->flush();
8405
8406 // if we flush then deferred done are now deferred stable
8407 deferred_stable.insert(deferred_stable.end(), deferred_done.begin(),
8408 deferred_done.end());
8409 deferred_done.clear();
8410 }
8411 utime_t after_flush = ceph_clock_now();
8412
8413 // we will use one final transaction to force a sync
8414 KeyValueDB::Transaction synct = db->get_transaction();
8415
8416 // increase {nid,blobid}_max? note that this covers both the
8417 // case where we are approaching the max and the case where we have
8418 // already passed it. in either case, we increase the max in the
8419 // earliest txn we submit.
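// Illustrative example (hypothetical numbers): with
// bluestore_nid_prealloc = 1024, nid_max = 2048 and nid_last = 1600,
// 1600 + 512 > 2048 triggers the bump, and the earliest txn of this batch
// persists nid_max = 1600 + 1024 = 2624 well before nid_last reaches the
// old max.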
8420 uint64_t new_nid_max = 0, new_blobid_max = 0;
8421 if (nid_last + cct->_conf->bluestore_nid_prealloc/2 > nid_max) {
8422 KeyValueDB::Transaction t =
8423 kv_submitting.empty() ? synct : kv_submitting.front()->t;
8424 new_nid_max = nid_last + cct->_conf->bluestore_nid_prealloc;
8425 bufferlist bl;
8426 ::encode(new_nid_max, bl);
8427 t->set(PREFIX_SUPER, "nid_max", bl);
8428 dout(10) << __func__ << " new_nid_max " << new_nid_max << dendl;
8429 }
8430 if (blobid_last + cct->_conf->bluestore_blobid_prealloc/2 > blobid_max) {
8431 KeyValueDB::Transaction t =
8432 kv_submitting.empty() ? synct : kv_submitting.front()->t;
8433 new_blobid_max = blobid_last + cct->_conf->bluestore_blobid_prealloc;
8434 bufferlist bl;
8435 ::encode(new_blobid_max, bl);
8436 t->set(PREFIX_SUPER, "blobid_max", bl);
8437 dout(10) << __func__ << " new_blobid_max " << new_blobid_max << dendl;
8438 }
8439
8440 for (auto txc : kv_committing) {
8441 if (txc->state == TransContext::STATE_KV_QUEUED) {
8442 txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat);
8443 int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction(txc->t);
8444 assert(r == 0);
8445 _txc_applied_kv(txc);
8446 --txc->osr->kv_committing_serially;
8447 txc->state = TransContext::STATE_KV_SUBMITTED;
8448 if (txc->osr->kv_submitted_waiters) {
8449 std::lock_guard<std::mutex> l(txc->osr->qlock);
8450 if (txc->osr->_is_all_kv_submitted()) {
8451 txc->osr->qcond.notify_all();
8452 }
8453 }
8454
8455 } else {
8456 assert(txc->state == TransContext::STATE_KV_SUBMITTED);
8457 txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat);
8458 }
8459 if (txc->had_ios) {
8460 --txc->osr->txc_with_unstable_io;
8461 }
8462 }
8463
8464 // release throttle *before* we commit. this allows new ops
8465 // to be prepared and enter pipeline while we are waiting on
8466 // the kv commit sync/flush. then hopefully on the next
8467 // iteration there will already be ops awake. otherwise, we
8468 // end up going to sleep, and then wake up when the very first
8469 // transaction is ready for commit.
8470 throttle_bytes.put(costs);
8471
8472 PExtentVector bluefs_gift_extents;
8473 if (bluefs &&
8474 after_flush - bluefs_last_balance >
8475 cct->_conf->bluestore_bluefs_balance_interval) {
8476 bluefs_last_balance = after_flush;
8477 int r = _balance_bluefs_freespace(&bluefs_gift_extents);
8478 assert(r >= 0);
8479 if (r > 0) {
8480 for (auto& p : bluefs_gift_extents) {
8481 bluefs_extents.insert(p.offset, p.length);
8482 }
8483 bufferlist bl;
8484 ::encode(bluefs_extents, bl);
8485 dout(10) << __func__ << " bluefs_extents now 0x" << std::hex
8486 << bluefs_extents << std::dec << dendl;
8487 synct->set(PREFIX_SUPER, "bluefs_extents", bl);
8488 }
8489 }
8490
8491 // cleanup sync deferred keys
8492 for (auto b : deferred_stable) {
8493 for (auto& txc : b->txcs) {
8494 bluestore_deferred_transaction_t& wt = *txc.deferred_txn;
8495 if (!wt.released.empty()) {
8496 // kraken replay compat only
8497 txc.released = wt.released;
8498 dout(10) << __func__ << " deferred txn has released "
8499 << txc.released
8500 << " (we just upgraded from kraken) on " << &txc << dendl;
8501 _txc_finalize_kv(&txc, synct);
8502 }
8503 // cleanup the deferred
8504 string key;
8505 get_deferred_key(wt.seq, &key);
8506 synct->rm_single_key(PREFIX_DEFERRED, key);
8507 }
8508 }
8509
8510 // submit synct synchronously (block and wait for it to commit)
8511 int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction_sync(synct);
8512 assert(r == 0);
8513
8514 if (new_nid_max) {
8515 nid_max = new_nid_max;
8516 dout(10) << __func__ << " nid_max now " << nid_max << dendl;
8517 }
8518 if (new_blobid_max) {
8519 blobid_max = new_blobid_max;
8520 dout(10) << __func__ << " blobid_max now " << blobid_max << dendl;
8521 }
8522
8523 {
8524 utime_t finish = ceph_clock_now();
8525 utime_t dur_flush = after_flush - start;
8526 utime_t dur_kv = finish - after_flush;
8527 utime_t dur = finish - start;
8528 dout(20) << __func__ << " committed " << kv_committing.size()
8529 << " cleaned " << deferred_stable.size()
8530 << " in " << dur
8531 << " (" << dur_flush << " flush + " << dur_kv << " kv commit)"
8532 << dendl;
8533 logger->tinc(l_bluestore_kv_flush_lat, dur_flush);
8534 logger->tinc(l_bluestore_kv_commit_lat, dur_kv);
8535 logger->tinc(l_bluestore_kv_lat, dur);
8536 }
8537
8538 if (bluefs) {
8539 if (!bluefs_gift_extents.empty()) {
8540 _commit_bluefs_freespace(bluefs_gift_extents);
8541 }
8542 for (auto p = bluefs_extents_reclaiming.begin();
8543 p != bluefs_extents_reclaiming.end();
8544 ++p) {
8545 dout(20) << __func__ << " releasing old bluefs 0x" << std::hex
8546 << p.get_start() << "~" << p.get_len() << std::dec
8547 << dendl;
8548 alloc->release(p.get_start(), p.get_len());
8549 }
8550 bluefs_extents_reclaiming.clear();
8551 }
8552
8553 {
8554 std::unique_lock<std::mutex> m(kv_finalize_lock);
8555 if (kv_committing_to_finalize.empty()) {
8556 kv_committing_to_finalize.swap(kv_committing);
8557 } else {
8558 kv_committing_to_finalize.insert(
8559 kv_committing_to_finalize.end(),
8560 kv_committing.begin(),
8561 kv_committing.end());
8562 kv_committing.clear();
8563 }
8564 if (deferred_stable_to_finalize.empty()) {
8565 deferred_stable_to_finalize.swap(deferred_stable);
8566 } else {
8567 deferred_stable_to_finalize.insert(
8568 deferred_stable_to_finalize.end(),
8569 deferred_stable.begin(),
8570 deferred_stable.end());
8571 deferred_stable.clear();
8572 }
8573 kv_finalize_cond.notify_one();
8574 }
8575
8576 l.lock();
8577 // previously deferred "done" are now "stable" by virtue of this
8578 // commit cycle.
8579 deferred_stable_queue.swap(deferred_done);
8580 }
8581 }
8582 dout(10) << __func__ << " finish" << dendl;
8583 kv_sync_started = false;
8584 }
8585
8586 void BlueStore::_kv_finalize_thread()
8587 {
8588 deque<TransContext*> kv_committed;
8589 deque<DeferredBatch*> deferred_stable;
8590 dout(10) << __func__ << " start" << dendl;
8591 std::unique_lock<std::mutex> l(kv_finalize_lock);
8592 assert(!kv_finalize_started);
8593 kv_finalize_started = true;
8594 kv_finalize_cond.notify_all();
8595 while (true) {
8596 assert(kv_committed.empty());
8597 assert(deferred_stable.empty());
8598 if (kv_committing_to_finalize.empty() &&
8599 deferred_stable_to_finalize.empty()) {
8600 if (kv_finalize_stop)
8601 break;
8602 dout(20) << __func__ << " sleep" << dendl;
8603 kv_finalize_cond.wait(l);
8604 dout(20) << __func__ << " wake" << dendl;
8605 } else {
8606 kv_committed.swap(kv_committing_to_finalize);
8607 deferred_stable.swap(deferred_stable_to_finalize);
8608 l.unlock();
8609 dout(20) << __func__ << " kv_committed " << kv_committed << dendl;
8610 dout(20) << __func__ << " deferred_stable " << deferred_stable << dendl;
8611
8612 while (!kv_committed.empty()) {
8613 TransContext *txc = kv_committed.front();
8614 assert(txc->state == TransContext::STATE_KV_SUBMITTED);
8615 _txc_state_proc(txc);
8616 kv_committed.pop_front();
8617 }
8618
8619 for (auto b : deferred_stable) {
8620 auto p = b->txcs.begin();
8621 while (p != b->txcs.end()) {
8622 TransContext *txc = &*p;
8623 p = b->txcs.erase(p); // unlink here because
8624 _txc_state_proc(txc); // this may destroy txc
8625 }
8626 delete b;
8627 }
8628 deferred_stable.clear();
8629
8630 if (!deferred_aggressive) {
8631 if (deferred_queue_size >= deferred_batch_ops.load() ||
8632 throttle_deferred_bytes.past_midpoint()) {
8633 deferred_try_submit();
8634 }
8635 }
8636
8637 // this is as good a place as any ...
8638 _reap_collections();
8639
8640 l.lock();
8641 }
8642 }
8643 dout(10) << __func__ << " finish" << dendl;
8644 kv_finalize_started = false;
8645 }
8646
8647 bluestore_deferred_op_t *BlueStore::_get_deferred_op(
8648 TransContext *txc, OnodeRef o)
8649 {
8650 if (!txc->deferred_txn) {
8651 txc->deferred_txn = new bluestore_deferred_transaction_t;
8652 }
8653 txc->deferred_txn->ops.push_back(bluestore_deferred_op_t());
8654 return &txc->deferred_txn->ops.back();
8655 }
8656
8657 void BlueStore::_deferred_queue(TransContext *txc)
8658 {
8659 dout(20) << __func__ << " txc " << txc << " osr " << txc->osr << dendl;
8660 deferred_lock.lock();
8661 if (!txc->osr->deferred_pending &&
8662 !txc->osr->deferred_running) {
8663 deferred_queue.push_back(*txc->osr);
8664 }
8665 if (!txc->osr->deferred_pending) {
8666 txc->osr->deferred_pending = new DeferredBatch(cct, txc->osr.get());
8667 }
8668 ++deferred_queue_size;
8669 txc->osr->deferred_pending->txcs.push_back(*txc);
8670 bluestore_deferred_transaction_t& wt = *txc->deferred_txn;
8671 for (auto opi = wt.ops.begin(); opi != wt.ops.end(); ++opi) {
8672 const auto& op = *opi;
8673 assert(op.op == bluestore_deferred_op_t::OP_WRITE);
8674 bufferlist::const_iterator p = op.data.begin();
8675 for (auto e : op.extents) {
8676 txc->osr->deferred_pending->prepare_write(
8677 cct, wt.seq, e.offset, e.length, p);
8678 }
8679 }
8680 if (deferred_aggressive &&
8681 !txc->osr->deferred_running) {
8682 _deferred_submit_unlock(txc->osr.get());
8683 } else {
8684 deferred_lock.unlock();
8685 }
8686 }
8687
8688 void BlueStore::deferred_try_submit()
8689 {
8690 dout(20) << __func__ << " " << deferred_queue.size() << " osrs, "
8691 << deferred_queue_size << " txcs" << dendl;
8692 std::lock_guard<std::mutex> l(deferred_lock);
8693 vector<OpSequencerRef> osrs;
8694 osrs.reserve(deferred_queue.size());
8695 for (auto& osr : deferred_queue) {
8696 osrs.push_back(&osr);
8697 }
8698 for (auto& osr : osrs) {
8699 if (osr->deferred_pending && !osr->deferred_running) {
8700 _deferred_submit_unlock(osr.get());
8701 deferred_lock.lock();
8702 }
8703 }
8704 }
8705
8706 void BlueStore::_deferred_submit_unlock(OpSequencer *osr)
8707 {
8708 dout(10) << __func__ << " osr " << osr
8709 << " " << osr->deferred_pending->iomap.size() << " ios pending "
8710 << dendl;
8711 assert(osr->deferred_pending);
8712 assert(!osr->deferred_running);
8713
8714 auto b = osr->deferred_pending;
8715 deferred_queue_size -= b->seq_bytes.size();
8716 assert(deferred_queue_size >= 0);
8717
8718 osr->deferred_running = osr->deferred_pending;
8719 osr->deferred_pending = nullptr;
8720
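// The loop below coalesces contiguous iomap extents into single writes.
// Illustrative example (hypothetical offsets): entries at 0x4000~0x1000,
// 0x5000~0x1000 and 0x9000~0x1000 become two aio_writes, 0x4000~0x2000 and
// 0x9000~0x1000, all submitted together via aio_submit at the end.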
8721 uint64_t start = 0, pos = 0;
8722 bufferlist bl;
8723 auto i = b->iomap.begin();
8724 while (true) {
8725 if (i == b->iomap.end() || i->first != pos) {
8726 if (bl.length()) {
8727 dout(20) << __func__ << " write 0x" << std::hex
8728 << start << "~" << bl.length()
8729 << " crc " << bl.crc32c(-1) << std::dec << dendl;
8730 if (!g_conf->bluestore_debug_omit_block_device_write) {
8731 logger->inc(l_bluestore_deferred_write_ops);
8732 logger->inc(l_bluestore_deferred_write_bytes, bl.length());
8733 int r = bdev->aio_write(start, bl, &b->ioc, false);
8734 assert(r == 0);
8735 }
8736 }
8737 if (i == b->iomap.end()) {
8738 break;
8739 }
8740 start = 0;
8741 pos = i->first;
8742 bl.clear();
8743 }
8744 dout(20) << __func__ << " seq " << i->second.seq << " 0x"
8745 << std::hex << pos << "~" << i->second.bl.length() << std::dec
8746 << dendl;
8747 if (!bl.length()) {
8748 start = pos;
8749 }
8750 pos += i->second.bl.length();
8751 bl.claim_append(i->second.bl);
8752 ++i;
8753 }
8754
8755 // demote to deferred_submit_lock, then drop that too
8756 std::lock_guard<std::mutex> l(deferred_submit_lock);
8757 deferred_lock.unlock();
8758 bdev->aio_submit(&b->ioc);
8759 }
8760
8761 void BlueStore::_deferred_aio_finish(OpSequencer *osr)
8762 {
8763 dout(10) << __func__ << " osr " << osr << dendl;
8764 assert(osr->deferred_running);
8765 DeferredBatch *b = osr->deferred_running;
8766
8767 {
8768 std::lock_guard<std::mutex> l(deferred_lock);
8769 assert(osr->deferred_running == b);
8770 osr->deferred_running = nullptr;
8771 if (!osr->deferred_pending) {
8772 auto q = deferred_queue.iterator_to(*osr);
8773 deferred_queue.erase(q);
8774 } else if (deferred_aggressive) {
8775 dout(20) << __func__ << " queuing async deferred_try_submit" << dendl;
8776 finishers[0]->queue(new FunctionContext([&](int) {
8777 deferred_try_submit();
8778 }));
8779 }
8780 }
8781
8782 {
8783 uint64_t costs = 0;
8784 std::lock_guard<std::mutex> l2(osr->qlock);
8785 for (auto& i : b->txcs) {
8786 TransContext *txc = &i;
8787 txc->state = TransContext::STATE_DEFERRED_CLEANUP;
8788 costs += txc->cost;
8789 }
8790 osr->qcond.notify_all();
8791 throttle_deferred_bytes.put(costs);
8792 std::lock_guard<std::mutex> l(kv_lock);
8793 deferred_done_queue.emplace_back(b);
8794 }
8795
8796 // in the normal case, do not bother waking up the kv thread; it will
8797 // catch us on the next commit anyway.
8798 if (deferred_aggressive) {
8799 std::lock_guard<std::mutex> l(kv_lock);
8800 kv_cond.notify_one();
8801 }
8802 }
8803
8804 int BlueStore::_deferred_replay()
8805 {
8806 dout(10) << __func__ << " start" << dendl;
8807 OpSequencerRef osr = new OpSequencer(cct, this);
8808 int count = 0;
8809 int r = 0;
8810 KeyValueDB::Iterator it = db->get_iterator(PREFIX_DEFERRED);
8811 for (it->lower_bound(string()); it->valid(); it->next(), ++count) {
8812 dout(20) << __func__ << " replay " << pretty_binary_string(it->key())
8813 << dendl;
8814 bluestore_deferred_transaction_t *deferred_txn =
8815 new bluestore_deferred_transaction_t;
8816 bufferlist bl = it->value();
8817 bufferlist::iterator p = bl.begin();
8818 try {
8819 ::decode(*deferred_txn, p);
8820 } catch (buffer::error& e) {
8821 derr << __func__ << " failed to decode deferred txn "
8822 << pretty_binary_string(it->key()) << dendl;
8823 delete deferred_txn;
8824 r = -EIO;
8825 goto out;
8826 }
8827 TransContext *txc = _txc_create(osr.get());
8828 txc->deferred_txn = deferred_txn;
8829 txc->state = TransContext::STATE_KV_DONE;
8830 _txc_state_proc(txc);
8831 }
8832 out:
8833 dout(20) << __func__ << " draining osr" << dendl;
8834 _osr_drain_all();
8835 osr->discard();
8836 dout(10) << __func__ << " completed " << count << " events" << dendl;
8837 return r;
8838 }
8839
8840 // ---------------------------
8841 // transactions
8842
8843 int BlueStore::queue_transactions(
8844 Sequencer *posr,
8845 vector<Transaction>& tls,
8846 TrackedOpRef op,
8847 ThreadPool::TPHandle *handle)
8848 {
8849 FUNCTRACE();
8850 Context *onreadable;
8851 Context *ondisk;
8852 Context *onreadable_sync;
8853 ObjectStore::Transaction::collect_contexts(
8854 tls, &onreadable, &ondisk, &onreadable_sync);
8855
8856 if (cct->_conf->objectstore_blackhole) {
8857 dout(0) << __func__ << " objectstore_blackhole = TRUE, dropping transaction"
8858 << dendl;
8859 delete ondisk;
8860 delete onreadable;
8861 delete onreadable_sync;
8862 return 0;
8863 }
8864 utime_t start = ceph_clock_now();
8865 // set up the sequencer
8866 OpSequencer *osr;
8867 assert(posr);
8868 if (posr->p) {
8869 osr = static_cast<OpSequencer *>(posr->p.get());
8870 dout(10) << __func__ << " existing " << osr << " " << *osr << dendl;
8871 } else {
8872 osr = new OpSequencer(cct, this);
8873 osr->parent = posr;
8874 posr->p = osr;
8875 dout(10) << __func__ << " new " << osr << " " << *osr << dendl;
8876 }
8877
8878 // prepare
8879 TransContext *txc = _txc_create(osr);
8880 txc->onreadable = onreadable;
8881 txc->onreadable_sync = onreadable_sync;
8882 txc->oncommit = ondisk;
8883
8884 for (vector<Transaction>::iterator p = tls.begin(); p != tls.end(); ++p) {
8885 (*p).set_osr(osr);
8886 txc->bytes += (*p).get_num_bytes();
8887 _txc_add_transaction(txc, &(*p));
8888 }
8889 _txc_calc_cost(txc);
8890
8891 _txc_write_nodes(txc, txc->t);
8892
8893 // journal deferred items
8894 if (txc->deferred_txn) {
8895 txc->deferred_txn->seq = ++deferred_seq;
8896 bufferlist bl;
8897 ::encode(*txc->deferred_txn, bl);
8898 string key;
8899 get_deferred_key(txc->deferred_txn->seq, &key);
8900 txc->t->set(PREFIX_DEFERRED, key, bl);
8901 }
8902
8903 _txc_finalize_kv(txc, txc->t);
8904 if (handle)
8905 handle->suspend_tp_timeout();
8906
8907 utime_t tstart = ceph_clock_now();
8908 throttle_bytes.get(txc->cost);
8909 if (txc->deferred_txn) {
8910 // ensure we do not block here because of deferred writes
8911 if (!throttle_deferred_bytes.get_or_fail(txc->cost)) {
8912 dout(10) << __func__ << " failed to get throttle_deferred_bytes, going aggressive"
8913 << dendl;
8914 ++deferred_aggressive;
8915 deferred_try_submit();
8916 throttle_deferred_bytes.get(txc->cost);
8917 --deferred_aggressive;
8918 }
8919 }
8920 utime_t tend = ceph_clock_now();
8921
8922 if (handle)
8923 handle->reset_tp_timeout();
8924
8925 logger->inc(l_bluestore_txc);
8926
8927 // execute (start)
8928 _txc_state_proc(txc);
8929
8930 logger->tinc(l_bluestore_submit_lat, ceph_clock_now() - start);
8931 logger->tinc(l_bluestore_throttle_lat, tend - tstart);
8932 return 0;
8933 }
8934
8935 void BlueStore::_txc_aio_submit(TransContext *txc)
8936 {
8937 dout(10) << __func__ << " txc " << txc << dendl;
8938 bdev->aio_submit(&txc->ioc);
8939 }
8940
8941 void BlueStore::_txc_add_transaction(TransContext *txc, Transaction *t)
8942 {
8943 Transaction::iterator i = t->begin();
8944
8945 _dump_transaction(t);
8946
8947 vector<CollectionRef> cvec(i.colls.size());
8948 unsigned j = 0;
8949 for (vector<coll_t>::iterator p = i.colls.begin(); p != i.colls.end();
8950 ++p, ++j) {
8951 cvec[j] = _get_collection(*p);
8952 }
8953 vector<OnodeRef> ovec(i.objects.size());
8954
8955 for (int pos = 0; i.have_op(); ++pos) {
8956 Transaction::Op *op = i.decode_op();
8957 int r = 0;
8958
8959 // no coll or obj
8960 if (op->op == Transaction::OP_NOP)
8961 continue;
8962
8963 // collection operations
8964 CollectionRef &c = cvec[op->cid];
8965 switch (op->op) {
8966 case Transaction::OP_RMCOLL:
8967 {
8968 const coll_t &cid = i.get_cid(op->cid);
8969 r = _remove_collection(txc, cid, &c);
8970 if (!r)
8971 continue;
8972 }
8973 break;
8974
8975 case Transaction::OP_MKCOLL:
8976 {
8977 assert(!c);
8978 const coll_t &cid = i.get_cid(op->cid);
8979 r = _create_collection(txc, cid, op->split_bits, &c);
8980 if (!r)
8981 continue;
8982 }
8983 break;
8984
8985 case Transaction::OP_SPLIT_COLLECTION:
8986 assert(0 == "deprecated");
8987 break;
8988
8989 case Transaction::OP_SPLIT_COLLECTION2:
8990 {
8991 uint32_t bits = op->split_bits;
8992 uint32_t rem = op->split_rem;
8993 r = _split_collection(txc, c, cvec[op->dest_cid], bits, rem);
8994 if (!r)
8995 continue;
8996 }
8997 break;
8998
8999 case Transaction::OP_COLL_HINT:
9000 {
9001 uint32_t type = op->hint_type;
9002 bufferlist hint;
9003 i.decode_bl(hint);
9004 bufferlist::iterator hiter = hint.begin();
9005 if (type == Transaction::COLL_HINT_EXPECTED_NUM_OBJECTS) {
9006 uint32_t pg_num;
9007 uint64_t num_objs;
9008 ::decode(pg_num, hiter);
9009 ::decode(num_objs, hiter);
9010 dout(10) << __func__ << " collection hint objects is a no-op, "
9011 << " pg_num " << pg_num << " num_objects " << num_objs
9012 << dendl;
9013 } else {
9014 // Ignore the hint
9015 dout(10) << __func__ << " unknown collection hint " << type << dendl;
9016 }
9017 continue;
9018 }
9019 break;
9020
9021 case Transaction::OP_COLL_SETATTR:
9022 r = -EOPNOTSUPP;
9023 break;
9024
9025 case Transaction::OP_COLL_RMATTR:
9026 r = -EOPNOTSUPP;
9027 break;
9028
9029 case Transaction::OP_COLL_RENAME:
9030 assert(0 == "not implemented");
9031 break;
9032 }
9033 if (r < 0) {
9034 derr << __func__ << " error " << cpp_strerror(r)
9035 << " not handled on operation " << op->op
9036 << " (op " << pos << ", counting from 0)" << dendl;
9037 _dump_transaction(t, 0);
9038 assert(0 == "unexpected error");
9039 }
9040
9041 // these operations implicitly create the object
9042 bool create = false;
9043 if (op->op == Transaction::OP_TOUCH ||
9044 op->op == Transaction::OP_WRITE ||
9045 op->op == Transaction::OP_ZERO) {
9046 create = true;
9047 }
9048
9049 // object operations
9050 RWLock::WLocker l(c->lock);
9051 OnodeRef &o = ovec[op->oid];
9052 if (!o) {
9053 ghobject_t oid = i.get_oid(op->oid);
9054 o = c->get_onode(oid, create);
9055 }
9056 if (!create && (!o || !o->exists)) {
9057 dout(10) << __func__ << " op " << op->op << " got ENOENT on "
9058 << i.get_oid(op->oid) << dendl;
9059 r = -ENOENT;
9060 goto endop;
9061 }
9062
9063 switch (op->op) {
9064 case Transaction::OP_TOUCH:
9065 r = _touch(txc, c, o);
9066 break;
9067
9068 case Transaction::OP_WRITE:
9069 {
9070 uint64_t off = op->off;
9071 uint64_t len = op->len;
9072 uint32_t fadvise_flags = i.get_fadvise_flags();
9073 bufferlist bl;
9074 i.decode_bl(bl);
9075 r = _write(txc, c, o, off, len, bl, fadvise_flags);
9076 }
9077 break;
9078
9079 case Transaction::OP_ZERO:
9080 {
9081 uint64_t off = op->off;
9082 uint64_t len = op->len;
9083 r = _zero(txc, c, o, off, len);
9084 }
9085 break;
9086
9087 case Transaction::OP_TRIMCACHE:
9088 {
9089 // deprecated, no-op
9090 }
9091 break;
9092
9093 case Transaction::OP_TRUNCATE:
9094 {
9095 uint64_t off = op->off;
9096 r = _truncate(txc, c, o, off);
9097 }
9098 break;
9099
9100 case Transaction::OP_REMOVE:
9101 {
9102 r = _remove(txc, c, o);
9103 }
9104 break;
9105
9106 case Transaction::OP_SETATTR:
9107 {
9108 string name = i.decode_string();
9109 bufferptr bp;
9110 i.decode_bp(bp);
9111 r = _setattr(txc, c, o, name, bp);
9112 }
9113 break;
9114
9115 case Transaction::OP_SETATTRS:
9116 {
9117 map<string, bufferptr> aset;
9118 i.decode_attrset(aset);
9119 r = _setattrs(txc, c, o, aset);
9120 }
9121 break;
9122
9123 case Transaction::OP_RMATTR:
9124 {
9125 string name = i.decode_string();
9126 r = _rmattr(txc, c, o, name);
9127 }
9128 break;
9129
9130 case Transaction::OP_RMATTRS:
9131 {
9132 r = _rmattrs(txc, c, o);
9133 }
9134 break;
9135
9136 case Transaction::OP_CLONE:
9137 {
9138 OnodeRef& no = ovec[op->dest_oid];
9139 if (!no) {
9140 const ghobject_t& noid = i.get_oid(op->dest_oid);
9141 no = c->get_onode(noid, true);
9142 }
9143 r = _clone(txc, c, o, no);
9144 }
9145 break;
9146
9147 case Transaction::OP_CLONERANGE:
9148 assert(0 == "deprecated");
9149 break;
9150
9151 case Transaction::OP_CLONERANGE2:
9152 {
9153 OnodeRef& no = ovec[op->dest_oid];
9154 if (!no) {
9155 const ghobject_t& noid = i.get_oid(op->dest_oid);
9156 no = c->get_onode(noid, true);
9157 }
9158 uint64_t srcoff = op->off;
9159 uint64_t len = op->len;
9160 uint64_t dstoff = op->dest_off;
9161 r = _clone_range(txc, c, o, no, srcoff, len, dstoff);
9162 }
9163 break;
9164
9165 case Transaction::OP_COLL_ADD:
9166 assert(0 == "not implemented");
9167 break;
9168
9169 case Transaction::OP_COLL_REMOVE:
9170 assert(0 == "not implemented");
9171 break;
9172
9173 case Transaction::OP_COLL_MOVE:
9174 assert(0 == "deprecated");
9175 break;
9176
9177 case Transaction::OP_COLL_MOVE_RENAME:
9178 case Transaction::OP_TRY_RENAME:
9179 {
9180 assert(op->cid == op->dest_cid);
9181 const ghobject_t& noid = i.get_oid(op->dest_oid);
9182 OnodeRef& no = ovec[op->dest_oid];
9183 if (!no) {
9184 no = c->get_onode(noid, false);
9185 }
9186 r = _rename(txc, c, o, no, noid);
9187 }
9188 break;
9189
9190 case Transaction::OP_OMAP_CLEAR:
9191 {
9192 r = _omap_clear(txc, c, o);
9193 }
9194 break;
9195 case Transaction::OP_OMAP_SETKEYS:
9196 {
9197 bufferlist aset_bl;
9198 i.decode_attrset_bl(&aset_bl);
9199 r = _omap_setkeys(txc, c, o, aset_bl);
9200 }
9201 break;
9202 case Transaction::OP_OMAP_RMKEYS:
9203 {
9204 bufferlist keys_bl;
9205 i.decode_keyset_bl(&keys_bl);
9206 r = _omap_rmkeys(txc, c, o, keys_bl);
9207 }
9208 break;
9209 case Transaction::OP_OMAP_RMKEYRANGE:
9210 {
9211 string first, last;
9212 first = i.decode_string();
9213 last = i.decode_string();
9214 r = _omap_rmkey_range(txc, c, o, first, last);
9215 }
9216 break;
9217 case Transaction::OP_OMAP_SETHEADER:
9218 {
9219 bufferlist bl;
9220 i.decode_bl(bl);
9221 r = _omap_setheader(txc, c, o, bl);
9222 }
9223 break;
9224
9225 case Transaction::OP_SETALLOCHINT:
9226 {
9227 r = _set_alloc_hint(txc, c, o,
9228 op->expected_object_size,
9229 op->expected_write_size,
9230 op->alloc_hint_flags);
9231 }
9232 break;
9233
9234 default:
9235 derr << __func__ << " bad op " << op->op << dendl;
9236 ceph_abort();
9237 }
9238
9239 endop:
9240 if (r < 0) {
9241 bool ok = false;
9242
9243 if (r == -ENOENT && !(op->op == Transaction::OP_CLONERANGE ||
9244 op->op == Transaction::OP_CLONE ||
9245 op->op == Transaction::OP_CLONERANGE2 ||
9246 op->op == Transaction::OP_COLL_ADD ||
9247 op->op == Transaction::OP_SETATTR ||
9248 op->op == Transaction::OP_SETATTRS ||
9249 op->op == Transaction::OP_RMATTR ||
9250 op->op == Transaction::OP_OMAP_SETKEYS ||
9251 op->op == Transaction::OP_OMAP_RMKEYS ||
9252 op->op == Transaction::OP_OMAP_RMKEYRANGE ||
9253 op->op == Transaction::OP_OMAP_SETHEADER))
9254 // -ENOENT is usually okay
9255 ok = true;
9256 if (r == -ENODATA)
9257 ok = true;
9258
9259 if (!ok) {
9260 const char *msg = "unexpected error code";
9261
9262 if (r == -ENOENT && (op->op == Transaction::OP_CLONERANGE ||
9263 op->op == Transaction::OP_CLONE ||
9264 op->op == Transaction::OP_CLONERANGE2))
9265 msg = "ENOENT on clone suggests osd bug";
9266
9267 if (r == -ENOSPC)
9268 // For now, if we hit _any_ ENOSPC, crash, before we do any damage
9269 // by partially applying transactions.
9270 msg = "ENOSPC from bluestore, misconfigured cluster";
9271
9272 if (r == -ENOTEMPTY) {
9273 msg = "ENOTEMPTY suggests garbage data in osd data dir";
9274 }
9275
9276 derr << __func__ << " error " << cpp_strerror(r)
9277 << " not handled on operation " << op->op
9278 << " (op " << pos << ", counting from 0)"
9279 << dendl;
9280 derr << msg << dendl;
9281 _dump_transaction(t, 0);
9282 assert(0 == "unexpected error");
9283 }
9284 }
9285 }
9286 }
9287
9288
9289
9290 // -----------------
9291 // write operations
9292
9293 int BlueStore::_touch(TransContext *txc,
9294 CollectionRef& c,
9295 OnodeRef &o)
9296 {
9297 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
9298 int r = 0;
9299 _assign_nid(txc, o);
9300 txc->write_onode(o);
9301 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
9302 return r;
9303 }
9304
9305 void BlueStore::_dump_onode(OnodeRef o, int log_level)
9306 {
9307 if (!cct->_conf->subsys.should_gather(ceph_subsys_bluestore, log_level))
9308 return;
9309 dout(log_level) << __func__ << " " << o << " " << o->oid
9310 << " nid " << o->onode.nid
9311 << " size 0x" << std::hex << o->onode.size
9312 << " (" << std::dec << o->onode.size << ")"
9313 << " expected_object_size " << o->onode.expected_object_size
9314 << " expected_write_size " << o->onode.expected_write_size
9315 << " in " << o->onode.extent_map_shards.size() << " shards"
9316 << ", " << o->extent_map.spanning_blob_map.size()
9317 << " spanning blobs"
9318 << dendl;
9319 for (auto p = o->onode.attrs.begin();
9320 p != o->onode.attrs.end();
9321 ++p) {
9322 dout(log_level) << __func__ << " attr " << p->first
9323 << " len " << p->second.length() << dendl;
9324 }
9325 _dump_extent_map(o->extent_map, log_level);
9326 }
9327
9328 void BlueStore::_dump_extent_map(ExtentMap &em, int log_level)
9329 {
9330 uint64_t pos = 0;
9331 for (auto& s : em.shards) {
9332 dout(log_level) << __func__ << " shard " << *s.shard_info
9333 << (s.loaded ? " (loaded)" : "")
9334 << (s.dirty ? " (dirty)" : "")
9335 << dendl;
9336 }
9337 for (auto& e : em.extent_map) {
9338 dout(log_level) << __func__ << " " << e << dendl;
9339 assert(e.logical_offset >= pos);
9340 pos = e.logical_offset + e.length;
9341 const bluestore_blob_t& blob = e.blob->get_blob();
9342 if (blob.has_csum()) {
9343 vector<uint64_t> v;
9344 unsigned n = blob.get_csum_count();
9345 for (unsigned i = 0; i < n; ++i)
9346 v.push_back(blob.get_csum_item(i));
9347 dout(log_level) << __func__ << " csum: " << std::hex << v << std::dec
9348 << dendl;
9349 }
9350 std::lock_guard<std::recursive_mutex> l(e.blob->shared_blob->get_cache()->lock);
9351 for (auto& i : e.blob->shared_blob->bc.buffer_map) {
9352 dout(log_level) << __func__ << " 0x" << std::hex << i.first
9353 << "~" << i.second->length << std::dec
9354 << " " << *i.second << dendl;
9355 }
9356 }
9357 }
9358
9359 void BlueStore::_dump_transaction(Transaction *t, int log_level)
9360 {
9361 dout(log_level) << " transaction dump:\n";
9362 JSONFormatter f(true);
9363 f.open_object_section("transaction");
9364 t->dump(&f);
9365 f.close_section();
9366 f.flush(*_dout);
9367 *_dout << dendl;
9368 }
9369
9370 void BlueStore::_pad_zeros(
9371 bufferlist *bl, uint64_t *offset,
9372 uint64_t chunk_size)
9373 {
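// Illustrative example (hypothetical values): with chunk_size = 0x1000,
// *offset = 0x1200 and a 0x500-byte bl, the front is padded by 0x200 and
// the tail out to the chunk boundary, so the caller gets back
// *offset = 0x1000 and a 0x1000-byte buffer zeroed at both edges.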
9374 auto length = bl->length();
9375 dout(30) << __func__ << " 0x" << std::hex << *offset << "~" << length
9376 << " chunk_size 0x" << chunk_size << std::dec << dendl;
9377 dout(40) << "before:\n";
9378 bl->hexdump(*_dout);
9379 *_dout << dendl;
9380 // front
9381 size_t front_pad = *offset % chunk_size;
9382 size_t back_pad = 0;
9383 size_t pad_count = 0;
9384 if (front_pad) {
9385 size_t front_copy = MIN(chunk_size - front_pad, length);
9386 bufferptr z = buffer::create_page_aligned(chunk_size);
9387 z.zero(0, front_pad, false);
9388 pad_count += front_pad;
9389 bl->copy(0, front_copy, z.c_str() + front_pad);
9390 if (front_copy + front_pad < chunk_size) {
9391 back_pad = chunk_size - (length + front_pad);
9392 z.zero(front_pad + length, back_pad, false);
9393 pad_count += back_pad;
9394 }
9395 bufferlist old, t;
9396 old.swap(*bl);
9397 t.substr_of(old, front_copy, length - front_copy);
9398 bl->append(z);
9399 bl->claim_append(t);
9400 *offset -= front_pad;
9401 length += pad_count;
9402 }
9403
9404 // back
9405 uint64_t end = *offset + length;
9406 unsigned back_copy = end % chunk_size;
9407 if (back_copy) {
9408 assert(back_pad == 0);
9409 back_pad = chunk_size - back_copy;
9410 assert(back_copy <= length);
9411 bufferptr tail(chunk_size);
9412 bl->copy(length - back_copy, back_copy, tail.c_str());
9413 tail.zero(back_copy, back_pad, false);
9414 bufferlist old;
9415 old.swap(*bl);
9416 bl->substr_of(old, 0, length - back_copy);
9417 bl->append(tail);
9418 length += back_pad;
9419 pad_count += back_pad;
9420 }
9421 dout(20) << __func__ << " pad 0x" << std::hex << front_pad << " + 0x"
9422 << back_pad << " on front/back, now 0x" << *offset << "~"
9423 << length << std::dec << dendl;
9424 dout(40) << "after:\n";
9425 bl->hexdump(*_dout);
9426 *_dout << dendl;
9427 if (pad_count)
9428 logger->inc(l_bluestore_write_pad_bytes, pad_count);
9429 assert(bl->length() == length);
9430 }
9431
9432 void BlueStore::_do_write_small(
9433 TransContext *txc,
9434 CollectionRef &c,
9435 OnodeRef o,
9436 uint64_t offset, uint64_t length,
9437 bufferlist::iterator& blp,
9438 WriteContext *wctx)
9439 {
9440 dout(10) << __func__ << " 0x" << std::hex << offset << "~" << length
9441 << std::dec << dendl;
9442 assert(length < min_alloc_size);
9443 uint64_t end_offs = offset + length;
9444
9445 logger->inc(l_bluestore_write_small);
9446 logger->inc(l_bluestore_write_small_bytes, length);
9447
9448 bufferlist bl;
9449 blp.copy(length, bl);
9450
9451 // Look for an existing mutable blob we can use.
9452 auto begin = o->extent_map.extent_map.begin();
9453 auto end = o->extent_map.extent_map.end();
9454 auto ep = o->extent_map.seek_lextent(offset);
9455 if (ep != begin) {
9456 --ep;
9457 if (ep->blob_end() <= offset) {
9458 ++ep;
9459 }
9460 }
9461 auto prev_ep = ep;
9462 if (prev_ep != begin) {
9463 --prev_ep;
9464 } else {
9465 prev_ep = end; // to avoid this extent check as it's a duplicate
9466 }
9467
9468 auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size);
9469 auto min_off = offset >= max_bsize ? offset - max_bsize : 0;
9470 uint32_t alloc_len = min_alloc_size;
9471 auto offset0 = P2ALIGN(offset, alloc_len);
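// offset0 is the write offset rounded down to a min_alloc_size boundary; blob reuse
// below is evaluated (and conflict-checked) against this allocation-unit position.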
9472
9473 bool any_change;
9474
9475 // search for a suitable extent in both the forward and reverse directions
9476 // within the [offset - target_max_blob_size, offset + target_max_blob_size]
9477 // range, then check whether the blob can be reused via can_reuse_blob() or
9478 // whether a direct/deferred write can be applied (the latter only for
9479 // extents covering or above 'offset').
9480 do {
9481 any_change = false;
9482
9483 if (ep != end && ep->logical_offset < offset + max_bsize) {
9484 BlobRef b = ep->blob;
9485 auto bstart = ep->blob_start();
9486 dout(20) << __func__ << " considering " << *b
9487 << " bstart 0x" << std::hex << bstart << std::dec << dendl;
9488 if (bstart >= end_offs) {
9489 dout(20) << __func__ << " ignoring distant " << *b << dendl;
9490 } else if (!b->get_blob().is_mutable()) {
9491 dout(20) << __func__ << " ignoring immutable " << *b << dendl;
9492 } else if (ep->logical_offset % min_alloc_size !=
9493 ep->blob_offset % min_alloc_size) {
9494 dout(20) << __func__ << " ignoring offset-skewed " << *b << dendl;
9495 } else {
9496 uint64_t chunk_size = b->get_blob().get_chunk_size(block_size);
9497 // can we pad our head/tail out with zeros?
9498 uint64_t head_pad, tail_pad;
9499 head_pad = P2PHASE(offset, chunk_size);
9500 tail_pad = P2NPHASE(end_offs, chunk_size);
9501 if (head_pad || tail_pad) {
9502 o->extent_map.fault_range(db, offset - head_pad,
9503 end_offs - offset + head_pad + tail_pad);
9504 }
9505 if (head_pad &&
9506 o->extent_map.has_any_lextents(offset - head_pad, chunk_size)) {
9507 head_pad = 0;
9508 }
9509 if (tail_pad && o->extent_map.has_any_lextents(end_offs, tail_pad)) {
9510 tail_pad = 0;
9511 }
9512
9513 uint64_t b_off = offset - head_pad - bstart;
9514 uint64_t b_len = length + head_pad + tail_pad;
9515
9516 // direct write into unused blocks of an existing mutable blob?
9517 if ((b_off % chunk_size == 0 && b_len % chunk_size == 0) &&
9518 b->get_blob().get_ondisk_length() >= b_off + b_len &&
9519 b->get_blob().is_unused(b_off, b_len) &&
9520 b->get_blob().is_allocated(b_off, b_len)) {
9521 _apply_padding(head_pad, tail_pad, bl);
9522
9523 dout(20) << __func__ << " write to unused 0x" << std::hex
9524 << b_off << "~" << b_len
9525 << " pad 0x" << head_pad << " + 0x" << tail_pad
9526 << std::dec << " of mutable " << *b << dendl;
9527 _buffer_cache_write(txc, b, b_off, bl,
9528 wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
9529
9530 if (!g_conf->bluestore_debug_omit_block_device_write) {
9531 if (b_len <= prefer_deferred_size) {
9532 dout(20) << __func__ << " deferring small 0x" << std::hex
9533 << b_len << std::dec << " unused write via deferred" << dendl;
9534 bluestore_deferred_op_t *op = _get_deferred_op(txc, o);
9535 op->op = bluestore_deferred_op_t::OP_WRITE;
9536 b->get_blob().map(
9537 b_off, b_len,
9538 [&](uint64_t offset, uint64_t length) {
9539 op->extents.emplace_back(bluestore_pextent_t(offset, length));
9540 return 0;
9541 });
9542 op->data = bl;
9543 } else {
9544 b->get_blob().map_bl(
9545 b_off, bl,
9546 [&](uint64_t offset, bufferlist& t) {
9547 bdev->aio_write(offset, t,
9548 &txc->ioc, wctx->buffered);
9549 });
9550 }
9551 }
9552 b->dirty_blob().calc_csum(b_off, bl);
9553 dout(20) << __func__ << " lex old " << *ep << dendl;
9554 Extent *le = o->extent_map.set_lextent(c, offset, b_off + head_pad, length,
9555 b,
9556 &wctx->old_extents);
9557 b->dirty_blob().mark_used(le->blob_offset, le->length);
9558 txc->statfs_delta.stored() += le->length;
9559 dout(20) << __func__ << " lex " << *le << dendl;
9560 logger->inc(l_bluestore_write_small_unused);
9561 return;
9562 }
9563 // read some data to fill out the chunk?
9564 uint64_t head_read = P2PHASE(b_off, chunk_size);
9565 uint64_t tail_read = P2NPHASE(b_off + b_len, chunk_size);
9566 if ((head_read || tail_read) &&
9567 (b->get_blob().get_ondisk_length() >= b_off + b_len + tail_read) &&
9568 head_read + tail_read < min_alloc_size) {
9569 b_off -= head_read;
9570 b_len += head_read + tail_read;
9571
9572 } else {
9573 head_read = tail_read = 0;
9574 }
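// if head_read/tail_read survived the checks above, b_off/b_len have been widened
// to chunk boundaries; the widened gaps are filled below by reading back the
// existing data before the deferred overwrite.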
9575
9576 // chunk-aligned deferred overwrite?
9577 if (b->get_blob().get_ondisk_length() >= b_off + b_len &&
9578 b_off % chunk_size == 0 &&
9579 b_len % chunk_size == 0 &&
9580 b->get_blob().is_allocated(b_off, b_len)) {
9581
9582 _apply_padding(head_pad, tail_pad, bl);
9583
9584 dout(20) << __func__ << " reading head 0x" << std::hex << head_read
9585 << " and tail 0x" << tail_read << std::dec << dendl;
9586 if (head_read) {
9587 bufferlist head_bl;
9588 int r = _do_read(c.get(), o, offset - head_pad - head_read, head_read,
9589 head_bl, 0);
9590 assert(r >= 0 && r <= (int)head_read);
9591 size_t zlen = head_read - r;
9592 if (zlen) {
9593 head_bl.append_zero(zlen);
9594 logger->inc(l_bluestore_write_pad_bytes, zlen);
9595 }
9596 bl.claim_prepend(head_bl);
9597 logger->inc(l_bluestore_write_penalty_read_ops);
9598 }
9599 if (tail_read) {
9600 bufferlist tail_bl;
9601 int r = _do_read(c.get(), o, offset + length + tail_pad, tail_read,
9602 tail_bl, 0);
9603 assert(r >= 0 && r <= (int)tail_read);
9604 size_t zlen = tail_read - r;
9605 if (zlen) {
9606 tail_bl.append_zero(zlen);
9607 logger->inc(l_bluestore_write_pad_bytes, zlen);
9608 }
9609 bl.claim_append(tail_bl);
9610 logger->inc(l_bluestore_write_penalty_read_ops);
9611 }
9612 logger->inc(l_bluestore_write_small_pre_read);
9613
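// the buffer (with any read-back head/tail merged in) is now chunk-aligned;
// queue it as a single deferred overwrite of the existing blob.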
9614 bluestore_deferred_op_t *op = _get_deferred_op(txc, o);
9615 op->op = bluestore_deferred_op_t::OP_WRITE;
9616 _buffer_cache_write(txc, b, b_off, bl,
9617 wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
9618
9619 int r = b->get_blob().map(
9620 b_off, b_len,
9621 [&](uint64_t offset, uint64_t length) {
9622 op->extents.emplace_back(bluestore_pextent_t(offset, length));
9623 return 0;
9624 });
9625 assert(r == 0);
9626 if (b->get_blob().csum_type) {
9627 b->dirty_blob().calc_csum(b_off, bl);
9628 }
9629 op->data.claim(bl);
9630 dout(20) << __func__ << " deferred write 0x" << std::hex << b_off << "~"
9631 << b_len << std::dec << " of mutable " << *b
9632 << " at " << op->extents << dendl;
9633 Extent *le = o->extent_map.set_lextent(c, offset, offset - bstart, length,
9634 b, &wctx->old_extents);
9635 b->dirty_blob().mark_used(le->blob_offset, le->length);
9636 txc->statfs_delta.stored() += le->length;
9637 dout(20) << __func__ << " lex " << *le << dendl;
9638 logger->inc(l_bluestore_write_small_deferred);
9639 return;
9640 }
9641 // try to reuse blob if we can
9642 if (b->can_reuse_blob(min_alloc_size,
9643 max_bsize,
9644 offset0 - bstart,
9645 &alloc_len)) {
9646 assert(alloc_len == min_alloc_size); // expecting the data to always
9647 // fit into the reused blob
9648 // Need to check for pending writes that want to
9649 // reuse the same pextent. The rationale is that during GC two chunks
9650 // from garbage (compressed?) blobs can share logical space within the same
9651 // AU. That in turn might be caused by an unaligned length in clone_range2.
9652 // Without this check the second write would fail when it tries to reuse
9653 // the blob in _do_alloc_write().
9654 if (!wctx->has_conflict(b,
9655 offset0,
9656 offset0 + alloc_len,
9657 min_alloc_size)) {
9658
9659 // we can't reuse head_pad/tail_pad here since they might have been
9660 // truncated due to existing extents
9661 uint64_t b_off = offset - bstart;
9662 uint64_t b_off0 = b_off;
9663 _pad_zeros(&bl, &b_off0, chunk_size);
9664
9665 dout(20) << __func__ << " reuse blob " << *b << std::hex
9666 << " (0x" << b_off0 << "~" << bl.length() << ")"
9667 << " (0x" << b_off << "~" << length << ")"
9668 << std::dec << dendl;
9669
9670 o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
9671 wctx->write(offset, b, alloc_len, b_off0, bl, b_off, length,
9672 false, false);
9673 logger->inc(l_bluestore_write_small_unused);
9674 return;
9675 }
9676 }
9677 }
9678 ++ep;
9679 any_change = true;
9680 } // if (ep != end && ep->logical_offset < offset + max_bsize)
9681
9682 // check extent for reuse in reverse order
9683 if (prev_ep != end && prev_ep->logical_offset >= min_off) {
9684 BlobRef b = prev_ep->blob;
9685 auto bstart = prev_ep->blob_start();
9686 dout(20) << __func__ << " considering " << *b
9687 << " bstart 0x" << std::hex << bstart << std::dec << dendl;
9688 if (b->can_reuse_blob(min_alloc_size,
9689 max_bsize,
9690 offset0 - bstart,
9691 &alloc_len)) {
9692 assert(alloc_len == min_alloc_size); // expecting the data to always
9693 // fit into the reused blob
9694 // Need to check for pending writes that want to
9695 // reuse the same pextent. The rationale is that during GC two chunks
9696 // from garbage (compressed?) blobs can share logical space within the same
9697 // AU. That in turn might be caused by an unaligned length in clone_range2.
9698 // Without this check the second write would fail when it tries to reuse
9699 // the blob in _do_alloc_write().
9700 if (!wctx->has_conflict(b,
9701 offset0,
9702 offset0 + alloc_len,
9703 min_alloc_size)) {
9704
9705 uint64_t chunk_size = b->get_blob().get_chunk_size(block_size);
9706 uint64_t b_off = offset - bstart;
9707 uint64_t b_off0 = b_off;
9708 _pad_zeros(&bl, &b_off0, chunk_size);
9709
9710 dout(20) << __func__ << " reuse blob " << *b << std::hex
9711 << " (0x" << b_off0 << "~" << bl.length() << ")"
9712 << " (0x" << b_off << "~" << length << ")"
9713 << std::dec << dendl;
9714
9715 o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
9716 wctx->write(offset, b, alloc_len, b_off0, bl, b_off, length,
9717 false, false);
9718 logger->inc(l_bluestore_write_small_unused);
9719 return;
9720 }
9721 }
9722 if (prev_ep != begin) {
9723 --prev_ep;
9724 any_change = true;
9725 } else {
9726 prev_ep = end; // to avoid useless first extent re-check
9727 }
9728 } // if (prev_ep != end && prev_ep->logical_offset >= min_off)
9729 } while (any_change);
9730
9731 // new blob.
9732
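// no existing blob could absorb this write; fall through to a fresh blob and
// let _do_alloc_write() allocate space for it.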
9733 BlobRef b = c->new_blob();
9734 uint64_t b_off = P2PHASE(offset, alloc_len);
9735 uint64_t b_off0 = b_off;
9736 _pad_zeros(&bl, &b_off0, block_size);
9737 o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
9738 wctx->write(offset, b, alloc_len, b_off0, bl, b_off, length, true, true);
9739 logger->inc(l_bluestore_write_small_new);
9740
9741 return;
9742 }
9743
9744 void BlueStore::_do_write_big(
9745 TransContext *txc,
9746 CollectionRef &c,
9747 OnodeRef o,
9748 uint64_t offset, uint64_t length,
9749 bufferlist::iterator& blp,
9750 WriteContext *wctx)
9751 {
9752 dout(10) << __func__ << " 0x" << std::hex << offset << "~" << length
9753 << " target_blob_size 0x" << wctx->target_blob_size << std::dec
9754 << " compress " << (int)wctx->compress
9755 << dendl;
9756 logger->inc(l_bluestore_write_big);
9757 logger->inc(l_bluestore_write_big_bytes, length);
9758 o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
9759 auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size);
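// carve the range into blob-sized pieces; each piece either reuses a nearby
// mutable blob (only when not compressing) or goes into a new blob.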
9760 while (length > 0) {
9761 bool new_blob = false;
9762 uint32_t l = MIN(max_bsize, length);
9763 BlobRef b;
9764 uint32_t b_off = 0;
9765
9766 // attempt to reuse an existing blob
9767 if (!wctx->compress) {
9768 // look for an existing mutable blob we can reuse
9769 auto begin = o->extent_map.extent_map.begin();
9770 auto end = o->extent_map.extent_map.end();
9771 auto ep = o->extent_map.seek_lextent(offset);
9772 auto prev_ep = ep;
9773 if (prev_ep != begin) {
9774 --prev_ep;
9775 } else {
9776 prev_ep = end; // to avoid this extent check as it's a duplicate
9777 }
9778 auto min_off = offset >= max_bsize ? offset - max_bsize : 0;
9779 // search for a suitable extent in both the forward and reverse directions
9780 // within the [offset - target_max_blob_size, offset + target_max_blob_size]
9781 // range, then check whether the blob can be reused via can_reuse_blob().
9782 bool any_change;
9783 do {
9784 any_change = false;
9785 if (ep != end && ep->logical_offset < offset + max_bsize) {
9786 if (offset >= ep->blob_start() &&
9787 ep->blob->can_reuse_blob(min_alloc_size, max_bsize,
9788 offset - ep->blob_start(),
9789 &l)) {
9790 b = ep->blob;
9791 b_off = offset - ep->blob_start();
9792 prev_ep = end; // to avoid check below
9793 dout(20) << __func__ << " reuse blob " << *b << std::hex
9794 << " (0x" << b_off << "~" << l << ")" << std::dec << dendl;
9795 } else {
9796 ++ep;
9797 any_change = true;
9798 }
9799 }
9800
9801 if (prev_ep != end && prev_ep->logical_offset >= min_off) {
9802 if (prev_ep->blob->can_reuse_blob(min_alloc_size, max_bsize,
9803 offset - prev_ep->blob_start(),
9804 &l)) {
9805 b = prev_ep->blob;
9806 b_off = offset - prev_ep->blob_start();
9807 dout(20) << __func__ << " reuse blob " << *b << std::hex
9808 << " (0x" << b_off << "~" << l << ")" << std::dec << dendl;
9809 } else if (prev_ep != begin) {
9810 --prev_ep;
9811 any_change = true;
9812 } else {
9813 prev_ep = end; // to avoid useless first extent re-check
9814 }
9815 }
9816 } while (b == nullptr && any_change);
9817 }
9818 if (b == nullptr) {
9819 b = c->new_blob();
9820 b_off = 0;
9821 new_blob = true;
9822 }
9823
9824 bufferlist t;
9825 blp.copy(l, t);
9826 wctx->write(offset, b, l, b_off, t, b_off, l, false, new_blob);
9827 offset += l;
9828 length -= l;
9829 logger->inc(l_bluestore_write_big_blobs);
9830 }
9831 }
9832
9833 int BlueStore::_do_alloc_write(
9834 TransContext *txc,
9835 CollectionRef coll,
9836 OnodeRef o,
9837 WriteContext *wctx)
9838 {
9839 dout(20) << __func__ << " txc " << txc
9840 << " " << wctx->writes.size() << " blobs"
9841 << dendl;
9842
9843 uint64_t need = 0;
9844 auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size);
9845 for (auto &wi : wctx->writes) {
9846 need += wi.blob_length;
9847 }
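// reserve the worst-case amount up front; whatever is left over (e.g. when
// compression shrinks a blob) is unreserved at the end of this function.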
9848 int r = alloc->reserve(need);
9849 if (r < 0) {
9850 derr << __func__ << " failed to reserve 0x" << std::hex << need << std::dec
9851 << dendl;
9852 return r;
9853 }
9854
9855 uint64_t hint = 0;
9856 CompressorRef c;
9857 double crr = 0;
9858 if (wctx->compress) {
9859 c = select_option(
9860 "compression_algorithm",
9861 compressor,
9862 [&]() {
9863 string val;
9864 if (coll->pool_opts.get(pool_opts_t::COMPRESSION_ALGORITHM, &val)) {
9865 CompressorRef cp = compressor;
9866 if (!cp || cp->get_type_name() != val) {
9867 cp = Compressor::create(cct, val);
9868 }
9869 return boost::optional<CompressorRef>(cp);
9870 }
9871 return boost::optional<CompressorRef>();
9872 }
9873 );
9874
9875 crr = select_option(
9876 "compression_required_ratio",
9877 cct->_conf->bluestore_compression_required_ratio,
9878 [&]() {
9879 double val;
9880 if(coll->pool_opts.get(pool_opts_t::COMPRESSION_REQUIRED_RATIO, &val)) {
9881 return boost::optional<double>(val);
9882 }
9883 return boost::optional<double>();
9884 }
9885 );
9886 }
9887
9888 // checksum
9889 int csum = csum_type.load();
9890 csum = select_option(
9891 "csum_type",
9892 csum,
9893 [&]() {
9894 int val;
9895 if(coll->pool_opts.get(pool_opts_t::CSUM_TYPE, &val)) {
9896 return boost::optional<int>(val);
9897 }
9898 return boost::optional<int>();
9899 }
9900 );
9901
9902 for (auto& wi : wctx->writes) {
9903 BlobRef b = wi.b;
9904 bluestore_blob_t& dblob = b->dirty_blob();
9905 uint64_t b_off = wi.b_off;
9906 bufferlist *l = &wi.bl;
9907 uint64_t final_length = wi.blob_length;
9908 uint64_t csum_length = wi.blob_length;
9909 unsigned csum_order = block_size_order;
9910 bufferlist compressed_bl;
9911 bool compressed = false;
9912 if(c && wi.blob_length > min_alloc_size) {
9913
9914 utime_t start = ceph_clock_now();
9915
9916 // compress
9917 assert(b_off == 0);
9918 assert(wi.blob_length == l->length());
9919 bluestore_compression_header_t chdr;
9920 chdr.type = c->get_type();
9921 // FIXME: memory alignment here is bad
9922 bufferlist t;
9923
9924 r = c->compress(*l, t);
9925 assert(r == 0);
9926
9927 chdr.length = t.length();
9928 ::encode(chdr, compressed_bl);
9929 compressed_bl.claim_append(t);
9930 uint64_t rawlen = compressed_bl.length();
9931 uint64_t newlen = P2ROUNDUP(rawlen, min_alloc_size);
9932 uint64_t want_len_raw = final_length * crr;
9933 uint64_t want_len = P2ROUNDUP(want_len_raw, min_alloc_size);
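// accept the compressed result only if, rounded up to min_alloc_size, it meets
// the required ratio and is strictly smaller than the uncompressed length.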
9934 if (newlen <= want_len && newlen < final_length) {
9935 // Cool. We compressed at least as much as we were hoping to.
9936 // pad out to min_alloc_size
9937 compressed_bl.append_zero(newlen - rawlen);
9938 logger->inc(l_bluestore_write_pad_bytes, newlen - rawlen);
9939 dout(20) << __func__ << std::hex << " compressed 0x" << wi.blob_length
9940 << " -> 0x" << rawlen << " => 0x" << newlen
9941 << " with " << c->get_type()
9942 << std::dec << dendl;
9943 txc->statfs_delta.compressed() += rawlen;
9944 txc->statfs_delta.compressed_original() += l->length();
9945 txc->statfs_delta.compressed_allocated() += newlen;
9946 l = &compressed_bl;
9947 final_length = newlen;
9948 csum_length = newlen;
9949 csum_order = ctz(newlen);
9950 dblob.set_compressed(wi.blob_length, rawlen);
9951 compressed = true;
9952 logger->inc(l_bluestore_compress_success_count);
9953 } else {
9954 dout(20) << __func__ << std::hex << " 0x" << l->length()
9955 << " compressed to 0x" << rawlen << " -> 0x" << newlen
9956 << " with " << c->get_type()
9957 << ", which is more than required 0x" << want_len_raw
9958 << " -> 0x" << want_len
9959 << ", leaving uncompressed"
9960 << std::dec << dendl;
9961 logger->inc(l_bluestore_compress_rejected_count);
9962 }
9963 logger->tinc(l_bluestore_compress_lat,
9964 ceph_clock_now() - start);
9965 }
9966 if (!compressed && wi.new_blob) {
9967 // initialize newly created blob only
9968 assert(dblob.is_mutable());
9969 if (l->length() != wi.blob_length) {
9970 // hrm, maybe we could do better here, but let's not bother.
9971 dout(20) << __func__ << " forcing csum_order to block_size_order "
9972 << block_size_order << dendl;
9973 csum_order = block_size_order;
9974 } else {
9975 csum_order = std::min(wctx->csum_order, ctz(l->length()));
9976 }
9977 // try to align the blob with max_blob_size to improve
9978 // its reuse ratio, e.g. in the case of reverse writes
9979 uint32_t suggested_boff =
9980 (wi.logical_offset - (wi.b_off0 - wi.b_off)) % max_bsize;
9981 if ((suggested_boff % (1 << csum_order)) == 0 &&
9982 suggested_boff + final_length <= max_bsize &&
9983 suggested_boff > b_off) {
9984 dout(20) << __func__ << " forcing blob_offset to "
9985 << std::hex << suggested_boff << std::dec << dendl;
9986 assert(suggested_boff >= b_off);
9987 csum_length += suggested_boff - b_off;
9988 b_off = suggested_boff;
9989 }
9990 }
9991
9992 AllocExtentVector extents;
9993 extents.reserve(4); // 4 should be (more than) enough for most allocations
9994 int64_t got = alloc->allocate(final_length, min_alloc_size,
9995 max_alloc_size.load(),
9996 hint, &extents);
9997 assert(got == (int64_t)final_length);
9998 need -= got;
9999 txc->statfs_delta.allocated() += got;
10000 for (auto& p : extents) {
10001 bluestore_pextent_t e = bluestore_pextent_t(p);
10002 txc->allocated.insert(e.offset, e.length);
10003 hint = p.end();
10004 }
10005 dblob.allocated(P2ALIGN(b_off, min_alloc_size), final_length, extents);
10006
10007 dout(20) << __func__ << " blob " << *b
10008 << " csum_type " << Checksummer::get_csum_type_string(csum)
10009 << " csum_order " << csum_order
10010 << " csum_length 0x" << std::hex << csum_length << std::dec
10011 << dendl;
10012
10013 if (csum != Checksummer::CSUM_NONE) {
10014 if (!dblob.has_csum()) {
10015 dblob.init_csum(csum, csum_order, csum_length);
10016 }
10017 dblob.calc_csum(b_off, *l);
10018 }
10019 if (wi.mark_unused) {
10020 auto b_end = b_off + wi.bl.length();
10021 if (b_off) {
10022 dblob.add_unused(0, b_off);
10023 }
10024 if (b_end < wi.blob_length) {
10025 dblob.add_unused(b_end, wi.blob_length - b_end);
10026 }
10027 }
10028
10029 Extent *le = o->extent_map.set_lextent(coll, wi.logical_offset,
10030 b_off + (wi.b_off0 - wi.b_off),
10031 wi.length0,
10032 wi.b,
10033 nullptr);
10034 wi.b->dirty_blob().mark_used(le->blob_offset, le->length);
10035 txc->statfs_delta.stored() += le->length;
10036 dout(20) << __func__ << " lex " << *le << dendl;
10037 _buffer_cache_write(txc, wi.b, b_off, wi.bl,
10038 wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
10039
10040 // queue io
10041 if (!g_conf->bluestore_debug_omit_block_device_write) {
10042 if (l->length() <= prefer_deferred_size.load()) {
10043 dout(20) << __func__ << " deferring small 0x" << std::hex
10044 << l->length() << std::dec << " write via deferred" << dendl;
10045 bluestore_deferred_op_t *op = _get_deferred_op(txc, o);
10046 op->op = bluestore_deferred_op_t::OP_WRITE;
10047 int r = b->get_blob().map(
10048 b_off, l->length(),
10049 [&](uint64_t offset, uint64_t length) {
10050 op->extents.emplace_back(bluestore_pextent_t(offset, length));
10051 return 0;
10052 });
10053 assert(r == 0);
10054 op->data = *l;
10055 } else {
10056 b->get_blob().map_bl(
10057 b_off, *l,
10058 [&](uint64_t offset, bufferlist& t) {
10059 bdev->aio_write(offset, t, &txc->ioc, false);
10060 });
10061 }
10062 }
10063 }
10064 if (need > 0) {
10065 alloc->unreserve(need);
10066 }
10067 return 0;
10068 }
10069
10070 void BlueStore::_wctx_finish(
10071 TransContext *txc,
10072 CollectionRef& c,
10073 OnodeRef o,
10074 WriteContext *wctx,
10075 set<SharedBlob*> *maybe_unshared_blobs)
10076 {
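// release the lextents displaced by this write: adjust statfs, drop shared-blob
// refs (loading the shared blob and possibly flagging it for unsharing), and
// record now-unreferenced space in txc->released.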
10077 auto oep = wctx->old_extents.begin();
10078 while (oep != wctx->old_extents.end()) {
10079 auto &lo = *oep;
10080 oep = wctx->old_extents.erase(oep);
10081 dout(20) << __func__ << " lex_old " << lo.e << dendl;
10082 BlobRef b = lo.e.blob;
10083 const bluestore_blob_t& blob = b->get_blob();
10084 if (blob.is_compressed()) {
10085 if (lo.blob_empty) {
10086 txc->statfs_delta.compressed() -= blob.get_compressed_payload_length();
10087 }
10088 txc->statfs_delta.compressed_original() -= lo.e.length;
10089 }
10090 auto& r = lo.r;
10091 txc->statfs_delta.stored() -= lo.e.length;
10092 if (!r.empty()) {
10093 dout(20) << __func__ << " blob release " << r << dendl;
10094 if (blob.is_shared()) {
10095 PExtentVector final;
10096 c->load_shared_blob(b->shared_blob);
10097 for (auto e : r) {
10098 b->shared_blob->put_ref(
10099 e.offset, e.length, &final,
10100 b->is_referenced() ? nullptr : maybe_unshared_blobs);
10101 }
10102 dout(20) << __func__ << " shared_blob release " << final
10103 << " from " << *b->shared_blob << dendl;
10104 txc->write_shared_blob(b->shared_blob);
10105 r.clear();
10106 r.swap(final);
10107 }
10108 }
10109 // we can't invalidate our logical extents as we drop them because
10110 // other lextents (either in our onode or in others) may still
10111 // reference them. But we can throw out anything that is no
10112 // longer allocated. Note that this will leave behind edge bits
10113 // that are no longer referenced but not deallocated (until they
10114 // age out of the cache naturally).
10115 b->discard_unallocated(c.get());
10116 for (auto e : r) {
10117 dout(20) << __func__ << " release " << e << dendl;
10118 txc->released.insert(e.offset, e.length);
10119 txc->statfs_delta.allocated() -= e.length;
10120 if (blob.is_compressed()) {
10121 txc->statfs_delta.compressed_allocated() -= e.length;
10122 }
10123 }
10124 delete &lo;
10125 if (b->is_spanning() && !b->is_referenced()) {
10126 dout(20) << __func__ << " spanning_blob_map removing empty " << *b
10127 << dendl;
10128 o->extent_map.spanning_blob_map.erase(b->id);
10129 }
10130 }
10131 }
10132
10133 void BlueStore::_do_write_data(
10134 TransContext *txc,
10135 CollectionRef& c,
10136 OnodeRef o,
10137 uint64_t offset,
10138 uint64_t length,
10139 bufferlist& bl,
10140 WriteContext *wctx)
10141 {
10142 uint64_t end = offset + length;
10143 bufferlist::iterator p = bl.begin();
10144
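// a write that falls within a single min_alloc_size block (and is not exactly
// one full block) takes the small-write path; otherwise it is split into an
// unaligned head, a block-aligned middle (big writes), and an unaligned tail.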
10145 if (offset / min_alloc_size == (end - 1) / min_alloc_size &&
10146 (length != min_alloc_size)) {
10147 // we fall within the same block
10148 _do_write_small(txc, c, o, offset, length, p, wctx);
10149 } else {
10150 uint64_t head_offset, head_length;
10151 uint64_t middle_offset, middle_length;
10152 uint64_t tail_offset, tail_length;
10153
10154 head_offset = offset;
10155 head_length = P2NPHASE(offset, min_alloc_size);
10156
10157 tail_offset = P2ALIGN(end, min_alloc_size);
10158 tail_length = P2PHASE(end, min_alloc_size);
10159
10160 middle_offset = head_offset + head_length;
10161 middle_length = length - head_length - tail_length;
10162
10163 if (head_length) {
10164 _do_write_small(txc, c, o, head_offset, head_length, p, wctx);
10165 }
10166
10167 if (middle_length) {
10168 _do_write_big(txc, c, o, middle_offset, middle_length, p, wctx);
10169 }
10170
10171 if (tail_length) {
10172 _do_write_small(txc, c, o, tail_offset, tail_length, p, wctx);
10173 }
10174 }
10175 }
10176
10177 void BlueStore::_choose_write_options(
10178 CollectionRef& c,
10179 OnodeRef o,
10180 uint32_t fadvise_flags,
10181 WriteContext *wctx)
10182 {
10183 if (fadvise_flags & CEPH_OSD_OP_FLAG_FADVISE_WILLNEED) {
10184 dout(20) << __func__ << " will do buffered write" << dendl;
10185 wctx->buffered = true;
10186 } else if (cct->_conf->bluestore_default_buffered_write &&
10187 (fadvise_flags & (CEPH_OSD_OP_FLAG_FADVISE_DONTNEED |
10188 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE)) == 0) {
10189 dout(20) << __func__ << " defaulting to buffered write" << dendl;
10190 wctx->buffered = true;
10191 }
10192
10193 // apply basic csum block size
10194 wctx->csum_order = block_size_order;
10195
10196 // compression parameters
10197 unsigned alloc_hints = o->onode.alloc_hint_flags;
10198 auto cm = select_option(
10199 "compression_mode",
10200 comp_mode.load(),
10201 [&]() {
10202 string val;
10203 if(c->pool_opts.get(pool_opts_t::COMPRESSION_MODE, &val)) {
10204 return boost::optional<Compressor::CompressionMode>(
10205 Compressor::get_comp_mode_type(val));
10206 }
10207 return boost::optional<Compressor::CompressionMode>();
10208 }
10209 );
10210
10211 wctx->compress = (cm != Compressor::COMP_NONE) &&
10212 ((cm == Compressor::COMP_FORCE) ||
10213 (cm == Compressor::COMP_AGGRESSIVE &&
10214 (alloc_hints & CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE) == 0) ||
10215 (cm == Compressor::COMP_PASSIVE &&
10216 (alloc_hints & CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE)));
10217
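// objects hinted as sequential-read, non-random, and immutable/append-only get
// larger csum chunks (and, when compressing, the larger max blob size).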
10218 if ((alloc_hints & CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ) &&
10219 (alloc_hints & CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ) == 0 &&
10220 (alloc_hints & (CEPH_OSD_ALLOC_HINT_FLAG_IMMUTABLE |
10221 CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY)) &&
10222 (alloc_hints & CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE) == 0) {
10223
10224 dout(20) << __func__ << " will prefer large blob and csum sizes" << dendl;
10225
10226 if (o->onode.expected_write_size) {
10227 wctx->csum_order = std::max(min_alloc_size_order,
10228 (uint8_t)ctz(o->onode.expected_write_size));
10229 } else {
10230 wctx->csum_order = min_alloc_size_order;
10231 }
10232
10233 if (wctx->compress) {
10234 wctx->target_blob_size = select_option(
10235 "compression_max_blob_size",
10236 comp_max_blob_size.load(),
10237 [&]() {
10238 int val;
10239 if(c->pool_opts.get(pool_opts_t::COMPRESSION_MAX_BLOB_SIZE, &val)) {
10240 return boost::optional<uint64_t>((uint64_t)val);
10241 }
10242 return boost::optional<uint64_t>();
10243 }
10244 );
10245 }
10246 } else {
10247 if (wctx->compress) {
10248 wctx->target_blob_size = select_option(
10249 "compression_min_blob_size",
10250 comp_min_blob_size.load(),
10251 [&]() {
10252 int val;
10253 if(c->pool_opts.get(pool_opts_t::COMPRESSION_MIN_BLOB_SIZE, &val)) {
10254 return boost::optional<uint64_t>((uint64_t)val);
10255 }
10256 return boost::optional<uint64_t>();
10257 }
10258 );
10259 }
10260 }
10261
10262 uint64_t max_bsize = max_blob_size.load();
10263 if (wctx->target_blob_size == 0 || wctx->target_blob_size > max_bsize) {
10264 wctx->target_blob_size = max_bsize;
10265 }
10266
10267 // set the min blob size floor at 2x the min_alloc_size, or else we
10268 // won't be able to allocate a smaller extent for the compressed
10269 // data.
10270 if (wctx->compress &&
10271 wctx->target_blob_size < min_alloc_size * 2) {
10272 wctx->target_blob_size = min_alloc_size * 2;
10273 }
10274
10275 dout(20) << __func__ << " prefer csum_order " << wctx->csum_order
10276 << " target_blob_size 0x" << std::hex << wctx->target_blob_size
10277 << std::dec << dendl;
10278 }
10279
10280 int BlueStore::_do_gc(
10281 TransContext *txc,
10282 CollectionRef& c,
10283 OnodeRef o,
10284 const GarbageCollector& gc,
10285 const WriteContext& wctx,
10286 uint64_t *dirty_start,
10287 uint64_t *dirty_end)
10288 {
10289 auto& extents_to_collect = gc.get_extents_to_collect();
10290
10291 WriteContext wctx_gc;
10292 wctx_gc.fork(wctx); // make a clone for garbage collection
10293
10294 for (auto it = extents_to_collect.begin();
10295 it != extents_to_collect.end();
10296 ++it) {
10297 bufferlist bl;
10298 int r = _do_read(c.get(), o, it->offset, it->length, bl, 0);
10299 assert(r == (int)it->length);
10300
10301 o->extent_map.fault_range(db, it->offset, it->length);
10302 _do_write_data(txc, c, o, it->offset, it->length, bl, &wctx_gc);
10303 logger->inc(l_bluestore_gc_merged, it->length);
10304
10305 if (*dirty_start > it->offset) {
10306 *dirty_start = it->offset;
10307 }
10308
10309 if (*dirty_end < it->offset + it->length) {
10310 *dirty_end = it->offset + it->length;
10311 }
10312 }
10313
10314 dout(30) << __func__ << " alloc write" << dendl;
10315 int r = _do_alloc_write(txc, c, o, &wctx_gc);
10316 if (r < 0) {
10317 derr << __func__ << " _do_alloc_write failed with " << cpp_strerror(r)
10318 << dendl;
10319 return r;
10320 }
10321
10322 _wctx_finish(txc, c, o, &wctx_gc);
10323 return 0;
10324 }
10325
10326 int BlueStore::_do_write(
10327 TransContext *txc,
10328 CollectionRef& c,
10329 OnodeRef o,
10330 uint64_t offset,
10331 uint64_t length,
10332 bufferlist& bl,
10333 uint32_t fadvise_flags)
10334 {
10335 int r = 0;
10336
10337 dout(20) << __func__
10338 << " " << o->oid
10339 << " 0x" << std::hex << offset << "~" << length
10340 << " - have 0x" << o->onode.size
10341 << " (" << std::dec << o->onode.size << ")"
10342 << " bytes"
10343 << " fadvise_flags 0x" << std::hex << fadvise_flags << std::dec
10344 << dendl;
10345 _dump_onode(o);
10346
10347 if (length == 0) {
10348 return 0;
10349 }
10350
10351 uint64_t end = offset + length;
10352
10353 GarbageCollector gc(c->store->cct);
10354 int64_t benefit;
10355 auto dirty_start = offset;
10356 auto dirty_end = end;
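// dirty_start/dirty_end track the logical range actually modified; garbage
// collection below may widen it before the extent map is compressed and dirtied.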
10357
10358 WriteContext wctx;
10359 _choose_write_options(c, o, fadvise_flags, &wctx);
10360 o->extent_map.fault_range(db, offset, length);
10361 _do_write_data(txc, c, o, offset, length, bl, &wctx);
10362 r = _do_alloc_write(txc, c, o, &wctx);
10363 if (r < 0) {
10364 derr << __func__ << " _do_alloc_write failed with " << cpp_strerror(r)
10365 << dendl;
10366 goto out;
10367 }
10368
10369 // NB: _wctx_finish() will empty old_extents
10370 // so we must do gc estimation before that
10371 benefit = gc.estimate(offset,
10372 length,
10373 o->extent_map,
10374 wctx.old_extents,
10375 min_alloc_size);
10376
10377 _wctx_finish(txc, c, o, &wctx);
10378 if (end > o->onode.size) {
10379 dout(20) << __func__ << " extending size to 0x" << std::hex << end
10380 << std::dec << dendl;
10381 o->onode.size = end;
10382 }
10383
10384 if (benefit >= g_conf->bluestore_gc_enable_total_threshold) {
10385 if (!gc.get_extents_to_collect().empty()) {
10386 dout(20) << __func__ << " perform garbage collection, "
10387 << "expected benefit = " << benefit << " AUs" << dendl;
10388 r = _do_gc(txc, c, o, gc, wctx, &dirty_start, &dirty_end);
10389 if (r < 0) {
10390 derr << __func__ << " _do_gc failed with " << cpp_strerror(r)
10391 << dendl;
10392 goto out;
10393 }
10394 }
10395 }
10396
10397 o->extent_map.compress_extent_map(dirty_start, dirty_end - dirty_start);
10398 o->extent_map.dirty_range(dirty_start, dirty_end - dirty_start);
10399
10400 r = 0;
10401
10402 out:
10403 return r;
10404 }
10405
10406 int BlueStore::_write(TransContext *txc,
10407 CollectionRef& c,
10408 OnodeRef& o,
10409 uint64_t offset, size_t length,
10410 bufferlist& bl,
10411 uint32_t fadvise_flags)
10412 {
10413 dout(15) << __func__ << " " << c->cid << " " << o->oid
10414 << " 0x" << std::hex << offset << "~" << length << std::dec
10415 << dendl;
10416 int r = 0;
10417 if (offset + length >= OBJECT_MAX_SIZE) {
10418 r = -E2BIG;
10419 } else {
10420 _assign_nid(txc, o);
10421 r = _do_write(txc, c, o, offset, length, bl, fadvise_flags);
10422 txc->write_onode(o);
10423 }
10424 dout(10) << __func__ << " " << c->cid << " " << o->oid
10425 << " 0x" << std::hex << offset << "~" << length << std::dec
10426 << " = " << r << dendl;
10427 return r;
10428 }
10429
10430 int BlueStore::_zero(TransContext *txc,
10431 CollectionRef& c,
10432 OnodeRef& o,
10433 uint64_t offset, size_t length)
10434 {
10435 dout(15) << __func__ << " " << c->cid << " " << o->oid
10436 << " 0x" << std::hex << offset << "~" << length << std::dec
10437 << dendl;
10438 int r = 0;
10439 if (offset + length >= OBJECT_MAX_SIZE) {
10440 r = -E2BIG;
10441 } else {
10442 _assign_nid(txc, o);
10443 r = _do_zero(txc, c, o, offset, length);
10444 }
10445 dout(10) << __func__ << " " << c->cid << " " << o->oid
10446 << " 0x" << std::hex << offset << "~" << length << std::dec
10447 << " = " << r << dendl;
10448 return r;
10449 }
10450
10451 int BlueStore::_do_zero(TransContext *txc,
10452 CollectionRef& c,
10453 OnodeRef& o,
10454 uint64_t offset, size_t length)
10455 {
10456 dout(15) << __func__ << " " << c->cid << " " << o->oid
10457 << " 0x" << std::hex << offset << "~" << length << std::dec
10458 << dendl;
10459 int r = 0;
10460
10461 _dump_onode(o);
10462
10463 WriteContext wctx;
10464 o->extent_map.fault_range(db, offset, length);
10465 o->extent_map.punch_hole(c, offset, length, &wctx.old_extents);
10466 o->extent_map.dirty_range(offset, length);
10467 _wctx_finish(txc, c, o, &wctx);
10468
10469 if (offset + length > o->onode.size) {
10470 o->onode.size = offset + length;
10471 dout(20) << __func__ << " extending size to " << offset + length
10472 << dendl;
10473 }
10474 txc->write_onode(o);
10475
10476 dout(10) << __func__ << " " << c->cid << " " << o->oid
10477 << " 0x" << std::hex << offset << "~" << length << std::dec
10478 << " = " << r << dendl;
10479 return r;
10480 }
10481
10482 void BlueStore::_do_truncate(
10483 TransContext *txc, CollectionRef& c, OnodeRef o, uint64_t offset,
10484 set<SharedBlob*> *maybe_unshared_blobs)
10485 {
10486 dout(15) << __func__ << " " << c->cid << " " << o->oid
10487 << " 0x" << std::hex << offset << std::dec << dendl;
10488
10489 _dump_onode(o, 30);
10490
10491 if (offset == o->onode.size)
10492 return;
10493
10494 if (offset < o->onode.size) {
10495 WriteContext wctx;
10496 uint64_t length = o->onode.size - offset;
10497 o->extent_map.fault_range(db, offset, length);
10498 o->extent_map.punch_hole(c, offset, length, &wctx.old_extents);
10499 o->extent_map.dirty_range(offset, length);
10500 _wctx_finish(txc, c, o, &wctx, maybe_unshared_blobs);
10501
10502 // if we have shards past EOF, ask for a reshard
10503 if (!o->onode.extent_map_shards.empty() &&
10504 o->onode.extent_map_shards.back().offset >= offset) {
10505 dout(10) << __func__ << " request reshard past EOF" << dendl;
10506 if (offset) {
10507 o->extent_map.request_reshard(offset - 1, offset + length);
10508 } else {
10509 o->extent_map.request_reshard(0, length);
10510 }
10511 }
10512 }
10513
10514 o->onode.size = offset;
10515
10516 txc->write_onode(o);
10517 }
10518
10519 int BlueStore::_truncate(TransContext *txc,
10520 CollectionRef& c,
10521 OnodeRef& o,
10522 uint64_t offset)
10523 {
10524 dout(15) << __func__ << " " << c->cid << " " << o->oid
10525 << " 0x" << std::hex << offset << std::dec
10526 << dendl;
10527 int r = 0;
10528 if (offset >= OBJECT_MAX_SIZE) {
10529 r = -E2BIG;
10530 } else {
10531 _do_truncate(txc, c, o, offset);
10532 }
10533 dout(10) << __func__ << " " << c->cid << " " << o->oid
10534 << " 0x" << std::hex << offset << std::dec
10535 << " = " << r << dendl;
10536 return r;
10537 }
10538
10539 int BlueStore::_do_remove(
10540 TransContext *txc,
10541 CollectionRef& c,
10542 OnodeRef o)
10543 {
10544 set<SharedBlob*> maybe_unshared_blobs;
10545 bool is_gen = !o->oid.is_no_gen();
10546 _do_truncate(txc, c, o, 0, is_gen ? &maybe_unshared_blobs : nullptr);
10547 if (o->onode.has_omap()) {
10548 o->flush();
10549 _do_omap_clear(txc, o->onode.nid);
10550 }
10551 o->exists = false;
10552 string key;
10553 for (auto &s : o->extent_map.shards) {
10554 dout(20) << __func__ << " removing shard 0x" << std::hex
10555 << s.shard_info->offset << std::dec << dendl;
10556 generate_extent_shard_key_and_apply(o->key, s.shard_info->offset, &key,
10557 [&](const string& final_key) {
10558 txc->t->rmkey(PREFIX_OBJ, final_key);
10559 }
10560 );
10561 }
10562 txc->t->rmkey(PREFIX_OBJ, o->key.c_str(), o->key.size());
10563 txc->removed(o);
10564 o->extent_map.clear();
10565 o->onode = bluestore_onode_t();
10566 _debug_obj_on_delete(o->oid);
10567
10568 if (!is_gen || maybe_unshared_blobs.empty()) {
10569 return 0;
10570 }
10571
10572 // see if we can unshare blobs still referenced by the head
10573 dout(10) << __func__ << " gen and maybe_unshared_blobs "
10574 << maybe_unshared_blobs << dendl;
10575 ghobject_t nogen = o->oid;
10576 nogen.generation = ghobject_t::NO_GEN;
10577 OnodeRef h = c->onode_map.lookup(nogen);
10578
10579 if (!h || !h->exists) {
10580 return 0;
10581 }
10582
10583 dout(20) << __func__ << " checking for unshareable blobs on " << h
10584 << " " << h->oid << dendl;
10585 map<SharedBlob*,bluestore_extent_ref_map_t> expect;
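// accumulate, per candidate shared blob, the references the head object still
// holds; if they match the blob's full ref_map, no other object references it
// and the blob can be unshared.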
10586 for (auto& e : h->extent_map.extent_map) {
10587 const bluestore_blob_t& b = e.blob->get_blob();
10588 SharedBlob *sb = e.blob->shared_blob.get();
10589 if (b.is_shared() &&
10590 sb->loaded &&
10591 maybe_unshared_blobs.count(sb)) {
10592 b.map(e.blob_offset, e.length, [&](uint64_t off, uint64_t len) {
10593 expect[sb].get(off, len);
10594 return 0;
10595 });
10596 }
10597 }
10598
10599 vector<SharedBlob*> unshared_blobs;
10600 unshared_blobs.reserve(maybe_unshared_blobs.size());
10601 for (auto& p : expect) {
10602 dout(20) << " ? " << *p.first << " vs " << p.second << dendl;
10603 if (p.first->persistent->ref_map == p.second) {
10604 SharedBlob *sb = p.first;
10605 dout(20) << __func__ << " unsharing " << *sb << dendl;
10606 unshared_blobs.push_back(sb);
10607 txc->unshare_blob(sb);
10608 uint64_t sbid = c->make_blob_unshared(sb);
10609 string key;
10610 get_shared_blob_key(sbid, &key);
10611 txc->t->rmkey(PREFIX_SHARED_BLOB, key);
10612 }
10613 }
10614
10615 if (unshared_blobs.empty()) {
10616 return 0;
10617 }
10618
10619 for (auto& e : h->extent_map.extent_map) {
10620 const bluestore_blob_t& b = e.blob->get_blob();
10621 SharedBlob *sb = e.blob->shared_blob.get();
10622 if (b.is_shared() &&
10623 std::find(unshared_blobs.begin(), unshared_blobs.end(),
10624 sb) != unshared_blobs.end()) {
10625 dout(20) << __func__ << " unsharing " << e << dendl;
10626 bluestore_blob_t& blob = e.blob->dirty_blob();
10627 blob.clear_flag(bluestore_blob_t::FLAG_SHARED);
10628 h->extent_map.dirty_range(e.logical_offset, 1);
10629 }
10630 }
10631 txc->write_onode(h);
10632
10633 return 0;
10634 }
10635
10636 int BlueStore::_remove(TransContext *txc,
10637 CollectionRef& c,
10638 OnodeRef &o)
10639 {
10640 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10641 int r = _do_remove(txc, c, o);
10642 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10643 return r;
10644 }
10645
10646 int BlueStore::_setattr(TransContext *txc,
10647 CollectionRef& c,
10648 OnodeRef& o,
10649 const string& name,
10650 bufferptr& val)
10651 {
10652 dout(15) << __func__ << " " << c->cid << " " << o->oid
10653 << " " << name << " (" << val.length() << " bytes)"
10654 << dendl;
10655 int r = 0;
10656 if (val.is_partial())
10657 o->onode.attrs[name.c_str()] = bufferptr(val.c_str(), val.length());
10658 else
10659 o->onode.attrs[name.c_str()] = val;
10660 txc->write_onode(o);
10661 dout(10) << __func__ << " " << c->cid << " " << o->oid
10662 << " " << name << " (" << val.length() << " bytes)"
10663 << " = " << r << dendl;
10664 return r;
10665 }
10666
10667 int BlueStore::_setattrs(TransContext *txc,
10668 CollectionRef& c,
10669 OnodeRef& o,
10670 const map<string,bufferptr>& aset)
10671 {
10672 dout(15) << __func__ << " " << c->cid << " " << o->oid
10673 << " " << aset.size() << " keys"
10674 << dendl;
10675 int r = 0;
10676 for (map<string,bufferptr>::const_iterator p = aset.begin();
10677 p != aset.end(); ++p) {
10678 if (p->second.is_partial())
10679 o->onode.attrs[p->first.c_str()] =
10680 bufferptr(p->second.c_str(), p->second.length());
10681 else
10682 o->onode.attrs[p->first.c_str()] = p->second;
10683 }
10684 txc->write_onode(o);
10685 dout(10) << __func__ << " " << c->cid << " " << o->oid
10686 << " " << aset.size() << " keys"
10687 << " = " << r << dendl;
10688 return r;
10689 }
10690
10691
10692 int BlueStore::_rmattr(TransContext *txc,
10693 CollectionRef& c,
10694 OnodeRef& o,
10695 const string& name)
10696 {
10697 dout(15) << __func__ << " " << c->cid << " " << o->oid
10698 << " " << name << dendl;
10699 int r = 0;
10700 auto it = o->onode.attrs.find(name.c_str());
10701 if (it == o->onode.attrs.end())
10702 goto out;
10703
10704 o->onode.attrs.erase(it);
10705 txc->write_onode(o);
10706
10707 out:
10708 dout(10) << __func__ << " " << c->cid << " " << o->oid
10709 << " " << name << " = " << r << dendl;
10710 return r;
10711 }
10712
10713 int BlueStore::_rmattrs(TransContext *txc,
10714 CollectionRef& c,
10715 OnodeRef& o)
10716 {
10717 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10718 int r = 0;
10719
10720 if (o->onode.attrs.empty())
10721 goto out;
10722
10723 o->onode.attrs.clear();
10724 txc->write_onode(o);
10725
10726 out:
10727 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10728 return r;
10729 }
10730
10731 void BlueStore::_do_omap_clear(TransContext *txc, uint64_t id)
10732 {
10733 KeyValueDB::Iterator it = db->get_iterator(PREFIX_OMAP);
10734 string prefix, tail;
10735 get_omap_header(id, &prefix);
10736 get_omap_tail(id, &tail);
10737 it->lower_bound(prefix);
10738 while (it->valid()) {
10739 if (it->key() >= tail) {
10740 dout(30) << __func__ << " stop at " << pretty_binary_string(tail)
10741 << dendl;
10742 break;
10743 }
10744 txc->t->rmkey(PREFIX_OMAP, it->key());
10745 dout(30) << __func__ << " rm " << pretty_binary_string(it->key()) << dendl;
10746 it->next();
10747 }
10748 }
10749
10750 int BlueStore::_omap_clear(TransContext *txc,
10751 CollectionRef& c,
10752 OnodeRef& o)
10753 {
10754 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10755 int r = 0;
10756 if (o->onode.has_omap()) {
10757 o->flush();
10758 _do_omap_clear(txc, o->onode.nid);
10759 o->onode.clear_omap_flag();
10760 txc->write_onode(o);
10761 }
10762 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10763 return r;
10764 }
10765
10766 int BlueStore::_omap_setkeys(TransContext *txc,
10767 CollectionRef& c,
10768 OnodeRef& o,
10769 bufferlist &bl)
10770 {
10771 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10772 int r;
10773 bufferlist::iterator p = bl.begin();
10774 __u32 num;
10775 if (!o->onode.has_omap()) {
10776 o->onode.set_omap_flag();
10777 txc->write_onode(o);
10778 } else {
10779 txc->note_modified_object(o);
10780 }
10781 string final_key;
10782 _key_encode_u64(o->onode.nid, &final_key);
10783 final_key.push_back('.');
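// omap keys are <u64 nid>.<client key>; keep the 9-byte prefix and append each
// client key in turn below.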
10784 ::decode(num, p);
10785 while (num--) {
10786 string key;
10787 bufferlist value;
10788 ::decode(key, p);
10789 ::decode(value, p);
10790 final_key.resize(9); // keep prefix
10791 final_key += key;
10792 dout(30) << __func__ << " " << pretty_binary_string(final_key)
10793 << " <- " << key << dendl;
10794 txc->t->set(PREFIX_OMAP, final_key, value);
10795 }
10796 r = 0;
10797 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10798 return r;
10799 }
10800
10801 int BlueStore::_omap_setheader(TransContext *txc,
10802 CollectionRef& c,
10803 OnodeRef &o,
10804 bufferlist& bl)
10805 {
10806 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10807 int r;
10808 string key;
10809 if (!o->onode.has_omap()) {
10810 o->onode.set_omap_flag();
10811 txc->write_onode(o);
10812 } else {
10813 txc->note_modified_object(o);
10814 }
10815 get_omap_header(o->onode.nid, &key);
10816 txc->t->set(PREFIX_OMAP, key, bl);
10817 r = 0;
10818 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10819 return r;
10820 }
10821
10822 int BlueStore::_omap_rmkeys(TransContext *txc,
10823 CollectionRef& c,
10824 OnodeRef& o,
10825 bufferlist& bl)
10826 {
10827 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10828 int r = 0;
10829 bufferlist::iterator p = bl.begin();
10830 __u32 num;
10831 string final_key;
10832
10833 if (!o->onode.has_omap()) {
10834 goto out;
10835 }
10836 _key_encode_u64(o->onode.nid, &final_key);
10837 final_key.push_back('.');
10838 ::decode(num, p);
10839 while (num--) {
10840 string key;
10841 ::decode(key, p);
10842 final_key.resize(9); // keep prefix
10843 final_key += key;
10844 dout(30) << __func__ << " rm " << pretty_binary_string(final_key)
10845 << " <- " << key << dendl;
10846 txc->t->rmkey(PREFIX_OMAP, final_key);
10847 }
10848 txc->note_modified_object(o);
10849
10850 out:
10851 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10852 return r;
10853 }
10854
10855 int BlueStore::_omap_rmkey_range(TransContext *txc,
10856 CollectionRef& c,
10857 OnodeRef& o,
10858 const string& first, const string& last)
10859 {
10860 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10861 KeyValueDB::Iterator it;
10862 string key_first, key_last;
10863 int r = 0;
10864 if (!o->onode.has_omap()) {
10865 goto out;
10866 }
10867 o->flush();
10868 it = db->get_iterator(PREFIX_OMAP);
10869 get_omap_key(o->onode.nid, first, &key_first);
10870 get_omap_key(o->onode.nid, last, &key_last);
10871 it->lower_bound(key_first);
10872 while (it->valid()) {
10873 if (it->key() >= key_last) {
10874 dout(30) << __func__ << " stop at " << pretty_binary_string(key_last)
10875 << dendl;
10876 break;
10877 }
10878 txc->t->rmkey(PREFIX_OMAP, it->key());
10879 dout(30) << __func__ << " rm " << pretty_binary_string(it->key()) << dendl;
10880 it->next();
10881 }
10882 txc->note_modified_object(o);
10883
10884 out:
10885 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10886 return r;
10887 }
10888
10889 int BlueStore::_set_alloc_hint(
10890 TransContext *txc,
10891 CollectionRef& c,
10892 OnodeRef& o,
10893 uint64_t expected_object_size,
10894 uint64_t expected_write_size,
10895 uint32_t flags)
10896 {
10897 dout(15) << __func__ << " " << c->cid << " " << o->oid
10898 << " object_size " << expected_object_size
10899 << " write_size " << expected_write_size
10900 << " flags " << ceph_osd_alloc_hint_flag_string(flags)
10901 << dendl;
10902 int r = 0;
10903 o->onode.expected_object_size = expected_object_size;
10904 o->onode.expected_write_size = expected_write_size;
10905 o->onode.alloc_hint_flags = flags;
10906 txc->write_onode(o);
10907 dout(10) << __func__ << " " << c->cid << " " << o->oid
10908 << " object_size " << expected_object_size
10909 << " write_size " << expected_write_size
10910 << " flags " << ceph_osd_alloc_hint_flag_string(flags)
10911 << " = " << r << dendl;
10912 return r;
10913 }
10914
10915 int BlueStore::_clone(TransContext *txc,
10916 CollectionRef& c,
10917 OnodeRef& oldo,
10918 OnodeRef& newo)
10919 {
10920 dout(15) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
10921 << newo->oid << dendl;
10922 int r = 0;
10923 if (oldo->oid.hobj.get_hash() != newo->oid.hobj.get_hash()) {
10924 derr << __func__ << " mismatched hash on " << oldo->oid
10925 << " and " << newo->oid << dendl;
10926 return -EINVAL;
10927 }
10928
10929 _assign_nid(txc, newo);
10930
10931 // clone data
10932 oldo->flush();
10933 _do_truncate(txc, c, newo, 0);
10934 if (cct->_conf->bluestore_clone_cow) {
10935 _do_clone_range(txc, c, oldo, newo, 0, oldo->onode.size, 0);
10936 } else {
10937 bufferlist bl;
10938 r = _do_read(c.get(), oldo, 0, oldo->onode.size, bl, 0);
10939 if (r < 0)
10940 goto out;
10941 r = _do_write(txc, c, newo, 0, oldo->onode.size, bl, 0);
10942 if (r < 0)
10943 goto out;
10944 }
10945
10946 // clone attrs
10947 newo->onode.attrs = oldo->onode.attrs;
10948
10949 // clone omap
10950 if (newo->onode.has_omap()) {
10951 dout(20) << __func__ << " clearing old omap data" << dendl;
10952 newo->flush();
10953 _do_omap_clear(txc, newo->onode.nid);
10954 }
10955 if (oldo->onode.has_omap()) {
10956 dout(20) << __func__ << " copying omap data" << dendl;
10957 if (!newo->onode.has_omap()) {
10958 newo->onode.set_omap_flag();
10959 }
10960 KeyValueDB::Iterator it = db->get_iterator(PREFIX_OMAP);
10961 string head, tail;
10962 get_omap_header(oldo->onode.nid, &head);
10963 get_omap_tail(oldo->onode.nid, &tail);
10964 it->lower_bound(head);
10965 while (it->valid()) {
10966 if (it->key() >= tail) {
10967 dout(30) << __func__ << " reached tail" << dendl;
10968 break;
10969 } else {
10970 dout(30) << __func__ << " got header/data "
10971 << pretty_binary_string(it->key()) << dendl;
10972 string key;
10973 rewrite_omap_key(newo->onode.nid, it->key(), &key);
10974 txc->t->set(PREFIX_OMAP, key, it->value());
10975 }
10976 it->next();
10977 }
10978 } else {
10979 newo->onode.clear_omap_flag();
10980 }
10981
10982 txc->write_onode(newo);
10983 r = 0;
10984
10985 out:
10986 dout(10) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
10987 << newo->oid << " = " << r << dendl;
10988 return r;
10989 }
10990
10991 int BlueStore::_do_clone_range(
10992 TransContext *txc,
10993 CollectionRef& c,
10994 OnodeRef& oldo,
10995 OnodeRef& newo,
10996 uint64_t srcoff,
10997 uint64_t length,
10998 uint64_t dstoff)
10999 {
11000 dout(15) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
11001 << newo->oid
11002 << " 0x" << std::hex << srcoff << "~" << length << " -> "
11003 << " 0x" << dstoff << "~" << length << std::dec << dendl;
11004 oldo->extent_map.fault_range(db, srcoff, length);
11005 newo->extent_map.fault_range(db, dstoff, length);
11006 _dump_onode(oldo);
11007 _dump_onode(newo);
11008
11009 // hmm, this could go into an ExtentMap::dup() method.
11010 vector<BlobRef> id_to_blob(oldo->extent_map.extent_map.size());
11011 for (auto &e : oldo->extent_map.extent_map) {
11012 e.blob->last_encoded_id = -1;
11013 }
11014 int n = 0;
11015 uint64_t end = srcoff + length;
11016 uint32_t dirty_range_begin = 0;
11017 uint32_t dirty_range_end = 0;
11018 bool src_dirty = false;
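// walk the source lextents overlapping [srcoff, srcoff+length): share each
// underlying blob once, then insert a matching (clipped) extent into the
// destination extent map.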
11019 for (auto ep = oldo->extent_map.seek_lextent(srcoff);
11020 ep != oldo->extent_map.extent_map.end();
11021 ++ep) {
11022 auto& e = *ep;
11023 if (e.logical_offset >= end) {
11024 break;
11025 }
11026 dout(20) << __func__ << " src " << e << dendl;
11027 BlobRef cb;
11028 bool blob_duped = true;
11029 if (e.blob->last_encoded_id >= 0) {
11030 // blob is already duped
11031 cb = id_to_blob[e.blob->last_encoded_id];
11032 blob_duped = false;
11033 } else {
11034 // dup the blob
11035 const bluestore_blob_t& blob = e.blob->get_blob();
11036 // make sure it is shared
11037 if (!blob.is_shared()) {
11038 c->make_blob_shared(_assign_blobid(txc), e.blob);
11039 if (!src_dirty) {
11040 src_dirty = true;
11041 dirty_range_begin = e.logical_offset;
11042 }
11043 assert(e.logical_end() > 0);
11044 // -1 to exclude next potential shard
11045 dirty_range_end = e.logical_end() - 1;
11046 } else {
11047 c->load_shared_blob(e.blob->shared_blob);
11048 }
11049 cb = new Blob();
11050 e.blob->last_encoded_id = n;
11051 id_to_blob[n] = cb;
11052 e.blob->dup(*cb);
11053 // bump the extent refs on the copied blob's extents
11054 for (auto p : blob.get_extents()) {
11055 if (p.is_valid()) {
11056 e.blob->shared_blob->get_ref(p.offset, p.length);
11057 }
11058 }
11059 txc->write_shared_blob(e.blob->shared_blob);
11060 dout(20) << __func__ << " new " << *cb << dendl;
11061 }
11062 // dup extent
11063 int skip_front, skip_back;
11064 if (e.logical_offset < srcoff) {
11065 skip_front = srcoff - e.logical_offset;
11066 } else {
11067 skip_front = 0;
11068 }
11069 if (e.logical_end() > end) {
11070 skip_back = e.logical_end() - end;
11071 } else {
11072 skip_back = 0;
11073 }
11074 Extent *ne = new Extent(e.logical_offset + skip_front + dstoff - srcoff,
11075 e.blob_offset + skip_front,
11076 e.length - skip_front - skip_back, cb);
11077 newo->extent_map.extent_map.insert(*ne);
11078 ne->blob->get_ref(c.get(), ne->blob_offset, ne->length);
11079 // fixme: we may leave parts of new blob unreferenced that could
11080 // be freed (relative to the shared_blob).
11081 txc->statfs_delta.stored() += ne->length;
11082 if (e.blob->get_blob().is_compressed()) {
11083 txc->statfs_delta.compressed_original() += ne->length;
11084 if (blob_duped){
11085 txc->statfs_delta.compressed() +=
11086 cb->get_blob().get_compressed_payload_length();
11087 }
11088 }
11089 dout(20) << __func__ << " dst " << *ne << dendl;
11090 ++n;
11091 }
11092 if (src_dirty) {
11093 oldo->extent_map.dirty_range(dirty_range_begin,
11094 dirty_range_end - dirty_range_begin);
11095 txc->write_onode(oldo);
11096 }
11097 txc->write_onode(newo);
11098
11099 if (dstoff + length > newo->onode.size) {
11100 newo->onode.size = dstoff + length;
11101 }
11102 newo->extent_map.dirty_range(dstoff, length);
11103 _dump_onode(oldo);
11104 _dump_onode(newo);
11105 return 0;
11106 }
11107
11108 int BlueStore::_clone_range(TransContext *txc,
11109 CollectionRef& c,
11110 OnodeRef& oldo,
11111 OnodeRef& newo,
11112 uint64_t srcoff, uint64_t length, uint64_t dstoff)
11113 {
11114 dout(15) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
11115 << newo->oid << " from 0x" << std::hex << srcoff << "~" << length
11116 << " to offset 0x" << dstoff << std::dec << dendl;
11117 int r = 0;
11118
11119 if (srcoff + length >= OBJECT_MAX_SIZE ||
11120 dstoff + length >= OBJECT_MAX_SIZE) {
11121 r = -E2BIG;
11122 goto out;
11123 }
11124 if (srcoff + length > oldo->onode.size) {
11125 r = -EINVAL;
11126 goto out;
11127 }
11128
11129 _assign_nid(txc, newo);
11130
11131 if (length > 0) {
11132 if (cct->_conf->bluestore_clone_cow) {
11133 _do_zero(txc, c, newo, dstoff, length);
11134 _do_clone_range(txc, c, oldo, newo, srcoff, length, dstoff);
11135 } else {
11136 bufferlist bl;
11137 r = _do_read(c.get(), oldo, srcoff, length, bl, 0);
11138 if (r < 0)
11139 goto out;
11140 r = _do_write(txc, c, newo, dstoff, bl.length(), bl, 0);
11141 if (r < 0)
11142 goto out;
11143 }
11144 }
11145
11146 txc->write_onode(newo);
11147 r = 0;
11148
11149 out:
11150 dout(10) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
11151 << newo->oid << " from 0x" << std::hex << srcoff << "~" << length
11152 << " to offset 0x" << dstoff << std::dec
11153 << " = " << r << dendl;
11154 return r;
11155 }
11156
11157 int BlueStore::_rename(TransContext *txc,
11158 CollectionRef& c,
11159 OnodeRef& oldo,
11160 OnodeRef& newo,
11161 const ghobject_t& new_oid)
11162 {
11163 dout(15) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
11164 << new_oid << dendl;
11165 int r;
11166 ghobject_t old_oid = oldo->oid;
11167 mempool::bluestore_cache_other::string new_okey;
11168
11169 if (newo) {
11170 if (newo->exists) {
11171 r = -EEXIST;
11172 goto out;
11173 }
11174 assert(txc->onodes.count(newo) == 0);
11175 }
11176
11177 txc->t->rmkey(PREFIX_OBJ, oldo->key.c_str(), oldo->key.size());
11178
11179 // rewrite shards
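// extent-map shard keys are derived from the object key, so remove the
// keys based on the old name and mark every shard dirty; the shards are
// re-emitted under the new key when the onode is written below.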
11180 {
11181 oldo->extent_map.fault_range(db, 0, oldo->onode.size);
11182 get_object_key(cct, new_oid, &new_okey);
11183 string key;
11184 for (auto &s : oldo->extent_map.shards) {
11185 generate_extent_shard_key_and_apply(oldo->key, s.shard_info->offset, &key,
11186 [&](const string& final_key) {
11187 txc->t->rmkey(PREFIX_OBJ, final_key);
11188 }
11189 );
11190 s.dirty = true;
11191 }
11192 }
11193
11194 newo = oldo;
11195 txc->write_onode(newo);
11196
11197 // this adjusts oldo->{oid,key}, and resets oldo to a fresh, empty
11198 // Onode in the old slot
11199 c->onode_map.rename(oldo, old_oid, new_oid, new_okey);
11200 r = 0;
11201
11202 out:
11203 dout(10) << __func__ << " " << c->cid << " " << old_oid << " -> "
11204 << new_oid << " = " << r << dendl;
11205 return r;
11206 }
11207
11208 // collections
11209
11210 int BlueStore::_create_collection(
11211 TransContext *txc,
11212 const coll_t &cid,
11213 unsigned bits,
11214 CollectionRef *c)
11215 {
11216 dout(15) << __func__ << " " << cid << " bits " << bits << dendl;
11217 int r;
11218 bufferlist bl;
11219
11220 {
11221 RWLock::WLocker l(coll_lock);
11222 if (*c) {
11223 r = -EEXIST;
11224 goto out;
11225 }
11226 c->reset(
11227 new Collection(
11228 this,
11229 cache_shards[cid.hash_to_shard(cache_shards.size())],
11230 cid));
11231 (*c)->cnode.bits = bits;
11232 coll_map[cid] = *c;
11233 }
11234 ::encode((*c)->cnode, bl);
11235 txc->t->set(PREFIX_COLL, stringify(cid), bl);
11236 r = 0;
11237
11238 out:
11239 dout(10) << __func__ << " " << cid << " bits " << bits << " = " << r << dendl;
11240 return r;
11241 }
11242
11243 int BlueStore::_remove_collection(TransContext *txc, const coll_t &cid,
11244 CollectionRef *c)
11245 {
11246 dout(15) << __func__ << " " << cid << dendl;
11247 int r;
11248
11249 {
11250 RWLock::WLocker l(coll_lock);
11251 if (!*c) {
11252 r = -ENOENT;
11253 goto out;
11254 }
11255 size_t nonexistent_count = 0;
11256 assert((*c)->exists);
11257 if ((*c)->onode_map.map_any([&](OnodeRef o) {
11258 if (o->exists) {
11259 dout(10) << __func__ << " " << o->oid << " " << o
11260 << " exists in onode_map" << dendl;
11261 return true;
11262 }
11263 ++nonexistent_count;
11264 return false;
11265 })) {
11266 r = -ENOTEMPTY;
11267 goto out;
11268 }
11269
11270 vector<ghobject_t> ls;
11271 ghobject_t next;
11272 // Enumerate up to nonexistent_count + 1 onodes in the db, then
11273 // check whether all of them are marked as non-existent.
11274 // The check can be bypassed if the returned count exceeds nonexistent_count.
11275 r = _collection_list(c->get(), ghobject_t(), ghobject_t::get_max(),
11276 nonexistent_count + 1, &ls, &next);
11277 if (r >= 0) {
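// An oid counts as existing if it is not in the cache (state unknown,
// so assume it exists) or if its cached onode is marked as existing.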
11278 bool exists = false; //ls.size() > nonexistent_count;
11279 for (auto it = ls.begin(); !exists && it < ls.end(); ++it) {
11280 dout(10) << __func__ << " oid " << *it << dendl;
11281 auto onode = (*c)->onode_map.lookup(*it);
11282 exists = !onode || onode->exists;
11283 if (exists) {
11284 dout(10) << __func__ << " " << *it
11285 << " exists in db" << dendl;
11286 }
11287 }
11288 if (!exists) {
11289 coll_map.erase(cid);
11290 txc->removed_collections.push_back(*c);
11291 (*c)->exists = false;
11292 c->reset();
11293 txc->t->rmkey(PREFIX_COLL, stringify(cid));
11294 r = 0;
11295 } else {
11296 dout(10) << __func__ << " " << cid
11297 << " is non-empty" << dendl;
11298 r = -ENOTEMPTY;
11299 }
11300 }
11301 }
11302
11303 out:
11304 dout(10) << __func__ << " " << cid << " = " << r << dendl;
11305 return r;
11306 }
11307
11308 int BlueStore::_split_collection(TransContext *txc,
11309 CollectionRef& c,
11310 CollectionRef& d,
11311 unsigned bits, int rem)
11312 {
11313 dout(15) << __func__ << " " << c->cid << " to " << d->cid
11314 << " bits " << bits << dendl;
11315 RWLock::WLocker l(c->lock);
11316 RWLock::WLocker l2(d->lock);
11317 int r;
11318
11319 // flush all previous deferred writes on this sequencer. this is a bit
11320 // heavyweight, but we need to make sure all deferred writes complete
11321 // before we split as the new collection's sequencer may need to order
11322 // this after those writes, and we don't bother with the complexity of
11323 // moving those TransContexts over to the new osr.
11324 _osr_drain_preceding(txc);
11325
11326 // move any cached items (onodes and referenced shared blobs) that will
11327 // belong to the child collection post-split. leave everything else behind.
11328 // this may include things that don't strictly belong to the now-smaller
11329 // parent split, but the OSD will always send us a split for every new
11330 // child.
11331
11332 spg_t pgid, dest_pgid;
11333 bool is_pg = c->cid.is_pg(&pgid);
11334 assert(is_pg);
11335 is_pg = d->cid.is_pg(&dest_pgid);
11336 assert(is_pg);
11337
11338 // the destination should initially be empty.
11339 assert(d->onode_map.empty());
11340 assert(d->shared_blob_set.empty());
11341 assert(d->cnode.bits == bits);
11342
11343 c->split_cache(d.get());
11344
11345 // adjust bits. note that this will be redundant for all but the first
11346 // split call for this parent (first child).
11347 c->cnode.bits = bits;
11348 assert(d->cnode.bits == bits);
11349 r = 0;
11350
11351 bufferlist bl;
11352 ::encode(c->cnode, bl);
11353 txc->t->set(PREFIX_COLL, stringify(c->cid), bl);
11354
11355 dout(10) << __func__ << " " << c->cid << " to " << d->cid
11356 << " bits " << bits << " = " << r << dendl;
11357 return r;
11358 }
11359
11360 // DB key/value histogram
11361 #define KEY_SLAB 32
11362 #define VALUE_SLAB 64
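// Keys are bucketed into 32-byte slabs and values into 64-byte slabs;
// get_{key,value}_slab_to_range() renders a slab index as its half-open
// byte range, e.g. key slab 2 -> "[64,96)".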
11363
11364 const string prefix_onode = "o";
11365 const string prefix_onode_shard = "x";
11366 const string prefix_other = "Z";
11367
11368 int BlueStore::DBHistogram::get_key_slab(size_t sz)
11369 {
11370 return (sz/KEY_SLAB);
11371 }
11372
11373 string BlueStore::DBHistogram::get_key_slab_to_range(int slab)
11374 {
11375 int lower_bound = slab * KEY_SLAB;
11376 int upper_bound = (slab + 1) * KEY_SLAB;
11377 string ret = "[" + stringify(lower_bound) + "," + stringify(upper_bound) + ")";
11378 return ret;
11379 }
11380
11381 int BlueStore::DBHistogram::get_value_slab(size_t sz)
11382 {
11383 return (sz/VALUE_SLAB);
11384 }
11385
11386 string BlueStore::DBHistogram::get_value_slab_to_range(int slab)
11387 {
11388 int lower_bound = slab * VALUE_SLAB;
11389 int upper_bound = (slab + 1) * VALUE_SLAB;
11390 string ret = "[" + stringify(lower_bound) + "," + stringify(upper_bound) + ")";
11391 return ret;
11392 }
11393
11394 void BlueStore::DBHistogram::update_hist_entry(map<string, map<int, struct key_dist> > &key_hist,
11395 const string &prefix, size_t key_size, size_t value_size)
11396 {
11397 uint32_t key_slab = get_key_slab(key_size);
11398 uint32_t value_slab = get_value_slab(value_size);
11399 key_hist[prefix][key_slab].count++;
11400 key_hist[prefix][key_slab].max_len = MAX(key_size, key_hist[prefix][key_slab].max_len);
11401 key_hist[prefix][key_slab].val_map[value_slab].count++;
11402 key_hist[prefix][key_slab].val_map[value_slab].max_len =
11403 MAX(value_size, key_hist[prefix][key_slab].val_map[value_slab].max_len);
11404 }
11405
11406 void BlueStore::DBHistogram::dump(Formatter *f)
11407 {
11408 f->open_object_section("rocksdb_value_distribution");
11409 for (auto i : value_hist) {
11410 f->dump_unsigned(get_value_slab_to_range(i.first).data(), i.second);
11411 }
11412 f->close_section();
11413
11414 f->open_object_section("rocksdb_key_value_histogram");
11415 for (auto i : key_hist) {
11416 f->dump_string("prefix", i.first);
11417 f->open_object_section("key_hist");
11418 for (auto k : i.second) {
11419 f->dump_unsigned(get_key_slab_to_range(k.first).data(), k.second.count);
11420 f->dump_unsigned("max_len", k.second.max_len);
11421 f->open_object_section("value_hist");
11422 for (auto j : k.second.val_map) {
11423 f->dump_unsigned(get_value_slab_to_range(j.first).data(), j.second.count);
11424 f->dump_unsigned("max_len", j.second.max_len);
11425 }
11426 f->close_section();
11427 }
11428 f->close_section();
11429 }
11430 f->close_section();
11431 }
11432
11433 // Iterates through the db and collects the stats
11434 void BlueStore::generate_db_histogram(Formatter *f)
11435 {
11436 //globals
11437 uint64_t num_onodes = 0;
11438 uint64_t num_shards = 0;
11439 uint64_t num_super = 0;
11440 uint64_t num_coll = 0;
11441 uint64_t num_omap = 0;
11442 uint64_t num_deferred = 0;
11443 uint64_t num_alloc = 0;
11444 uint64_t num_stat = 0;
11445 uint64_t num_others = 0;
11446 uint64_t num_shared_shards = 0;
11447 size_t max_key_size = 0, max_value_size = 0;
11448 uint64_t total_key_size = 0, total_value_size = 0;
11449 size_t key_size = 0, value_size = 0;
11450 DBHistogram hist;
11451
11452 utime_t start = ceph_clock_now();
11453
11454 KeyValueDB::WholeSpaceIterator iter = db->get_iterator();
11455 iter->seek_to_first();
11456 while (iter->valid()) {
11457 dout(30) << __func__ << " Key: " << iter->key() << dendl;
11458 key_size = iter->key_size();
11459 value_size = iter->value_size();
11460 hist.value_hist[hist.get_value_slab(value_size)]++;
11461 max_key_size = MAX(max_key_size, key_size);
11462 max_value_size = MAX(max_value_size, value_size);
11463 total_key_size += key_size;
11464 total_value_size += value_size;
11465
11466 pair<string,string> key(iter->raw_key());
11467
11468 if (key.first == PREFIX_SUPER) {
11469 hist.update_hist_entry(hist.key_hist, PREFIX_SUPER, key_size, value_size);
11470 num_super++;
11471 } else if (key.first == PREFIX_STAT) {
11472 hist.update_hist_entry(hist.key_hist, PREFIX_STAT, key_size, value_size);
11473 num_stat++;
11474 } else if (key.first == PREFIX_COLL) {
11475 hist.update_hist_entry(hist.key_hist, PREFIX_COLL, key_size, value_size);
11476 num_coll++;
11477 } else if (key.first == PREFIX_OBJ) {
11478 if (key.second.back() == ONODE_KEY_SUFFIX) {
11479 hist.update_hist_entry(hist.key_hist, prefix_onode, key_size, value_size);
11480 num_onodes++;
11481 } else {
11482 hist.update_hist_entry(hist.key_hist, prefix_onode_shard, key_size, value_size);
11483 num_shards++;
11484 }
11485 } else if (key.first == PREFIX_OMAP) {
11486 hist.update_hist_entry(hist.key_hist, PREFIX_OMAP, key_size, value_size);
11487 num_omap++;
11488 } else if (key.first == PREFIX_DEFERRED) {
11489 hist.update_hist_entry(hist.key_hist, PREFIX_DEFERRED, key_size, value_size);
11490 num_deferred++;
11491 } else if (key.first == PREFIX_ALLOC || key.first == "b" ) {
11492 hist.update_hist_entry(hist.key_hist, PREFIX_ALLOC, key_size, value_size);
11493 num_alloc++;
11494 } else if (key.first == PREFIX_SHARED_BLOB) {
11495 hist.update_hist_entry(hist.key_hist, PREFIX_SHARED_BLOB, key_size, value_size);
11496 num_shared_shards++;
11497 } else {
11498 hist.update_hist_entry(hist.key_hist, prefix_other, key_size, value_size);
11499 num_others++;
11500 }
11501 iter->next();
11502 }
11503
11504 utime_t duration = ceph_clock_now() - start;
11505 f->open_object_section("rocksdb_key_value_stats");
11506 f->dump_unsigned("num_onodes", num_onodes);
11507 f->dump_unsigned("num_shards", num_shards);
11508 f->dump_unsigned("num_super", num_super);
11509 f->dump_unsigned("num_coll", num_coll);
11510 f->dump_unsigned("num_omap", num_omap);
11511 f->dump_unsigned("num_deferred", num_deferred);
11512 f->dump_unsigned("num_alloc", num_alloc);
11513 f->dump_unsigned("num_stat", num_stat);
11514 f->dump_unsigned("num_shared_shards", num_shared_shards);
11515 f->dump_unsigned("num_others", num_others);
11516 f->dump_unsigned("max_key_size", max_key_size);
11517 f->dump_unsigned("max_value_size", max_value_size);
11518 f->dump_unsigned("total_key_size", total_key_size);
11519 f->dump_unsigned("total_value_size", total_value_size);
11520 f->close_section();
11521
11522 hist.dump(f);
11523
11524 dout(20) << __func__ << " finished in " << duration << " seconds" << dendl;
11525
11526 }
11527
11528 void BlueStore::_flush_cache()
11529 {
11530 dout(10) << __func__ << dendl;
11531 for (auto i : cache_shards) {
11532 i->trim_all();
11533 assert(i->empty());
11534 }
11535 for (auto& p : coll_map) {
11536 assert(p.second->onode_map.empty());
11537 assert(p.second->shared_blob_set.empty());
11538 }
11539 coll_map.clear();
11540 }
11541
11542 // For external callers.
11543 // Unlike _flush_cache(), this is best-effort: we don't care if some
11544 // pinned onodes/data are still left in the cache after this call
11545 // completes.
11546 void BlueStore::flush_cache()
11547 {
11548 dout(10) << __func__ << dendl;
11549 for (auto i : cache_shards) {
11550 i->trim_all();
11551 }
11552 }
11553
11554 void BlueStore::_apply_padding(uint64_t head_pad,
11555 uint64_t tail_pad,
11556 bufferlist& padded)
11557 {
11558 if (head_pad) {
11559 padded.prepend_zero(head_pad);
11560 }
11561 if (tail_pad) {
11562 padded.append_zero(tail_pad);
11563 }
11564 if (head_pad || tail_pad) {
11565 dout(20) << __func__ << " can pad head 0x" << std::hex << head_pad
11566 << " tail 0x" << tail_pad << std::dec << dendl;
11567 logger->inc(l_bluestore_write_pad_bytes, head_pad + tail_pad);
11568 }
11569 }
11570
11571 // ===========================================