ceph/src/os/bluestore/BlueStore.cc (v12.1.1)
1 // vim: ts=8 sw=2 smarttab
2 /*
3 * Ceph - scalable distributed file system
4 *
5 * Copyright (C) 2014 Red Hat
6 *
7 * This is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License version 2.1, as published by the Free Software
10 * Foundation. See file COPYING.
11 *
12 */
13
14 #include <unistd.h>
15 #include <stdlib.h>
16 #include <sys/types.h>
17 #include <sys/stat.h>
18 #include <fcntl.h>
19
20 #include "include/cpp-btree/btree_set.h"
21
22 #include "BlueStore.h"
23 #include "os/kv.h"
24 #include "include/compat.h"
25 #include "include/intarith.h"
26 #include "include/stringify.h"
27 #include "common/errno.h"
28 #include "common/safe_io.h"
29 #include "Allocator.h"
30 #include "FreelistManager.h"
31 #include "BlueFS.h"
32 #include "BlueRocksEnv.h"
33 #include "auth/Crypto.h"
34 #include "common/EventTrace.h"
35
36 #define dout_context cct
37 #define dout_subsys ceph_subsys_bluestore
38
39 using bid_t = decltype(BlueStore::Blob::id);
40
41 // bluestore_cache_onode
42 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Onode, bluestore_onode,
43 bluestore_cache_onode);
44
45 // bluestore_cache_other
46 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Buffer, bluestore_buffer,
47 bluestore_cache_other);
48 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Extent, bluestore_extent,
49 bluestore_cache_other);
50 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Blob, bluestore_blob,
51 bluestore_cache_other);
52 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::SharedBlob, bluestore_shared_blob,
53 bluestore_cache_other);
54
55 // bluestore_txc
56 MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::TransContext, bluestore_transcontext,
57 bluestore_txc);
58
59
60 // kv store prefixes
61 const string PREFIX_SUPER = "S"; // field -> value
62 const string PREFIX_STAT = "T"; // field -> value(int64 array)
63 const string PREFIX_COLL = "C"; // collection name -> cnode_t
64 const string PREFIX_OBJ = "O"; // object name -> onode_t
65 const string PREFIX_OMAP = "M"; // u64 + keyname -> value
66 const string PREFIX_DEFERRED = "L"; // id -> deferred_transaction_t
67 const string PREFIX_ALLOC = "B"; // u64 offset -> u64 length (freelist)
68 const string PREFIX_SHARED_BLOB = "X"; // u64 offset -> shared_blob_t
69
70 // write a label in the first block. always use this size. note that
71 // bluefs makes a matching assumption about the location of its
72 // superblock (always the second block of the device).
73 #define BDEV_LABEL_BLOCK_SIZE 4096
74
75 // reserve: label (4k) + bluefs super (4k), which means we start at 8k.
76 #define SUPER_RESERVED 8192
77
78 #define OBJECT_MAX_SIZE 0xffffffff // 32 bits
79
80
81 /*
82 * extent map blob encoding
83 *
84 * we use the low bits of the blobid field to indicate some common scenarios
85 * and spanning vs local ids. See ExtentMap::{encode,decode}_some().
86 */
87 #define BLOBID_FLAG_CONTIGUOUS 0x1 // this extent starts at end of previous
88 #define BLOBID_FLAG_ZEROOFFSET 0x2 // blob_offset is 0
89 #define BLOBID_FLAG_SAMELENGTH 0x4 // length matches previous extent
90 #define BLOBID_FLAG_SPANNING 0x8 // has spanning blob id
91 #define BLOBID_SHIFT_BITS 4
92
93 /*
94 * object name key structure
95 *
96 * encoded u8: shard + 2^7 (so that it sorts properly)
97 * encoded u64: poolid + 2^63 (so that it sorts properly)
98 * encoded u32: hash (bit reversed)
99 *
100 * escaped string: namespace
101 *
102 * escaped string: key or object name
103 * 1 char: '<', '=', or '>'. if =, then object key == object name, and
104 * we are done. otherwise, we are followed by the object name.
105 * escaped string: object name (unless '=' above)
106 *
107 * encoded u64: snap
108 * encoded u64: generation
109 * 'o'
110 */
111 #define ONODE_KEY_SUFFIX 'o'
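// Worked example (editor's illustration, values hypothetical): an object in
// pool 1 with no namespace and no key, name "foo", produces a key of roughly
//   <shard + 0x80> <pool 1 + 2^63> <bitwise hash key>
//   "!"            (escaped empty namespace; '!' terminator)
//   "foo!" "="     (escaped name; '=' because object key == object name)
//   <snap> <generation> 'o'
// where the fixed-width integers are encoded so that lexicographic key order
// matches numeric order (see _key_encode_u32/u64 in os/kv.h).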
112
113 /*
114 * extent shard key
115 *
116 * object prefix key
117 * u32
118 * 'x'
119 */
120 #define EXTENT_SHARD_KEY_SUFFIX 'x'
121
122 /*
123 * string encoding in the key
124 *
125 * The key string needs to lexicographically sort the same way that
126 * ghobject_t does. We do this by escaping anything <= '#' with '#'
127 * plus a two-digit hex string, and anything >= '~' with '~' plus the
128 * two hex digits.
129 *
130 * We use ! as a terminator for strings; this works because it is < #
131 * and will get escaped if it is present in the string.
132 *
133 */
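// Worked example (editor's addition): append_escaped("a#b", &key) appends
// the bytes 'a' '#' '2' '3' 'b' '!' -- '#' (0x23) is <= '#' and is escaped
// as "#23", ordinary characters pass through unchanged, and '!' terminates
// the string. decode_escaped() reverses this, stopping at the '!'.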
134 template<typename S>
135 static void append_escaped(const string &in, S *out)
136 {
137 char hexbyte[in.length() * 3 + 1];
138 char* ptr = &hexbyte[0];
139 for (string::const_iterator i = in.begin(); i != in.end(); ++i) {
140 if (*i <= '#') {
141 *ptr++ = '#';
142 *ptr++ = "0123456789abcdef"[(*i >> 4) & 0x0f];
143 *ptr++ = "0123456789abcdef"[*i & 0x0f];
144 } else if (*i >= '~') {
145 *ptr++ = '~';
146 *ptr++ = "0123456789abcdef"[(*i >> 4) & 0x0f];
147 *ptr++ = "0123456789abcdef"[*i & 0x0f];
148 } else {
149 *ptr++ = *i;
150 }
151 }
152 *ptr++ = '!';
153 out->append(hexbyte, ptr - &hexbyte[0]);
154 }
155
156 inline unsigned h2i(char c)
157 {
158 if ((c >= '0') && (c <= '9')) {
159 return c - 0x30;
160 } else if ((c >= 'a') && (c <= 'f')) {
161 return c - 'a' + 10;
162 } else if ((c >= 'A') && (c <= 'F')) {
163 return c - 'A' + 10;
164 } else {
165 return 256; // make it always larger than 255
166 }
167 }
168
169 static int decode_escaped(const char *p, string *out)
170 {
171 char buff[256];
172 char* ptr = &buff[0];
173 char* max = &buff[252];
174 const char *orig_p = p;
175 while (*p && *p != '!') {
176 if (*p == '#' || *p == '~') {
177 unsigned hex = 0;
178 p++;
179 hex = h2i(*p++) << 4;
180 if (hex > 255) {
181 return -EINVAL;
182 }
183 hex |= h2i(*p++);
184 if (hex > 255) {
185 return -EINVAL;
186 }
187 *ptr++ = hex;
188 } else {
189 *ptr++ = *p++;
190 }
191 if (ptr > max) {
192 out->append(buff, ptr-buff);
193 ptr = &buff[0];
194 }
195 }
196 if (ptr != buff) {
197 out->append(buff, ptr-buff);
198 }
199 return p - orig_p;
200 }
201
202 // some things we encode in binary (as le32 or le64); print the
203 // resulting key strings nicely
204 template<typename S>
205 static string pretty_binary_string(const S& in)
206 {
207 char buf[10];
208 string out;
209 out.reserve(in.length() * 3);
210 enum { NONE, HEX, STRING } mode = NONE;
211 unsigned from = 0, i;
212 for (i=0; i < in.length(); ++i) {
213 if ((in[i] < 32 || (unsigned char)in[i] > 126) ||
214 (mode == HEX && in.length() - i >= 4 &&
215 ((in[i] < 32 || (unsigned char)in[i] > 126) ||
216 (in[i+1] < 32 || (unsigned char)in[i+1] > 126) ||
217 (in[i+2] < 32 || (unsigned char)in[i+2] > 126) ||
218 (in[i+3] < 32 || (unsigned char)in[i+3] > 126)))) {
219 if (mode == STRING) {
220 out.append(in.c_str() + from, i - from);
221 out.push_back('\'');
222 }
223 if (mode != HEX) {
224 out.append("0x");
225 mode = HEX;
226 }
227 if (in.length() - i >= 4) {
228 // print a whole u32 at once
229 snprintf(buf, sizeof(buf), "%08x",
230 (uint32_t)(((unsigned char)in[i] << 24) |
231 ((unsigned char)in[i+1] << 16) |
232 ((unsigned char)in[i+2] << 8) |
233 ((unsigned char)in[i+3] << 0)));
234 i += 3;
235 } else {
236 snprintf(buf, sizeof(buf), "%02x", (int)(unsigned char)in[i]);
237 }
238 out.append(buf);
239 } else {
240 if (mode != STRING) {
241 out.push_back('\'');
242 mode = STRING;
243 from = i;
244 }
245 }
246 }
247 if (mode == STRING) {
248 out.append(in.c_str() + from, i - from);
249 out.push_back('\'');
250 }
251 return out;
252 }
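// Example output (editor's illustration): for the six input bytes
// 0x00 0x01 0x02 0x03 'a' 'b', pretty_binary_string() returns
// "0x00010203'ab'" -- runs of non-printable bytes are rendered as hex
// (a whole u32 at a time when possible) and printable runs are quoted.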
253
254 template<typename T>
255 static void _key_encode_shard(shard_id_t shard, T *key)
256 {
257 key->push_back((char)((uint8_t)shard.id + (uint8_t)0x80));
258 }
259
260 static const char *_key_decode_shard(const char *key, shard_id_t *pshard)
261 {
262 pshard->id = (uint8_t)*key - (uint8_t)0x80;
263 return key + 1;
264 }
265
266 static void get_coll_key_range(const coll_t& cid, int bits,
267 string *temp_start, string *temp_end,
268 string *start, string *end)
269 {
270 temp_start->clear();
271 temp_end->clear();
272 start->clear();
273 end->clear();
274
275 spg_t pgid;
276 if (cid.is_pg(&pgid)) {
277 _key_encode_shard(pgid.shard, start);
278 *temp_start = *start;
279
280 _key_encode_u64(pgid.pool() + 0x8000000000000000ull, start);
281 _key_encode_u64((-2ll - pgid.pool()) + 0x8000000000000000ull, temp_start);
282
283 *end = *start;
284 *temp_end = *temp_start;
285
286 uint32_t reverse_hash = hobject_t::_reverse_bits(pgid.ps());
287 _key_encode_u32(reverse_hash, start);
288 _key_encode_u32(reverse_hash, temp_start);
289
290 uint64_t end_hash = reverse_hash + (1ull << (32 - bits));
291 if (end_hash > 0xffffffffull)
292 end_hash = 0xffffffffull;
293
294 _key_encode_u32(end_hash, end);
295 _key_encode_u32(end_hash, temp_end);
296 } else {
297 _key_encode_shard(shard_id_t::NO_SHARD, start);
298 _key_encode_u64(-1ull + 0x8000000000000000ull, start);
299 *end = *start;
300 _key_encode_u32(0, start);
301 _key_encode_u32(0xffffffff, end);
302
303 // no separate temp section
304 *temp_start = *end;
305 *temp_end = *end;
306 }
307 }
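// Worked example (editor's illustration): for a PG in pool 1, normal object
// keys start at pool id 1 + 2^63 = 0x8000000000000001, while temp-object
// keys use pool id (-2 - 1) + 2^63 = 0x7ffffffffffffffd, so for the same
// shard the temp range sorts entirely before the normal range.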
308
309 static void get_shared_blob_key(uint64_t sbid, string *key)
310 {
311 key->clear();
312 _key_encode_u64(sbid, key);
313 }
314
315 static int get_key_shared_blob(const string& key, uint64_t *sbid)
316 {
317 const char *p = key.c_str();
318 if (key.length() < sizeof(uint64_t))
319 return -1;
320 _key_decode_u64(p, sbid);
321 return 0;
322 }
323
324 template<typename S>
325 static int get_key_object(const S& key, ghobject_t *oid)
326 {
327 int r;
328 const char *p = key.c_str();
329
330 if (key.length() < 1 + 8 + 4)
331 return -1;
332 p = _key_decode_shard(p, &oid->shard_id);
333
334 uint64_t pool;
335 p = _key_decode_u64(p, &pool);
336 oid->hobj.pool = pool - 0x8000000000000000ull;
337
338 unsigned hash;
339 p = _key_decode_u32(p, &hash);
340
341 oid->hobj.set_bitwise_key_u32(hash);
342
343 r = decode_escaped(p, &oid->hobj.nspace);
344 if (r < 0)
345 return -2;
346 p += r + 1;
347
348 string k;
349 r = decode_escaped(p, &k);
350 if (r < 0)
351 return -3;
352 p += r + 1;
353 if (*p == '=') {
354 // no key
355 ++p;
356 oid->hobj.oid.name = k;
357 } else if (*p == '<' || *p == '>') {
358 // key + name
359 ++p;
360 r = decode_escaped(p, &oid->hobj.oid.name);
361 if (r < 0)
362 return -5;
363 p += r + 1;
364 oid->hobj.set_key(k);
365 } else {
366 // malformed
367 return -6;
368 }
369
370 p = _key_decode_u64(p, &oid->hobj.snap.val);
371 p = _key_decode_u64(p, &oid->generation);
372
373 if (*p != ONODE_KEY_SUFFIX) {
374 return -7;
375 }
376 p++;
377 if (*p) {
378 // if we get something other than a null terminator here,
379 // something is wrong.
380 return -8;
381 }
382
383 return 0;
384 }
385
386 template<typename S>
387 static void get_object_key(CephContext *cct, const ghobject_t& oid, S *key)
388 {
389 key->clear();
390
391 size_t max_len = 1 + 8 + 4 +
392 (oid.hobj.nspace.length() * 3 + 1) +
393 (oid.hobj.get_key().length() * 3 + 1) +
394 1 + // for '<', '=', or '>'
395 (oid.hobj.oid.name.length() * 3 + 1) +
396 8 + 8 + 1;
397 key->reserve(max_len);
398
399 _key_encode_shard(oid.shard_id, key);
400 _key_encode_u64(oid.hobj.pool + 0x8000000000000000ull, key);
401 _key_encode_u32(oid.hobj.get_bitwise_key_u32(), key);
402
403 append_escaped(oid.hobj.nspace, key);
404
405 if (oid.hobj.get_key().length()) {
406 // is a key... could be < = or >.
407 append_escaped(oid.hobj.get_key(), key);
408 // (ASCII chars < = and > sort in that order, yay)
409 int r = oid.hobj.get_key().compare(oid.hobj.oid.name);
410 if (r) {
411 key->append(r > 0 ? ">" : "<");
412 append_escaped(oid.hobj.oid.name, key);
413 } else {
414 // same as no key
415 key->append("=");
416 }
417 } else {
418 // no key
419 append_escaped(oid.hobj.oid.name, key);
420 key->append("=");
421 }
422
423 _key_encode_u64(oid.hobj.snap, key);
424 _key_encode_u64(oid.generation, key);
425
426 key->push_back(ONODE_KEY_SUFFIX);
427
428 // sanity check
429 if (true) {
430 ghobject_t t;
431 int r = get_key_object(*key, &t);
432 if (r || t != oid) {
433 derr << " r " << r << dendl;
434 derr << "key " << pretty_binary_string(*key) << dendl;
435 derr << "oid " << oid << dendl;
436 derr << " t " << t << dendl;
437 assert(r == 0 && t == oid);
438 }
439 }
440 }
441
442
443 // extent shard keys are the onode key, plus a u32, plus 'x'. the trailing
444 // char lets us quickly test whether it is a shard key without decoding any
445 // of the prefix bytes.
446 template<typename S>
447 static void get_extent_shard_key(const S& onode_key, uint32_t offset,
448 string *key)
449 {
450 key->clear();
451 key->reserve(onode_key.length() + 4 + 1);
452 key->append(onode_key.c_str(), onode_key.size());
453 _key_encode_u32(offset, key);
454 key->push_back(EXTENT_SHARD_KEY_SUFFIX);
455 }
456
457 static void rewrite_extent_shard_key(uint32_t offset, string *key)
458 {
459 assert(key->size() > sizeof(uint32_t) + 1);
460 assert(*key->rbegin() == EXTENT_SHARD_KEY_SUFFIX);
461 _key_encode_u32(offset, key->size() - sizeof(uint32_t) - 1, key);
462 }
463
464 template<typename S>
465 static void generate_extent_shard_key_and_apply(
466 const S& onode_key,
467 uint32_t offset,
468 string *key,
469 std::function<void(const string& final_key)> apply)
470 {
471 if (key->empty()) { // make full key
472 assert(!onode_key.empty());
473 get_extent_shard_key(onode_key, offset, key);
474 } else {
475 rewrite_extent_shard_key(offset, key);
476 }
477 apply(*key);
478 }
479
480 int get_key_extent_shard(const string& key, string *onode_key, uint32_t *offset)
481 {
482 assert(key.size() > sizeof(uint32_t) + 1);
483 assert(*key.rbegin() == EXTENT_SHARD_KEY_SUFFIX);
484 int okey_len = key.size() - sizeof(uint32_t) - 1;
485 *onode_key = key.substr(0, okey_len);
486 const char *p = key.data() + okey_len;
487 _key_decode_u32(p, offset);
488 return 0;
489 }
490
491 static bool is_extent_shard_key(const string& key)
492 {
493 return *key.rbegin() == EXTENT_SHARD_KEY_SUFFIX;
494 }
495
496 // '-' < '.' < '~'
497 static void get_omap_header(uint64_t id, string *out)
498 {
499 _key_encode_u64(id, out);
500 out->push_back('-');
501 }
502
503 // hmm, I don't think there's any need to escape the user key since we
504 // have a clean prefix.
505 static void get_omap_key(uint64_t id, const string& key, string *out)
506 {
507 _key_encode_u64(id, out);
508 out->push_back('.');
509 out->append(key);
510 }
511
512 static void rewrite_omap_key(uint64_t id, string old, string *out)
513 {
514 _key_encode_u64(id, out);
515 out->append(old.c_str() + out->length(), old.size() - out->length());
516 }
517
518 static void decode_omap_key(const string& key, string *user_key)
519 {
520 *user_key = key.substr(sizeof(uint64_t) + 1);
521 }
522
523 static void get_omap_tail(uint64_t id, string *out)
524 {
525 _key_encode_u64(id, out);
526 out->push_back('~');
527 }
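// Layout example (editor's addition): for an onode with omap id 42, the
// header key is <42>'-', each user key "k" is stored as <42>'.'"k", and the
// iteration upper bound is <42>'~'. Since '-' < '.' < '~' in ASCII, a range
// scan from header to tail visits exactly that object's omap data.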
528
529 static void get_deferred_key(uint64_t seq, string *out)
530 {
531 _key_encode_u64(seq, out);
532 }
533
534
535 // merge operators
536
537 struct Int64ArrayMergeOperator : public KeyValueDB::MergeOperator {
538 void merge_nonexistent(
539 const char *rdata, size_t rlen, std::string *new_value) override {
540 *new_value = std::string(rdata, rlen);
541 }
542 void merge(
543 const char *ldata, size_t llen,
544 const char *rdata, size_t rlen,
545 std::string *new_value) override {
546 assert(llen == rlen);
547 assert((rlen % 8) == 0);
548 new_value->resize(rlen);
549 const __le64* lv = (const __le64*)ldata;
550 const __le64* rv = (const __le64*)rdata;
551 __le64* nv = &(__le64&)new_value->at(0);
552 for (size_t i = 0; i < rlen >> 3; ++i) {
553 nv[i] = lv[i] + rv[i];
554 }
555 }
556 // We use each operator name and each prefix to construct the
557 // overall RocksDB operator name for consistency check at open time.
558 string name() const override {
559 return "int64_array";
560 }
561 };
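// Usage sketch (editor's addition): both operands are treated as arrays of
// little-endian int64 and added element-wise, so merging the value {10, -2}
// into an existing {5, 7} yields {15, 5}. This lets counters (such as the
// int64 arrays stored under PREFIX_STAT) be updated without a
// read-modify-write cycle.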
562
563
564 // Buffer
565
566 ostream& operator<<(ostream& out, const BlueStore::Buffer& b)
567 {
568 out << "buffer(" << &b << " space " << b.space << " 0x" << std::hex
569 << b.offset << "~" << b.length << std::dec
570 << " " << BlueStore::Buffer::get_state_name(b.state);
571 if (b.flags)
572 out << " " << BlueStore::Buffer::get_flag_name(b.flags);
573 return out << ")";
574 }
575
576 // Garbage Collector
577
578 void BlueStore::GarbageCollector::process_protrusive_extents(
579 const BlueStore::ExtentMap& extent_map,
580 uint64_t start_offset,
581 uint64_t end_offset,
582 uint64_t start_touch_offset,
583 uint64_t end_touch_offset,
584 uint64_t min_alloc_size)
585 {
586 assert(start_offset <= start_touch_offset && end_offset >= end_touch_offset);
587
588 uint64_t lookup_start_offset = P2ALIGN(start_offset, min_alloc_size);
589 uint64_t lookup_end_offset = ROUND_UP_TO(end_offset, min_alloc_size);
590
591 dout(30) << __func__ << " (hex): [" << std::hex
592 << lookup_start_offset << ", " << lookup_end_offset
593 << ")" << std::dec << dendl;
594
595 for (auto it = extent_map.seek_lextent(lookup_start_offset);
596 it != extent_map.extent_map.end() &&
597 it->logical_offset < lookup_end_offset;
598 ++it) {
599 uint64_t alloc_unit_start = it->logical_offset / min_alloc_size;
600 uint64_t alloc_unit_end = (it->logical_end() - 1) / min_alloc_size;
601
602 dout(30) << __func__ << " " << *it
603 << "alloc_units: " << alloc_unit_start << ".." << alloc_unit_end
604 << dendl;
605
606 Blob* b = it->blob.get();
607
608 if (it->logical_offset >= start_touch_offset &&
609 it->logical_end() <= end_touch_offset) {
610 // Process extents within the range affected by
611 // the current write request.
612 // Need to take into account if existing extents
613 // can be merged with them (uncompressed case)
614 if (!b->get_blob().is_compressed()) {
615 if (blob_info_counted && used_alloc_unit == alloc_unit_start) {
616 --blob_info_counted->expected_allocations; // don't need to allocate
617 // new AU for compressed
618 // data since another
619 // collocated uncompressed
620 // blob already exists
621 dout(30) << __func__ << " --expected:"
622 << alloc_unit_start << dendl;
623 }
624 used_alloc_unit = alloc_unit_end;
625 blob_info_counted = nullptr;
626 }
627 } else if (b->get_blob().is_compressed()) {
628
629 // additionally we take compressed blobs that were not impacted
630 // by the write into account too
631 BlobInfo& bi =
632 affected_blobs.emplace(
633 b, BlobInfo(b->get_referenced_bytes())).first->second;
634
635 int adjust =
636 (used_alloc_unit && used_alloc_unit == alloc_unit_start) ? 0 : 1;
637 bi.expected_allocations += alloc_unit_end - alloc_unit_start + adjust;
638 dout(30) << __func__ << " expected_allocations="
639 << bi.expected_allocations << " end_au:"
640 << alloc_unit_end << dendl;
641
642 blob_info_counted = &bi;
643 used_alloc_unit = alloc_unit_end;
644
645 assert(it->length <= bi.referenced_bytes);
646 bi.referenced_bytes -= it->length;
647 dout(30) << __func__ << " affected_blob:" << *b
648 << " unref 0x" << std::hex << it->length
649 << " referenced = 0x" << bi.referenced_bytes
650 << std::dec << dendl;
651 // NOTE: we can't move specific blob to resulting GC list here
652 // when reference counter == 0 since subsequent extents might
653 // decrement its expected_allocation.
654 // Hence need to enumerate all the extents first.
655 if (!bi.collect_candidate) {
656 bi.first_lextent = it;
657 bi.collect_candidate = true;
658 }
659 bi.last_lextent = it;
660 } else {
661 if (blob_info_counted && used_alloc_unit == alloc_unit_start) {
662 // don't need to allocate new AU for compressed data since another
663 // collocated uncompressed blob already exists
664 --blob_info_counted->expected_allocations;
665 dout(30) << __func__ << " --expected_allocations:"
666 << alloc_unit_start << dendl;
667 }
668 used_alloc_unit = alloc_unit_end;
669 blob_info_counted = nullptr;
670 }
671 }
672
673 for (auto b_it = affected_blobs.begin();
674 b_it != affected_blobs.end();
675 ++b_it) {
676 Blob* b = b_it->first;
677 BlobInfo& bi = b_it->second;
678 if (bi.referenced_bytes == 0) {
679 uint64_t len_on_disk = b_it->first->get_blob().get_ondisk_length();
680 int64_t blob_expected_for_release =
681 ROUND_UP_TO(len_on_disk, min_alloc_size) / min_alloc_size;
682
683 dout(30) << __func__ << " " << *(b_it->first)
684 << " expected4release=" << blob_expected_for_release
685 << " expected_allocations=" << bi.expected_allocations
686 << dendl;
687 int64_t benefit = blob_expected_for_release - bi.expected_allocations;
688 if (benefit >= g_conf->bluestore_gc_enable_blob_threshold) {
689 if (bi.collect_candidate) {
690 auto it = bi.first_lextent;
691 bool bExit = false;
692 do {
693 if (it->blob.get() == b) {
694 extents_to_collect.emplace_back(it->logical_offset, it->length);
695 }
696 bExit = it == bi.last_lextent;
697 ++it;
698 } while (!bExit);
699 }
700 expected_for_release += blob_expected_for_release;
701 expected_allocations += bi.expected_allocations;
702 }
703 }
704 }
705 }
706
707 int64_t BlueStore::GarbageCollector::estimate(
708 uint64_t start_offset,
709 uint64_t length,
710 const BlueStore::ExtentMap& extent_map,
711 const BlueStore::old_extent_map_t& old_extents,
712 uint64_t min_alloc_size)
713 {
714
715 affected_blobs.clear();
716 extents_to_collect.clear();
717 used_alloc_unit = boost::optional<uint64_t >();
718 blob_info_counted = nullptr;
719
720 gc_start_offset = start_offset;
721 gc_end_offset = start_offset + length;
722
723 uint64_t end_offset = start_offset + length;
724
725 for (auto it = old_extents.begin(); it != old_extents.end(); ++it) {
726 Blob* b = it->e.blob.get();
727 if (b->get_blob().is_compressed()) {
728
729 // update gc_start_offset/gc_end_offset if needed
730 gc_start_offset = min(gc_start_offset, (uint64_t)it->e.blob_start());
731 gc_end_offset = max(gc_end_offset, (uint64_t)it->e.blob_end());
732
733 auto o = it->e.logical_offset;
734 auto l = it->e.length;
735
736 uint64_t ref_bytes = b->get_referenced_bytes();
737 // micro optimization to bypass blobs that have no more references
738 if (ref_bytes != 0) {
739 dout(30) << __func__ << " affected_blob:" << *b
740 << " unref 0x" << std::hex << o << "~" << l
741 << std::dec << dendl;
742 affected_blobs.emplace(b, BlobInfo(ref_bytes));
743 }
744 }
745 }
746 dout(30) << __func__ << " gc range(hex): [" << std::hex
747 << gc_start_offset << ", " << gc_end_offset
748 << ")" << std::dec << dendl;
749
750 // enumerate preceding extents to check if they reference affected blobs
751 if (gc_start_offset < start_offset || gc_end_offset > end_offset) {
752 process_protrusive_extents(extent_map,
753 gc_start_offset,
754 gc_end_offset,
755 start_offset,
756 end_offset,
757 min_alloc_size);
758 }
759 return expected_for_release - expected_allocations;
760 }
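// Worked example (editor's illustration, numbers hypothetical): a compressed
// blob occupying 64K on disk with min_alloc_size 16K would release
// 64K / 16K = 4 allocation units once its remaining references are rewritten,
// while rewriting the referencing extents is expected to cost, say, 1 new
// allocation unit; the benefit is 4 - 1 = 3, and if that meets
// bluestore_gc_enable_blob_threshold the blob's extents are queued in
// extents_to_collect.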
761
762 // Cache
763
764 BlueStore::Cache *BlueStore::Cache::create(CephContext* cct, string type,
765 PerfCounters *logger)
766 {
767 Cache *c = nullptr;
768
769 if (type == "lru")
770 c = new LRUCache(cct);
771 else if (type == "2q")
772 c = new TwoQCache(cct);
773 else
774 assert(0 == "unrecognized cache type");
775
776 c->logger = logger;
777 return c;
778 }
779
780 void BlueStore::Cache::trim_all()
781 {
782 std::lock_guard<std::recursive_mutex> l(lock);
783 _trim(0, 0);
784 }
785
786 void BlueStore::Cache::trim(
787 uint64_t target_bytes,
788 float target_meta_ratio,
789 float target_data_ratio,
790 float bytes_per_onode)
791 {
792 std::lock_guard<std::recursive_mutex> l(lock);
793 uint64_t current_meta = _get_num_onodes() * bytes_per_onode;
794 uint64_t current_buffer = _get_buffer_bytes();
795 uint64_t current = current_meta + current_buffer;
796
797 uint64_t target_meta = target_bytes * target_meta_ratio;
798 uint64_t target_buffer = target_bytes * target_data_ratio;
799
800 // correct for overflow or float imprecision
801 target_meta = min(target_bytes, target_meta);
802 target_buffer = min(target_bytes - target_meta, target_buffer);
803
804 if (current <= target_bytes) {
805 dout(10) << __func__
806 << " shard target " << pretty_si_t(target_bytes)
807 << " meta/data ratios " << target_meta_ratio
808 << " + " << target_data_ratio << " ("
809 << pretty_si_t(target_meta) << " + "
810 << pretty_si_t(target_buffer) << "), "
811 << " current " << pretty_si_t(current) << " ("
812 << pretty_si_t(current_meta) << " + "
813 << pretty_si_t(current_buffer) << ")"
814 << dendl;
815 return;
816 }
817
818 uint64_t need_to_free = current - target_bytes;
819 uint64_t free_buffer = 0;
820 uint64_t free_meta = 0;
821 if (current_buffer > target_buffer) {
822 free_buffer = current_buffer - target_buffer;
823 if (free_buffer > need_to_free) {
824 free_buffer = need_to_free;
825 }
826 }
827 free_meta = need_to_free - free_buffer;
828
829 // start bounds at what we have now
830 uint64_t max_buffer = current_buffer - free_buffer;
831 uint64_t max_meta = current_meta - free_meta;
832 uint64_t max_onodes = max_meta / bytes_per_onode;
833
834 dout(10) << __func__
835 << " shard target " << pretty_si_t(target_bytes)
836 << " ratio " << target_meta_ratio << " ("
837 << pretty_si_t(target_meta) << " + "
838 << pretty_si_t(target_buffer) << "), "
839 << " current " << pretty_si_t(current) << " ("
840 << pretty_si_t(current_meta) << " + "
841 << pretty_si_t(current_buffer) << "),"
842 << " need_to_free " << pretty_si_t(need_to_free) << " ("
843 << pretty_si_t(free_meta) << " + "
844 << pretty_si_t(free_buffer) << ")"
845 << " -> max " << max_onodes << " onodes + "
846 << max_buffer << " buffer"
847 << dendl;
848 _trim(max_onodes, max_buffer);
849 }
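// Worked example (editor's illustration, numbers hypothetical): with a 100MB
// shard target, meta/data ratios of 0.5/0.5, 70MB of onode metadata and 60MB
// of buffers, we must free 130MB - 100MB = 30MB. Buffers exceed their 50MB
// target by 10MB, so free_buffer = 10MB and free_meta = 30MB - 10MB = 20MB,
// giving _trim() bounds of 50MB of buffers and 50MB of metadata
// (50MB / bytes_per_onode onodes).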
850
851
852 // LRUCache
853 #undef dout_prefix
854 #define dout_prefix *_dout << "bluestore.LRUCache(" << this << ") "
855
856 void BlueStore::LRUCache::_touch_onode(OnodeRef& o)
857 {
858 auto p = onode_lru.iterator_to(*o);
859 onode_lru.erase(p);
860 onode_lru.push_front(*o);
861 }
862
863 void BlueStore::LRUCache::_trim(uint64_t onode_max, uint64_t buffer_max)
864 {
865 dout(20) << __func__ << " onodes " << onode_lru.size() << " / " << onode_max
866 << " buffers " << buffer_size << " / " << buffer_max
867 << dendl;
868
869 _audit("trim start");
870
871 // buffers
872 while (buffer_size > buffer_max) {
873 auto i = buffer_lru.rbegin();
874 if (i == buffer_lru.rend()) {
875 // stop if buffer_lru is now empty
876 break;
877 }
878
879 Buffer *b = &*i;
880 assert(b->is_clean());
881 dout(20) << __func__ << " rm " << *b << dendl;
882 b->space->_rm_buffer(this, b);
883 }
884
885 // onodes
886 int num = onode_lru.size() - onode_max;
887 if (num <= 0)
888 return; // don't even try
889
890 auto p = onode_lru.end();
891 assert(p != onode_lru.begin());
892 --p;
893 int skipped = 0;
894 int max_skipped = g_conf->bluestore_cache_trim_max_skip_pinned;
895 while (num > 0) {
896 Onode *o = &*p;
897 int refs = o->nref.load();
898 if (refs > 1) {
899 dout(20) << __func__ << " " << o->oid << " has " << refs
900 << " refs, skipping" << dendl;
901 if (++skipped >= max_skipped) {
902 dout(20) << __func__ << " maximum skip pinned reached; stopping with "
903 << num << " left to trim" << dendl;
904 break;
905 }
906
907 if (p == onode_lru.begin()) {
908 break;
909 } else {
910 p--;
911 num--;
912 continue;
913 }
914 }
915 dout(30) << __func__ << " rm " << o->oid << dendl;
916 if (p != onode_lru.begin()) {
917 onode_lru.erase(p--);
918 } else {
919 onode_lru.erase(p);
920 assert(num == 1);
921 }
922 o->get(); // paranoia
923 o->c->onode_map.remove(o->oid);
924 o->put();
925 --num;
926 }
927 }
928
929 #ifdef DEBUG_CACHE
930 void BlueStore::LRUCache::_audit(const char *when)
931 {
932 dout(10) << __func__ << " " << when << " start" << dendl;
933 uint64_t s = 0;
934 for (auto i = buffer_lru.begin(); i != buffer_lru.end(); ++i) {
935 s += i->length;
936 }
937 if (s != buffer_size) {
938 derr << __func__ << " buffer_size " << buffer_size << " actual " << s
939 << dendl;
940 for (auto i = buffer_lru.begin(); i != buffer_lru.end(); ++i) {
941 derr << __func__ << " " << *i << dendl;
942 }
943 assert(s == buffer_size);
944 }
945 dout(20) << __func__ << " " << when << " buffer_size " << buffer_size
946 << " ok" << dendl;
947 }
948 #endif
949
950 // TwoQCache
951 #undef dout_prefix
952 #define dout_prefix *_dout << "bluestore.2QCache(" << this << ") "
953
954
955 void BlueStore::TwoQCache::_touch_onode(OnodeRef& o)
956 {
957 auto p = onode_lru.iterator_to(*o);
958 onode_lru.erase(p);
959 onode_lru.push_front(*o);
960 }
961
962 void BlueStore::TwoQCache::_add_buffer(Buffer *b, int level, Buffer *near)
963 {
964 dout(20) << __func__ << " level " << level << " near " << near
965 << " on " << *b
966 << " which has cache_private " << b->cache_private << dendl;
967 if (near) {
968 b->cache_private = near->cache_private;
969 switch (b->cache_private) {
970 case BUFFER_WARM_IN:
971 buffer_warm_in.insert(buffer_warm_in.iterator_to(*near), *b);
972 break;
973 case BUFFER_WARM_OUT:
974 assert(b->is_empty());
975 buffer_warm_out.insert(buffer_warm_out.iterator_to(*near), *b);
976 break;
977 case BUFFER_HOT:
978 buffer_hot.insert(buffer_hot.iterator_to(*near), *b);
979 break;
980 default:
981 assert(0 == "bad cache_private");
982 }
983 } else if (b->cache_private == BUFFER_NEW) {
984 b->cache_private = BUFFER_WARM_IN;
985 if (level > 0) {
986 buffer_warm_in.push_front(*b);
987 } else {
988 // take caller hint to start at the back of the warm queue
989 buffer_warm_in.push_back(*b);
990 }
991 } else {
992 // we got a hint from discard
993 switch (b->cache_private) {
994 case BUFFER_WARM_IN:
995 // stay in warm_in. move to front, even though 2Q doesn't actually
996 // do this.
997 dout(20) << __func__ << " move to front of warm " << *b << dendl;
998 buffer_warm_in.push_front(*b);
999 break;
1000 case BUFFER_WARM_OUT:
1001 b->cache_private = BUFFER_HOT;
1002 // move to hot. fall-thru
1003 case BUFFER_HOT:
1004 dout(20) << __func__ << " move to front of hot " << *b << dendl;
1005 buffer_hot.push_front(*b);
1006 break;
1007 default:
1008 assert(0 == "bad cache_private");
1009 }
1010 }
1011 if (!b->is_empty()) {
1012 buffer_bytes += b->length;
1013 buffer_list_bytes[b->cache_private] += b->length;
1014 }
1015 }
1016
1017 void BlueStore::TwoQCache::_rm_buffer(Buffer *b)
1018 {
1019 dout(20) << __func__ << " " << *b << dendl;
1020 if (!b->is_empty()) {
1021 assert(buffer_bytes >= b->length);
1022 buffer_bytes -= b->length;
1023 assert(buffer_list_bytes[b->cache_private] >= b->length);
1024 buffer_list_bytes[b->cache_private] -= b->length;
1025 }
1026 switch (b->cache_private) {
1027 case BUFFER_WARM_IN:
1028 buffer_warm_in.erase(buffer_warm_in.iterator_to(*b));
1029 break;
1030 case BUFFER_WARM_OUT:
1031 buffer_warm_out.erase(buffer_warm_out.iterator_to(*b));
1032 break;
1033 case BUFFER_HOT:
1034 buffer_hot.erase(buffer_hot.iterator_to(*b));
1035 break;
1036 default:
1037 assert(0 == "bad cache_private");
1038 }
1039 }
1040
1041 void BlueStore::TwoQCache::_move_buffer(Cache *srcc, Buffer *b)
1042 {
1043 TwoQCache *src = static_cast<TwoQCache*>(srcc);
1044 src->_rm_buffer(b);
1045
1046 // preserve which list we're on (even if we can't preserve the order!)
1047 switch (b->cache_private) {
1048 case BUFFER_WARM_IN:
1049 assert(!b->is_empty());
1050 buffer_warm_in.push_back(*b);
1051 break;
1052 case BUFFER_WARM_OUT:
1053 assert(b->is_empty());
1054 buffer_warm_out.push_back(*b);
1055 break;
1056 case BUFFER_HOT:
1057 assert(!b->is_empty());
1058 buffer_hot.push_back(*b);
1059 break;
1060 default:
1061 assert(0 == "bad cache_private");
1062 }
1063 if (!b->is_empty()) {
1064 buffer_bytes += b->length;
1065 buffer_list_bytes[b->cache_private] += b->length;
1066 }
1067 }
1068
1069 void BlueStore::TwoQCache::_adjust_buffer_size(Buffer *b, int64_t delta)
1070 {
1071 dout(20) << __func__ << " delta " << delta << " on " << *b << dendl;
1072 if (!b->is_empty()) {
1073 assert((int64_t)buffer_bytes + delta >= 0);
1074 buffer_bytes += delta;
1075 assert((int64_t)buffer_list_bytes[b->cache_private] + delta >= 0);
1076 buffer_list_bytes[b->cache_private] += delta;
1077 }
1078 }
1079
1080 void BlueStore::TwoQCache::_trim(uint64_t onode_max, uint64_t buffer_max)
1081 {
1082 dout(20) << __func__ << " onodes " << onode_lru.size() << " / " << onode_max
1083 << " buffers " << buffer_bytes << " / " << buffer_max
1084 << dendl;
1085
1086 _audit("trim start");
1087
1088 // buffers
1089 if (buffer_bytes > buffer_max) {
1090 uint64_t kin = buffer_max * cct->_conf->bluestore_2q_cache_kin_ratio;
1091 uint64_t khot = buffer_max - kin;
1092
1093 // pre-calculate kout from the current average buffer size, which is a
1094 // reasonable estimate (the warm_in and hot lists may change later)
1095 uint64_t kout = 0;
1096 uint64_t buffer_num = buffer_hot.size() + buffer_warm_in.size();
1097 if (buffer_num) {
1098 uint64_t buffer_avg_size = buffer_bytes / buffer_num;
1099 assert(buffer_avg_size);
1100 uint64_t calculated_buffer_num = buffer_max / buffer_avg_size;
1101 kout = calculated_buffer_num * cct->_conf->bluestore_2q_cache_kout_ratio;
1102 }
1103
1104 if (buffer_list_bytes[BUFFER_HOT] < khot) {
1105 // hot is small, give slack to warm_in
1106 kin += khot - buffer_list_bytes[BUFFER_HOT];
1107 } else if (buffer_list_bytes[BUFFER_WARM_IN] < kin) {
1108 // warm_in is small, give slack to hot
1109 khot += kin - buffer_list_bytes[BUFFER_WARM_IN];
1110 }
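// Worked example (editor's illustration, numbers hypothetical): with
// buffer_max = 100MB and bluestore_2q_cache_kin_ratio = 0.5, kin = khot =
// 50MB; if the hot list currently holds only 30MB, the 20MB of slack is
// handed to warm_in (kin becomes 70MB). kout is a buffer count: with an
// average buffer size of 64KB, roughly (100MB / 64KB) *
// bluestore_2q_cache_kout_ratio entries are kept on warm_out, which holds
// only empty (data-less) buffers.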
1111
1112 // adjust warm_in list
1113 int64_t to_evict_bytes = buffer_list_bytes[BUFFER_WARM_IN] - kin;
1114 uint64_t evicted = 0;
1115
1116 while (to_evict_bytes > 0) {
1117 auto p = buffer_warm_in.rbegin();
1118 if (p == buffer_warm_in.rend()) {
1119 // stop if warm_in list is now empty
1120 break;
1121 }
1122
1123 Buffer *b = &*p;
1124 assert(b->is_clean());
1125 dout(20) << __func__ << " buffer_warm_in -> out " << *b << dendl;
1126 assert(buffer_bytes >= b->length);
1127 buffer_bytes -= b->length;
1128 assert(buffer_list_bytes[BUFFER_WARM_IN] >= b->length);
1129 buffer_list_bytes[BUFFER_WARM_IN] -= b->length;
1130 to_evict_bytes -= b->length;
1131 evicted += b->length;
1132 b->state = Buffer::STATE_EMPTY;
1133 b->data.clear();
1134 buffer_warm_in.erase(buffer_warm_in.iterator_to(*b));
1135 buffer_warm_out.push_front(*b);
1136 b->cache_private = BUFFER_WARM_OUT;
1137 }
1138
1139 if (evicted > 0) {
1140 dout(20) << __func__ << " evicted " << prettybyte_t(evicted)
1141 << " from warm_in list, done evicting warm_in buffers"
1142 << dendl;
1143 }
1144
1145 // adjust hot list
1146 to_evict_bytes = buffer_list_bytes[BUFFER_HOT] - khot;
1147 evicted = 0;
1148
1149 while (to_evict_bytes > 0) {
1150 auto p = buffer_hot.rbegin();
1151 if (p == buffer_hot.rend()) {
1152 // stop if hot list is now empty
1153 break;
1154 }
1155
1156 Buffer *b = &*p;
1157 dout(20) << __func__ << " buffer_hot rm " << *b << dendl;
1158 assert(b->is_clean());
1159 // adjust evict size before buffer goes invalid
1160 to_evict_bytes -= b->length;
1161 evicted += b->length;
1162 b->space->_rm_buffer(this, b);
1163 }
1164
1165 if (evicted > 0) {
1166 dout(20) << __func__ << " evicted " << prettybyte_t(evicted)
1167 << " from hot list, done evicting hot buffers"
1168 << dendl;
1169 }
1170
1171 // adjust warm out list too, if necessary
1172 int64_t num = buffer_warm_out.size() - kout;
1173 while (num-- > 0) {
1174 Buffer *b = &*buffer_warm_out.rbegin();
1175 assert(b->is_empty());
1176 dout(20) << __func__ << " buffer_warm_out rm " << *b << dendl;
1177 b->space->_rm_buffer(this, b);
1178 }
1179 }
1180
1181 // onodes
1182 int num = onode_lru.size() - onode_max;
1183 if (num <= 0)
1184 return; // don't even try
1185
1186 auto p = onode_lru.end();
1187 assert(p != onode_lru.begin());
1188 --p;
1189 int skipped = 0;
1190 int max_skipped = g_conf->bluestore_cache_trim_max_skip_pinned;
1191 while (num > 0) {
1192 Onode *o = &*p;
1193 dout(20) << __func__ << " considering " << o << dendl;
1194 int refs = o->nref.load();
1195 if (refs > 1) {
1196 dout(20) << __func__ << " " << o->oid << " has " << refs
1197 << " refs; skipping" << dendl;
1198 if (++skipped >= max_skipped) {
1199 dout(20) << __func__ << " maximum skip pinned reached; stopping with "
1200 << num << " left to trim" << dendl;
1201 break;
1202 }
1203
1204 if (p == onode_lru.begin()) {
1205 break;
1206 } else {
1207 p--;
1208 num--;
1209 continue;
1210 }
1211 }
1212 dout(30) << __func__ << " " << o->oid << " num=" << num << " lru size=" << onode_lru.size() << dendl;
1213 if (p != onode_lru.begin()) {
1214 onode_lru.erase(p--);
1215 } else {
1216 onode_lru.erase(p);
1217 assert(num == 1);
1218 }
1219 o->get(); // paranoia
1220 o->c->onode_map.remove(o->oid);
1221 o->put();
1222 --num;
1223 }
1224 }
1225
1226 #ifdef DEBUG_CACHE
1227 void BlueStore::TwoQCache::_audit(const char *when)
1228 {
1229 dout(10) << __func__ << " " << when << " start" << dendl;
1230 uint64_t s = 0;
1231 for (auto i = buffer_hot.begin(); i != buffer_hot.end(); ++i) {
1232 s += i->length;
1233 }
1234
1235 uint64_t hot_bytes = s;
1236 if (hot_bytes != buffer_list_bytes[BUFFER_HOT]) {
1237 derr << __func__ << " hot_list_bytes "
1238 << buffer_list_bytes[BUFFER_HOT]
1239 << " != actual " << hot_bytes
1240 << dendl;
1241 assert(hot_bytes == buffer_list_bytes[BUFFER_HOT]);
1242 }
1243
1244 for (auto i = buffer_warm_in.begin(); i != buffer_warm_in.end(); ++i) {
1245 s += i->length;
1246 }
1247
1248 uint64_t warm_in_bytes = s - hot_bytes;
1249 if (warm_in_bytes != buffer_list_bytes[BUFFER_WARM_IN]) {
1250 derr << __func__ << " warm_in_list_bytes "
1251 << buffer_list_bytes[BUFFER_WARM_IN]
1252 << " != actual " << warm_in_bytes
1253 << dendl;
1254 assert(warm_in_bytes == buffer_list_bytes[BUFFER_WARM_IN]);
1255 }
1256
1257 if (s != buffer_bytes) {
1258 derr << __func__ << " buffer_bytes " << buffer_bytes << " actual " << s
1259 << dendl;
1260 assert(s == buffer_bytes);
1261 }
1262
1263 dout(20) << __func__ << " " << when << " buffer_bytes " << buffer_bytes
1264 << " ok" << dendl;
1265 }
1266 #endif
1267
1268
1269 // BufferSpace
1270
1271 #undef dout_prefix
1272 #define dout_prefix *_dout << "bluestore.BufferSpace(" << this << " in " << cache << ") "
1273
1274 void BlueStore::BufferSpace::_clear(Cache* cache)
1275 {
1276 // note: we already hold cache->lock
1277 ldout(cache->cct, 20) << __func__ << dendl;
1278 while (!buffer_map.empty()) {
1279 _rm_buffer(cache, buffer_map.begin());
1280 }
1281 }
1282
1283 int BlueStore::BufferSpace::_discard(Cache* cache, uint32_t offset, uint32_t length)
1284 {
1285 // note: we already hold cache->lock
1286 ldout(cache->cct, 20) << __func__ << std::hex << " 0x" << offset << "~" << length
1287 << std::dec << dendl;
1288 int cache_private = 0;
1289 cache->_audit("discard start");
1290 auto i = _data_lower_bound(offset);
1291 uint32_t end = offset + length;
1292 while (i != buffer_map.end()) {
1293 Buffer *b = i->second.get();
1294 if (b->offset >= end) {
1295 break;
1296 }
1297 if (b->cache_private > cache_private) {
1298 cache_private = b->cache_private;
1299 }
1300 if (b->offset < offset) {
1301 int64_t front = offset - b->offset;
1302 if (b->end() > end) {
1303 // drop middle (split)
1304 uint32_t tail = b->end() - end;
1305 if (b->data.length()) {
1306 bufferlist bl;
1307 bl.substr_of(b->data, b->length - tail, tail);
1308 Buffer *nb = new Buffer(this, b->state, b->seq, end, bl);
1309 nb->maybe_rebuild();
1310 _add_buffer(cache, nb, 0, b);
1311 } else {
1312 _add_buffer(cache, new Buffer(this, b->state, b->seq, end, tail),
1313 0, b);
1314 }
1315 if (!b->is_writing()) {
1316 cache->_adjust_buffer_size(b, front - (int64_t)b->length);
1317 }
1318 b->truncate(front);
1319 b->maybe_rebuild();
1320 cache->_audit("discard end 1");
1321 break;
1322 } else {
1323 // drop tail
1324 if (!b->is_writing()) {
1325 cache->_adjust_buffer_size(b, front - (int64_t)b->length);
1326 }
1327 b->truncate(front);
1328 b->maybe_rebuild();
1329 ++i;
1330 continue;
1331 }
1332 }
1333 if (b->end() <= end) {
1334 // drop entire buffer
1335 _rm_buffer(cache, i++);
1336 continue;
1337 }
1338 // drop front
1339 uint32_t keep = b->end() - end;
1340 if (b->data.length()) {
1341 bufferlist bl;
1342 bl.substr_of(b->data, b->length - keep, keep);
1343 Buffer *nb = new Buffer(this, b->state, b->seq, end, bl);
1344 nb->maybe_rebuild();
1345 _add_buffer(cache, nb, 0, b);
1346 } else {
1347 _add_buffer(cache, new Buffer(this, b->state, b->seq, end, keep), 0, b);
1348 }
1349 _rm_buffer(cache, i);
1350 cache->_audit("discard end 2");
1351 break;
1352 }
1353 return cache_private;
1354 }
1355
1356 void BlueStore::BufferSpace::read(
1357 Cache* cache,
1358 uint32_t offset,
1359 uint32_t length,
1360 BlueStore::ready_regions_t& res,
1361 interval_set<uint32_t>& res_intervals)
1362 {
1363 res.clear();
1364 res_intervals.clear();
1365 uint32_t want_bytes = length;
1366 uint32_t end = offset + length;
1367
1368 {
1369 std::lock_guard<std::recursive_mutex> l(cache->lock);
1370 for (auto i = _data_lower_bound(offset);
1371 i != buffer_map.end() && offset < end && i->first < end;
1372 ++i) {
1373 Buffer *b = i->second.get();
1374 assert(b->end() > offset);
1375 if (b->is_writing() || b->is_clean()) {
1376 if (b->offset < offset) {
1377 uint32_t skip = offset - b->offset;
1378 uint32_t l = MIN(length, b->length - skip);
1379 res[offset].substr_of(b->data, skip, l);
1380 res_intervals.insert(offset, l);
1381 offset += l;
1382 length -= l;
1383 if (!b->is_writing()) {
1384 cache->_touch_buffer(b);
1385 }
1386 continue;
1387 }
1388 if (b->offset > offset) {
1389 uint32_t gap = b->offset - offset;
1390 if (length <= gap) {
1391 break;
1392 }
1393 offset += gap;
1394 length -= gap;
1395 }
1396 if (!b->is_writing()) {
1397 cache->_touch_buffer(b);
1398 }
1399 if (b->length > length) {
1400 res[offset].substr_of(b->data, 0, length);
1401 res_intervals.insert(offset, length);
1402 break;
1403 } else {
1404 res[offset].append(b->data);
1405 res_intervals.insert(offset, b->length);
1406 if (b->length == length)
1407 break;
1408 offset += b->length;
1409 length -= b->length;
1410 }
1411 }
1412 }
1413 }
1414
1415 uint64_t hit_bytes = res_intervals.size();
1416 assert(hit_bytes <= want_bytes);
1417 uint64_t miss_bytes = want_bytes - hit_bytes;
1418 cache->logger->inc(l_bluestore_buffer_hit_bytes, hit_bytes);
1419 cache->logger->inc(l_bluestore_buffer_miss_bytes, miss_bytes);
1420 }
1421
1422 void BlueStore::BufferSpace::finish_write(Cache* cache, uint64_t seq)
1423 {
1424 std::lock_guard<std::recursive_mutex> l(cache->lock);
1425
1426 auto i = writing.begin();
1427 while (i != writing.end()) {
1428 if (i->seq > seq) {
1429 break;
1430 }
1431 if (i->seq < seq) {
1432 ++i;
1433 continue;
1434 }
1435
1436 Buffer *b = &*i;
1437 assert(b->is_writing());
1438
1439 if (b->flags & Buffer::FLAG_NOCACHE) {
1440 writing.erase(i++);
1441 ldout(cache->cct, 20) << __func__ << " discard " << *b << dendl;
1442 buffer_map.erase(b->offset);
1443 } else {
1444 b->state = Buffer::STATE_CLEAN;
1445 writing.erase(i++);
1446 b->maybe_rebuild();
1447 b->data.reassign_to_mempool(mempool::mempool_bluestore_cache_data);
1448 cache->_add_buffer(b, 1, nullptr);
1449 ldout(cache->cct, 20) << __func__ << " added " << *b << dendl;
1450 }
1451 }
1452
1453 cache->_audit("finish_write end");
1454 }
1455
1456 void BlueStore::BufferSpace::split(Cache* cache, size_t pos, BlueStore::BufferSpace &r)
1457 {
1458 std::lock_guard<std::recursive_mutex> lk(cache->lock);
1459 if (buffer_map.empty())
1460 return;
1461
1462 auto p = --buffer_map.end();
1463 while (true) {
1464 if (p->second->end() <= pos)
1465 break;
1466
1467 if (p->second->offset < pos) {
1468 ldout(cache->cct, 30) << __func__ << " cut " << *p->second << dendl;
1469 size_t left = pos - p->second->offset;
1470 size_t right = p->second->length - left;
1471 if (p->second->data.length()) {
1472 bufferlist bl;
1473 bl.substr_of(p->second->data, left, right);
1474 r._add_buffer(cache, new Buffer(&r, p->second->state, p->second->seq, 0, bl),
1475 0, p->second.get());
1476 } else {
1477 r._add_buffer(cache, new Buffer(&r, p->second->state, p->second->seq, 0, right),
1478 0, p->second.get());
1479 }
1480 cache->_adjust_buffer_size(p->second.get(), -right);
1481 p->second->truncate(left);
1482 break;
1483 }
1484
1485 assert(p->second->end() > pos);
1486 ldout(cache->cct, 30) << __func__ << " move " << *p->second << dendl;
1487 if (p->second->data.length()) {
1488 r._add_buffer(cache, new Buffer(&r, p->second->state, p->second->seq,
1489 p->second->offset - pos, p->second->data),
1490 0, p->second.get());
1491 } else {
1492 r._add_buffer(cache, new Buffer(&r, p->second->state, p->second->seq,
1493 p->second->offset - pos, p->second->length),
1494 0, p->second.get());
1495 }
1496 if (p == buffer_map.begin()) {
1497 _rm_buffer(cache, p);
1498 break;
1499 } else {
1500 _rm_buffer(cache, p--);
1501 }
1502 }
1503 assert(writing.empty());
1504 }
1505
1506 // OnodeSpace
1507
1508 #undef dout_prefix
1509 #define dout_prefix *_dout << "bluestore.OnodeSpace(" << this << " in " << cache << ") "
1510
1511 BlueStore::OnodeRef BlueStore::OnodeSpace::add(const ghobject_t& oid, OnodeRef o)
1512 {
1513 std::lock_guard<std::recursive_mutex> l(cache->lock);
1514 auto p = onode_map.find(oid);
1515 if (p != onode_map.end()) {
1516 ldout(cache->cct, 30) << __func__ << " " << oid << " " << o
1517 << " raced, returning existing " << p->second
1518 << dendl;
1519 return p->second;
1520 }
1521 ldout(cache->cct, 30) << __func__ << " " << oid << " " << o << dendl;
1522 onode_map[oid] = o;
1523 cache->_add_onode(o, 1);
1524 return o;
1525 }
1526
1527 BlueStore::OnodeRef BlueStore::OnodeSpace::lookup(const ghobject_t& oid)
1528 {
1529 ldout(cache->cct, 30) << __func__ << dendl;
1530 OnodeRef o;
1531 bool hit = false;
1532
1533 {
1534 std::lock_guard<std::recursive_mutex> l(cache->lock);
1535 ceph::unordered_map<ghobject_t,OnodeRef>::iterator p = onode_map.find(oid);
1536 if (p == onode_map.end()) {
1537 ldout(cache->cct, 30) << __func__ << " " << oid << " miss" << dendl;
1538 } else {
1539 ldout(cache->cct, 30) << __func__ << " " << oid << " hit " << p->second
1540 << dendl;
1541 cache->_touch_onode(p->second);
1542 hit = true;
1543 o = p->second;
1544 }
1545 }
1546
1547 if (hit) {
1548 cache->logger->inc(l_bluestore_onode_hits);
1549 } else {
1550 cache->logger->inc(l_bluestore_onode_misses);
1551 }
1552 return o;
1553 }
1554
1555 void BlueStore::OnodeSpace::clear()
1556 {
1557 std::lock_guard<std::recursive_mutex> l(cache->lock);
1558 ldout(cache->cct, 10) << __func__ << dendl;
1559 for (auto &p : onode_map) {
1560 cache->_rm_onode(p.second);
1561 }
1562 onode_map.clear();
1563 }
1564
1565 bool BlueStore::OnodeSpace::empty()
1566 {
1567 std::lock_guard<std::recursive_mutex> l(cache->lock);
1568 return onode_map.empty();
1569 }
1570
1571 void BlueStore::OnodeSpace::rename(
1572 OnodeRef& oldo,
1573 const ghobject_t& old_oid,
1574 const ghobject_t& new_oid,
1575 const mempool::bluestore_cache_other::string& new_okey)
1576 {
1577 std::lock_guard<std::recursive_mutex> l(cache->lock);
1578 ldout(cache->cct, 30) << __func__ << " " << old_oid << " -> " << new_oid
1579 << dendl;
1580 ceph::unordered_map<ghobject_t,OnodeRef>::iterator po, pn;
1581 po = onode_map.find(old_oid);
1582 pn = onode_map.find(new_oid);
1583 assert(po != pn);
1584
1585 assert(po != onode_map.end());
1586 if (pn != onode_map.end()) {
1587 ldout(cache->cct, 30) << __func__ << " removing target " << pn->second
1588 << dendl;
1589 cache->_rm_onode(pn->second);
1590 onode_map.erase(pn);
1591 }
1592 OnodeRef o = po->second;
1593
1594 // install a non-existent onode at old location
1595 oldo.reset(new Onode(o->c, old_oid, o->key));
1596 po->second = oldo;
1597 cache->_add_onode(po->second, 1);
1598
1599 // add at new position and fix oid, key
1600 onode_map.insert(make_pair(new_oid, o));
1601 cache->_touch_onode(o);
1602 o->oid = new_oid;
1603 o->key = new_okey;
1604 }
1605
1606 bool BlueStore::OnodeSpace::map_any(std::function<bool(OnodeRef)> f)
1607 {
1608 std::lock_guard<std::recursive_mutex> l(cache->lock);
1609 ldout(cache->cct, 20) << __func__ << dendl;
1610 for (auto& i : onode_map) {
1611 if (f(i.second)) {
1612 return true;
1613 }
1614 }
1615 return false;
1616 }
1617
1618
1619 // SharedBlob
1620
1621 #undef dout_prefix
1622 #define dout_prefix *_dout << "bluestore.sharedblob(" << this << ") "
1623
1624 ostream& operator<<(ostream& out, const BlueStore::SharedBlob& sb)
1625 {
1626 out << "SharedBlob(" << &sb;
1627
1628 if (sb.loaded) {
1629 out << " loaded " << *sb.persistent;
1630 } else {
1631 out << " sbid 0x" << std::hex << sb.sbid_unloaded << std::dec;
1632 }
1633 return out << ")";
1634 }
1635
1636 BlueStore::SharedBlob::SharedBlob(uint64_t i, Collection *_coll)
1637 : coll(_coll), sbid_unloaded(i)
1638 {
1639 assert(sbid_unloaded > 0);
1640 if (get_cache()) {
1641 get_cache()->add_blob();
1642 }
1643 }
1644
1645 BlueStore::SharedBlob::~SharedBlob()
1646 {
1647 if (get_cache()) { // the dummy instances have a nullptr
1648 std::lock_guard<std::recursive_mutex> l(get_cache()->lock);
1649 bc._clear(get_cache());
1650 get_cache()->rm_blob();
1651 }
1652 if (loaded && persistent) {
1653 delete persistent;
1654 }
1655 }
1656
1657 void BlueStore::SharedBlob::put()
1658 {
1659 if (--nref == 0) {
1660 ldout(coll->store->cct, 20) << __func__ << " " << this
1661 << " removing self from set " << get_parent()
1662 << dendl;
1663 if (get_parent()) {
1664 if (get_parent()->remove(this)) {
1665 delete this;
1666 } else {
1667 ldout(coll->store->cct, 20)
1668 << __func__ << " " << this << " lost race to remove myself from set"
1669 << dendl;
1670 }
1671 } else {
1672 delete this;
1673 }
1674 }
1675 }
1676
1677 void BlueStore::SharedBlob::get_ref(uint64_t offset, uint32_t length)
1678 {
1679 assert(persistent);
1680 persistent->ref_map.get(offset, length);
1681 }
1682
1683 void BlueStore::SharedBlob::put_ref(uint64_t offset, uint32_t length,
1684 PExtentVector *r,
1685 set<SharedBlob*> *maybe_unshared)
1686 {
1687 assert(persistent);
1688 bool maybe = false;
1689 persistent->ref_map.put(offset, length, r, maybe_unshared ? &maybe : nullptr);
1690 if (maybe_unshared && maybe) {
1691 maybe_unshared->insert(this);
1692 }
1693 }
1694
1695 // Blob
1696
1697 #undef dout_prefix
1698 #define dout_prefix *_dout << "bluestore.blob(" << this << ") "
1699
1700 ostream& operator<<(ostream& out, const BlueStore::Blob& b)
1701 {
1702 out << "Blob(" << &b;
1703 if (b.is_spanning()) {
1704 out << " spanning " << b.id;
1705 }
1706 out << " " << b.get_blob() << " " << b.get_blob_use_tracker()
1707 << " " << *b.shared_blob
1708 << ")";
1709 return out;
1710 }
1711
1712 void BlueStore::Blob::discard_unallocated(Collection *coll)
1713 {
1714 if (get_blob().is_shared()) {
1715 return;
1716 }
1717 if (get_blob().is_compressed()) {
1718 bool discard = false;
1719 bool all_invalid = true;
1720 for (auto e : get_blob().get_extents()) {
1721 if (!e.is_valid()) {
1722 discard = true;
1723 } else {
1724 all_invalid = false;
1725 }
1726 }
1727 assert(discard == all_invalid); // for a compressed blob, either all
1728 // pextents are invalid or none are.
1729 if (discard) {
1730 shared_blob->bc.discard(shared_blob->get_cache(), 0,
1731 get_blob().get_logical_length());
1732 }
1733 } else {
1734 size_t pos = 0;
1735 for (auto e : get_blob().get_extents()) {
1736 if (!e.is_valid()) {
1737 ldout(coll->store->cct, 20) << __func__ << " 0x" << std::hex << pos
1738 << "~" << e.length
1739 << std::dec << dendl;
1740 shared_blob->bc.discard(shared_blob->get_cache(), pos, e.length);
1741 }
1742 pos += e.length;
1743 }
1744 if (get_blob().can_prune_tail()) {
1745 dirty_blob().prune_tail();
1746 used_in_blob.prune_tail(get_blob().get_ondisk_length());
1747 auto cct = coll->store->cct; //used by dout
1748 dout(20) << __func__ << " pruned tail, now " << get_blob() << dendl;
1749 }
1750 }
1751 }
1752
1753 void BlueStore::Blob::get_ref(
1754 Collection *coll,
1755 uint32_t offset,
1756 uint32_t length)
1757 {
1758 // Caller has to initialize the Blob's logical length prior to incrementing
1759 // references. Otherwise it is impossible to determine the required number
1760 // of counters for per-au tracking, or to obtain min_release_size for
1761 // single-counter mode.
1762 assert(get_blob().get_logical_length() != 0);
1763 auto cct = coll->store->cct;
1764 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
1765 << std::dec << " " << *this << dendl;
1766
1767 if (used_in_blob.is_empty()) {
1768 uint32_t min_release_size =
1769 get_blob().get_release_size(coll->store->min_alloc_size);
1770 uint64_t l = get_blob().get_logical_length();
1771 dout(20) << __func__ << " init 0x" << std::hex << l << ", "
1772 << min_release_size << std::dec << dendl;
1773 used_in_blob.init(l, min_release_size);
1774 }
1775 used_in_blob.get(
1776 offset,
1777 length);
1778 }
1779
1780 bool BlueStore::Blob::put_ref(
1781 Collection *coll,
1782 uint32_t offset,
1783 uint32_t length,
1784 PExtentVector *r)
1785 {
1786 PExtentVector logical;
1787
1788 auto cct = coll->store->cct;
1789 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
1790 << std::dec << " " << *this << dendl;
1791
1792 bool empty = used_in_blob.put(
1793 offset,
1794 length,
1795 &logical);
1796 r->clear();
1797 // nothing to release
1798 if (!empty && logical.empty()) {
1799 return false;
1800 }
1801
1802 bluestore_blob_t& b = dirty_blob();
1803 return b.release_extents(empty, logical, r);
1804 }
1805
1806 bool BlueStore::Blob::can_reuse_blob(uint32_t min_alloc_size,
1807 uint32_t target_blob_size,
1808 uint32_t b_offset,
1809 uint32_t *length0) {
1810 assert(min_alloc_size);
1811 assert(target_blob_size);
1812 if (!get_blob().is_mutable()) {
1813 return false;
1814 }
1815
1816 uint32_t length = *length0;
1817 uint32_t end = b_offset + length;
1818
1819 // Currently for the sake of simplicity we omit blob reuse if data is
1820 // unaligned with csum chunk. Later we can perform padding if needed.
1821 if (get_blob().has_csum() &&
1822 ((b_offset % get_blob().get_csum_chunk_size()) != 0 ||
1823 (end % get_blob().get_csum_chunk_size()) != 0)) {
1824 return false;
1825 }
1826
1827 auto blen = get_blob().get_logical_length();
1828 uint32_t new_blen = blen;
1829
1830 // make sure target_blob_size isn't less than current blob len
1831 target_blob_size = MAX(blen, target_blob_size);
1832
1833 if (b_offset >= blen) {
1834 // new data totally stands out of the existing blob
1835 new_blen = end;
1836 } else {
1837 // new data overlaps with the existing blob
1838 new_blen = MAX(blen, end);
1839
1840 uint32_t overlap = 0;
1841 if (new_blen > blen) {
1842 overlap = blen - b_offset;
1843 } else {
1844 overlap = length;
1845 }
1846
1847 if (!get_blob().is_unallocated(b_offset, overlap)) {
1848 // abort if any piece of the overlap has already been allocated
1849 return false;
1850 }
1851 }
1852
1853 if (new_blen > blen) {
1854 int64_t overflow = int64_t(new_blen) - target_blob_size;
1855 // Unable to decrease the provided length enough to fit into target_blob_size
1856 if (overflow >= length) {
1857 return false;
1858 }
1859
1860 // FIXME: in some cases we could reduce unused resolution
1861 if (get_blob().has_unused()) {
1862 return false;
1863 }
1864
1865 if (overflow > 0) {
1866 new_blen -= overflow;
1867 length -= overflow;
1868 *length0 = length;
1869 }
1870
1871 if (new_blen > blen) {
1872 dirty_blob().add_tail(new_blen);
1873 used_in_blob.add_tail(new_blen,
1874 get_blob().get_release_size(min_alloc_size));
1875 }
1876 }
1877 return true;
1878 }
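// Worked example (editor's illustration, numbers hypothetical): appending
// 0x8000 bytes at b_offset 0x6000 to a 0x6000-byte mutable blob with
// target_blob_size 0x10000 gives new_blen = 0xe000, overflow < 0, so the
// whole write fits and the blob tail grows to 0xe000. If the write were
// 0xc000 bytes instead, new_blen would be 0x12000 and overflow 0x2000; the
// reusable length returned via *length0 is trimmed to 0xa000, the tail grows
// to 0x10000, and the remaining 0x2000 bytes are left for the caller to
// place elsewhere (e.g. in a new blob).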
1879
1880 void BlueStore::Blob::split(Collection *coll, uint32_t blob_offset, Blob *r)
1881 {
1882 auto cct = coll->store->cct; //used by dout
1883 dout(10) << __func__ << " 0x" << std::hex << blob_offset << std::dec
1884 << " start " << *this << dendl;
1885 assert(blob.can_split());
1886 assert(used_in_blob.can_split());
1887 bluestore_blob_t &lb = dirty_blob();
1888 bluestore_blob_t &rb = r->dirty_blob();
1889
1890 used_in_blob.split(
1891 blob_offset,
1892 &(r->used_in_blob));
1893
1894 lb.split(blob_offset, rb);
1895 shared_blob->bc.split(shared_blob->get_cache(), blob_offset, r->shared_blob->bc);
1896
1897 dout(10) << __func__ << " 0x" << std::hex << blob_offset << std::dec
1898 << " finish " << *this << dendl;
1899 dout(10) << __func__ << " 0x" << std::hex << blob_offset << std::dec
1900 << " and " << *r << dendl;
1901 }
1902
1903 #ifndef CACHE_BLOB_BL
1904 void BlueStore::Blob::decode(
1905 Collection *coll,
1906 bufferptr::iterator& p,
1907 uint64_t struct_v,
1908 uint64_t* sbid,
1909 bool include_ref_map)
1910 {
1911 denc(blob, p, struct_v);
1912 if (blob.is_shared()) {
1913 denc(*sbid, p);
1914 }
1915 if (include_ref_map) {
1916 if (struct_v > 1) {
1917 used_in_blob.decode(p);
1918 } else {
1919 used_in_blob.clear();
1920 bluestore_extent_ref_map_t legacy_ref_map;
1921 legacy_ref_map.decode(p);
1922 for (auto r : legacy_ref_map.ref_map) {
1923 get_ref(
1924 coll,
1925 r.first,
1926 r.second.refs * r.second.length);
1927 }
1928 }
1929 }
1930 }
1931 #endif
1932
1933 // Extent
1934
1935 ostream& operator<<(ostream& out, const BlueStore::Extent& e)
1936 {
1937 return out << std::hex << "0x" << e.logical_offset << "~" << e.length
1938 << ": 0x" << e.blob_offset << "~" << e.length << std::dec
1939 << " " << *e.blob;
1940 }
1941
1942 // OldExtent
1943 BlueStore::OldExtent* BlueStore::OldExtent::create(CollectionRef c,
1944 uint32_t lo,
1945 uint32_t o,
1946 uint32_t l,
1947 BlobRef& b) {
1948 OldExtent* oe = new OldExtent(lo, o, l, b);
1949 b->put_ref(c.get(), o, l, &(oe->r));
1950 oe->blob_empty = b->get_referenced_bytes() == 0;
1951 return oe;
1952 }
1953
1954 // ExtentMap
1955
1956 #undef dout_prefix
1957 #define dout_prefix *_dout << "bluestore.extentmap(" << this << ") "
1958
1959 BlueStore::ExtentMap::ExtentMap(Onode *o)
1960 : onode(o),
1961 inline_bl(
1962 o->c->store->cct->_conf->bluestore_extent_map_inline_shard_prealloc_size) {
1963 }
1964
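// Persist the extent map into the transaction.  When the onode has no
// shards, the whole map is encoded into inline_bl and stored with the onode
// key; otherwise each dirty shard is re-encoded and written under its own
// PREFIX_OBJ key.  Shards whose encoded size ends up too large (or too
// small relative to a neighbor) trigger request_reshard() instead, and if a
// reshard has been requested we return without writing anything so the map
// can be resharded first.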
1965 void BlueStore::ExtentMap::update(KeyValueDB::Transaction t,
1966 bool force)
1967 {
1968 auto cct = onode->c->store->cct; //used by dout
1969 dout(20) << __func__ << " " << onode->oid << (force ? " force" : "") << dendl;
1970 if (onode->onode.extent_map_shards.empty()) {
1971 if (inline_bl.length() == 0) {
1972 unsigned n;
1973 // we need to encode inline_bl to measure encoded length
1974 bool never_happen = encode_some(0, OBJECT_MAX_SIZE, inline_bl, &n);
1975 assert(!never_happen);
1976 size_t len = inline_bl.length();
1977 dout(20) << __func__ << " inline shard " << len << " bytes from " << n
1978 << " extents" << dendl;
1979 if (!force && len > cct->_conf->bluestore_extent_map_shard_max_size) {
1980 request_reshard(0, OBJECT_MAX_SIZE);
1981 return;
1982 }
1983 }
1984 // will persist in the onode key.
1985 } else {
1986 // pending shard update
1987 struct dirty_shard_t {
1988 Shard *shard;
1989 bufferlist bl;
1990 dirty_shard_t(Shard *s) : shard(s) {}
1991 };
1992 vector<dirty_shard_t> encoded_shards;
1993 // allocate slots for all shards in a single call instead of
1994 // doing multiple allocations - one per each dirty shard
1995 encoded_shards.reserve(shards.size());
1996
1997 auto p = shards.begin();
1998 auto prev_p = p;
1999 while (p != shards.end()) {
2000 assert(p->shard_info->offset >= prev_p->shard_info->offset);
2001 auto n = p;
2002 ++n;
2003 if (p->dirty) {
2004 uint32_t endoff;
2005 if (n == shards.end()) {
2006 endoff = OBJECT_MAX_SIZE;
2007 } else {
2008 endoff = n->shard_info->offset;
2009 }
2010 encoded_shards.emplace_back(dirty_shard_t(&(*p)));
2011 bufferlist& bl = encoded_shards.back().bl;
2012 if (encode_some(p->shard_info->offset, endoff - p->shard_info->offset,
2013 bl, &p->extents)) {
2014 if (force) {
2015 derr << __func__ << " encode_some needs reshard" << dendl;
2016 assert(!force);
2017 }
2018 }
2019 size_t len = bl.length();
2020
2021 dout(20) << __func__ << " shard 0x" << std::hex
2022 << p->shard_info->offset << std::dec << " is " << len
2023 << " bytes (was " << p->shard_info->bytes << ") from "
2024 << p->extents << " extents" << dendl;
2025
2026 if (!force) {
2027 if (len > cct->_conf->bluestore_extent_map_shard_max_size) {
2028 // we are big; reshard ourselves
2029 request_reshard(p->shard_info->offset, endoff);
2030 }
2031 // avoid resharding the trailing shard, even if it is small
2032 else if (n != shards.end() &&
2033 len < g_conf->bluestore_extent_map_shard_min_size) {
2034 assert(endoff != OBJECT_MAX_SIZE);
2035 if (p == shards.begin()) {
2036 // we are the first shard, combine with next shard
2037 request_reshard(p->shard_info->offset, endoff + 1);
2038 } else {
2039 // combine either with the previous shard or the next,
2040 // whichever is smaller
2041 if (prev_p->shard_info->bytes > n->shard_info->bytes) {
2042 request_reshard(p->shard_info->offset, endoff + 1);
2043 } else {
2044 request_reshard(prev_p->shard_info->offset, endoff);
2045 }
2046 }
2047 }
2048 }
2049 }
2050 prev_p = p;
2051 p = n;
2052 }
2053 if (needs_reshard()) {
2054 return;
2055 }
2056
2057 // schedule DB update for dirty shards
2058 string key;
2059 for (auto& it : encoded_shards) {
2060 it.shard->dirty = false;
2061 it.shard->shard_info->bytes = it.bl.length();
2062 generate_extent_shard_key_and_apply(
2063 onode->key,
2064 it.shard->shard_info->offset,
2065 &key,
2066 [&](const string& final_key) {
2067 t->set(PREFIX_OBJ, final_key, it.bl);
2068 }
2069 );
2070 }
2071 }
2072 }
2073
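// Pick an id for a new spanning blob: normally one past the largest id
// currently in use.  If that would overflow (go negative), fall back to
// probing from a random starting point until an unused id is found; if
// every id is taken we assert out.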
2074 bid_t BlueStore::ExtentMap::allocate_spanning_blob_id()
2075 {
2076 if (spanning_blob_map.empty())
2077 return 0;
2078 bid_t bid = spanning_blob_map.rbegin()->first + 1;
2079 // bid is valid and available.
2080 if (bid >= 0)
2081 return bid;
2082 // Find next unused bid;
2083 bid = rand() % (numeric_limits<bid_t>::max() + 1);
2084 const auto begin_bid = bid;
2085 do {
2086 if (!spanning_blob_map.count(bid))
2087 return bid;
2088 else {
2089 bid++;
2090 if (bid < 0) bid = 0;
2091 }
2092 } while (bid != begin_bid);
2093 assert(0 == "no available blob id");
2094 }
2095
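// Re-shard the extent map over [needs_reshard_begin, needs_reshard_end).
// The affected range is widened to existing shard boundaries, the old shard
// keys are removed, and new shard boundaries are laid out so that each
// shard's estimated encoded size stays near
// bluestore_extent_map_shard_target_size (boundaries that would land in the
// middle of a blob are disfavored via the slop factor).  Finally, blobs
// that still cross a shard boundary are either split or marked spanning,
// and blobs that no longer cross one are un-spanned.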
2096 void BlueStore::ExtentMap::reshard(
2097 KeyValueDB *db,
2098 KeyValueDB::Transaction t)
2099 {
2100 auto cct = onode->c->store->cct; // used by dout
2101
2102 dout(10) << __func__ << " 0x[" << std::hex << needs_reshard_begin << ","
2103 << needs_reshard_end << ")" << std::dec
2104 << " of " << onode->onode.extent_map_shards.size()
2105 << " shards on " << onode->oid << dendl;
2106 for (auto& p : spanning_blob_map) {
2107 dout(20) << __func__ << " spanning blob " << p.first << " " << *p.second
2108 << dendl;
2109 }
2110 // determine shard index range
2111 unsigned si_begin = 0, si_end = 0;
2112 if (!shards.empty()) {
2113 while (si_begin + 1 < shards.size() &&
2114 shards[si_begin + 1].shard_info->offset <= needs_reshard_begin) {
2115 ++si_begin;
2116 }
2117 needs_reshard_begin = shards[si_begin].shard_info->offset;
2118 for (si_end = si_begin; si_end < shards.size(); ++si_end) {
2119 if (shards[si_end].shard_info->offset >= needs_reshard_end) {
2120 needs_reshard_end = shards[si_end].shard_info->offset;
2121 break;
2122 }
2123 }
2124 if (si_end == shards.size()) {
2125 needs_reshard_end = OBJECT_MAX_SIZE;
2126 }
2127 dout(20) << __func__ << " shards [" << si_begin << "," << si_end << ")"
2128 << " over 0x[" << std::hex << needs_reshard_begin << ","
2129 << needs_reshard_end << ")" << std::dec << dendl;
2130 }
2131
2132 fault_range(db, needs_reshard_begin, needs_reshard_end);
2133
2134 // we may need to fault in a larger interval later; we must have all
2135 // referring extents for spanning blobs loaded in order to have
2136 // accurate use_tracker values.
2137 uint32_t spanning_scan_begin = needs_reshard_begin;
2138 uint32_t spanning_scan_end = needs_reshard_end;
2139
2140 // remove old keys
2141 string key;
2142 for (unsigned i = si_begin; i < si_end; ++i) {
2143 generate_extent_shard_key_and_apply(
2144 onode->key, shards[i].shard_info->offset, &key,
2145 [&](const string& final_key) {
2146 t->rmkey(PREFIX_OBJ, final_key);
2147 }
2148 );
2149 }
2150
2151 // calculate average extent size
2152 unsigned bytes = 0;
2153 unsigned extents = 0;
2154 if (onode->onode.extent_map_shards.empty()) {
2155 bytes = inline_bl.length();
2156 extents = extent_map.size();
2157 } else {
2158 for (unsigned i = si_begin; i < si_end; ++i) {
2159 bytes += shards[i].shard_info->bytes;
2160 extents += shards[i].extents;
2161 }
2162 }
2163 unsigned target = cct->_conf->bluestore_extent_map_shard_target_size;
2164 unsigned slop = target *
2165 cct->_conf->bluestore_extent_map_shard_target_size_slop;
2166 unsigned extent_avg = bytes / MAX(1, extents);
2167 dout(20) << __func__ << " extent_avg " << extent_avg << ", target " << target
2168 << ", slop " << slop << dendl;
2169
2170 // reshard
2171 unsigned estimate = 0;
2172 unsigned offset = needs_reshard_begin;
2173 vector<bluestore_onode_t::shard_info> new_shard_info;
2174 unsigned max_blob_end = 0;
2175 Extent dummy(needs_reshard_begin);
2176 for (auto e = extent_map.lower_bound(dummy);
2177 e != extent_map.end();
2178 ++e) {
2179 if (e->logical_offset >= needs_reshard_end) {
2180 break;
2181 }
2182 dout(30) << " extent " << *e << dendl;
2183
2184 // disfavor shard boundaries that span a blob
2185 bool would_span = (e->logical_offset < max_blob_end) || e->blob_offset;
2186 if (estimate &&
2187 estimate + extent_avg > target + (would_span ? slop : 0)) {
2188 // new shard
2189 if (offset == needs_reshard_begin) {
2190 new_shard_info.emplace_back(bluestore_onode_t::shard_info());
2191 new_shard_info.back().offset = offset;
2192 dout(20) << __func__ << " new shard 0x" << std::hex << offset
2193 << std::dec << dendl;
2194 }
2195 offset = e->logical_offset;
2196 new_shard_info.emplace_back(bluestore_onode_t::shard_info());
2197 new_shard_info.back().offset = offset;
2198 dout(20) << __func__ << " new shard 0x" << std::hex << offset
2199 << std::dec << dendl;
2200 estimate = 0;
2201 }
2202 estimate += extent_avg;
2203 unsigned bs = e->blob_start();
2204 if (bs < spanning_scan_begin) {
2205 spanning_scan_begin = bs;
2206 }
2207 uint32_t be = e->blob_end();
2208 if (be > max_blob_end) {
2209 max_blob_end = be;
2210 }
2211 if (be > spanning_scan_end) {
2212 spanning_scan_end = be;
2213 }
2214 }
2215 if (new_shard_info.empty() && (si_begin > 0 ||
2216 si_end < shards.size())) {
2217 // we resharded a partial range; we must produce at least one output
2218 // shard
2219 new_shard_info.emplace_back(bluestore_onode_t::shard_info());
2220 new_shard_info.back().offset = needs_reshard_begin;
2221 dout(20) << __func__ << " new shard 0x" << std::hex << needs_reshard_begin
2222 << std::dec << " (singleton degenerate case)" << dendl;
2223 }
2224
2225 auto& sv = onode->onode.extent_map_shards;
2226 dout(20) << __func__ << " new " << new_shard_info << dendl;
2227 dout(20) << __func__ << " old " << sv << dendl;
2228 if (sv.empty()) {
2229 // no old shards to keep
2230 sv.swap(new_shard_info);
2231 init_shards(true, true);
2232 } else {
2233 // splice in new shards
2234 sv.erase(sv.begin() + si_begin, sv.begin() + si_end);
2235 shards.erase(shards.begin() + si_begin, shards.begin() + si_end);
2236 sv.insert(
2237 sv.begin() + si_begin,
2238 new_shard_info.begin(),
2239 new_shard_info.end());
2240 shards.insert(shards.begin() + si_begin, new_shard_info.size(), Shard());
2241 si_end = si_begin + new_shard_info.size();
2242
2243 assert(sv.size() == shards.size());
2244
2245 // note that we need to update every shard_info of shards here,
2246 // as sv might have been totally re-allocated above
2247 for (unsigned i = 0; i < shards.size(); i++) {
2248 shards[i].shard_info = &sv[i];
2249 }
2250
2251 // mark newly added shards as dirty
2252 for (unsigned i = si_begin; i < si_end; ++i) {
2253 shards[i].loaded = true;
2254 shards[i].dirty = true;
2255 }
2256 }
2257 dout(20) << __func__ << " fin " << sv << dendl;
2258 inline_bl.clear();
2259
2260 if (sv.empty()) {
2261 // no more shards; unspan all previously spanning blobs
2262 auto p = spanning_blob_map.begin();
2263 while (p != spanning_blob_map.end()) {
2264 p->second->id = -1;
2265 dout(30) << __func__ << " un-spanning " << *p->second << dendl;
2266 p = spanning_blob_map.erase(p);
2267 }
2268 } else {
2269 // identify new spanning blobs
2270 dout(20) << __func__ << " checking spanning blobs 0x[" << std::hex
2271 << spanning_scan_begin << "," << spanning_scan_end << ")" << dendl;
2272 if (spanning_scan_begin < needs_reshard_begin) {
2273 fault_range(db, spanning_scan_begin,
2274 needs_reshard_begin - spanning_scan_begin);
2275 }
2276 if (spanning_scan_end > needs_reshard_end) {
2277 fault_range(db, needs_reshard_end,
2278 spanning_scan_end - needs_reshard_end);
2279 }
2280 auto sp = sv.begin() + si_begin;
2281 auto esp = sv.end();
2282 unsigned shard_start = sp->offset;
2283 unsigned shard_end;
2284 ++sp;
2285 if (sp == esp) {
2286 shard_end = OBJECT_MAX_SIZE;
2287 } else {
2288 shard_end = sp->offset;
2289 }
2290 Extent dummy(needs_reshard_begin);
2291 for (auto e = extent_map.lower_bound(dummy); e != extent_map.end(); ++e) {
2292 if (e->logical_offset >= needs_reshard_end) {
2293 break;
2294 }
2295 dout(30) << " extent " << *e << dendl;
2296 while (e->logical_offset >= shard_end) {
2297 shard_start = shard_end;
2298 assert(sp != esp);
2299 ++sp;
2300 if (sp == esp) {
2301 shard_end = OBJECT_MAX_SIZE;
2302 } else {
2303 shard_end = sp->offset;
2304 }
2305 dout(30) << __func__ << " shard 0x" << std::hex << shard_start
2306 << " to 0x" << shard_end << std::dec << dendl;
2307 }
2308 if (e->blob_escapes_range(shard_start, shard_end - shard_start)) {
2309 if (!e->blob->is_spanning()) {
2310 // We have two options: (1) split the blob into pieces at the
2311 // shard boundaries (and adjust extents accordingly), or (2)
2312 // mark it spanning. We prefer to cut the blob if we can. Note that
2313 // we may have to split it multiple times--potentially at every
2314 // shard boundary.
2315 bool must_span = false;
2316 BlobRef b = e->blob;
2317 if (b->can_split()) {
2318 uint32_t bstart = e->blob_start();
2319 uint32_t bend = e->blob_end();
2320 for (const auto& sh : shards) {
2321 if (bstart < sh.shard_info->offset &&
2322 bend > sh.shard_info->offset) {
2323 uint32_t blob_offset = sh.shard_info->offset - bstart;
2324 if (b->can_split_at(blob_offset)) {
2325 dout(20) << __func__ << " splitting blob, bstart 0x"
2326 << std::hex << bstart << " blob_offset 0x"
2327 << blob_offset << std::dec << " " << *b << dendl;
2328 b = split_blob(b, blob_offset, sh.shard_info->offset);
2329 // switch b to the new right-hand side, in case it
2330 // *also* has to get split.
2331 bstart += blob_offset;
2332 onode->c->store->logger->inc(l_bluestore_blob_split);
2333 } else {
2334 must_span = true;
2335 break;
2336 }
2337 }
2338 }
2339 } else {
2340 must_span = true;
2341 }
2342 if (must_span) {
2343 auto bid = allocate_spanning_blob_id();
2344 b->id = bid;
2345 spanning_blob_map[b->id] = b;
2346 dout(20) << __func__ << " adding spanning " << *b << dendl;
2347 }
2348 }
2349 } else {
2350 if (e->blob->is_spanning()) {
2351 spanning_blob_map.erase(e->blob->id);
2352 e->blob->id = -1;
2353 dout(30) << __func__ << " un-spanning " << *e->blob << dendl;
2354 }
2355 }
2356 }
2357 }
2358
2359 clear_needs_reshard();
2360 }
2361
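// Encode all extents in [offset, offset+length) into bl.  Each extent is
// written as a varint blobid whose low bits are the BLOBID_FLAG_* bits and
// whose high bits identify either a spanning blob or a previously encoded
// local blob; fields that can be inferred (logical offset, blob_offset,
// length) are omitted according to those flags.  Returns true (and requests
// a reshard) if a non-spanning blob escapes the encoded range.
//
// Rough framing sketch (illustrative, not an exhaustive spec): an extent
// that starts exactly where the previous one ended, with blob_offset 0 and
// a new length, referencing a blob encoded inline here, would emit
//   blobid = (0 << BLOBID_SHIFT_BITS) | BLOBID_FLAG_CONTIGUOUS | BLOBID_FLAG_ZEROOFFSET
// followed only by the varint-encoded length and the blob itself.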
2362 bool BlueStore::ExtentMap::encode_some(
2363 uint32_t offset,
2364 uint32_t length,
2365 bufferlist& bl,
2366 unsigned *pn)
2367 {
2368 auto cct = onode->c->store->cct; //used by dout
2369 Extent dummy(offset);
2370 auto start = extent_map.lower_bound(dummy);
2371 uint32_t end = offset + length;
2372
2373 __u8 struct_v = 2; // Version 2 differs from v1 in blob's ref_map
2374 // serialization only. Hence there is no specific
2375 // handling at ExtentMap level.
2376
2377 unsigned n = 0;
2378 size_t bound = 0;
2379 bool must_reshard = false;
2380 for (auto p = start;
2381 p != extent_map.end() && p->logical_offset < end;
2382 ++p, ++n) {
2383 assert(p->logical_offset >= offset);
2384 p->blob->last_encoded_id = -1;
2385 if (!p->blob->is_spanning() && p->blob_escapes_range(offset, length)) {
2386 dout(30) << __func__ << " 0x" << std::hex << offset << "~" << length
2387 << std::dec << " hit new spanning blob " << *p << dendl;
2388 request_reshard(p->blob_start(), p->blob_end());
2389 must_reshard = true;
2390 }
2391 if (!must_reshard) {
2392 denc_varint(0, bound); // blobid
2393 denc_varint(0, bound); // logical_offset
2394 denc_varint(0, bound); // len
2395 denc_varint(0, bound); // blob_offset
2396
2397 p->blob->bound_encode(
2398 bound,
2399 struct_v,
2400 p->blob->shared_blob->get_sbid(),
2401 false);
2402 }
2403 }
2404 if (must_reshard) {
2405 return true;
2406 }
2407
2408 denc(struct_v, bound);
2409 denc_varint(0, bound); // number of extents
2410
2411 {
2412 auto app = bl.get_contiguous_appender(bound);
2413 denc(struct_v, app);
2414 denc_varint(n, app);
2415 if (pn) {
2416 *pn = n;
2417 }
2418
2419 n = 0;
2420 uint64_t pos = 0;
2421 uint64_t prev_len = 0;
2422 for (auto p = start;
2423 p != extent_map.end() && p->logical_offset < end;
2424 ++p, ++n) {
2425 unsigned blobid;
2426 bool include_blob = false;
2427 if (p->blob->is_spanning()) {
2428 blobid = p->blob->id << BLOBID_SHIFT_BITS;
2429 blobid |= BLOBID_FLAG_SPANNING;
2430 } else if (p->blob->last_encoded_id < 0) {
2431 p->blob->last_encoded_id = n + 1; // so it is always non-zero
2432 include_blob = true;
2433 blobid = 0; // the decoder will infer the id from n
2434 } else {
2435 blobid = p->blob->last_encoded_id << BLOBID_SHIFT_BITS;
2436 }
2437 if (p->logical_offset == pos) {
2438 blobid |= BLOBID_FLAG_CONTIGUOUS;
2439 }
2440 if (p->blob_offset == 0) {
2441 blobid |= BLOBID_FLAG_ZEROOFFSET;
2442 }
2443 if (p->length == prev_len) {
2444 blobid |= BLOBID_FLAG_SAMELENGTH;
2445 } else {
2446 prev_len = p->length;
2447 }
2448 denc_varint(blobid, app);
2449 if ((blobid & BLOBID_FLAG_CONTIGUOUS) == 0) {
2450 denc_varint_lowz(p->logical_offset - pos, app);
2451 }
2452 if ((blobid & BLOBID_FLAG_ZEROOFFSET) == 0) {
2453 denc_varint_lowz(p->blob_offset, app);
2454 }
2455 if ((blobid & BLOBID_FLAG_SAMELENGTH) == 0) {
2456 denc_varint_lowz(p->length, app);
2457 }
2458 pos = p->logical_end();
2459 if (include_blob) {
2460 p->blob->encode(app, struct_v, p->blob->shared_blob->get_sbid(), false);
2461 }
2462 }
2463 }
2464 /*derr << __func__ << bl << dendl;
2465 derr << __func__ << ":";
2466 bl.hexdump(*_dout);
2467 *_dout << dendl;
2468 */
2469 return false;
2470 }
2471
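// Decode one encoded shard (the inverse of encode_some) and populate
// extent_map.  Blobs flagged as spanning are resolved through
// get_spanning_blob(); other blobs either reference a blob decoded earlier
// in the same shard (by its non-zero encode id) or are decoded inline and
// registered via open_shared_blob(), with their ref_map rebuilt through
// get_ref().  Returns the number of extents decoded.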
2472 unsigned BlueStore::ExtentMap::decode_some(bufferlist& bl)
2473 {
2474 auto cct = onode->c->store->cct; //used by dout
2475 /*
2476 derr << __func__ << ":";
2477 bl.hexdump(*_dout);
2478 *_dout << dendl;
2479 */
2480
2481 assert(bl.get_num_buffers() <= 1);
2482 auto p = bl.front().begin_deep();
2483 __u8 struct_v;
2484 denc(struct_v, p);
2485 // Version 2 differs from v1 in blob's ref_map
2486 // serialization only. Hence there is no specific
2487 // handling at ExtentMap level below.
2488 assert(struct_v == 1 || struct_v == 2);
2489
2490 uint32_t num;
2491 denc_varint(num, p);
2492 vector<BlobRef> blobs(num);
2493 uint64_t pos = 0;
2494 uint64_t prev_len = 0;
2495 unsigned n = 0;
2496
2497 while (!p.end()) {
2498 Extent *le = new Extent();
2499 uint64_t blobid;
2500 denc_varint(blobid, p);
2501 if ((blobid & BLOBID_FLAG_CONTIGUOUS) == 0) {
2502 uint64_t gap;
2503 denc_varint_lowz(gap, p);
2504 pos += gap;
2505 }
2506 le->logical_offset = pos;
2507 if ((blobid & BLOBID_FLAG_ZEROOFFSET) == 0) {
2508 denc_varint_lowz(le->blob_offset, p);
2509 } else {
2510 le->blob_offset = 0;
2511 }
2512 if ((blobid & BLOBID_FLAG_SAMELENGTH) == 0) {
2513 denc_varint_lowz(prev_len, p);
2514 }
2515 le->length = prev_len;
2516
2517 if (blobid & BLOBID_FLAG_SPANNING) {
2518 dout(30) << __func__ << " getting spanning blob "
2519 << (blobid >> BLOBID_SHIFT_BITS) << dendl;
2520 le->assign_blob(get_spanning_blob(blobid >> BLOBID_SHIFT_BITS));
2521 } else {
2522 blobid >>= BLOBID_SHIFT_BITS;
2523 if (blobid) {
2524 le->assign_blob(blobs[blobid - 1]);
2525 assert(le->blob);
2526 } else {
2527 Blob *b = new Blob();
2528 uint64_t sbid = 0;
2529 b->decode(onode->c, p, struct_v, &sbid, false);
2530 blobs[n] = b;
2531 onode->c->open_shared_blob(sbid, b);
2532 le->assign_blob(b);
2533 }
2534 // we build ref_map dynamically for non-spanning blobs
2535 le->blob->get_ref(
2536 onode->c,
2537 le->blob_offset,
2538 le->length);
2539 }
2540 pos += prev_len;
2541 ++n;
2542 extent_map.insert(*le);
2543 }
2544
2545 assert(n == num);
2546 return num;
2547 }
2548
2549 void BlueStore::ExtentMap::bound_encode_spanning_blobs(size_t& p)
2550 {
2551 // Version 2 differs from v1 in blob's ref_map
2552 // serialization only. Hence there is no specific
2553 // handling at ExtentMap level.
2554 __u8 struct_v = 2;
2555
2556 denc(struct_v, p);
2557 denc_varint((uint32_t)0, p);
2558 size_t key_size = 0;
2559 denc_varint((uint32_t)0, key_size);
2560 p += spanning_blob_map.size() * key_size;
2561 for (const auto& i : spanning_blob_map) {
2562 i.second->bound_encode(p, struct_v, i.second->shared_blob->get_sbid(), true);
2563 }
2564 }
2565
2566 void BlueStore::ExtentMap::encode_spanning_blobs(
2567 bufferlist::contiguous_appender& p)
2568 {
2569 // Version 2 differs from v1 in blob's ref_map
2570 // serialization only. Hence there is no specific
2571 // handling at ExtentMap level.
2572 __u8 struct_v = 2;
2573
2574 denc(struct_v, p);
2575 denc_varint(spanning_blob_map.size(), p);
2576 for (auto& i : spanning_blob_map) {
2577 denc_varint(i.second->id, p);
2578 i.second->encode(p, struct_v, i.second->shared_blob->get_sbid(), true);
2579 }
2580 }
2581
2582 void BlueStore::ExtentMap::decode_spanning_blobs(
2583 bufferptr::iterator& p)
2584 {
2585 __u8 struct_v;
2586 denc(struct_v, p);
2587 // Version 2 differs from v1 in blob's ref_map
2588 // serialization only. Hence there is no specific
2589 // handling at ExtentMap level.
2590 assert(struct_v == 1 || struct_v == 2);
2591
2592 unsigned n;
2593 denc_varint(n, p);
2594 while (n--) {
2595 BlobRef b(new Blob());
2596 denc_varint(b->id, p);
2597 spanning_blob_map[b->id] = b;
2598 uint64_t sbid = 0;
2599 b->decode(onode->c, p, struct_v, &sbid, true);
2600 onode->c->open_shared_blob(sbid, b);
2601 }
2602 }
2603
2604 void BlueStore::ExtentMap::init_shards(bool loaded, bool dirty)
2605 {
2606 shards.resize(onode->onode.extent_map_shards.size());
2607 unsigned i = 0;
2608 for (auto &s : onode->onode.extent_map_shards) {
2609 shards[i].shard_info = &s;
2610 shards[i].loaded = loaded;
2611 shards[i].dirty = dirty;
2612 ++i;
2613 }
2614 }
2615
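// Make sure every shard overlapping [offset, offset+length) is loaded,
// reading any missing shard from the DB and decoding it.  A shard that is
// already resident only bumps the cache-hit counter.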
2616 void BlueStore::ExtentMap::fault_range(
2617 KeyValueDB *db,
2618 uint32_t offset,
2619 uint32_t length)
2620 {
2621 auto cct = onode->c->store->cct; //used by dout
2622 dout(30) << __func__ << " 0x" << std::hex << offset << "~" << length
2623 << std::dec << dendl;
2624 auto start = seek_shard(offset);
2625 auto last = seek_shard(offset + length);
2626
2627 if (start < 0)
2628 return;
2629
2630 assert(last >= start);
2631 string key;
2632 while (start <= last) {
2633 assert((size_t)start < shards.size());
2634 auto p = &shards[start];
2635 if (!p->loaded) {
2636 dout(30) << __func__ << " opening shard 0x" << std::hex
2637 << p->shard_info->offset << std::dec << dendl;
2638 bufferlist v;
2639 generate_extent_shard_key_and_apply(
2640 onode->key, p->shard_info->offset, &key,
2641 [&](const string& final_key) {
2642 int r = db->get(PREFIX_OBJ, final_key, &v);
2643 if (r < 0) {
2644 derr << __func__ << " missing shard 0x" << std::hex
2645 << p->shard_info->offset << std::dec << " for " << onode->oid
2646 << dendl;
2647 assert(r >= 0);
2648 }
2649 }
2650 );
2651 p->extents = decode_some(v);
2652 p->loaded = true;
2653 dout(20) << __func__ << " open shard 0x" << std::hex
2654 << p->shard_info->offset << std::dec
2655 << " (" << v.length() << " bytes)" << dendl;
2656 assert(p->dirty == false);
2657 assert(v.length() == p->shard_info->bytes);
2658 onode->c->store->logger->inc(l_bluestore_onode_shard_misses);
2659 } else {
2660 onode->c->store->logger->inc(l_bluestore_onode_shard_hits);
2661 }
2662 ++start;
2663 }
2664 }
2665
2666 void BlueStore::ExtentMap::dirty_range(
2667 uint32_t offset,
2668 uint32_t length)
2669 {
2670 auto cct = onode->c->store->cct; //used by dout
2671 dout(30) << __func__ << " 0x" << std::hex << offset << "~" << length
2672 << std::dec << dendl;
2673 if (shards.empty()) {
2674 dout(20) << __func__ << " mark inline shard dirty" << dendl;
2675 inline_bl.clear();
2676 return;
2677 }
2678 auto start = seek_shard(offset);
2679 auto last = seek_shard(offset + length);
2680 if (start < 0)
2681 return;
2682
2683 assert(last >= start);
2684 while (start <= last) {
2685 assert((size_t)start < shards.size());
2686 auto p = &shards[start];
2687 if (!p->loaded) {
2688 dout(20) << __func__ << " shard 0x" << std::hex << p->shard_info->offset
2689 << std::dec << " is not loaded, can't mark dirty" << dendl;
2690 assert(0 == "can't mark unloaded shard dirty");
2691 }
2692 if (!p->dirty) {
2693 dout(20) << __func__ << " mark shard 0x" << std::hex
2694 << p->shard_info->offset << std::dec << " dirty" << dendl;
2695 p->dirty = true;
2696 }
2697 ++start;
2698 }
2699 }
2700
2701 BlueStore::extent_map_t::iterator BlueStore::ExtentMap::find(
2702 uint64_t offset)
2703 {
2704 Extent dummy(offset);
2705 return extent_map.find(dummy);
2706 }
2707
2708 BlueStore::extent_map_t::iterator BlueStore::ExtentMap::seek_lextent(
2709 uint64_t offset)
2710 {
2711 Extent dummy(offset);
2712 auto fp = extent_map.lower_bound(dummy);
2713 if (fp != extent_map.begin()) {
2714 --fp;
2715 if (fp->logical_end() <= offset) {
2716 ++fp;
2717 }
2718 }
2719 return fp;
2720 }
2721
2722 BlueStore::extent_map_t::const_iterator BlueStore::ExtentMap::seek_lextent(
2723 uint64_t offset) const
2724 {
2725 Extent dummy(offset);
2726 auto fp = extent_map.lower_bound(dummy);
2727 if (fp != extent_map.begin()) {
2728 --fp;
2729 if (fp->logical_end() <= offset) {
2730 ++fp;
2731 }
2732 }
2733 return fp;
2734 }
2735
2736 bool BlueStore::ExtentMap::has_any_lextents(uint64_t offset, uint64_t length)
2737 {
2738 auto fp = seek_lextent(offset);
2739 if (fp == extent_map.end() || fp->logical_offset >= offset + length) {
2740 return false;
2741 }
2742 return true;
2743 }
2744
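// Merge adjacent lextents in [offset, offset+length) that are logically
// contiguous, reference the same blob, and are contiguous within that blob,
// without merging across a shard boundary.  Returns the number of extents
// removed.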
2745 int BlueStore::ExtentMap::compress_extent_map(
2746 uint64_t offset,
2747 uint64_t length)
2748 {
2749 auto cct = onode->c->store->cct; //used by dout
2750 if (extent_map.empty())
2751 return 0;
2752 int removed = 0;
2753 auto p = seek_lextent(offset);
2754 if (p != extent_map.begin()) {
2755 --p; // start to the left of offset
2756 }
2757 // the caller should have just written to this region
2758 assert(p != extent_map.end());
2759
2760 // identify the *next* shard
2761 auto pshard = shards.begin();
2762 while (pshard != shards.end() &&
2763 p->logical_offset >= pshard->shard_info->offset) {
2764 ++pshard;
2765 }
2766 uint64_t shard_end;
2767 if (pshard != shards.end()) {
2768 shard_end = pshard->shard_info->offset;
2769 } else {
2770 shard_end = OBJECT_MAX_SIZE;
2771 }
2772
2773 auto n = p;
2774 for (++n; n != extent_map.end(); p = n++) {
2775 if (n->logical_offset > offset + length) {
2776 break; // stop after end
2777 }
2778 while (n != extent_map.end() &&
2779 p->logical_end() == n->logical_offset &&
2780 p->blob == n->blob &&
2781 p->blob_offset + p->length == n->blob_offset &&
2782 n->logical_offset < shard_end) {
2783 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
2784 << " next shard 0x" << shard_end << std::dec
2785 << " merging " << *p << " and " << *n << dendl;
2786 p->length += n->length;
2787 rm(n++);
2788 ++removed;
2789 }
2790 if (n == extent_map.end()) {
2791 break;
2792 }
2793 if (n->logical_offset >= shard_end) {
2794 assert(pshard != shards.end());
2795 ++pshard;
2796 if (pshard != shards.end()) {
2797 shard_end = pshard->shard_info->offset;
2798 } else {
2799 shard_end = OBJECT_MAX_SIZE;
2800 }
2801 }
2802 }
2803 if (removed && onode) {
2804 onode->c->store->logger->inc(l_bluestore_extent_compress, removed);
2805 }
2806 return removed;
2807 }
2808
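// Punch a hole over the logical range [offset, offset+length).  Overlapping
// lextents are split around the hole, trimmed at the head or tail, or
// removed entirely; each dereferenced piece is queued on *old_extents for
// the caller.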
2809 void BlueStore::ExtentMap::punch_hole(
2810 CollectionRef &c,
2811 uint64_t offset,
2812 uint64_t length,
2813 old_extent_map_t *old_extents)
2814 {
2815 auto p = seek_lextent(offset);
2816 uint64_t end = offset + length;
2817 while (p != extent_map.end()) {
2818 if (p->logical_offset >= end) {
2819 break;
2820 }
2821 if (p->logical_offset < offset) {
2822 if (p->logical_end() > end) {
2823 // split and deref middle
2824 uint64_t front = offset - p->logical_offset;
2825 OldExtent* oe = OldExtent::create(c, offset, p->blob_offset + front,
2826 length, p->blob);
2827 old_extents->push_back(*oe);
2828 add(end,
2829 p->blob_offset + front + length,
2830 p->length - front - length,
2831 p->blob);
2832 p->length = front;
2833 break;
2834 } else {
2835 // deref tail
2836 assert(p->logical_end() > offset); // else seek_lextent bug
2837 uint64_t keep = offset - p->logical_offset;
2838 OldExtent* oe = OldExtent::create(c, offset, p->blob_offset + keep,
2839 p->length - keep, p->blob);
2840 old_extents->push_back(*oe);
2841 p->length = keep;
2842 ++p;
2843 continue;
2844 }
2845 }
2846 if (p->logical_offset + p->length <= end) {
2847 // deref whole lextent
2848 OldExtent* oe = OldExtent::create(c, p->logical_offset, p->blob_offset,
2849 p->length, p->blob);
2850 old_extents->push_back(*oe);
2851 rm(p++);
2852 continue;
2853 }
2854 // deref head
2855 uint64_t keep = p->logical_end() - end;
2856 OldExtent* oe = OldExtent::create(c, p->logical_offset, p->blob_offset,
2857 p->length - keep, p->blob);
2858 old_extents->push_back(*oe);
2859
2860 add(end, p->blob_offset + p->length - keep, keep, p->blob);
2861 rm(p);
2862 break;
2863 }
2864 }
2865
2866 BlueStore::Extent *BlueStore::ExtentMap::set_lextent(
2867 CollectionRef &c,
2868 uint64_t logical_offset,
2869 uint64_t blob_offset, uint64_t length, BlobRef b,
2870 old_extent_map_t *old_extents)
2871 {
2872 // We need a completely initialized Blob to increment its ref counters.
2873 assert(b->get_blob().get_logical_length() != 0);
2874
2875 // Do get_ref prior to punch_hole to prevent putting a reused blob into the
2876 // old_extents list if we overwrite the blob totally.
2877 // This might happen during WAL overwrite.
2878 b->get_ref(onode->c, blob_offset, length);
2879
2880 if (old_extents) {
2881 punch_hole(c, logical_offset, length, old_extents);
2882 }
2883
2884 Extent *le = new Extent(logical_offset, blob_offset, length, b);
2885 extent_map.insert(*le);
2886 if (spans_shard(logical_offset, length)) {
2887 request_reshard(logical_offset, logical_offset + length);
2888 }
2889 return le;
2890 }
2891
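// Split blob lb at blob_offset, where pos is the logical offset that
// corresponds to that point.  A new right-hand blob is created, and every
// lextent at or beyond pos that still references lb is either split or
// switched over to the new blob (with its blob_offset rebased).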
2892 BlueStore::BlobRef BlueStore::ExtentMap::split_blob(
2893 BlobRef lb,
2894 uint32_t blob_offset,
2895 uint32_t pos)
2896 {
2897 auto cct = onode->c->store->cct; //used by dout
2898
2899 uint32_t end_pos = pos + lb->get_blob().get_logical_length() - blob_offset;
2900 dout(20) << __func__ << " 0x" << std::hex << pos << " end 0x" << end_pos
2901 << " blob_offset 0x" << blob_offset << std::dec << " " << *lb
2902 << dendl;
2903 BlobRef rb = onode->c->new_blob();
2904 lb->split(onode->c, blob_offset, rb.get());
2905
2906 for (auto ep = seek_lextent(pos);
2907 ep != extent_map.end() && ep->logical_offset < end_pos;
2908 ++ep) {
2909 if (ep->blob != lb) {
2910 continue;
2911 }
2912 if (ep->logical_offset < pos) {
2913 // split extent
2914 size_t left = pos - ep->logical_offset;
2915 Extent *ne = new Extent(pos, 0, ep->length - left, rb);
2916 extent_map.insert(*ne);
2917 ep->length = left;
2918 dout(30) << __func__ << " split " << *ep << dendl;
2919 dout(30) << __func__ << " to " << *ne << dendl;
2920 } else {
2921 // switch blob
2922 assert(ep->blob_offset >= blob_offset);
2923
2924 ep->blob = rb;
2925 ep->blob_offset -= blob_offset;
2926 dout(30) << __func__ << " adjusted " << *ep << dendl;
2927 }
2928 }
2929 return rb;
2930 }
2931
2932 // Onode
2933
2934 #undef dout_prefix
2935 #define dout_prefix *_dout << "bluestore.onode(" << this << ")." << __func__ << " "
2936
2937 void BlueStore::Onode::flush()
2938 {
2939 if (flushing_count.load()) {
2940 ldout(c->store->cct, 20) << __func__ << " cnt:" << flushing_count << dendl;
2941 std::unique_lock<std::mutex> l(flush_lock);
2942 while (flushing_count.load()) {
2943 flush_cond.wait(l);
2944 }
2945 }
2946 ldout(c->store->cct, 20) << __func__ << " done" << dendl;
2947 }
2948
2949 // =======================================================
2950 // WriteContext
2951
2952 /// Checks for writes to the same pextent within a blob
2953 bool BlueStore::WriteContext::has_conflict(
2954 BlobRef b,
2955 uint64_t loffs,
2956 uint64_t loffs_end,
2957 uint64_t min_alloc_size)
2958 {
2959 assert((loffs % min_alloc_size) == 0);
2960 assert((loffs_end % min_alloc_size) == 0);
2961 for (auto w : writes) {
2962 if (b == w.b) {
2963 auto loffs2 = P2ALIGN(w.logical_offset, min_alloc_size);
2964 auto loffs2_end = P2ROUNDUP(w.logical_offset + w.length0, min_alloc_size);
2965 if ((loffs <= loffs2 && loffs_end > loffs2) ||
2966 (loffs >= loffs2 && loffs < loffs2_end)) {
2967 return true;
2968 }
2969 }
2970 }
2971 return false;
2972 }
2973
2974 // =======================================================
2975
2976 // DeferredBatch
2977 #undef dout_prefix
2978 #define dout_prefix *_dout << "bluestore.DeferredBatch(" << this << ") "
2979
2980 void BlueStore::DeferredBatch::prepare_write(
2981 CephContext *cct,
2982 uint64_t seq, uint64_t offset, uint64_t length,
2983 bufferlist::const_iterator& blp)
2984 {
2985 _discard(cct, offset, length);
2986 auto i = iomap.insert(make_pair(offset, deferred_io()));
2987 assert(i.second); // this should be a new insertion
2988 i.first->second.seq = seq;
2989 blp.copy(length, i.first->second.bl);
2990 i.first->second.bl.reassign_to_mempool(
2991 mempool::mempool_bluestore_writing_deferred);
2992 dout(20) << __func__ << " seq " << seq
2993 << " 0x" << std::hex << offset << "~" << length
2994 << " crc " << i.first->second.bl.crc32c(-1)
2995 << std::dec << dendl;
2996 seq_bytes[seq] += length;
2997 #ifdef DEBUG_DEFERRED
2998 _audit(cct);
2999 #endif
3000 }
3001
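// Drop any deferred I/O already queued over [offset, offset+length) so a
// new write can take its place.  Overlapping entries are trimmed: surviving
// head and/or tail fragments are kept (re-inserted under their new offset
// where needed) and the per-seq byte accounting is adjusted.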
3002 void BlueStore::DeferredBatch::_discard(
3003 CephContext *cct, uint64_t offset, uint64_t length)
3004 {
3005 generic_dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
3006 << std::dec << dendl;
3007 auto p = iomap.lower_bound(offset);
3008 if (p != iomap.begin()) {
3009 --p;
3010 auto end = p->first + p->second.bl.length();
3011 if (end > offset) {
3012 bufferlist head;
3013 head.substr_of(p->second.bl, 0, offset - p->first);
3014 dout(20) << __func__ << " keep head " << p->second.seq
3015 << " 0x" << std::hex << p->first << "~" << p->second.bl.length()
3016 << " -> 0x" << head.length() << std::dec << dendl;
3017 auto i = seq_bytes.find(p->second.seq);
3018 assert(i != seq_bytes.end());
3019 if (end > offset + length) {
3020 bufferlist tail;
3021 tail.substr_of(p->second.bl, offset + length - p->first,
3022 end - (offset + length));
3023 dout(20) << __func__ << " keep tail " << p->second.seq
3024 << " 0x" << std::hex << p->first << "~" << p->second.bl.length()
3025 << " -> 0x" << tail.length() << std::dec << dendl;
3026 auto &n = iomap[offset + length];
3027 n.bl.swap(tail);
3028 n.seq = p->second.seq;
3029 i->second -= length;
3030 } else {
3031 i->second -= end - offset;
3032 }
3033 assert(i->second >= 0);
3034 p->second.bl.swap(head);
3035 }
3036 ++p;
3037 }
3038 while (p != iomap.end()) {
3039 if (p->first >= offset + length) {
3040 break;
3041 }
3042 auto i = seq_bytes.find(p->second.seq);
3043 assert(i != seq_bytes.end());
3044 auto end = p->first + p->second.bl.length();
3045 if (end > offset + length) {
3046 unsigned drop_front = offset + length - p->first;
3047 unsigned keep_tail = end - (offset + length);
3048 dout(20) << __func__ << " truncate front " << p->second.seq
3049 << " 0x" << std::hex << p->first << "~" << p->second.bl.length()
3050 << " drop_front 0x" << drop_front << " keep_tail 0x" << keep_tail
3051 << " to 0x" << (offset + length) << "~" << keep_tail
3052 << std::dec << dendl;
3053 auto &s = iomap[offset + length];
3054 s.seq = p->second.seq;
3055 s.bl.substr_of(p->second.bl, drop_front, keep_tail);
3056 i->second -= drop_front;
3057 } else {
3058 dout(20) << __func__ << " drop " << p->second.seq
3059 << " 0x" << std::hex << p->first << "~" << p->second.bl.length()
3060 << std::dec << dendl;
3061 i->second -= p->second.bl.length();
3062 }
3063 assert(i->second >= 0);
3064 p = iomap.erase(p);
3065 }
3066 }
3067
3068 void BlueStore::DeferredBatch::_audit(CephContext *cct)
3069 {
3070 map<uint64_t,int> sb;
3071 for (auto p : seq_bytes) {
3072 sb[p.first] = 0; // make sure we have the same set of keys
3073 }
3074 uint64_t pos = 0;
3075 for (auto& p : iomap) {
3076 assert(p.first >= pos);
3077 sb[p.second.seq] += p.second.bl.length();
3078 pos = p.first + p.second.bl.length();
3079 }
3080 assert(sb == seq_bytes);
3081 }
3082
3083
3084 // Collection
3085
3086 #undef dout_prefix
3087 #define dout_prefix *_dout << "bluestore(" << store->path << ").collection(" << cid << " " << this << ") "
3088
3089 BlueStore::Collection::Collection(BlueStore *ns, Cache *c, coll_t cid)
3090 : store(ns),
3091 cache(c),
3092 cid(cid),
3093 lock("BlueStore::Collection::lock", true, false),
3094 exists(true),
3095 onode_map(c)
3096 {
3097 }
3098
3099 void BlueStore::Collection::open_shared_blob(uint64_t sbid, BlobRef b)
3100 {
3101 assert(!b->shared_blob);
3102 const bluestore_blob_t& blob = b->get_blob();
3103 if (!blob.is_shared()) {
3104 b->shared_blob = new SharedBlob(this);
3105 return;
3106 }
3107
3108 b->shared_blob = shared_blob_set.lookup(sbid);
3109 if (b->shared_blob) {
3110 ldout(store->cct, 10) << __func__ << " sbid 0x" << std::hex << sbid
3111 << std::dec << " had " << *b->shared_blob << dendl;
3112 } else {
3113 b->shared_blob = new SharedBlob(sbid, this);
3114 shared_blob_set.add(this, b->shared_blob.get());
3115 ldout(store->cct, 10) << __func__ << " sbid 0x" << std::hex << sbid
3116 << std::dec << " opened " << *b->shared_blob
3117 << dendl;
3118 }
3119 }
3120
3121 void BlueStore::Collection::load_shared_blob(SharedBlobRef sb)
3122 {
3123 if (!sb->is_loaded()) {
3124
3125 bufferlist v;
3126 string key;
3127 auto sbid = sb->get_sbid();
3128 get_shared_blob_key(sbid, &key);
3129 int r = store->db->get(PREFIX_SHARED_BLOB, key, &v);
3130 if (r < 0) {
3131 lderr(store->cct) << __func__ << " sbid 0x" << std::hex << sbid
3132 << std::dec << " not found at key "
3133 << pretty_binary_string(key) << dendl;
3134 assert(0 == "uh oh, missing shared_blob");
3135 }
3136
3137 sb->loaded = true;
3138 sb->persistent = new bluestore_shared_blob_t(sbid);
3139 bufferlist::iterator p = v.begin();
3140 ::decode(*(sb->persistent), p);
3141 ldout(store->cct, 10) << __func__ << " sbid 0x" << std::hex << sbid
3142 << std::dec << " loaded shared_blob " << *sb << dendl;
3143 }
3144 }
3145
3146 void BlueStore::Collection::make_blob_shared(uint64_t sbid, BlobRef b)
3147 {
3148 ldout(store->cct, 10) << __func__ << " " << *b << dendl;
3149 assert(!b->shared_blob->is_loaded());
3150
3151 // update blob
3152 bluestore_blob_t& blob = b->dirty_blob();
3153 blob.set_flag(bluestore_blob_t::FLAG_SHARED);
3154
3155 // update shared blob
3156 b->shared_blob->loaded = true;
3157 b->shared_blob->persistent = new bluestore_shared_blob_t(sbid);
3158 shared_blob_set.add(this, b->shared_blob.get());
3159 for (auto p : blob.get_extents()) {
3160 if (p.is_valid()) {
3161 b->shared_blob->get_ref(
3162 p.offset,
3163 p.length);
3164 }
3165 }
3166 ldout(store->cct, 20) << __func__ << " now " << *b << dendl;
3167 }
3168
3169 uint64_t BlueStore::Collection::make_blob_unshared(SharedBlob *sb)
3170 {
3171 ldout(store->cct, 10) << __func__ << " " << *sb << dendl;
3172 assert(sb->is_loaded());
3173
3174 uint64_t sbid = sb->get_sbid();
3175 shared_blob_set.remove(sb);
3176 sb->loaded = false;
3177 delete sb->persistent;
3178 sb->sbid_unloaded = 0;
3179 ldout(store->cct, 20) << __func__ << " now " << *sb << dendl;
3180 return sbid;
3181 }
3182
3183 BlueStore::OnodeRef BlueStore::Collection::get_onode(
3184 const ghobject_t& oid,
3185 bool create)
3186 {
3187 assert(create ? lock.is_wlocked() : lock.is_locked());
3188
3189 spg_t pgid;
3190 if (cid.is_pg(&pgid)) {
3191 if (!oid.match(cnode.bits, pgid.ps())) {
3192 lderr(store->cct) << __func__ << " oid " << oid << " not part of "
3193 << pgid << " bits " << cnode.bits << dendl;
3194 ceph_abort();
3195 }
3196 }
3197
3198 OnodeRef o = onode_map.lookup(oid);
3199 if (o)
3200 return o;
3201
3202 mempool::bluestore_cache_other::string key;
3203 get_object_key(store->cct, oid, &key);
3204
3205 ldout(store->cct, 20) << __func__ << " oid " << oid << " key "
3206 << pretty_binary_string(key) << dendl;
3207
3208 bufferlist v;
3209 int r = store->db->get(PREFIX_OBJ, key.c_str(), key.size(), &v);
3210 ldout(store->cct, 20) << " r " << r << " v.len " << v.length() << dendl;
3211 Onode *on;
3212 if (v.length() == 0) {
3213 assert(r == -ENOENT);
3214 if (!store->cct->_conf->bluestore_debug_misc &&
3215 !create)
3216 return OnodeRef();
3217
3218 // new object, new onode
3219 on = new Onode(this, oid, key);
3220 } else {
3221 // loaded
3222 assert(r >= 0);
3223 on = new Onode(this, oid, key);
3224 on->exists = true;
3225 bufferptr::iterator p = v.front().begin_deep();
3226 on->onode.decode(p);
3227
3228 // initialize extent_map
3229 on->extent_map.decode_spanning_blobs(p);
3230 if (on->onode.extent_map_shards.empty()) {
3231 denc(on->extent_map.inline_bl, p);
3232 on->extent_map.decode_some(on->extent_map.inline_bl);
3233 } else {
3234 on->extent_map.init_shards(false, false);
3235 }
3236 }
3237 o.reset(on);
3238 return onode_map.add(oid, o);
3239 }
3240
3241 void BlueStore::Collection::split_cache(
3242 Collection *dest)
3243 {
3244 ldout(store->cct, 10) << __func__ << " to " << dest << dendl;
3245
3246 // lock (one or both) cache shards
3247 std::lock(cache->lock, dest->cache->lock);
3248 std::lock_guard<std::recursive_mutex> l(cache->lock, std::adopt_lock);
3249 std::lock_guard<std::recursive_mutex> l2(dest->cache->lock, std::adopt_lock);
3250
3251 int destbits = dest->cnode.bits;
3252 spg_t destpg;
3253 bool is_pg = dest->cid.is_pg(&destpg);
3254 assert(is_pg);
3255
3256 auto p = onode_map.onode_map.begin();
3257 while (p != onode_map.onode_map.end()) {
3258 if (!p->second->oid.match(destbits, destpg.pgid.ps())) {
3259 // onode does not belong to this child
3260 ++p;
3261 } else {
3262 OnodeRef o = p->second;
3263 ldout(store->cct, 20) << __func__ << " moving " << o << " " << o->oid
3264 << dendl;
3265
3266 cache->_rm_onode(p->second);
3267 p = onode_map.onode_map.erase(p);
3268
3269 o->c = dest;
3270 dest->cache->_add_onode(o, 1);
3271 dest->onode_map.onode_map[o->oid] = o;
3272 dest->onode_map.cache = dest->cache;
3273
3274 // move over shared blobs and buffers. cover shared blobs from
3275 // both extent map and spanning blob map (the full extent map
3276 // may not be faulted in)
3277 vector<SharedBlob*> sbvec;
3278 for (auto& e : o->extent_map.extent_map) {
3279 sbvec.push_back(e.blob->shared_blob.get());
3280 }
3281 for (auto& b : o->extent_map.spanning_blob_map) {
3282 sbvec.push_back(b.second->shared_blob.get());
3283 }
3284 for (auto sb : sbvec) {
3285 if (sb->coll == dest) {
3286 ldout(store->cct, 20) << __func__ << " already moved " << *sb
3287 << dendl;
3288 continue;
3289 }
3290 ldout(store->cct, 20) << __func__ << " moving " << *sb << dendl;
3291 sb->coll = dest;
3292 if (sb->get_sbid()) {
3293 ldout(store->cct, 20) << __func__
3294 << " moving registration " << *sb << dendl;
3295 shared_blob_set.remove(sb);
3296 dest->shared_blob_set.add(dest, sb);
3297 }
3298 if (dest->cache != cache) {
3299 for (auto& i : sb->bc.buffer_map) {
3300 if (!i.second->is_writing()) {
3301 ldout(store->cct, 20) << __func__ << " moving " << *i.second
3302 << dendl;
3303 dest->cache->_move_buffer(cache, i.second.get());
3304 }
3305 }
3306 }
3307 }
3308 }
3309 }
3310 }
3311
3312 // =======================================================
3313
3314 void *BlueStore::MempoolThread::entry()
3315 {
3316 Mutex::Locker l(lock);
3317 while (!stop) {
3318 uint64_t meta_bytes =
3319 mempool::bluestore_cache_other::allocated_bytes() +
3320 mempool::bluestore_cache_onode::allocated_bytes();
3321 uint64_t onode_num =
3322 mempool::bluestore_cache_onode::allocated_items();
3323
3324 if (onode_num < 2) {
3325 onode_num = 2;
3326 }
3327
3328 float bytes_per_onode = (float)meta_bytes / (float)onode_num;
3329 size_t num_shards = store->cache_shards.size();
3330 float target_ratio = store->cache_meta_ratio + store->cache_data_ratio;
3331 // A little sloppy but should be close enough
3332 uint64_t shard_target = target_ratio * (store->cache_size / num_shards);
3333
3334 for (auto i : store->cache_shards) {
3335 i->trim(shard_target,
3336 store->cache_meta_ratio,
3337 store->cache_data_ratio,
3338 bytes_per_onode);
3339 }
3340
3341 store->_update_cache_logger();
3342
3343 utime_t wait;
3344 wait += store->cct->_conf->bluestore_cache_trim_interval;
3345 cond.WaitInterval(lock, wait);
3346 }
3347 stop = false;
3348 return NULL;
3349 }
3350
3351 // =======================================================
3352
3353 // OmapIteratorImpl
3354
3355 #undef dout_prefix
3356 #define dout_prefix *_dout << "bluestore.OmapIteratorImpl(" << this << ") "
3357
3358 BlueStore::OmapIteratorImpl::OmapIteratorImpl(
3359 CollectionRef c, OnodeRef o, KeyValueDB::Iterator it)
3360 : c(c), o(o), it(it)
3361 {
3362 RWLock::RLocker l(c->lock);
3363 if (o->onode.has_omap()) {
3364 get_omap_key(o->onode.nid, string(), &head);
3365 get_omap_tail(o->onode.nid, &tail);
3366 it->lower_bound(head);
3367 }
3368 }
3369
3370 int BlueStore::OmapIteratorImpl::seek_to_first()
3371 {
3372 RWLock::RLocker l(c->lock);
3373 if (o->onode.has_omap()) {
3374 it->lower_bound(head);
3375 } else {
3376 it = KeyValueDB::Iterator();
3377 }
3378 return 0;
3379 }
3380
3381 int BlueStore::OmapIteratorImpl::upper_bound(const string& after)
3382 {
3383 RWLock::RLocker l(c->lock);
3384 if (o->onode.has_omap()) {
3385 string key;
3386 get_omap_key(o->onode.nid, after, &key);
3387 ldout(c->store->cct,20) << __func__ << " after " << after << " key "
3388 << pretty_binary_string(key) << dendl;
3389 it->upper_bound(key);
3390 } else {
3391 it = KeyValueDB::Iterator();
3392 }
3393 return 0;
3394 }
3395
3396 int BlueStore::OmapIteratorImpl::lower_bound(const string& to)
3397 {
3398 RWLock::RLocker l(c->lock);
3399 if (o->onode.has_omap()) {
3400 string key;
3401 get_omap_key(o->onode.nid, to, &key);
3402 ldout(c->store->cct,20) << __func__ << " to " << to << " key "
3403 << pretty_binary_string(key) << dendl;
3404 it->lower_bound(key);
3405 } else {
3406 it = KeyValueDB::Iterator();
3407 }
3408 return 0;
3409 }
3410
3411 bool BlueStore::OmapIteratorImpl::valid()
3412 {
3413 RWLock::RLocker l(c->lock);
3414 bool r = o->onode.has_omap() && it && it->valid() &&
3415 it->raw_key().second <= tail;
3416 if (it && it->valid()) {
3417 ldout(c->store->cct,20) << __func__ << " is at "
3418 << pretty_binary_string(it->raw_key().second)
3419 << dendl;
3420 }
3421 return r;
3422 }
3423
3424 int BlueStore::OmapIteratorImpl::next(bool validate)
3425 {
3426 RWLock::RLocker l(c->lock);
3427 if (o->onode.has_omap()) {
3428 it->next();
3429 return 0;
3430 } else {
3431 return -1;
3432 }
3433 }
3434
3435 string BlueStore::OmapIteratorImpl::key()
3436 {
3437 RWLock::RLocker l(c->lock);
3438 assert(it->valid());
3439 string db_key = it->raw_key().second;
3440 string user_key;
3441 decode_omap_key(db_key, &user_key);
3442 return user_key;
3443 }
3444
3445 bufferlist BlueStore::OmapIteratorImpl::value()
3446 {
3447 RWLock::RLocker l(c->lock);
3448 assert(it->valid());
3449 return it->value();
3450 }
3451
3452
3453 // =====================================
3454
3455 #undef dout_prefix
3456 #define dout_prefix *_dout << "bluestore(" << path << ") "
3457
3458
3459 static void aio_cb(void *priv, void *priv2)
3460 {
3461 BlueStore *store = static_cast<BlueStore*>(priv);
3462 BlueStore::AioContext *c = static_cast<BlueStore::AioContext*>(priv2);
3463 c->aio_finish(store);
3464 }
3465
3466 BlueStore::BlueStore(CephContext *cct, const string& path)
3467 : ObjectStore(cct, path),
3468 throttle_bytes(cct, "bluestore_throttle_bytes",
3469 cct->_conf->bluestore_throttle_bytes),
3470 throttle_deferred_bytes(cct, "bluestore_throttle_deferred_bytes",
3471 cct->_conf->bluestore_throttle_bytes +
3472 cct->_conf->bluestore_throttle_deferred_bytes),
3473 kv_sync_thread(this),
3474 kv_finalize_thread(this),
3475 mempool_thread(this)
3476 {
3477 _init_logger();
3478 cct->_conf->add_observer(this);
3479 set_cache_shards(1);
3480 }
3481
3482 BlueStore::BlueStore(CephContext *cct,
3483 const string& path,
3484 uint64_t _min_alloc_size)
3485 : ObjectStore(cct, path),
3486 throttle_bytes(cct, "bluestore_throttle_bytes",
3487 cct->_conf->bluestore_throttle_bytes),
3488 throttle_deferred_bytes(cct, "bluestore_throttle_deferred_bytes",
3489 cct->_conf->bluestore_throttle_bytes +
3490 cct->_conf->bluestore_throttle_deferred_bytes),
3491 kv_sync_thread(this),
3492 kv_finalize_thread(this),
3493 min_alloc_size(_min_alloc_size),
3494 min_alloc_size_order(ctz(_min_alloc_size)),
3495 mempool_thread(this)
3496 {
3497 _init_logger();
3498 cct->_conf->add_observer(this);
3499 set_cache_shards(1);
3500 }
3501
3502 BlueStore::~BlueStore()
3503 {
3504 for (auto f : finishers) {
3505 delete f;
3506 }
3507 finishers.clear();
3508
3509 cct->_conf->remove_observer(this);
3510 _shutdown_logger();
3511 assert(!mounted);
3512 assert(db == NULL);
3513 assert(bluefs == NULL);
3514 assert(fsid_fd < 0);
3515 assert(path_fd < 0);
3516 for (auto i : cache_shards) {
3517 delete i;
3518 }
3519 cache_shards.clear();
3520 }
3521
3522 const char **BlueStore::get_tracked_conf_keys() const
3523 {
3524 static const char* KEYS[] = {
3525 "bluestore_csum_type",
3526 "bluestore_compression_mode",
3527 "bluestore_compression_algorithm",
3528 "bluestore_compression_min_blob_size",
3529 "bluestore_compression_min_blob_size_ssd",
3530 "bluestore_compression_min_blob_size_hdd",
3531 "bluestore_compression_max_blob_size",
3532 "bluestore_compression_max_blob_size_ssd",
3533 "bluestore_compression_max_blob_size_hdd",
3534 "bluestore_max_alloc_size",
3535 "bluestore_prefer_deferred_size",
3536 "bluestore_deferred_batch_ops",
3537 "bluestore_deferred_batch_ops_hdd",
3538 "bluestore_deferred_batch_ops_ssd",
3539 "bluestore_throttle_bytes",
3540 "bluestore_throttle_deferred_bytes",
3541 "bluestore_throttle_cost_per_io_hdd",
3542 "bluestore_throttle_cost_per_io_ssd",
3543 "bluestore_throttle_cost_per_io",
3544 "bluestore_max_blob_size",
3545 "bluestore_max_blob_size_ssd",
3546 "bluestore_max_blob_size_hdd",
3547 NULL
3548 };
3549 return KEYS;
3550 }
3551
3552 void BlueStore::handle_conf_change(const struct md_config_t *conf,
3553 const std::set<std::string> &changed)
3554 {
3555 if (changed.count("bluestore_csum_type")) {
3556 _set_csum();
3557 }
3558 if (changed.count("bluestore_compression_mode") ||
3559 changed.count("bluestore_compression_algorithm") ||
3560 changed.count("bluestore_compression_min_blob_size") ||
3561 changed.count("bluestore_compression_max_blob_size")) {
3562 if (bdev) {
3563 _set_compression();
3564 }
3565 }
3566 if (changed.count("bluestore_max_blob_size") ||
3567 changed.count("bluestore_max_blob_size_ssd") ||
3568 changed.count("bluestore_max_blob_size_hdd")) {
3569 if (bdev) {
3570 // only after startup
3571 _set_blob_size();
3572 }
3573 }
3574 if (changed.count("bluestore_prefer_deferred_size") ||
3575 changed.count("bluestore_max_alloc_size") ||
3576 changed.count("bluestore_deferred_batch_ops") ||
3577 changed.count("bluestore_deferred_batch_ops_hdd") ||
3578 changed.count("bluestore_deferred_batch_ops_ssd")) {
3579 if (bdev) {
3580 // only after startup
3581 _set_alloc_sizes();
3582 }
3583 }
3584 if (changed.count("bluestore_throttle_cost_per_io") ||
3585 changed.count("bluestore_throttle_cost_per_io_hdd") ||
3586 changed.count("bluestore_throttle_cost_per_io_ssd")) {
3587 if (bdev) {
3588 _set_throttle_params();
3589 }
3590 }
3591 if (changed.count("bluestore_throttle_bytes")) {
3592 throttle_bytes.reset_max(conf->bluestore_throttle_bytes);
3593 throttle_deferred_bytes.reset_max(
3594 conf->bluestore_throttle_bytes + conf->bluestore_throttle_deferred_bytes);
3595 }
3596 if (changed.count("bluestore_throttle_deferred_bytes")) {
3597 throttle_deferred_bytes.reset_max(
3598 conf->bluestore_throttle_bytes + conf->bluestore_throttle_deferred_bytes);
3599 }
3600 }
3601
3602 void BlueStore::_set_compression()
3603 {
3604 auto m = Compressor::get_comp_mode_type(cct->_conf->bluestore_compression_mode);
3605 if (m) {
3606 comp_mode = *m;
3607 } else {
3608 derr << __func__ << " unrecognized value '"
3609 << cct->_conf->bluestore_compression_mode
3610 << "' for bluestore_compression_mode, reverting to 'none'"
3611 << dendl;
3612 comp_mode = Compressor::COMP_NONE;
3613 }
3614
3615 compressor = nullptr;
3616
3617 if (comp_mode == Compressor::COMP_NONE) {
3618 dout(10) << __func__ << " compression mode set to 'none', "
3619 << "ignore other compression setttings" << dendl;
3620 return;
3621 }
3622
3623 if (cct->_conf->bluestore_compression_min_blob_size) {
3624 comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size;
3625 } else {
3626 assert(bdev);
3627 if (bdev->is_rotational()) {
3628 comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size_hdd;
3629 } else {
3630 comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size_ssd;
3631 }
3632 }
3633
3634 if (cct->_conf->bluestore_compression_max_blob_size) {
3635 comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size;
3636 } else {
3637 assert(bdev);
3638 if (bdev->is_rotational()) {
3639 comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size_hdd;
3640 } else {
3641 comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size_ssd;
3642 }
3643 }
3644
3645 auto& alg_name = cct->_conf->bluestore_compression_algorithm;
3646 if (!alg_name.empty()) {
3647 compressor = Compressor::create(cct, alg_name);
3648 if (!compressor) {
3649 derr << __func__ << " unable to initialize " << alg_name.c_str() << " compressor"
3650 << dendl;
3651 }
3652 }
3653
3654 dout(10) << __func__ << " mode " << Compressor::get_comp_mode_name(comp_mode)
3655 << " alg " << (compressor ? compressor->get_type_name() : "(none)")
3656 << dendl;
3657 }
3658
3659 void BlueStore::_set_csum()
3660 {
3661 csum_type = Checksummer::CSUM_NONE;
3662 int t = Checksummer::get_csum_string_type(cct->_conf->bluestore_csum_type);
3663 if (t > Checksummer::CSUM_NONE)
3664 csum_type = t;
3665
3666 dout(10) << __func__ << " csum_type "
3667 << Checksummer::get_csum_type_string(csum_type)
3668 << dendl;
3669 }
3670
3671 void BlueStore::_set_throttle_params()
3672 {
3673 if (cct->_conf->bluestore_throttle_cost_per_io) {
3674 throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io;
3675 } else {
3676 assert(bdev);
3677 if (bdev->is_rotational()) {
3678 throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io_hdd;
3679 } else {
3680 throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io_ssd;
3681 }
3682 }
3683
3684 dout(10) << __func__ << " throttle_cost_per_io " << throttle_cost_per_io
3685 << dendl;
3686 }
3687 void BlueStore::_set_blob_size()
3688 {
3689 if (cct->_conf->bluestore_max_blob_size) {
3690 max_blob_size = cct->_conf->bluestore_max_blob_size;
3691 } else {
3692 assert(bdev);
3693 if (bdev->is_rotational()) {
3694 max_blob_size = cct->_conf->bluestore_max_blob_size_hdd;
3695 } else {
3696 max_blob_size = cct->_conf->bluestore_max_blob_size_ssd;
3697 }
3698 }
3699 dout(10) << __func__ << " max_blob_size 0x" << std::hex << max_blob_size
3700 << std::dec << dendl;
3701 }
3702
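// Worked example (purely illustrative numbers): with cache_size = 3 GiB,
// cache_meta_ratio = 0.01, cache_kv_ratio = 0.99 and
// bluestore_cache_kv_max = 512 MiB, the kv ratio is clamped to
// 512 MiB / 3 GiB ~= 0.17, the difference is folded back into the meta
// ratio (~0.83), and cache_data_ratio becomes 1 - 0.83 - 0.17 = 0.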
3703 int BlueStore::_set_cache_sizes()
3704 {
3705 assert(bdev);
3706 if (cct->_conf->bluestore_cache_size) {
3707 cache_size = cct->_conf->bluestore_cache_size;
3708 } else {
3709 // choose global cache size based on backend type
3710 if (bdev->is_rotational()) {
3711 cache_size = cct->_conf->bluestore_cache_size_hdd;
3712 } else {
3713 cache_size = cct->_conf->bluestore_cache_size_ssd;
3714 }
3715 }
3716 cache_meta_ratio = cct->_conf->bluestore_cache_meta_ratio;
3717 cache_kv_ratio = cct->_conf->bluestore_cache_kv_ratio;
3718
3719 double cache_kv_max = cct->_conf->bluestore_cache_kv_max;
3720 double cache_kv_max_ratio = 0;
3721
3722 // if cache_kv_max is negative, disable it
3723 if (cache_size > 0 && cache_kv_max >= 0) {
3724 cache_kv_max_ratio = (double) cache_kv_max / (double) cache_size;
3725 if (cache_kv_max_ratio < 1.0 && cache_kv_max_ratio < cache_kv_ratio) {
3726 dout(1) << __func__ << " max " << cache_kv_max_ratio
3727 << " < ratio " << cache_kv_ratio
3728 << dendl;
3729 cache_meta_ratio = cache_meta_ratio + cache_kv_ratio - cache_kv_max_ratio;
3730 cache_kv_ratio = cache_kv_max_ratio;
3731 }
3732 }
3733
3734 cache_data_ratio =
3735 (double)1.0 - (double)cache_meta_ratio - (double)cache_kv_ratio;
3736
3737 if (cache_meta_ratio < 0 || cache_meta_ratio > 1.0) {
3738 derr << __func__ << "bluestore_cache_meta_ratio (" << cache_meta_ratio
3739 << ") must be in range [0,1.0]" << dendl;
3740 return -EINVAL;
3741 }
3742 if (cache_kv_ratio < 0 || cache_kv_ratio > 1.0) {
3743 derr << __func__ << "bluestore_cache_kv_ratio (" << cache_kv_ratio
3744 << ") must be in range [0,1.0]" << dendl;
3745 return -EINVAL;
3746 }
3747 if (cache_meta_ratio + cache_kv_ratio > 1.0) {
3748 derr << __func__ << "bluestore_cache_meta_ratio (" << cache_meta_ratio
3749 << ") + bluestore_cache_kv_ratio (" << cache_kv_ratio
3750 << ") = " << cache_meta_ratio + cache_kv_ratio << "; must be <= 1.0"
3751 << dendl;
3752 return -EINVAL;
3753 }
3754 if (cache_data_ratio < 0) {
3755 // deal with floating point imprecision
3756 cache_data_ratio = 0;
3757 }
3758 dout(1) << __func__ << " cache_size " << cache_size
3759 << " meta " << cache_meta_ratio
3760 << " kv " << cache_kv_ratio
3761 << " data " << cache_data_ratio
3762 << dendl;
3763 return 0;
3764 }
3765
3766 void BlueStore::_init_logger()
3767 {
3768 PerfCountersBuilder b(cct, "bluestore",
3769 l_bluestore_first, l_bluestore_last);
3770 b.add_time_avg(l_bluestore_kv_flush_lat, "kv_flush_lat",
3771 "Average kv_thread flush latency",
3772 "fl_l", PerfCountersBuilder::PRIO_INTERESTING);
3773 b.add_time_avg(l_bluestore_kv_commit_lat, "kv_commit_lat",
3774 "Average kv_thread commit latency");
3775 b.add_time_avg(l_bluestore_kv_lat, "kv_lat",
3776 "Average kv_thread sync latency",
3777 "k_l", PerfCountersBuilder::PRIO_INTERESTING);
3778 b.add_time_avg(l_bluestore_state_prepare_lat, "state_prepare_lat",
3779 "Average prepare state latency");
3780 b.add_time_avg(l_bluestore_state_aio_wait_lat, "state_aio_wait_lat",
3781 "Average aio_wait state latency",
3782 "io_l", PerfCountersBuilder::PRIO_INTERESTING);
3783 b.add_time_avg(l_bluestore_state_io_done_lat, "state_io_done_lat",
3784 "Average io_done state latency");
3785 b.add_time_avg(l_bluestore_state_kv_queued_lat, "state_kv_queued_lat",
3786 "Average kv_queued state latency");
3787 b.add_time_avg(l_bluestore_state_kv_committing_lat, "state_kv_commiting_lat",
3788 "Average kv_commiting state latency");
3789 b.add_time_avg(l_bluestore_state_kv_done_lat, "state_kv_done_lat",
3790 "Average kv_done state latency");
3791 b.add_time_avg(l_bluestore_state_deferred_queued_lat, "state_deferred_queued_lat",
3792 "Average deferred_queued state latency");
3793 b.add_time_avg(l_bluestore_state_deferred_aio_wait_lat, "state_deferred_aio_wait_lat",
3794 "Average aio_wait state latency");
3795 b.add_time_avg(l_bluestore_state_deferred_cleanup_lat, "state_deferred_cleanup_lat",
3796 "Average cleanup state latency");
3797 b.add_time_avg(l_bluestore_state_finishing_lat, "state_finishing_lat",
3798 "Average finishing state latency");
3799 b.add_time_avg(l_bluestore_state_done_lat, "state_done_lat",
3800 "Average done state latency");
3801 b.add_time_avg(l_bluestore_throttle_lat, "throttle_lat",
3802 "Average submit throttle latency",
3803 "th_l", PerfCountersBuilder::PRIO_CRITICAL);
3804 b.add_time_avg(l_bluestore_submit_lat, "submit_lat",
3805 "Average submit latency",
3806 "s_l", PerfCountersBuilder::PRIO_CRITICAL);
3807 b.add_time_avg(l_bluestore_commit_lat, "commit_lat",
3808 "Average commit latency",
3809 "c_l", PerfCountersBuilder::PRIO_CRITICAL);
3810 b.add_time_avg(l_bluestore_read_lat, "read_lat",
3811 "Average read latency",
3812 "r_l", PerfCountersBuilder::PRIO_CRITICAL);
3813 b.add_time_avg(l_bluestore_read_onode_meta_lat, "read_onode_meta_lat",
3814 "Average read onode metadata latency");
3815 b.add_time_avg(l_bluestore_read_wait_aio_lat, "read_wait_aio_lat",
3816 "Average read latency");
3817 b.add_time_avg(l_bluestore_compress_lat, "compress_lat",
3818 "Average compress latency");
3819 b.add_time_avg(l_bluestore_decompress_lat, "decompress_lat",
3820 "Average decompress latency");
3821 b.add_time_avg(l_bluestore_csum_lat, "csum_lat",
3822 "Average checksum latency");
3823 b.add_u64_counter(l_bluestore_compress_success_count, "compress_success_count",
3824 "Sum for beneficial compress ops");
3825 b.add_u64_counter(l_bluestore_compress_rejected_count, "compress_rejected_count",
3826 "Sum for compress ops rejected due to low net gain of space");
3827 b.add_u64_counter(l_bluestore_write_pad_bytes, "write_pad_bytes",
3828 "Sum for write-op padded bytes");
3829 b.add_u64_counter(l_bluestore_deferred_write_ops, "deferred_write_ops",
3830 "Sum for deferred write op");
3831 b.add_u64_counter(l_bluestore_deferred_write_bytes, "deferred_write_bytes",
3832 "Sum for deferred write bytes", "def");
3833 b.add_u64_counter(l_bluestore_write_penalty_read_ops, "write_penalty_read_ops",
3834 "Sum for write penalty read ops");
3835 b.add_u64(l_bluestore_allocated, "bluestore_allocated",
3836 "Sum for allocated bytes");
3837 b.add_u64(l_bluestore_stored, "bluestore_stored",
3838 "Sum for stored bytes");
3839 b.add_u64(l_bluestore_compressed, "bluestore_compressed",
3840 "Sum for stored compressed bytes");
3841 b.add_u64(l_bluestore_compressed_allocated, "bluestore_compressed_allocated",
3842 "Sum for bytes allocated for compressed data");
3843 b.add_u64(l_bluestore_compressed_original, "bluestore_compressed_original",
3844 "Sum for original bytes that were compressed");
3845
3846 b.add_u64(l_bluestore_onodes, "bluestore_onodes",
3847 "Number of onodes in cache");
3848 b.add_u64_counter(l_bluestore_onode_hits, "bluestore_onode_hits",
3849 "Sum for onode-lookups hit in the cache");
3850 b.add_u64_counter(l_bluestore_onode_misses, "bluestore_onode_misses",
3851 "Sum for onode-lookups missed in the cache");
3852 b.add_u64_counter(l_bluestore_onode_shard_hits, "bluestore_onode_shard_hits",
3853 "Sum for onode-shard lookups hit in the cache");
3854 b.add_u64_counter(l_bluestore_onode_shard_misses,
3855 "bluestore_onode_shard_misses",
3856 "Sum for onode-shard lookups missed in the cache");
3857 b.add_u64(l_bluestore_extents, "bluestore_extents",
3858 "Number of extents in cache");
3859 b.add_u64(l_bluestore_blobs, "bluestore_blobs",
3860 "Number of blobs in cache");
3861 b.add_u64(l_bluestore_buffers, "bluestore_buffers",
3862 "Number of buffers in cache");
3863 b.add_u64(l_bluestore_buffer_bytes, "bluestore_buffer_bytes",
3864 "Number of buffer bytes in cache");
3865 b.add_u64(l_bluestore_buffer_hit_bytes, "bluestore_buffer_hit_bytes",
3866 "Sum for bytes of read hit in the cache");
3867 b.add_u64(l_bluestore_buffer_miss_bytes, "bluestore_buffer_miss_bytes",
3868 "Sum for bytes of read missed in the cache");
3869
3870 b.add_u64_counter(l_bluestore_write_big, "bluestore_write_big",
3871 "Large aligned writes into fresh blobs");
3872 b.add_u64_counter(l_bluestore_write_big_bytes, "bluestore_write_big_bytes",
3873 "Large aligned writes into fresh blobs (bytes)");
3874 b.add_u64_counter(l_bluestore_write_big_blobs, "bluestore_write_big_blobs",
3875 "Large aligned writes into fresh blobs (blobs)");
3876 b.add_u64_counter(l_bluestore_write_small, "bluestore_write_small",
3877 "Small writes into existing or sparse small blobs");
3878 b.add_u64_counter(l_bluestore_write_small_bytes, "bluestore_write_small_bytes",
3879 "Small writes into existing or sparse small blobs (bytes)");
3880 b.add_u64_counter(l_bluestore_write_small_unused,
3881 "bluestore_write_small_unused",
3882 "Small writes into unused portion of existing blob");
3883 b.add_u64_counter(l_bluestore_write_small_deferred,
3884 "bluestore_write_small_deferred",
3885 "Small overwrites using deferred");
3886 b.add_u64_counter(l_bluestore_write_small_pre_read,
3887 "bluestore_write_small_pre_read",
3888 "Small writes that required we read some data (possibly "
3889 "cached) to fill out the block");
3890 b.add_u64_counter(l_bluestore_write_small_new, "bluestore_write_small_new",
3891 "Small write into new (sparse) blob");
3892
3893 b.add_u64_counter(l_bluestore_txc, "bluestore_txc", "Transactions committed");
3894 b.add_u64_counter(l_bluestore_onode_reshard, "bluestore_onode_reshard",
3895 "Onode extent map reshard events");
3896 b.add_u64_counter(l_bluestore_blob_split, "bluestore_blob_split",
3897 "Sum for blob splitting due to resharding");
3898 b.add_u64_counter(l_bluestore_extent_compress, "bluestore_extent_compress",
3899 "Sum for extents that have been removed due to compression");
3900 b.add_u64_counter(l_bluestore_gc_merged, "bluestore_gc_merged",
3901 "Sum for extents that have been merged due to garbage "
3902 "collection");
3903 logger = b.create_perf_counters();
3904 cct->get_perfcounters_collection()->add(logger);
3905 }
3906
3907 int BlueStore::_reload_logger()
3908 {
3909 struct store_statfs_t store_statfs;
3910
3911 int r = statfs(&store_statfs);
3912 if (r >= 0) {
3913 logger->set(l_bluestore_allocated, store_statfs.allocated);
3914 logger->set(l_bluestore_stored, store_statfs.stored);
3915 logger->set(l_bluestore_compressed, store_statfs.compressed);
3916 logger->set(l_bluestore_compressed_allocated, store_statfs.compressed_allocated);
3917 logger->set(l_bluestore_compressed_original, store_statfs.compressed_original);
3918 }
3919 return r;
3920 }
3921
3922 void BlueStore::_shutdown_logger()
3923 {
3924 cct->get_perfcounters_collection()->remove(logger);
3925 delete logger;
3926 }
3927
3928 int BlueStore::get_block_device_fsid(CephContext* cct, const string& path,
3929 uuid_d *fsid)
3930 {
3931 bluestore_bdev_label_t label;
3932 int r = _read_bdev_label(cct, path, &label);
3933 if (r < 0)
3934 return r;
3935 *fsid = label.osd_uuid;
3936 return 0;
3937 }
3938
3939 int BlueStore::_open_path()
3940 {
3941 assert(path_fd < 0);
3942 path_fd = TEMP_FAILURE_RETRY(::open(path.c_str(), O_DIRECTORY));
3943 if (path_fd < 0) {
3944 int r = -errno;
3945 derr << __func__ << " unable to open " << path << ": " << cpp_strerror(r)
3946 << dendl;
3947 return r;
3948 }
3949 return 0;
3950 }
3951
3952 void BlueStore::_close_path()
3953 {
3954 VOID_TEMP_FAILURE_RETRY(::close(path_fd));
3955 path_fd = -1;
3956 }
3957
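// On-disk layout produced by _write_bdev_label below (sketch for
// illustration, derived from the code that follows):
//
//   +--------------------------------+--------+--------------------+
//   | encoded bluestore_bdev_label_t | crc32c | zero padding       |
//   +--------------------------------+--------+--------------------+
//   |<-------------- BDEV_LABEL_BLOCK_SIZE (4096 bytes) ----------->|
//
// The crc32c covers only the encoded label bytes; _read_bdev_label
// recomputes it over the same span and compares it to the stored value.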
3958 int BlueStore::_write_bdev_label(string path, bluestore_bdev_label_t label)
3959 {
3960 dout(10) << __func__ << " path " << path << " label " << label << dendl;
3961 bufferlist bl;
3962 ::encode(label, bl);
3963 uint32_t crc = bl.crc32c(-1);
3964 ::encode(crc, bl);
3965 assert(bl.length() <= BDEV_LABEL_BLOCK_SIZE);
3966 bufferptr z(BDEV_LABEL_BLOCK_SIZE - bl.length());
3967 z.zero();
3968 bl.append(std::move(z));
3969
3970 int fd = TEMP_FAILURE_RETRY(::open(path.c_str(), O_WRONLY));
3971 if (fd < 0) {
3972 fd = -errno;
3973 derr << __func__ << " failed to open " << path << ": " << cpp_strerror(fd)
3974 << dendl;
3975 return fd;
3976 }
3977 int r = bl.write_fd(fd);
3978 if (r < 0) {
3979 derr << __func__ << " failed to write to " << path
3980 << ": " << cpp_strerror(r) << dendl;
3981 }
3982 VOID_TEMP_FAILURE_RETRY(::close(fd));
3983 return r;
3984 }
3985
3986 int BlueStore::_read_bdev_label(CephContext* cct, string path,
3987 bluestore_bdev_label_t *label)
3988 {
3989 dout(10) << __func__ << dendl;
3990 int fd = TEMP_FAILURE_RETRY(::open(path.c_str(), O_RDONLY));
3991 if (fd < 0) {
3992 fd = -errno;
3993 derr << __func__ << " failed to open " << path << ": " << cpp_strerror(fd)
3994 << dendl;
3995 return fd;
3996 }
3997 bufferlist bl;
3998 int r = bl.read_fd(fd, BDEV_LABEL_BLOCK_SIZE);
3999 VOID_TEMP_FAILURE_RETRY(::close(fd));
4000 if (r < 0) {
4001 derr << __func__ << " failed to read from " << path
4002 << ": " << cpp_strerror(r) << dendl;
4003 return r;
4004 }
4005
4006 uint32_t crc, expected_crc;
4007 bufferlist::iterator p = bl.begin();
4008 try {
4009 ::decode(*label, p);
4010 bufferlist t;
4011 t.substr_of(bl, 0, p.get_off());
4012 crc = t.crc32c(-1);
4013 ::decode(expected_crc, p);
4014 }
4015 catch (buffer::error& e) {
4016 derr << __func__ << " unable to decode label at offset " << p.get_off()
4017 << ": " << e.what()
4018 << dendl;
4019 return -EINVAL;
4020 }
4021 if (crc != expected_crc) {
4022 derr << __func__ << " bad crc on label, expected " << expected_crc
4023 << " != actual " << crc << dendl;
4024 return -EIO;
4025 }
4026 dout(10) << __func__ << " got " << *label << dendl;
4027 return 0;
4028 }
4029
4030 int BlueStore::_check_or_set_bdev_label(
4031 string path, uint64_t size, string desc, bool create)
4032 {
4033 bluestore_bdev_label_t label;
4034 if (create) {
4035 label.osd_uuid = fsid;
4036 label.size = size;
4037 label.btime = ceph_clock_now();
4038 label.description = desc;
4039 int r = _write_bdev_label(path, label);
4040 if (r < 0)
4041 return r;
4042 } else {
4043 int r = _read_bdev_label(cct, path, &label);
4044 if (r < 0)
4045 return r;
4046 if (cct->_conf->bluestore_debug_permit_any_bdev_label) {
4047 dout(20) << __func__ << " bdev " << path << " fsid " << label.osd_uuid
4048 << " and fsid " << fsid << " check bypassed" << dendl;
4049 }
4050 else if (label.osd_uuid != fsid) {
4051 derr << __func__ << " bdev " << path << " fsid " << label.osd_uuid
4052 << " does not match our fsid " << fsid << dendl;
4053 return -EIO;
4054 }
4055 }
4056 return 0;
4057 }
4058
4059 void BlueStore::_set_alloc_sizes(void)
4060 {
4061 max_alloc_size = cct->_conf->bluestore_max_alloc_size;
4062
4063 if (cct->_conf->bluestore_prefer_deferred_size) {
4064 prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size;
4065 } else {
4066 assert(bdev);
4067 if (bdev->is_rotational()) {
4068 prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size_hdd;
4069 } else {
4070 prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size_ssd;
4071 }
4072 }
4073
4074 if (cct->_conf->bluestore_deferred_batch_ops) {
4075 deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops;
4076 } else {
4077 assert(bdev);
4078 if (bdev->is_rotational()) {
4079 deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops_hdd;
4080 } else {
4081 deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops_ssd;
4082 }
4083 }
4084
4085 dout(10) << __func__ << " min_alloc_size 0x" << std::hex << min_alloc_size
4086 << std::dec << " order " << min_alloc_size_order
4087 << " max_alloc_size 0x" << std::hex << max_alloc_size
4088 << " prefer_deferred_size 0x" << prefer_deferred_size
4089 << std::dec
4090 << " deferred_batch_ops " << deferred_batch_ops
4091 << dendl;
4092 }
4093
4094 int BlueStore::_open_bdev(bool create)
4095 {
4096 assert(bdev == NULL);
4097 string p = path + "/block";
4098 bdev = BlockDevice::create(cct, p, aio_cb, static_cast<void*>(this));
4099 int r = bdev->open(p);
4100 if (r < 0)
4101 goto fail;
4102
4103 if (bdev->supported_bdev_label()) {
4104 r = _check_or_set_bdev_label(p, bdev->get_size(), "main", create);
4105 if (r < 0)
4106 goto fail_close;
4107 }
4108
4109 // initialize global block parameters
4110 block_size = bdev->get_block_size();
4111 block_mask = ~(block_size - 1);
4112 block_size_order = ctz(block_size);
4113 assert(block_size == 1u << block_size_order);
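// Example: a 4 KiB device block size gives block_size_order = 12 and
// block_mask = ~0xfff, so an offset can be truncated to a block boundary
// with (offset & block_mask).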
4114 // and set cache_size based on device type
4115 r = _set_cache_sizes();
4116 if (r < 0) {
4117 goto fail_close;
4118 }
4119 return 0;
4120
4121 fail_close:
4122 bdev->close();
4123 fail:
4124 delete bdev;
4125 bdev = NULL;
4126 return r;
4127 }
4128
4129 void BlueStore::_close_bdev()
4130 {
4131 assert(bdev);
4132 bdev->close();
4133 delete bdev;
4134 bdev = NULL;
4135 }
4136
4137 int BlueStore::_open_fm(bool create)
4138 {
4139 assert(fm == NULL);
4140 fm = FreelistManager::create(cct, freelist_type, db, PREFIX_ALLOC);
4141
4142 if (create) {
4143 // initialize freespace
4144 dout(20) << __func__ << " initializing freespace" << dendl;
4145 KeyValueDB::Transaction t = db->get_transaction();
4146 {
4147 bufferlist bl;
4148 bl.append(freelist_type);
4149 t->set(PREFIX_SUPER, "freelist_type", bl);
4150 }
4151 fm->create(bdev->get_size(), t);
4152
4153 // allocate superblock reserved space. note that we do not mark
4154 // bluefs space as allocated in the freelist; we instead rely on
4155 // bluefs_extents.
4156 fm->allocate(0, SUPER_RESERVED, t);
4157
4158 uint64_t reserved = 0;
4159 if (cct->_conf->bluestore_bluefs) {
4160 assert(bluefs_extents.num_intervals() == 1);
4161 interval_set<uint64_t>::iterator p = bluefs_extents.begin();
4162 reserved = p.get_start() + p.get_len();
4163 dout(20) << __func__ << " reserved 0x" << std::hex << reserved << std::dec
4164 << " for bluefs" << dendl;
4165 bufferlist bl;
4166 ::encode(bluefs_extents, bl);
4167 t->set(PREFIX_SUPER, "bluefs_extents", bl);
4168 dout(20) << __func__ << " bluefs_extents 0x" << std::hex << bluefs_extents
4169 << std::dec << dendl;
4170 } else {
4171 reserved = SUPER_RESERVED;
4172 }
4173
4174 if (cct->_conf->bluestore_debug_prefill > 0) {
4175 uint64_t end = bdev->get_size() - reserved;
4176 dout(1) << __func__ << " pre-fragmenting freespace, using "
4177 << cct->_conf->bluestore_debug_prefill << " with max free extent "
4178 << cct->_conf->bluestore_debug_prefragment_max << dendl;
4179 uint64_t start = P2ROUNDUP(reserved, min_alloc_size);
4180 uint64_t max_b = cct->_conf->bluestore_debug_prefragment_max / min_alloc_size;
4181 float r = cct->_conf->bluestore_debug_prefill;
4182 r /= 1.0 - r;
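// Worked example (illustrative): bluestore_debug_prefill = 0.9 gives
// r = 0.9 / (1 - 0.9) = 9, so each randomly sized free extent of length l
// is followed by roughly u = 9*l of allocated space, leaving about 10% of
// the device free, scattered across many small fragments.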
4183 bool stop = false;
4184
4185 while (!stop && start < end) {
4186 uint64_t l = (rand() % max_b + 1) * min_alloc_size;
4187 if (start + l > end) {
4188 l = end - start;
4189 l = P2ALIGN(l, min_alloc_size);
4190 }
4191 assert(start + l <= end);
4192
4193 uint64_t u = 1 + (uint64_t)(r * (double)l);
4194 u = P2ROUNDUP(u, min_alloc_size);
4195 if (start + l + u > end) {
4196 u = end - (start + l);
4197 // trim to align so we don't overflow again
4198 u = P2ALIGN(u, min_alloc_size);
4199 stop = true;
4200 }
4201 assert(start + l + u <= end);
4202
4203 dout(20) << " free 0x" << std::hex << start << "~" << l
4204 << " use 0x" << u << std::dec << dendl;
4205
4206 if (u == 0) {
4207 // break if u has been trimmed to nothing
4208 break;
4209 }
4210
4211 fm->allocate(start + l, u, t);
4212 start += l + u;
4213 }
4214 }
4215 db->submit_transaction_sync(t);
4216 }
4217
4218 int r = fm->init();
4219 if (r < 0) {
4220 derr << __func__ << " freelist init failed: " << cpp_strerror(r) << dendl;
4221 delete fm;
4222 fm = NULL;
4223 return r;
4224 }
4225 return 0;
4226 }
4227
4228 void BlueStore::_close_fm()
4229 {
4230 dout(10) << __func__ << dendl;
4231 assert(fm);
4232 fm->shutdown();
4233 delete fm;
4234 fm = NULL;
4235 }
4236
4237 int BlueStore::_open_alloc()
4238 {
4239 assert(alloc == NULL);
4240 assert(bdev->get_size());
4241 alloc = Allocator::create(cct, cct->_conf->bluestore_allocator,
4242 bdev->get_size(),
4243 min_alloc_size);
4244 if (!alloc) {
4245 lderr(cct) << __func__ << " unknown allocator type "
4246 << cct->_conf->bluestore_allocator
4247 << dendl;
4248 return -EINVAL;
4249 }
4250
4251 uint64_t num = 0, bytes = 0;
4252
4253 dout(1) << __func__ << " opening allocation metadata" << dendl;
4254 // initialize from freelist
4255 fm->enumerate_reset();
4256 uint64_t offset, length;
4257 while (fm->enumerate_next(&offset, &length)) {
4258 alloc->init_add_free(offset, length);
4259 ++num;
4260 bytes += length;
4261 }
4262 fm->enumerate_reset();
4263 dout(1) << __func__ << " loaded " << pretty_si_t(bytes)
4264 << " in " << num << " extents"
4265 << dendl;
4266
4267 // also mark bluefs space as allocated
4268 for (auto e = bluefs_extents.begin(); e != bluefs_extents.end(); ++e) {
4269 alloc->init_rm_free(e.get_start(), e.get_len());
4270 }
4271 dout(10) << __func__ << " marked bluefs_extents 0x" << std::hex
4272 << bluefs_extents << std::dec << " as allocated" << dendl;
4273
4274 return 0;
4275 }
4276
4277 void BlueStore::_close_alloc()
4278 {
4279 assert(alloc);
4280 alloc->shutdown();
4281 delete alloc;
4282 alloc = NULL;
4283 }
4284
4285 int BlueStore::_open_fsid(bool create)
4286 {
4287 assert(fsid_fd < 0);
4288 int flags = O_RDWR;
4289 if (create)
4290 flags |= O_CREAT;
4291 fsid_fd = ::openat(path_fd, "fsid", flags, 0644);
4292 if (fsid_fd < 0) {
4293 int err = -errno;
4294 derr << __func__ << " " << cpp_strerror(err) << dendl;
4295 return err;
4296 }
4297 return 0;
4298 }
4299
4300 int BlueStore::_read_fsid(uuid_d *uuid)
4301 {
4302 char fsid_str[40];
4303 memset(fsid_str, 0, sizeof(fsid_str));
4304 int ret = safe_read(fsid_fd, fsid_str, sizeof(fsid_str));
4305 if (ret < 0) {
4306 derr << __func__ << " failed: " << cpp_strerror(ret) << dendl;
4307 return ret;
4308 }
4309 if (ret > 36)
4310 fsid_str[36] = 0;
4311 else
4312 fsid_str[ret] = 0;
4313 if (!uuid->parse(fsid_str)) {
4314 derr << __func__ << " unparsable uuid " << fsid_str << dendl;
4315 return -EINVAL;
4316 }
4317 return 0;
4318 }
4319
4320 int BlueStore::_write_fsid()
4321 {
4322 int r = ::ftruncate(fsid_fd, 0);
4323 if (r < 0) {
4324 r = -errno;
4325 derr << __func__ << " fsid truncate failed: " << cpp_strerror(r) << dendl;
4326 return r;
4327 }
4328 string str = stringify(fsid) + "\n";
4329 r = safe_write(fsid_fd, str.c_str(), str.length());
4330 if (r < 0) {
4331 derr << __func__ << " fsid write failed: " << cpp_strerror(r) << dendl;
4332 return r;
4333 }
4334 r = ::fsync(fsid_fd);
4335 if (r < 0) {
4336 r = -errno;
4337 derr << __func__ << " fsid fsync failed: " << cpp_strerror(r) << dendl;
4338 return r;
4339 }
4340 return 0;
4341 }
4342
4343 void BlueStore::_close_fsid()
4344 {
4345 VOID_TEMP_FAILURE_RETRY(::close(fsid_fd));
4346 fsid_fd = -1;
4347 }
4348
4349 int BlueStore::_lock_fsid()
4350 {
4351 struct flock l;
4352 memset(&l, 0, sizeof(l));
4353 l.l_type = F_WRLCK;
4354 l.l_whence = SEEK_SET;
4355 int r = ::fcntl(fsid_fd, F_SETLK, &l);
4356 if (r < 0) {
4357 int err = errno;
4358 derr << __func__ << " failed to lock " << path << "/fsid"
4359 << " (is another ceph-osd still running?)"
4360 << ": " << cpp_strerror(err) << dendl;
4361 return -err;
4362 }
4363 return 0;
4364 }
4365
4366 bool BlueStore::is_rotational()
4367 {
4368 if (bdev) {
4369 return bdev->is_rotational();
4370 }
4371
4372 bool rotational = true;
4373 int r = _open_path();
4374 if (r < 0)
4375 goto out;
4376 r = _open_fsid(false);
4377 if (r < 0)
4378 goto out_path;
4379 r = _read_fsid(&fsid);
4380 if (r < 0)
4381 goto out_fsid;
4382 r = _lock_fsid();
4383 if (r < 0)
4384 goto out_fsid;
4385 r = _open_bdev(false);
4386 if (r < 0)
4387 goto out_fsid;
4388 rotational = bdev->is_rotational();
4389 _close_bdev();
4390 out_fsid:
4391 _close_fsid();
4392 out_path:
4393 _close_path();
4394 out:
4395 return rotational;
4396 }
4397
4398 bool BlueStore::test_mount_in_use()
4399 {
4400 // most error conditions mean the mount is not in use (e.g., because
4401 // it doesn't exist). only if we fail to lock do we conclude it is
4402 // in use.
4403 bool ret = false;
4404 int r = _open_path();
4405 if (r < 0)
4406 return false;
4407 r = _open_fsid(false);
4408 if (r < 0)
4409 goto out_path;
4410 r = _lock_fsid();
4411 if (r < 0)
4412 ret = true; // if we can't lock, it is in use
4413 _close_fsid();
4414 out_path:
4415 _close_path();
4416 return ret;
4417 }
4418
4419 int BlueStore::_open_db(bool create)
4420 {
4421 int r;
4422 assert(!db);
4423 string fn = path + "/db";
4424 string options;
4425 stringstream err;
4426 ceph::shared_ptr<Int64ArrayMergeOperator> merge_op(new Int64ArrayMergeOperator);
4427
4428 string kv_backend;
4429 if (create) {
4430 kv_backend = cct->_conf->bluestore_kvbackend;
4431 } else {
4432 r = read_meta("kv_backend", &kv_backend);
4433 if (r < 0) {
4434 derr << __func__ << " unable to read 'kv_backend' meta" << dendl;
4435 return -EIO;
4436 }
4437 }
4438 dout(10) << __func__ << " kv_backend = " << kv_backend << dendl;
4439
4440 bool do_bluefs;
4441 if (create) {
4442 do_bluefs = cct->_conf->bluestore_bluefs;
4443 } else {
4444 string s;
4445 r = read_meta("bluefs", &s);
4446 if (r < 0) {
4447 derr << __func__ << " unable to read 'bluefs' meta" << dendl;
4448 return -EIO;
4449 }
4450 if (s == "1") {
4451 do_bluefs = true;
4452 } else if (s == "0") {
4453 do_bluefs = false;
4454 } else {
4455 derr << __func__ << " bluefs = " << s << " : not 0 or 1, aborting"
4456 << dendl;
4457 return -EIO;
4458 }
4459 }
4460 dout(10) << __func__ << " do_bluefs = " << do_bluefs << dendl;
4461
4462 rocksdb::Env *env = NULL;
4463 if (do_bluefs) {
4464 dout(10) << __func__ << " initializing bluefs" << dendl;
4465 if (kv_backend != "rocksdb") {
4466 derr << " backend must be rocksdb to use bluefs" << dendl;
4467 return -EINVAL;
4468 }
4469 bluefs = new BlueFS(cct);
4470
4471 string bfn;
4472 struct stat st;
4473
4474 bfn = path + "/block.db";
4475 if (::stat(bfn.c_str(), &st) == 0) {
4476 r = bluefs->add_block_device(BlueFS::BDEV_DB, bfn);
4477 if (r < 0) {
4478 derr << __func__ << " add block device(" << bfn << ") returned: "
4479 << cpp_strerror(r) << dendl;
4480 goto free_bluefs;
4481 }
4482
4483 if (bluefs->bdev_support_label(BlueFS::BDEV_DB)) {
4484 r = _check_or_set_bdev_label(
4485 bfn,
4486 bluefs->get_block_device_size(BlueFS::BDEV_DB),
4487 "bluefs db", create);
4488 if (r < 0) {
4489 derr << __func__
4490 << " check block device(" << bfn << ") label returned: "
4491 << cpp_strerror(r) << dendl;
4492 goto free_bluefs;
4493 }
4494 }
4495 if (create) {
4496 bluefs->add_block_extent(
4497 BlueFS::BDEV_DB,
4498 SUPER_RESERVED,
4499 bluefs->get_block_device_size(BlueFS::BDEV_DB) - SUPER_RESERVED);
4500 }
4501 bluefs_shared_bdev = BlueFS::BDEV_SLOW;
4502 bluefs_single_shared_device = false;
4503 } else if (::lstat(bfn.c_str(), &st) == -1) {
4504 bluefs_shared_bdev = BlueFS::BDEV_DB;
4505 } else {
4506 // a symlink exists but its target is missing; treat this as an error
4507 derr << __func__ << " " << bfn << " link target doesn't exist" << dendl;
4508 r = -errno;
4509 goto free_bluefs;
4510 }
4511
4512 // shared device
4513 bfn = path + "/block";
4514 r = bluefs->add_block_device(bluefs_shared_bdev, bfn);
4515 if (r < 0) {
4516 derr << __func__ << " add block device(" << bfn << ") returned: "
4517 << cpp_strerror(r) << dendl;
4518 goto free_bluefs;
4519 }
4520 if (create) {
4521 // note: we always leave the first SUPER_RESERVED (8k) of the device unused
4522 uint64_t initial =
4523 bdev->get_size() * (cct->_conf->bluestore_bluefs_min_ratio +
4524 cct->_conf->bluestore_bluefs_gift_ratio);
4525 initial = MAX(initial, cct->_conf->bluestore_bluefs_min);
4526 // align to bluefs's alloc_size
4527 initial = P2ROUNDUP(initial, cct->_conf->bluefs_alloc_size);
4528 // put bluefs in the middle of the device in case it is an HDD
4529 uint64_t start = P2ALIGN((bdev->get_size() - initial) / 2,
4530 cct->_conf->bluefs_alloc_size);
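// Illustrative sizing with hypothetical values (not defaults): a 1 TiB
// main device with min_ratio + gift_ratio summing to 0.04 gives
// initial ~= 40 GiB after rounding up to bluefs_alloc_size; start is then
// roughly (1 TiB - 40 GiB) / 2 aligned down to bluefs_alloc_size, placing
// the bluefs extent near the middle of an HDD to reduce seek distance.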
4531 bluefs->add_block_extent(bluefs_shared_bdev, start, initial);
4532 bluefs_extents.insert(start, initial);
4533 }
4534
4535 bfn = path + "/block.wal";
4536 if (::stat(bfn.c_str(), &st) == 0) {
4537 r = bluefs->add_block_device(BlueFS::BDEV_WAL, bfn);
4538 if (r < 0) {
4539 derr << __func__ << " add block device(" << bfn << ") returned: "
4540 << cpp_strerror(r) << dendl;
4541 goto free_bluefs;
4542 }
4543
4544 if (bluefs->bdev_support_label(BlueFS::BDEV_WAL)) {
4545 r = _check_or_set_bdev_label(
4546 bfn,
4547 bluefs->get_block_device_size(BlueFS::BDEV_WAL),
4548 "bluefs wal", create);
4549 if (r < 0) {
4550 derr << __func__ << " check block device(" << bfn
4551 << ") label returned: " << cpp_strerror(r) << dendl;
4552 goto free_bluefs;
4553 }
4554 }
4555
4556 if (create) {
4557 bluefs->add_block_extent(
4558 BlueFS::BDEV_WAL, BDEV_LABEL_BLOCK_SIZE,
4559 bluefs->get_block_device_size(BlueFS::BDEV_WAL) -
4560 BDEV_LABEL_BLOCK_SIZE);
4561 }
4562 cct->_conf->set_val("rocksdb_separate_wal_dir", "true");
4563 bluefs_single_shared_device = false;
4564 } else if (::lstat(bfn.c_str(), &st) == -1) {
4565 cct->_conf->set_val("rocksdb_separate_wal_dir", "false");
4566 } else {
4567 // a symlink exists but its target is missing; treat this as an error
4568 derr << __func__ << " " << bfn << " link target doesn't exist" << dendl;
4569 r = -errno;
4570 goto free_bluefs;
4571 }
4572
4573 if (create) {
4574 bluefs->mkfs(fsid);
4575 }
4576 r = bluefs->mount();
4577 if (r < 0) {
4578 derr << __func__ << " failed bluefs mount: " << cpp_strerror(r) << dendl;
4579 goto free_bluefs;
4580 }
4581 if (cct->_conf->bluestore_bluefs_env_mirror) {
4582 rocksdb::Env *a = new BlueRocksEnv(bluefs);
4583 rocksdb::Env *b = rocksdb::Env::Default();
4584 if (create) {
4585 string cmd = "rm -rf " + path + "/db " +
4586 path + "/db.slow " +
4587 path + "/db.wal";
4588 int r = system(cmd.c_str());
4589 (void)r;
4590 }
4591 env = new rocksdb::EnvMirror(b, a, false, true);
4592 } else {
4593 env = new BlueRocksEnv(bluefs);
4594
4595 // simplify the dir names, too, as "seen" by rocksdb
4596 fn = "db";
4597 }
4598
4599 if (bluefs_shared_bdev == BlueFS::BDEV_SLOW) {
4600 // we have both block.db and block; tell rocksdb!
4601 // note: the second (last) size value doesn't really matter
4602 ostringstream db_paths;
4603 uint64_t db_size = bluefs->get_block_device_size(BlueFS::BDEV_DB);
4604 uint64_t slow_size = bluefs->get_block_device_size(BlueFS::BDEV_SLOW);
4605 db_paths << fn << ","
4606 << (uint64_t)(db_size * 95 / 100) << " "
4607 << fn + ".slow" << ","
4608 << (uint64_t)(slow_size * 95 / 100);
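// The resulting option value looks roughly like (sizes illustrative):
//   "db,30520123392 db.slow,950123456512"
// i.e. 95% of the block.db size for the "db" path and 95% of the main
// (slow) device size for the "db.slow" path.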
4609 cct->_conf->set_val("rocksdb_db_paths", db_paths.str(), false);
4610 dout(10) << __func__ << " set rocksdb_db_paths to "
4611 << cct->_conf->get_val<std::string>("rocksdb_db_paths") << dendl;
4612 }
4613
4614 if (create) {
4615 env->CreateDir(fn);
4616 if (cct->_conf->rocksdb_separate_wal_dir)
4617 env->CreateDir(fn + ".wal");
4618 if (cct->_conf->get_val<std::string>("rocksdb_db_paths").length())
4619 env->CreateDir(fn + ".slow");
4620 }
4621 } else if (create) {
4622 int r = ::mkdir(fn.c_str(), 0755);
4623 if (r < 0)
4624 r = -errno;
4625 if (r < 0 && r != -EEXIST) {
4626 derr << __func__ << " failed to create " << fn << ": " << cpp_strerror(r)
4627 << dendl;
4628 return r;
4629 }
4630
4631 // wal_dir, too!
4632 if (cct->_conf->rocksdb_separate_wal_dir) {
4633 string walfn = path + "/db.wal";
4634 r = ::mkdir(walfn.c_str(), 0755);
4635 if (r < 0)
4636 r = -errno;
4637 if (r < 0 && r != -EEXIST) {
4638 derr << __func__ << " failed to create " << walfn
4639 << ": " << cpp_strerror(r)
4640 << dendl;
4641 return r;
4642 }
4643 }
4644 }
4645
4646 db = KeyValueDB::create(cct,
4647 kv_backend,
4648 fn,
4649 static_cast<void*>(env));
4650 if (!db) {
4651 derr << __func__ << " error creating db" << dendl;
4652 if (bluefs) {
4653 bluefs->umount();
4654 delete bluefs;
4655 bluefs = NULL;
4656 }
4657 // delete env manually here since we can't depend on db to do this
4658 // under this case
4659 delete env;
4660 env = NULL;
4661 return -EIO;
4662 }
4663
4664 FreelistManager::setup_merge_operators(db);
4665 db->set_merge_operator(PREFIX_STAT, merge_op);
4666
4667 db->set_cache_size(cache_size * cache_kv_ratio);
4668
4669 if (kv_backend == "rocksdb")
4670 options = cct->_conf->bluestore_rocksdb_options;
4671 db->init(options);
4672 if (create)
4673 r = db->create_and_open(err);
4674 else
4675 r = db->open(err);
4676 if (r) {
4677 derr << __func__ << " erroring opening db: " << err.str() << dendl;
4678 if (bluefs) {
4679 bluefs->umount();
4680 delete bluefs;
4681 bluefs = NULL;
4682 }
4683 delete db;
4684 db = NULL;
4685 return -EIO;
4686 }
4687 dout(1) << __func__ << " opened " << kv_backend
4688 << " path " << fn << " options " << options << dendl;
4689 return 0;
4690
4691 free_bluefs:
4692 assert(bluefs);
4693 delete bluefs;
4694 bluefs = NULL;
4695 return r;
4696 }
4697
4698 void BlueStore::_close_db()
4699 {
4700 assert(db);
4701 delete db;
4702 db = NULL;
4703 if (bluefs) {
4704 bluefs->umount();
4705 delete bluefs;
4706 bluefs = NULL;
4707 }
4708 }
4709
4710 int BlueStore::_reconcile_bluefs_freespace()
4711 {
4712 dout(10) << __func__ << dendl;
4713 interval_set<uint64_t> bset;
4714 int r = bluefs->get_block_extents(bluefs_shared_bdev, &bset);
4715 assert(r == 0);
4716 if (bset == bluefs_extents) {
4717 dout(10) << __func__ << " we agree bluefs has 0x" << std::hex << bset
4718 << std::dec << dendl;
4719 return 0;
4720 }
4721 dout(10) << __func__ << " bluefs says 0x" << std::hex << bset << std::dec
4722 << dendl;
4723 dout(10) << __func__ << " super says 0x" << std::hex << bluefs_extents
4724 << std::dec << dendl;
4725
4726 interval_set<uint64_t> overlap;
4727 overlap.intersection_of(bset, bluefs_extents);
4728
4729 bset.subtract(overlap);
4730 if (!bset.empty()) {
4731 derr << __func__ << " bluefs extra 0x" << std::hex << bset << std::dec
4732 << dendl;
4733 return -EIO;
4734 }
4735
4736 interval_set<uint64_t> super_extra;
4737 super_extra = bluefs_extents;
4738 super_extra.subtract(overlap);
4739 if (!super_extra.empty()) {
4740 // This is normal: it can happen if we commit to give extents to
4741 // bluefs and we crash before bluefs commits that it owns them.
4742 dout(10) << __func__ << " super extra " << super_extra << dendl;
4743 for (interval_set<uint64_t>::iterator p = super_extra.begin();
4744 p != super_extra.end();
4745 ++p) {
4746 bluefs->add_block_extent(bluefs_shared_bdev, p.get_start(), p.get_len());
4747 }
4748 }
4749
4750 return 0;
4751 }
4752
4753 int BlueStore::_balance_bluefs_freespace(PExtentVector *extents)
4754 {
4755 int ret = 0;
4756 assert(bluefs);
4757
4758 vector<pair<uint64_t,uint64_t>> bluefs_usage; // <free, total> ...
4759 bluefs->get_usage(&bluefs_usage);
4760 assert(bluefs_usage.size() > bluefs_shared_bdev);
4761
4762 // fixme: look at primary bdev only for now
4763 uint64_t bluefs_free = bluefs_usage[bluefs_shared_bdev].first;
4764 uint64_t bluefs_total = bluefs_usage[bluefs_shared_bdev].second;
4765 float bluefs_free_ratio = (float)bluefs_free / (float)bluefs_total;
4766
4767 uint64_t my_free = alloc->get_free();
4768 uint64_t total = bdev->get_size();
4769 float my_free_ratio = (float)my_free / (float)total;
4770
4771 uint64_t total_free = bluefs_free + my_free;
4772
4773 float bluefs_ratio = (float)bluefs_free / (float)total_free;
4774
4775 dout(10) << __func__
4776 << " bluefs " << pretty_si_t(bluefs_free)
4777 << " free (" << bluefs_free_ratio
4778 << ") bluestore " << pretty_si_t(my_free)
4779 << " free (" << my_free_ratio
4780 << "), bluefs_ratio " << bluefs_ratio
4781 << dendl;
4782
4783 uint64_t gift = 0;
4784 uint64_t reclaim = 0;
4785 if (bluefs_ratio < cct->_conf->bluestore_bluefs_min_ratio) {
4786 gift = cct->_conf->bluestore_bluefs_gift_ratio * total_free;
4787 dout(10) << __func__ << " bluefs_ratio " << bluefs_ratio
4788 << " < min_ratio " << cct->_conf->bluestore_bluefs_min_ratio
4789 << ", should gift " << pretty_si_t(gift) << dendl;
4790 } else if (bluefs_ratio > cct->_conf->bluestore_bluefs_max_ratio) {
4791 reclaim = cct->_conf->bluestore_bluefs_reclaim_ratio * total_free;
4792 if (bluefs_total - reclaim < cct->_conf->bluestore_bluefs_min)
4793 reclaim = bluefs_total - cct->_conf->bluestore_bluefs_min;
4794 dout(10) << __func__ << " bluefs_ratio " << bluefs_ratio
4795 << " > max_ratio " << cct->_conf->bluestore_bluefs_max_ratio
4796 << ", should reclaim " << pretty_si_t(reclaim) << dendl;
4797 }
4798 if (bluefs_total < cct->_conf->bluestore_bluefs_min &&
4799 cct->_conf->bluestore_bluefs_min <
4800 (uint64_t)(cct->_conf->bluestore_bluefs_max_ratio * total_free)) {
4801 uint64_t g = cct->_conf->bluestore_bluefs_min - bluefs_total;
4802 dout(10) << __func__ << " bluefs_total " << bluefs_total
4803 << " < min " << cct->_conf->bluestore_bluefs_min
4804 << ", should gift " << pretty_si_t(g) << dendl;
4805 if (g > gift)
4806 gift = g;
4807 reclaim = 0;
4808 }
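// Example with illustrative numbers: total_free = 200 GiB and
// bluefs_free = 2 GiB give bluefs_ratio = 0.01; with a min_ratio of 0.02
// that triggers a gift of gift_ratio * total_free (4 GiB for a 0.02
// gift_ratio), which the code below rounds up to bluefs_alloc_size and
// caps at 2 GiB (1ull << 31).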
4809
4810 if (gift) {
4811 // round up to alloc size
4812 gift = P2ROUNDUP(gift, cct->_conf->bluefs_alloc_size);
4813
4814 // hard cap to fit into 32 bits
4815 gift = MIN(gift, 1ull<<31);
4816 dout(10) << __func__ << " gifting " << gift
4817 << " (" << pretty_si_t(gift) << ")" << dendl;
4818
4819 // fixme: just do one allocation to start...
4820 int r = alloc->reserve(gift);
4821 assert(r == 0);
4822
4823 AllocExtentVector exts;
4824 int64_t alloc_len = alloc->allocate(gift, cct->_conf->bluefs_alloc_size,
4825 0, 0, &exts);
4826
4827 if (alloc_len < (int64_t)gift) {
4828 derr << __func__ << " allocate failed on 0x" << std::hex << gift
4829 << " min_alloc_size 0x" << min_alloc_size << std::dec << dendl;
4830 alloc->dump();
4831 assert(0 == "allocate failed, wtf");
4832 return -ENOSPC;
4833 }
4834 for (auto& p : exts) {
4835 bluestore_pextent_t e = bluestore_pextent_t(p);
4836 dout(1) << __func__ << " gifting " << e << " to bluefs" << dendl;
4837 extents->push_back(e);
4838 }
4839 gift = 0;
4840
4841 ret = 1;
4842 }
4843
4844 // reclaim from bluefs?
4845 if (reclaim) {
4846 // round up to alloc size
4847 reclaim = P2ROUNDUP(reclaim, cct->_conf->bluefs_alloc_size);
4848
4849 // hard cap to fit into 32 bits
4850 reclaim = MIN(reclaim, 1ull<<31);
4851 dout(10) << __func__ << " reclaiming " << reclaim
4852 << " (" << pretty_si_t(reclaim) << ")" << dendl;
4853
4854 while (reclaim > 0) {
4855 // NOTE: this will block and do IO.
4856 AllocExtentVector extents;
4857 int r = bluefs->reclaim_blocks(bluefs_shared_bdev, reclaim,
4858 &extents);
4859 if (r < 0) {
4860 derr << __func__ << " failed to reclaim space from bluefs"
4861 << dendl;
4862 break;
4863 }
4864 for (auto e : extents) {
4865 bluefs_extents.erase(e.offset, e.length);
4866 bluefs_extents_reclaiming.insert(e.offset, e.length);
4867 reclaim -= e.length;
4868 }
4869 }
4870
4871 ret = 1;
4872 }
4873
4874 return ret;
4875 }
4876
4877 void BlueStore::_commit_bluefs_freespace(
4878 const PExtentVector& bluefs_gift_extents)
4879 {
4880 dout(10) << __func__ << dendl;
4881 for (auto& p : bluefs_gift_extents) {
4882 bluefs->add_block_extent(bluefs_shared_bdev, p.offset, p.length);
4883 }
4884 }
4885
4886 int BlueStore::_open_collections(int *errors)
4887 {
4888 assert(coll_map.empty());
4889 KeyValueDB::Iterator it = db->get_iterator(PREFIX_COLL);
4890 for (it->upper_bound(string());
4891 it->valid();
4892 it->next()) {
4893 coll_t cid;
4894 if (cid.parse(it->key())) {
4895 CollectionRef c(
4896 new Collection(
4897 this,
4898 cache_shards[cid.hash_to_shard(cache_shards.size())],
4899 cid));
4900 bufferlist bl = it->value();
4901 bufferlist::iterator p = bl.begin();
4902 try {
4903 ::decode(c->cnode, p);
4904 } catch (buffer::error& e) {
4905 derr << __func__ << " failed to decode cnode, key:"
4906 << pretty_binary_string(it->key()) << dendl;
4907 return -EIO;
4908 }
4909 dout(20) << __func__ << " opened " << cid << " " << c << dendl;
4910 coll_map[cid] = c;
4911 } else {
4912 derr << __func__ << " unrecognized collection " << it->key() << dendl;
4913 if (errors)
4914 (*errors)++;
4915 }
4916 }
4917 return 0;
4918 }
4919
4920 void BlueStore::_open_statfs()
4921 {
4922 bufferlist bl;
4923 int r = db->get(PREFIX_STAT, "bluestore_statfs", &bl);
4924 if (r >= 0) {
4925 if (size_t(bl.length()) >= sizeof(vstatfs.values)) {
4926 auto it = bl.begin();
4927 vstatfs.decode(it);
4928 } else {
4929 dout(10) << __func__ << " store_statfs is corrupt, using empty" << dendl;
4930 }
4931 }
4932 else {
4933 dout(10) << __func__ << " store_statfs missed, using empty" << dendl;
4934 }
4935 }
4936
4937 int BlueStore::_setup_block_symlink_or_file(
4938 string name,
4939 string epath,
4940 uint64_t size,
4941 bool create)
4942 {
4943 dout(20) << __func__ << " name " << name << " path " << epath
4944 << " size " << size << " create=" << (int)create << dendl;
4945 int r = 0;
4946 int flags = O_RDWR;
4947 if (create)
4948 flags |= O_CREAT;
4949 if (epath.length()) {
4950 r = ::symlinkat(epath.c_str(), path_fd, name.c_str());
4951 if (r < 0) {
4952 r = -errno;
4953 derr << __func__ << " failed to create " << name << " symlink to "
4954 << epath << ": " << cpp_strerror(r) << dendl;
4955 return r;
4956 }
4957
4958 if (!epath.compare(0, strlen(SPDK_PREFIX), SPDK_PREFIX)) {
4959 int fd = ::openat(path_fd, epath.c_str(), flags, 0644);
4960 if (fd < 0) {
4961 r = -errno;
4962 derr << __func__ << " failed to open " << epath << " file: "
4963 << cpp_strerror(r) << dendl;
4964 return r;
4965 }
4966 string serial_number = epath.substr(strlen(SPDK_PREFIX));
4967 r = ::write(fd, serial_number.c_str(), serial_number.size());
4968 assert(r == (int)serial_number.size());
4969 dout(1) << __func__ << " created " << name << " symlink to "
4970 << epath << dendl;
4971 VOID_TEMP_FAILURE_RETRY(::close(fd));
4972 }
4973 }
4974 if (size) {
4975 int fd = ::openat(path_fd, name.c_str(), flags, 0644);
4976 if (fd >= 0) {
4977 // block file is present
4978 struct stat st;
4979 int r = ::fstat(fd, &st);
4980 if (r == 0 &&
4981 S_ISREG(st.st_mode) && // if it is a regular file
4982 st.st_size == 0) { // and is 0 bytes
4983 r = ::ftruncate(fd, size);
4984 if (r < 0) {
4985 r = -errno;
4986 derr << __func__ << " failed to resize " << name << " file to "
4987 << size << ": " << cpp_strerror(r) << dendl;
4988 VOID_TEMP_FAILURE_RETRY(::close(fd));
4989 return r;
4990 }
4991
4992 if (cct->_conf->bluestore_block_preallocate_file) {
4993 #ifdef HAVE_POSIX_FALLOCATE
4994 r = ::posix_fallocate(fd, 0, size);
4995 if (r) {
4996 derr << __func__ << " failed to preallocate " << name << " file to "
4997 << size << ": " << cpp_strerror(r) << dendl;
4998 VOID_TEMP_FAILURE_RETRY(::close(fd));
4999 return -r;
5000 }
5001 #else
5002 char data[1024*128];
5003 for (uint64_t off = 0; off < size; off += sizeof(data)) {
5004 if (off + sizeof(data) > size)
5005 r = ::write(fd, data, size - off);
5006 else
5007 r = ::write(fd, data, sizeof(data));
5008 if (r < 0) {
5009 r = -errno;
5010 derr << __func__ << " failed to preallocate (via write) " << name << " file to "
5011 << size << ": " << cpp_strerror(r) << dendl;
5012 VOID_TEMP_FAILURE_RETRY(::close(fd));
5013 return r;
5014 }
5015 }
5016 #endif
5017 }
5018 dout(1) << __func__ << " resized " << name << " file to "
5019 << pretty_si_t(size) << "B" << dendl;
5020 }
5021 VOID_TEMP_FAILURE_RETRY(::close(fd));
5022 } else {
5023 int r = -errno;
5024 if (r != -ENOENT) {
5025 derr << __func__ << " failed to open " << name << " file: "
5026 << cpp_strerror(r) << dendl;
5027 return r;
5028 }
5029 }
5030 }
5031 return 0;
5032 }
5033
5034 int BlueStore::mkfs()
5035 {
5036 dout(1) << __func__ << " path " << path << dendl;
5037 int r;
5038 uuid_d old_fsid;
5039
5040 {
5041 string done;
5042 r = read_meta("mkfs_done", &done);
5043 if (r == 0) {
5044 dout(1) << __func__ << " already created" << dendl;
5045 if (cct->_conf->bluestore_fsck_on_mkfs) {
5046 r = fsck(cct->_conf->bluestore_fsck_on_mkfs_deep);
5047 if (r < 0) {
5048 derr << __func__ << " fsck found fatal error: " << cpp_strerror(r)
5049 << dendl;
5050 return r;
5051 }
5052 if (r > 0) {
5053 derr << __func__ << " fsck found " << r << " errors" << dendl;
5054 r = -EIO;
5055 }
5056 }
5057 return r; // idempotent
5058 }
5059 }
5060
5061 {
5062 string type;
5063 r = read_meta("type", &type);
5064 if (r == 0) {
5065 if (type != "bluestore") {
5066 derr << __func__ << " expected bluestore, but type is " << type << dendl;
5067 return -EIO;
5068 }
5069 } else {
5070 r = write_meta("type", "bluestore");
5071 if (r < 0)
5072 return r;
5073 }
5074 }
5075
5076 freelist_type = "bitmap";
5077
5078 r = _open_path();
5079 if (r < 0)
5080 return r;
5081
5082 r = _open_fsid(true);
5083 if (r < 0)
5084 goto out_path_fd;
5085
5086 r = _lock_fsid();
5087 if (r < 0)
5088 goto out_close_fsid;
5089
5090 r = _read_fsid(&old_fsid);
5091 if (r < 0 || old_fsid.is_zero()) {
5092 if (fsid.is_zero()) {
5093 fsid.generate_random();
5094 dout(1) << __func__ << " generated fsid " << fsid << dendl;
5095 } else {
5096 dout(1) << __func__ << " using provided fsid " << fsid << dendl;
5097 }
5098 // we'll write it later.
5099 } else {
5100 if (!fsid.is_zero() && fsid != old_fsid) {
5101 derr << __func__ << " on-disk fsid " << old_fsid
5102 << " != provided " << fsid << dendl;
5103 r = -EINVAL;
5104 goto out_close_fsid;
5105 }
5106 fsid = old_fsid;
5107 }
5108
5109 r = _setup_block_symlink_or_file("block", cct->_conf->bluestore_block_path,
5110 cct->_conf->bluestore_block_size,
5111 cct->_conf->bluestore_block_create);
5112 if (r < 0)
5113 goto out_close_fsid;
5114 if (cct->_conf->bluestore_bluefs) {
5115 r = _setup_block_symlink_or_file("block.wal", cct->_conf->bluestore_block_wal_path,
5116 cct->_conf->bluestore_block_wal_size,
5117 cct->_conf->bluestore_block_wal_create);
5118 if (r < 0)
5119 goto out_close_fsid;
5120 r = _setup_block_symlink_or_file("block.db", cct->_conf->bluestore_block_db_path,
5121 cct->_conf->bluestore_block_db_size,
5122 cct->_conf->bluestore_block_db_create);
5123 if (r < 0)
5124 goto out_close_fsid;
5125 }
5126
5127 r = _open_bdev(true);
5128 if (r < 0)
5129 goto out_close_fsid;
5130
5131 r = _open_db(true);
5132 if (r < 0)
5133 goto out_close_bdev;
5134
5135 r = _open_fm(true);
5136 if (r < 0)
5137 goto out_close_db;
5138
5139 {
5140 KeyValueDB::Transaction t = db->get_transaction();
5141 {
5142 bufferlist bl;
5143 ::encode((uint64_t)0, bl);
5144 t->set(PREFIX_SUPER, "nid_max", bl);
5145 t->set(PREFIX_SUPER, "blobid_max", bl);
5146 }
5147
5148 // choose min_alloc_size
5149 if (cct->_conf->bluestore_min_alloc_size) {
5150 min_alloc_size = cct->_conf->bluestore_min_alloc_size;
5151 } else {
5152 assert(bdev);
5153 if (bdev->is_rotational()) {
5154 min_alloc_size = cct->_conf->bluestore_min_alloc_size_hdd;
5155 } else {
5156 min_alloc_size = cct->_conf->bluestore_min_alloc_size_ssd;
5157 }
5158 }
5159
5160 // make sure min_alloc_size is power of 2 aligned.
5161 if (!ISP2(min_alloc_size)) {
5162 derr << __func__ << " min_alloc_size 0x"
5163 << std::hex << min_alloc_size << std::dec
5164 << " is not power of 2 aligned!"
5165 << dendl;
5166 r = -EINVAL;
5167 goto out_close_fm;
5168 }
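// For instance, a min_alloc_size of 0x10000 (64 KiB) passes the ISP2
// check above, while a value like 0xc000 would be rejected here.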
5169
5170 {
5171 bufferlist bl;
5172 ::encode((uint64_t)min_alloc_size, bl);
5173 t->set(PREFIX_SUPER, "min_alloc_size", bl);
5174 }
5175
5176 ondisk_format = latest_ondisk_format;
5177 _prepare_ondisk_format_super(t);
5178 db->submit_transaction_sync(t);
5179 }
5180
5181
5182 r = write_meta("kv_backend", cct->_conf->bluestore_kvbackend);
5183 if (r < 0)
5184 goto out_close_fm;
5185
5186 r = write_meta("bluefs", stringify((int)cct->_conf->bluestore_bluefs));
5187 if (r < 0)
5188 goto out_close_fm;
5189
5190 if (fsid != old_fsid) {
5191 r = _write_fsid();
5192 if (r < 0) {
5193 derr << __func__ << " error writing fsid: " << cpp_strerror(r) << dendl;
5194 goto out_close_fm;
5195 }
5196 }
5197
5198 out_close_fm:
5199 _close_fm();
5200 out_close_db:
5201 _close_db();
5202 out_close_bdev:
5203 _close_bdev();
5204 out_close_fsid:
5205 _close_fsid();
5206 out_path_fd:
5207 _close_path();
5208
5209 if (r == 0 &&
5210 cct->_conf->bluestore_fsck_on_mkfs) {
5211 int rc = fsck(cct->_conf->bluestore_fsck_on_mkfs_deep);
5212 if (rc < 0)
5213 return rc;
5214 if (rc > 0) {
5215 derr << __func__ << " fsck found " << rc << " errors" << dendl;
5216 r = -EIO;
5217 }
5218 }
5219
5220 if (r == 0) {
5221 // indicate success by writing the 'mkfs_done' file
5222 r = write_meta("mkfs_done", "yes");
5223 }
5224
5225 if (r < 0) {
5226 derr << __func__ << " failed, " << cpp_strerror(r) << dendl;
5227 } else {
5228 dout(0) << __func__ << " success" << dendl;
5229 }
5230 return r;
5231 }
5232
5233 void BlueStore::set_cache_shards(unsigned num)
5234 {
5235 dout(10) << __func__ << " " << num << dendl;
5236 size_t old = cache_shards.size();
5237 assert(num >= old);
5238 cache_shards.resize(num);
5239 for (unsigned i = old; i < num; ++i) {
5240 cache_shards[i] = Cache::create(cct, cct->_conf->bluestore_cache_type,
5241 logger);
5242 }
5243 }
5244
5245 int BlueStore::_mount(bool kv_only)
5246 {
5247 dout(1) << __func__ << " path " << path << dendl;
5248
5249 {
5250 string type;
5251 int r = read_meta("type", &type);
5252 if (r < 0) {
5253 derr << __func__ << " failed to load os-type: " << cpp_strerror(r)
5254 << dendl;
5255 return r;
5256 }
5257
5258 if (type != "bluestore") {
5259 derr << __func__ << " expected bluestore, but type is " << type << dendl;
5260 return -EIO;
5261 }
5262 }
5263
5264 if (cct->_conf->bluestore_fsck_on_mount) {
5265 int rc = fsck(cct->_conf->bluestore_fsck_on_mount_deep);
5266 if (rc < 0)
5267 return rc;
5268 if (rc > 0) {
5269 derr << __func__ << " fsck found " << rc << " errors" << dendl;
5270 return -EIO;
5271 }
5272 }
5273
5274 int r = _open_path();
5275 if (r < 0)
5276 return r;
5277 r = _open_fsid(false);
5278 if (r < 0)
5279 goto out_path;
5280
5281 r = _read_fsid(&fsid);
5282 if (r < 0)
5283 goto out_fsid;
5284
5285 r = _lock_fsid();
5286 if (r < 0)
5287 goto out_fsid;
5288
5289 r = _open_bdev(false);
5290 if (r < 0)
5291 goto out_fsid;
5292
5293 r = _open_db(false);
5294 if (r < 0)
5295 goto out_bdev;
5296
5297 if (kv_only)
5298 return 0;
5299
5300 r = _open_super_meta();
5301 if (r < 0)
5302 goto out_db;
5303
5304 r = _open_fm(false);
5305 if (r < 0)
5306 goto out_db;
5307
5308 r = _open_alloc();
5309 if (r < 0)
5310 goto out_fm;
5311
5312 r = _open_collections();
5313 if (r < 0)
5314 goto out_alloc;
5315
5316 r = _reload_logger();
5317 if (r < 0)
5318 goto out_coll;
5319
5320 if (bluefs) {
5321 r = _reconcile_bluefs_freespace();
5322 if (r < 0)
5323 goto out_coll;
5324 }
5325
5326 _kv_start();
5327
5328 r = _deferred_replay();
5329 if (r < 0)
5330 goto out_stop;
5331
5332 mempool_thread.init();
5333
5334
5335 mounted = true;
5336 return 0;
5337
5338 out_stop:
5339 _kv_stop();
5340 out_coll:
5341 _flush_cache();
5342 out_alloc:
5343 _close_alloc();
5344 out_fm:
5345 _close_fm();
5346 out_db:
5347 _close_db();
5348 out_bdev:
5349 _close_bdev();
5350 out_fsid:
5351 _close_fsid();
5352 out_path:
5353 _close_path();
5354 return r;
5355 }
5356
5357 int BlueStore::umount()
5358 {
5359 assert(mounted);
5360 dout(1) << __func__ << dendl;
5361
5362 _osr_drain_all();
5363 _osr_unregister_all();
5364
5365 mempool_thread.shutdown();
5366
5367 dout(20) << __func__ << " stopping kv thread" << dendl;
5368 _kv_stop();
5369 _reap_collections();
5370 _flush_cache();
5371 dout(20) << __func__ << " closing" << dendl;
5372
5373 mounted = false;
5374 _close_alloc();
5375 _close_fm();
5376 _close_db();
5377 _close_bdev();
5378 _close_fsid();
5379 _close_path();
5380
5381 if (cct->_conf->bluestore_fsck_on_umount) {
5382 int rc = fsck(cct->_conf->bluestore_fsck_on_umount_deep);
5383 if (rc < 0)
5384 return rc;
5385 if (rc > 0) {
5386 derr << __func__ << " fsck found " << rc << " errors" << dendl;
5387 return -EIO;
5388 }
5389 }
5390 return 0;
5391 }
5392
5393 static void apply(uint64_t off,
5394 uint64_t len,
5395 uint64_t granularity,
5396 BlueStore::mempool_dynamic_bitset &bitset,
5397 const char *what,
5398 std::function<void(uint64_t,
5399 BlueStore::mempool_dynamic_bitset &)> f) {
5400 auto end = ROUND_UP_TO(off + len, granularity);
5401 while (off < end) {
5402 uint64_t pos = off / granularity;
5403 f(pos, bitset);
5404 off += granularity;
5405 }
5406 }
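// Example use of apply() (as in _fsck_check_extents below): marking the
// byte range 0x3000~0x2000 in the used_blocks bitmap with a granularity
// of 0x1000 sets bit positions 3 and 4, i.e. one bit per block that
// overlaps the range.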
5407
5408 int BlueStore::_fsck_check_extents(
5409 const ghobject_t& oid,
5410 const PExtentVector& extents,
5411 bool compressed,
5412 mempool_dynamic_bitset &used_blocks,
5413 store_statfs_t& expected_statfs)
5414 {
5415 dout(30) << __func__ << " oid " << oid << " extents " << extents << dendl;
5416 int errors = 0;
5417 for (auto e : extents) {
5418 if (!e.is_valid())
5419 continue;
5420 expected_statfs.allocated += e.length;
5421 if (compressed) {
5422 expected_statfs.compressed_allocated += e.length;
5423 }
5424 bool already = false;
5425 apply(
5426 e.offset, e.length, block_size, used_blocks, __func__,
5427 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5428 if (bs.test(pos))
5429 already = true;
5430 else
5431 bs.set(pos);
5432 });
5433 if (already) {
5434 derr << " " << oid << " extent " << e
5435 << " or a subset is already allocated" << dendl;
5436 ++errors;
5437 }
5438 if (e.end() > bdev->get_size()) {
5439 derr << " " << oid << " extent " << e
5440 << " past end of block device" << dendl;
5441 ++errors;
5442 }
5443 }
5444 return errors;
5445 }
5446
5447 int BlueStore::fsck(bool deep)
5448 {
5449 dout(1) << __func__ << (deep ? " (deep)" : " (shallow)") << " start" << dendl;
5450 int errors = 0;
5451
5452 typedef btree::btree_set<
5453 uint64_t,std::less<uint64_t>,
5454 mempool::bluestore_fsck::pool_allocator<uint64_t>> uint64_t_btree_t;
5455 uint64_t_btree_t used_nids;
5456 uint64_t_btree_t used_omap_head;
5457 uint64_t_btree_t used_sbids;
5458
5459 mempool_dynamic_bitset used_blocks;
5460 KeyValueDB::Iterator it;
5461 store_statfs_t expected_statfs, actual_statfs;
5462 struct sb_info_t {
5463 list<ghobject_t> oids;
5464 SharedBlobRef sb;
5465 bluestore_extent_ref_map_t ref_map;
5466 bool compressed;
5467 };
5468 mempool::bluestore_fsck::map<uint64_t,sb_info_t> sb_info;
5469
5470 uint64_t num_objects = 0;
5471 uint64_t num_extents = 0;
5472 uint64_t num_blobs = 0;
5473 uint64_t num_spanning_blobs = 0;
5474 uint64_t num_shared_blobs = 0;
5475 uint64_t num_sharded_objects = 0;
5476 uint64_t num_object_shards = 0;
5477
5478 utime_t start = ceph_clock_now();
5479
5480 int r = _open_path();
5481 if (r < 0)
5482 return r;
5483 r = _open_fsid(false);
5484 if (r < 0)
5485 goto out_path;
5486
5487 r = _read_fsid(&fsid);
5488 if (r < 0)
5489 goto out_fsid;
5490
5491 r = _lock_fsid();
5492 if (r < 0)
5493 goto out_fsid;
5494
5495 r = _open_bdev(false);
5496 if (r < 0)
5497 goto out_fsid;
5498
5499 r = _open_db(false);
5500 if (r < 0)
5501 goto out_bdev;
5502
5503 r = _open_super_meta();
5504 if (r < 0)
5505 goto out_db;
5506
5507 r = _open_fm(false);
5508 if (r < 0)
5509 goto out_db;
5510
5511 r = _open_alloc();
5512 if (r < 0)
5513 goto out_fm;
5514
5515 r = _open_collections(&errors);
5516 if (r < 0)
5517 goto out_alloc;
5518
5519 mempool_thread.init();
5520
5521 // we need finishers and kv_{sync,finalize}_thread *just* for replay
5522 _kv_start();
5523 r = _deferred_replay();
5524 _kv_stop();
5525 if (r < 0)
5526 goto out_scan;
5527
5528 used_blocks.resize(bdev->get_size() / block_size);
5529 apply(
5530 0, SUPER_RESERVED, block_size, used_blocks, "0~SUPER_RESERVED",
5531 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5532 bs.set(pos);
5533 }
5534 );
5535
5536 if (bluefs) {
5537 for (auto e = bluefs_extents.begin(); e != bluefs_extents.end(); ++e) {
5538 apply(
5539 e.get_start(), e.get_len(), block_size, used_blocks, "bluefs",
5540 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5541 bs.set(pos);
5542 }
5543 );
5544 }
5545 r = bluefs->fsck();
5546 if (r < 0) {
5547 goto out_scan;
5548 }
5549 if (r > 0)
5550 errors += r;
5551 }
5552
5553 // get expected statfs; fill unaffected fields to be able to compare
5554 // structs
5555 statfs(&actual_statfs);
5556 expected_statfs.total = actual_statfs.total;
5557 expected_statfs.available = actual_statfs.available;
5558
5559 // walk PREFIX_OBJ
5560 dout(1) << __func__ << " walking object keyspace" << dendl;
5561 it = db->get_iterator(PREFIX_OBJ);
5562 if (it) {
5563 CollectionRef c;
5564 spg_t pgid;
5565 mempool::bluestore_fsck::list<string> expecting_shards;
5566 for (it->lower_bound(string()); it->valid(); it->next()) {
5567 if (g_conf->bluestore_debug_fsck_abort) {
5568 goto out_scan;
5569 }
5570 dout(30) << " key " << pretty_binary_string(it->key()) << dendl;
5571 if (is_extent_shard_key(it->key())) {
5572 while (!expecting_shards.empty() &&
5573 expecting_shards.front() < it->key()) {
5574 derr << __func__ << " error: missing shard key "
5575 << pretty_binary_string(expecting_shards.front())
5576 << dendl;
5577 ++errors;
5578 expecting_shards.pop_front();
5579 }
5580 if (!expecting_shards.empty() &&
5581 expecting_shards.front() == it->key()) {
5582 // all good
5583 expecting_shards.pop_front();
5584 continue;
5585 }
5586
5587 uint32_t offset;
5588 string okey;
5589 get_key_extent_shard(it->key(), &okey, &offset);
5590 derr << __func__ << " error: stray shard 0x" << std::hex << offset
5591 << std::dec << dendl;
5592 if (expecting_shards.empty()) {
5593 derr << __func__ << " error: " << pretty_binary_string(it->key())
5594 << " is unexpected" << dendl;
5595 ++errors;
5596 continue;
5597 }
5598 while (expecting_shards.front() > it->key()) {
5599 derr << __func__ << " error: saw " << pretty_binary_string(it->key())
5600 << dendl;
5601 derr << __func__ << " error: exp "
5602 << pretty_binary_string(expecting_shards.front()) << dendl;
5603 ++errors;
5604 expecting_shards.pop_front();
5605 if (expecting_shards.empty()) {
5606 break;
5607 }
5608 }
5609 continue;
5610 }
5611
5612 ghobject_t oid;
5613 int r = get_key_object(it->key(), &oid);
5614 if (r < 0) {
5615 derr << __func__ << " error: bad object key "
5616 << pretty_binary_string(it->key()) << dendl;
5617 ++errors;
5618 continue;
5619 }
5620 if (!c ||
5621 oid.shard_id != pgid.shard ||
5622 oid.hobj.pool != (int64_t)pgid.pool() ||
5623 !c->contains(oid)) {
5624 c = nullptr;
5625 for (ceph::unordered_map<coll_t, CollectionRef>::iterator p =
5626 coll_map.begin();
5627 p != coll_map.end();
5628 ++p) {
5629 if (p->second->contains(oid)) {
5630 c = p->second;
5631 break;
5632 }
5633 }
5634 if (!c) {
5635 derr << __func__ << " error: stray object " << oid
5636 << " not owned by any collection" << dendl;
5637 ++errors;
5638 continue;
5639 }
5640 c->cid.is_pg(&pgid);
5641 dout(20) << __func__ << " collection " << c->cid << dendl;
5642 }
5643
5644 if (!expecting_shards.empty()) {
5645 for (auto &k : expecting_shards) {
5646 derr << __func__ << " error: missing shard key "
5647 << pretty_binary_string(k) << dendl;
5648 }
5649 ++errors;
5650 expecting_shards.clear();
5651 }
5652
5653 dout(10) << __func__ << " " << oid << dendl;
5654 RWLock::RLocker l(c->lock);
5655 OnodeRef o = c->get_onode(oid, false);
5656 if (o->onode.nid) {
5657 if (o->onode.nid > nid_max) {
5658 derr << __func__ << " error: " << oid << " nid " << o->onode.nid
5659 << " > nid_max " << nid_max << dendl;
5660 ++errors;
5661 }
5662 if (used_nids.count(o->onode.nid)) {
5663 derr << __func__ << " error: " << oid << " nid " << o->onode.nid
5664 << " already in use" << dendl;
5665 ++errors;
5666 continue; // go for next object
5667 }
5668 used_nids.insert(o->onode.nid);
5669 }
5670 ++num_objects;
5671 num_spanning_blobs += o->extent_map.spanning_blob_map.size();
5672 o->extent_map.fault_range(db, 0, OBJECT_MAX_SIZE);
5673 _dump_onode(o, 30);
5674 // shards
5675 if (!o->extent_map.shards.empty()) {
5676 ++num_sharded_objects;
5677 num_object_shards += o->extent_map.shards.size();
5678 }
5679 for (auto& s : o->extent_map.shards) {
5680 dout(20) << __func__ << " shard " << *s.shard_info << dendl;
5681 expecting_shards.push_back(string());
5682 get_extent_shard_key(o->key, s.shard_info->offset,
5683 &expecting_shards.back());
5684 if (s.shard_info->offset >= o->onode.size) {
5685 derr << __func__ << " error: " << oid << " shard 0x" << std::hex
5686 << s.shard_info->offset << " past EOF at 0x" << o->onode.size
5687 << std::dec << dendl;
5688 ++errors;
5689 }
5690 }
5691 // lextents
5692 map<BlobRef,bluestore_blob_t::unused_t> referenced;
5693 uint64_t pos = 0;
5694 mempool::bluestore_fsck::map<BlobRef,
5695 bluestore_blob_use_tracker_t> ref_map;
5696 for (auto& l : o->extent_map.extent_map) {
5697 dout(20) << __func__ << " " << l << dendl;
5698 if (l.logical_offset < pos) {
5699 derr << __func__ << " error: " << oid << " lextent at 0x"
5700 << std::hex << l.logical_offset
5701 << " overlaps with the previous, which ends at 0x" << pos
5702 << std::dec << dendl;
5703 ++errors;
5704 }
5705 if (o->extent_map.spans_shard(l.logical_offset, l.length)) {
5706 derr << __func__ << " error: " << oid << " lextent at 0x"
5707 << std::hex << l.logical_offset << "~" << l.length
5708 << " spans a shard boundary"
5709 << std::dec << dendl;
5710 ++errors;
5711 }
5712 pos = l.logical_offset + l.length;
5713 expected_statfs.stored += l.length;
5714 assert(l.blob);
5715 const bluestore_blob_t& blob = l.blob->get_blob();
5716
5717 auto& ref = ref_map[l.blob];
5718 if (ref.is_empty()) {
5719 uint32_t min_release_size = blob.get_release_size(min_alloc_size);
5720 uint32_t l = blob.get_logical_length();
5721 ref.init(l, min_release_size);
5722 }
5723 ref.get(
5724 l.blob_offset,
5725 l.length);
5726 ++num_extents;
5727 if (blob.has_unused()) {
5728 auto p = referenced.find(l.blob);
5729 bluestore_blob_t::unused_t *pu;
5730 if (p == referenced.end()) {
5731 pu = &referenced[l.blob];
5732 } else {
5733 pu = &p->second;
5734 }
5735 uint64_t blob_len = blob.get_logical_length();
5736 assert((blob_len % (sizeof(*pu)*8)) == 0);
5737 assert(l.blob_offset + l.length <= blob_len);
5738 uint64_t chunk_size = blob_len / (sizeof(*pu)*8);
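// illustrative only: with a hypothetical 64-bit unused bitmap and a
// 256KiB blob, chunk_size would be 256KiB/64 = 4KiB, and each referenced
// lextent sets the bits for the 4KiB chunks it touches so they can be
// compared against blob.unused below.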
5739 uint64_t start = l.blob_offset / chunk_size;
5740 uint64_t end =
5741 ROUND_UP_TO(l.blob_offset + l.length, chunk_size) / chunk_size;
5742 for (auto i = start; i < end; ++i) {
5743 (*pu) |= (1u << i);
5744 }
5745 }
5746 }
5747 for (auto &i : referenced) {
5748 dout(20) << __func__ << " referenced 0x" << std::hex << i.second
5749 << std::dec << " for " << *i.first << dendl;
5750 const bluestore_blob_t& blob = i.first->get_blob();
5751 if (i.second & blob.unused) {
5752 derr << __func__ << " error: " << oid << " blob claims unused 0x"
5753 << std::hex << blob.unused
5754 << " but extents reference 0x" << i.second
5755 << " on blob " << *i.first << dendl;
5756 ++errors;
5757 }
5758 if (blob.has_csum()) {
5759 uint64_t blob_len = blob.get_logical_length();
5760 uint64_t unused_chunk_size = blob_len / (sizeof(blob.unused)*8);
5761 unsigned csum_count = blob.get_csum_count();
5762 unsigned csum_chunk_size = blob.get_csum_chunk_size();
5763 for (unsigned p = 0; p < csum_count; ++p) {
5764 unsigned pos = p * csum_chunk_size;
5765 unsigned firstbit = pos / unused_chunk_size; // [firstbit,lastbit]
5766 unsigned lastbit = (pos + csum_chunk_size - 1) / unused_chunk_size;
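// illustrative only: if csum_chunk_size were 4KiB and unused_chunk_size
// 8KiB, csum chunk p=3 covers 0x3000~0x1000, so firstbit = lastbit = 1
// and the mask built below is 0x2.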
5767 unsigned mask = 1u << firstbit;
5768 for (unsigned b = firstbit + 1; b <= lastbit; ++b) {
5769 mask |= 1u << b;
5770 }
5771 if ((blob.unused & mask) == mask) {
5772 // this csum chunk region is marked unused
5773 if (blob.get_csum_item(p) != 0) {
5774 derr << __func__ << " error: " << oid
5775 << " blob claims csum chunk 0x" << std::hex << pos
5776 << "~" << csum_chunk_size
5777 << " is unused (mask 0x" << mask << " of unused 0x"
5778 << blob.unused << ") but csum is non-zero 0x"
5779 << blob.get_csum_item(p) << std::dec << " on blob "
5780 << *i.first << dendl;
5781 ++errors;
5782 }
5783 }
5784 }
5785 }
5786 }
5787 for (auto &i : ref_map) {
5788 ++num_blobs;
5789 const bluestore_blob_t& blob = i.first->get_blob();
5790 bool equal = i.first->get_blob_use_tracker().equal(i.second);
5791 if (!equal) {
5792 derr << __func__ << " error: " << oid << " blob " << *i.first
5793 << " doesn't match expected ref_map " << i.second << dendl;
5794 ++errors;
5795 }
5796 if (blob.is_compressed()) {
5797 expected_statfs.compressed += blob.get_compressed_payload_length();
5798 expected_statfs.compressed_original +=
5799 i.first->get_referenced_bytes();
5800 }
5801 if (blob.is_shared()) {
5802 if (i.first->shared_blob->get_sbid() > blobid_max) {
5803 derr << __func__ << " error: " << oid << " blob " << blob
5804 << " sbid " << i.first->shared_blob->get_sbid() << " > blobid_max "
5805 << blobid_max << dendl;
5806 ++errors;
5807 } else if (i.first->shared_blob->get_sbid() == 0) {
5808 derr << __func__ << " error: " << oid << " blob " << blob
5809 << " marked as shared but has uninitialized sbid"
5810 << dendl;
5811 ++errors;
5812 }
5813 sb_info_t& sbi = sb_info[i.first->shared_blob->get_sbid()];
5814 sbi.sb = i.first->shared_blob;
5815 sbi.oids.push_back(oid);
5816 sbi.compressed = blob.is_compressed();
5817 for (auto e : blob.get_extents()) {
5818 if (e.is_valid()) {
5819 sbi.ref_map.get(e.offset, e.length);
5820 }
5821 }
5822 } else {
5823 errors += _fsck_check_extents(oid, blob.get_extents(),
5824 blob.is_compressed(),
5825 used_blocks,
5826 expected_statfs);
5827 }
5828 }
5829 if (deep) {
5830 bufferlist bl;
5831 int r = _do_read(c.get(), o, 0, o->onode.size, bl, 0);
5832 if (r < 0) {
5833 ++errors;
5834 derr << __func__ << " error: " << oid << " error during read: "
5835 << cpp_strerror(r) << dendl;
5836 }
5837 }
5838 // omap
5839 if (o->onode.has_omap()) {
5840 if (used_omap_head.count(o->onode.nid)) {
5841 derr << __func__ << " error: " << oid << " omap_head " << o->onode.nid
5842 << " already in use" << dendl;
5843 ++errors;
5844 } else {
5845 used_omap_head.insert(o->onode.nid);
5846 }
5847 }
5848 }
5849 }
5850 dout(1) << __func__ << " checking shared_blobs" << dendl;
5851 it = db->get_iterator(PREFIX_SHARED_BLOB);
5852 if (it) {
5853 for (it->lower_bound(string()); it->valid(); it->next()) {
5854 string key = it->key();
5855 uint64_t sbid;
5856 if (get_key_shared_blob(key, &sbid)) {
5857 derr << __func__ << " error: bad key '" << key
5858 << "' in shared blob namespace" << dendl;
5859 ++errors;
5860 continue;
5861 }
5862 auto p = sb_info.find(sbid);
5863 if (p == sb_info.end()) {
5864 derr << __func__ << " error: found stray shared blob data for sbid 0x"
5865 << std::hex << sbid << std::dec << dendl;
5866 ++errors;
5867 } else {
5868 ++num_shared_blobs;
5869 sb_info_t& sbi = p->second;
5870 bluestore_shared_blob_t shared_blob(sbid);
5871 bufferlist bl = it->value();
5872 bufferlist::iterator blp = bl.begin();
5873 ::decode(shared_blob, blp);
5874 dout(20) << __func__ << " " << *sbi.sb << " " << shared_blob << dendl;
5875 if (shared_blob.ref_map != sbi.ref_map) {
5876 derr << __func__ << " error: shared blob 0x" << std::hex << sbid
5877 << std::dec << " ref_map " << shared_blob.ref_map
5878 << " != expected " << sbi.ref_map << dendl;
5879 ++errors;
5880 }
5881 PExtentVector extents;
5882 for (auto &r : shared_blob.ref_map.ref_map) {
5883 extents.emplace_back(bluestore_pextent_t(r.first, r.second.length));
5884 }
5885 errors += _fsck_check_extents(p->second.oids.front(),
5886 extents,
5887 p->second.compressed,
5888 used_blocks, expected_statfs);
5889 sb_info.erase(p);
5890 }
5891 }
5892 }
5893 for (auto &p : sb_info) {
5894 derr << __func__ << " error: shared_blob 0x" << p.first
5895 << " key is missing (" << *p.second.sb << ")" << dendl;
5896 ++errors;
5897 }
5898 if (!(actual_statfs == expected_statfs)) {
5899 derr << __func__ << " error: actual " << actual_statfs
5900 << " != expected " << expected_statfs << dendl;
5901 ++errors;
5902 }
5903
5904 dout(1) << __func__ << " checking for stray omap data" << dendl;
5905 it = db->get_iterator(PREFIX_OMAP);
5906 if (it) {
5907 for (it->lower_bound(string()); it->valid(); it->next()) {
5908 uint64_t omap_head;
5909 _key_decode_u64(it->key().c_str(), &omap_head);
5910 if (used_omap_head.count(omap_head) == 0) {
5911 derr << __func__ << " error: found stray omap data on omap_head "
5912 << omap_head << dendl;
5913 ++errors;
5914 }
5915 }
5916 }
5917
5918 dout(1) << __func__ << " checking deferred events" << dendl;
5919 it = db->get_iterator(PREFIX_DEFERRED);
5920 if (it) {
5921 for (it->lower_bound(string()); it->valid(); it->next()) {
5922 bufferlist bl = it->value();
5923 bufferlist::iterator p = bl.begin();
5924 bluestore_deferred_transaction_t wt;
5925 try {
5926 ::decode(wt, p);
5927 } catch (buffer::error& e) {
5928 derr << __func__ << " error: failed to decode deferred txn "
5929 << pretty_binary_string(it->key()) << dendl;
5930 r = -EIO;
5931 goto out_scan;
5932 }
5933 dout(20) << __func__ << " deferred " << wt.seq
5934 << " ops " << wt.ops.size()
5935 << " released 0x" << std::hex << wt.released << std::dec << dendl;
5936 for (auto e = wt.released.begin(); e != wt.released.end(); ++e) {
5937 apply(
5938 e.get_start(), e.get_len(), block_size, used_blocks, "deferred",
5939 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5940 bs.set(pos);
5941 }
5942 );
5943 }
5944 }
5945 }
5946
5947 dout(1) << __func__ << " checking freelist vs allocated" << dendl;
5948 {
5949 // remove bluefs_extents from used set since the freelist doesn't
5950 // know they are allocated.
5951 for (auto e = bluefs_extents.begin(); e != bluefs_extents.end(); ++e) {
5952 apply(
5953 e.get_start(), e.get_len(), block_size, used_blocks, "bluefs_extents",
5954 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5955 bs.reset(pos);
5956 }
5957 );
5958 }
5959 fm->enumerate_reset();
5960 uint64_t offset, length;
5961 while (fm->enumerate_next(&offset, &length)) {
5962 bool intersects = false;
5963 apply(
5964 offset, length, block_size, used_blocks, "free",
5965 [&](uint64_t pos, mempool_dynamic_bitset &bs) {
5966 if (bs.test(pos)) {
5967 intersects = true;
5968 } else {
5969 bs.set(pos);
5970 }
5971 }
5972 );
5973 if (intersects) {
5974 derr << __func__ << " error: free extent 0x" << std::hex << offset
5975 << "~" << length << std::dec
5976 << " intersects allocated blocks" << dendl;
5977 ++errors;
5978 }
5979 }
5980 fm->enumerate_reset();
5981 size_t count = used_blocks.count();
5982 if (used_blocks.size() != count) {
5983 assert(used_blocks.size() > count);
5984 derr << __func__ << " error: leaked some space; "
5985 << (used_blocks.size() - count) * min_alloc_size
5986 << " bytes leaked" << dendl;
5987 ++errors;
5988 }
5989 }
5990
5991 out_scan:
5992 mempool_thread.shutdown();
5993 _flush_cache();
5994 out_alloc:
5995 _close_alloc();
5996 out_fm:
5997 _close_fm();
5998 out_db:
5999 it.reset(); // before db is closed
6000 _close_db();
6001 out_bdev:
6002 _close_bdev();
6003 out_fsid:
6004 _close_fsid();
6005 out_path:
6006 _close_path();
6007
6008 // fatal errors take precedence
6009 if (r < 0)
6010 return r;
6011
6012 dout(2) << __func__ << " " << num_objects << " objects, "
6013 << num_sharded_objects << " of them sharded. "
6014 << dendl;
6015 dout(2) << __func__ << " " << num_extents << " extents to "
6016 << num_blobs << " blobs, "
6017 << num_spanning_blobs << " spanning, "
6018 << num_shared_blobs << " shared."
6019 << dendl;
6020
6021 utime_t duration = ceph_clock_now() - start;
6022 dout(1) << __func__ << " finish with " << errors << " errors in "
6023 << duration << " seconds" << dendl;
6024 return errors;
6025 }
6026
6027 void BlueStore::collect_metadata(map<string,string> *pm)
6028 {
6029 dout(10) << __func__ << dendl;
6030 bdev->collect_metadata("bluestore_bdev_", pm);
6031 if (bluefs) {
6032 (*pm)["bluefs"] = "1";
6033 (*pm)["bluefs_single_shared_device"] = stringify((int)bluefs_single_shared_device);
6034 bluefs->collect_metadata(pm);
6035 } else {
6036 (*pm)["bluefs"] = "0";
6037 }
6038 }
6039
6040 int BlueStore::statfs(struct store_statfs_t *buf)
6041 {
6042 buf->reset();
6043 buf->total = bdev->get_size();
6044 buf->available = alloc->get_free();
6045
6046 if (bluefs) {
6047 // part of our shared device is "free" according to BlueFS
6048 // Don't include bluestore_bluefs_min because that space can't
6049 // be used for any other purpose.
6050 buf->available += bluefs->get_free(bluefs_shared_bdev) - cct->_conf->bluestore_bluefs_min;
6051
6052 // include dedicated db, too, if that isn't the shared device.
6053 if (bluefs_shared_bdev != BlueFS::BDEV_DB) {
6054 buf->total += bluefs->get_total(BlueFS::BDEV_DB);
6055 }
6056 }
6057
6058 {
6059 std::lock_guard<std::mutex> l(vstatfs_lock);
6060
6061 buf->allocated = vstatfs.allocated();
6062 buf->stored = vstatfs.stored();
6063 buf->compressed = vstatfs.compressed();
6064 buf->compressed_original = vstatfs.compressed_original();
6065 buf->compressed_allocated = vstatfs.compressed_allocated();
6066 }
6067
6068 dout(20) << __func__ << " " << *buf << dendl;
6069 return 0;
6070 }
6071
6072 // ---------------
6073 // cache
6074
6075 BlueStore::CollectionRef BlueStore::_get_collection(const coll_t& cid)
6076 {
6077 RWLock::RLocker l(coll_lock);
6078 ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
6079 if (cp == coll_map.end())
6080 return CollectionRef();
6081 return cp->second;
6082 }
6083
6084 void BlueStore::_queue_reap_collection(CollectionRef& c)
6085 {
6086 dout(10) << __func__ << " " << c << " " << c->cid << dendl;
6087 std::lock_guard<std::mutex> l(reap_lock);
6088 removed_collections.push_back(c);
6089 }
6090
6091 void BlueStore::_reap_collections()
6092 {
6093 list<CollectionRef> removed_colls;
6094 {
6095 std::lock_guard<std::mutex> l(reap_lock);
6096 removed_colls.swap(removed_collections);
6097 }
6098
6099 bool all_reaped = true;
6100
6101 for (list<CollectionRef>::iterator p = removed_colls.begin();
6102 p != removed_colls.end();
6103 ++p) {
6104 CollectionRef c = *p;
6105 dout(10) << __func__ << " " << c << " " << c->cid << dendl;
6106 if (c->onode_map.map_any([&](OnodeRef o) {
6107 assert(!o->exists);
6108 if (o->flushing_count.load()) {
6109 dout(10) << __func__ << " " << c << " " << c->cid << " " << o->oid
6110 << " flush_txns " << o->flushing_count << dendl;
6111 return false;
6112 }
6113 return true;
6114 })) {
6115 all_reaped = false;
6116 continue;
6117 }
6118 c->onode_map.clear();
6119 dout(10) << __func__ << " " << c << " " << c->cid << " done" << dendl;
6120 }
6121
6122 if (all_reaped) {
6123 dout(10) << __func__ << " all reaped" << dendl;
6124 }
6125 }
6126
6127 void BlueStore::_update_cache_logger()
6128 {
6129 uint64_t num_onodes = 0;
6130 uint64_t num_extents = 0;
6131 uint64_t num_blobs = 0;
6132 uint64_t num_buffers = 0;
6133 uint64_t num_buffer_bytes = 0;
6134 for (auto c : cache_shards) {
6135 c->add_stats(&num_onodes, &num_extents, &num_blobs,
6136 &num_buffers, &num_buffer_bytes);
6137 }
6138 logger->set(l_bluestore_onodes, num_onodes);
6139 logger->set(l_bluestore_extents, num_extents);
6140 logger->set(l_bluestore_blobs, num_blobs);
6141 logger->set(l_bluestore_buffers, num_buffers);
6142 logger->set(l_bluestore_buffer_bytes, num_buffer_bytes);
6143 }
6144
6145 // ---------------
6146 // read operations
6147
6148 ObjectStore::CollectionHandle BlueStore::open_collection(const coll_t& cid)
6149 {
6150 return _get_collection(cid);
6151 }
6152
6153 bool BlueStore::exists(const coll_t& cid, const ghobject_t& oid)
6154 {
6155 CollectionHandle c = _get_collection(cid);
6156 if (!c)
6157 return false;
6158 return exists(c, oid);
6159 }
6160
6161 bool BlueStore::exists(CollectionHandle &c_, const ghobject_t& oid)
6162 {
6163 Collection *c = static_cast<Collection *>(c_.get());
6164 dout(10) << __func__ << " " << c->cid << " " << oid << dendl;
6165 if (!c->exists)
6166 return false;
6167
6168 bool r = true;
6169
6170 {
6171 RWLock::RLocker l(c->lock);
6172 OnodeRef o = c->get_onode(oid, false);
6173 if (!o || !o->exists)
6174 r = false;
6175 }
6176
6177 return r;
6178 }
6179
6180 int BlueStore::stat(
6181 const coll_t& cid,
6182 const ghobject_t& oid,
6183 struct stat *st,
6184 bool allow_eio)
6185 {
6186 CollectionHandle c = _get_collection(cid);
6187 if (!c)
6188 return -ENOENT;
6189 return stat(c, oid, st, allow_eio);
6190 }
6191
6192 int BlueStore::stat(
6193 CollectionHandle &c_,
6194 const ghobject_t& oid,
6195 struct stat *st,
6196 bool allow_eio)
6197 {
6198 Collection *c = static_cast<Collection *>(c_.get());
6199 if (!c->exists)
6200 return -ENOENT;
6201 dout(10) << __func__ << " " << c->get_cid() << " " << oid << dendl;
6202
6203 {
6204 RWLock::RLocker l(c->lock);
6205 OnodeRef o = c->get_onode(oid, false);
6206 if (!o || !o->exists)
6207 return -ENOENT;
6208 st->st_size = o->onode.size;
6209 st->st_blksize = 4096;
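// report size rounded up to whole st_blksize blocks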
6210 st->st_blocks = (st->st_size + st->st_blksize - 1) / st->st_blksize;
6211 st->st_nlink = 1;
6212 }
6213
6214 int r = 0;
6215 if (_debug_mdata_eio(oid)) {
6216 r = -EIO;
6217 derr << __func__ << " " << c->cid << " " << oid << " INJECT EIO" << dendl;
6218 }
6219 return r;
6220 }
6221 int BlueStore::set_collection_opts(
6222 const coll_t& cid,
6223 const pool_opts_t& opts)
6224 {
6225 CollectionHandle ch = _get_collection(cid);
6226 if (!ch)
6227 return -ENOENT;
6228 Collection *c = static_cast<Collection *>(ch.get());
6229 dout(15) << __func__ << " " << cid << " options " << opts << dendl;
6230 if (!c->exists)
6231 return -ENOENT;
6232 RWLock::WLocker l(c->lock);
6233 c->pool_opts = opts;
6234 return 0;
6235 }
6236
6237 int BlueStore::read(
6238 const coll_t& cid,
6239 const ghobject_t& oid,
6240 uint64_t offset,
6241 size_t length,
6242 bufferlist& bl,
6243 uint32_t op_flags)
6244 {
6245 CollectionHandle c = _get_collection(cid);
6246 if (!c)
6247 return -ENOENT;
6248 return read(c, oid, offset, length, bl, op_flags);
6249 }
6250
6251 int BlueStore::read(
6252 CollectionHandle &c_,
6253 const ghobject_t& oid,
6254 uint64_t offset,
6255 size_t length,
6256 bufferlist& bl,
6257 uint32_t op_flags)
6258 {
6259 utime_t start = ceph_clock_now();
6260 Collection *c = static_cast<Collection *>(c_.get());
6261 const coll_t &cid = c->get_cid();
6262 dout(15) << __func__ << " " << cid << " " << oid
6263 << " 0x" << std::hex << offset << "~" << length << std::dec
6264 << dendl;
6265 if (!c->exists)
6266 return -ENOENT;
6267
6268 bl.clear();
6269 int r;
6270 {
6271 RWLock::RLocker l(c->lock);
6272 utime_t start1 = ceph_clock_now();
6273 OnodeRef o = c->get_onode(oid, false);
6274 logger->tinc(l_bluestore_read_onode_meta_lat, ceph_clock_now() - start1);
6275 if (!o || !o->exists) {
6276 r = -ENOENT;
6277 goto out;
6278 }
6279
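// offset == length == 0 is shorthand for "read the whole object"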
6280 if (offset == length && offset == 0)
6281 length = o->onode.size;
6282
6283 r = _do_read(c, o, offset, length, bl, op_flags);
6284 }
6285
6286 out:
6287 if (r == 0 && _debug_data_eio(oid)) {
6288 r = -EIO;
6289 derr << __func__ << " " << c->cid << " " << oid << " INJECT EIO" << dendl;
6290 } else if (cct->_conf->bluestore_debug_random_read_err &&
6291 (rand() % (int)(cct->_conf->bluestore_debug_random_read_err * 100.0)) == 0) {
6292 dout(0) << __func__ << ": inject random EIO" << dendl;
6293 r = -EIO;
6294 }
6295 dout(10) << __func__ << " " << cid << " " << oid
6296 << " 0x" << std::hex << offset << "~" << length << std::dec
6297 << " = " << r << dendl;
6298 logger->tinc(l_bluestore_read_lat, ceph_clock_now() - start);
6299 return r;
6300 }
6301
6302 // --------------------------------------------------------
6303 // intermediate data structures used while reading
6304 struct region_t {
6305 uint64_t logical_offset;
6306 uint64_t blob_xoffset; //region offset within the blob
6307 uint64_t length;
6308 bufferlist bl;
6309
6310 // used later in read process
6311 uint64_t front = 0;
6312 uint64_t r_off = 0;
6313
6314 region_t(uint64_t offset, uint64_t b_offs, uint64_t len)
6315 : logical_offset(offset),
6316 blob_xoffset(b_offs),
6317 length(len){}
6318 region_t(const region_t& from)
6319 : logical_offset(from.logical_offset),
6320 blob_xoffset(from.blob_xoffset),
6321 length(from.length){}
6322
6323 friend ostream& operator<<(ostream& out, const region_t& r) {
6324 return out << "0x" << std::hex << r.logical_offset << ":"
6325 << r.blob_xoffset << "~" << r.length << std::dec;
6326 }
6327 };
6328
6329 typedef list<region_t> regions2read_t;
6330 typedef map<BlueStore::BlobRef, regions2read_t> blobs2read_t;
6331
6332 int BlueStore::_do_read(
6333 Collection *c,
6334 OnodeRef o,
6335 uint64_t offset,
6336 size_t length,
6337 bufferlist& bl,
6338 uint32_t op_flags)
6339 {
6340 FUNCTRACE();
6341 int r = 0;
6342
6343 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
6344 << " size 0x" << o->onode.size << " (" << std::dec
6345 << o->onode.size << ")" << dendl;
6346 bl.clear();
6347
6348 if (offset >= o->onode.size) {
6349 return r;
6350 }
6351
6352 // generally, don't buffer anything, unless the client explicitly requests
6353 // it.
6354 bool buffered = false;
6355 if (op_flags & CEPH_OSD_OP_FLAG_FADVISE_WILLNEED) {
6356 dout(20) << __func__ << " will do buffered read" << dendl;
6357 buffered = true;
6358 } else if (cct->_conf->bluestore_default_buffered_read &&
6359 (op_flags & (CEPH_OSD_OP_FLAG_FADVISE_DONTNEED |
6360 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE)) == 0) {
6361 dout(20) << __func__ << " defaulting to buffered read" << dendl;
6362 buffered = true;
6363 }
6364
6365 if (offset + length > o->onode.size) {
6366 length = o->onode.size - offset;
6367 }
6368
6369 utime_t start = ceph_clock_now();
6370 o->extent_map.fault_range(db, offset, length);
6371 logger->tinc(l_bluestore_read_onode_meta_lat, ceph_clock_now() - start);
6372 _dump_onode(o);
6373
6374 ready_regions_t ready_regions;
6375
6376 // build blob-wise list of stuff to read (that isn't cached)
6377 blobs2read_t blobs2read;
6378 unsigned left = length;
6379 uint64_t pos = offset;
6380 unsigned num_regions = 0;
6381 auto lp = o->extent_map.seek_lextent(offset);
6382 while (left > 0 && lp != o->extent_map.extent_map.end()) {
6383 if (pos < lp->logical_offset) {
6384 unsigned hole = lp->logical_offset - pos;
6385 if (hole >= left) {
6386 break;
6387 }
6388 dout(30) << __func__ << " hole 0x" << std::hex << pos << "~" << hole
6389 << std::dec << dendl;
6390 pos += hole;
6391 left -= hole;
6392 }
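// translate the logical position into an offset and length within
// this blob's extent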
6393 BlobRef bptr = lp->blob;
6394 unsigned l_off = pos - lp->logical_offset;
6395 unsigned b_off = l_off + lp->blob_offset;
6396 unsigned b_len = std::min(left, lp->length - l_off);
6397
6398 ready_regions_t cache_res;
6399 interval_set<uint32_t> cache_interval;
6400 bptr->shared_blob->bc.read(
6401 bptr->shared_blob->get_cache(), b_off, b_len, cache_res, cache_interval);
6402 dout(20) << __func__ << " blob " << *bptr << std::hex
6403 << " need 0x" << b_off << "~" << b_len
6404 << " cache has 0x" << cache_interval
6405 << std::dec << dendl;
6406
6407 auto pc = cache_res.begin();
6408 while (b_len > 0) {
6409 unsigned l;
6410 if (pc != cache_res.end() &&
6411 pc->first == b_off) {
6412 l = pc->second.length();
6413 ready_regions[pos].claim(pc->second);
6414 dout(30) << __func__ << " use cache 0x" << std::hex << pos << ": 0x"
6415 << b_off << "~" << l << std::dec << dendl;
6416 ++pc;
6417 } else {
6418 l = b_len;
6419 if (pc != cache_res.end()) {
6420 assert(pc->first > b_off);
6421 l = pc->first - b_off;
6422 }
6423 dout(30) << __func__ << " will read 0x" << std::hex << pos << ": 0x"
6424 << b_off << "~" << l << std::dec << dendl;
6425 blobs2read[bptr].emplace_back(region_t(pos, b_off, l));
6426 ++num_regions;
6427 }
6428 pos += l;
6429 b_off += l;
6430 left -= l;
6431 b_len -= l;
6432 }
6433 ++lp;
6434 }
6435
6436 // read raw blob data. use aio if we have >1 blobs to read.
6437 start = ceph_clock_now(); // for simplicity, time the whole
6438 // block below as one unit; the
6439 // resulting measurement error is small.
6440 vector<bufferlist> compressed_blob_bls;
6441 IOContext ioc(cct, NULL);
6442 for (auto& p : blobs2read) {
6443 BlobRef bptr = p.first;
6444 dout(20) << __func__ << " blob " << *bptr << std::hex
6445 << " need " << p.second << std::dec << dendl;
6446 if (bptr->get_blob().is_compressed()) {
6447 // read the whole thing
6448 if (compressed_blob_bls.empty()) {
6449 // ensure we avoid any reallocation on subsequent blobs
6450 compressed_blob_bls.reserve(blobs2read.size());
6451 }
6452 compressed_blob_bls.push_back(bufferlist());
6453 bufferlist& bl = compressed_blob_bls.back();
6454 r = bptr->get_blob().map(
6455 0, bptr->get_blob().get_ondisk_length(),
6456 [&](uint64_t offset, uint64_t length) {
6457 int r;
6458 // use aio if there are more regions to read than those in this blob
6459 if (num_regions > p.second.size()) {
6460 r = bdev->aio_read(offset, length, &bl, &ioc);
6461 } else {
6462 r = bdev->read(offset, length, &bl, &ioc, false);
6463 }
6464 if (r < 0)
6465 return r;
6466 return 0;
6467 });
6468 assert(r == 0);
6469 } else {
6470 // read the pieces
6471 for (auto& reg : p.second) {
6472 // determine how much of the blob to read
6473 uint64_t chunk_size = bptr->get_blob().get_chunk_size(block_size);
6474 reg.r_off = reg.blob_xoffset;
6475 uint64_t r_len = reg.length;
6476 reg.front = reg.r_off % chunk_size;
6477 if (reg.front) {
6478 reg.r_off -= reg.front;
6479 r_len += reg.front;
6480 }
6481 unsigned tail = r_len % chunk_size;
6482 if (tail) {
6483 r_len += chunk_size - tail;
6484 }
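// illustrative only: with chunk_size 0x1000, blob_xoffset 0x1800 and
// length 0x2400, r_off becomes 0x1000, front = 0x800, and r_len rounds
// up from 0x2c00 to 0x3000.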
6485 dout(20) << __func__ << " region 0x" << std::hex
6486 << reg.logical_offset
6487 << ": 0x" << reg.blob_xoffset << "~" << reg.length
6488 << " reading 0x" << reg.r_off << "~" << r_len << std::dec
6489 << dendl;
6490
6491 // read it
6492 r = bptr->get_blob().map(
6493 reg.r_off, r_len,
6494 [&](uint64_t offset, uint64_t length) {
6495 int r;
6496 // use aio if there is more than one region to read
6497 if (num_regions > 1) {
6498 r = bdev->aio_read(offset, length, &reg.bl, &ioc);
6499 } else {
6500 r = bdev->read(offset, length, &reg.bl, &ioc, false);
6501 }
6502 if (r < 0)
6503 return r;
6504 return 0;
6505 });
6506 assert(r == 0);
6507 assert(reg.bl.length() == r_len);
6508 }
6509 }
6510 }
6511 if (ioc.has_pending_aios()) {
6512 bdev->aio_submit(&ioc);
6513 dout(20) << __func__ << " waiting for aio" << dendl;
6514 ioc.aio_wait();
6515 }
6516 logger->tinc(l_bluestore_read_wait_aio_lat, ceph_clock_now() - start);
6517
6518 // enumerate and decompress desired blobs
6519 auto p = compressed_blob_bls.begin();
6520 blobs2read_t::iterator b2r_it = blobs2read.begin();
6521 while (b2r_it != blobs2read.end()) {
6522 BlobRef bptr = b2r_it->first;
6523 dout(20) << __func__ << " blob " << *bptr << std::hex
6524 << " need 0x" << b2r_it->second << std::dec << dendl;
6525 if (bptr->get_blob().is_compressed()) {
6526 assert(p != compressed_blob_bls.end());
6527 bufferlist& compressed_bl = *p++;
6528 if (_verify_csum(o, &bptr->get_blob(), 0, compressed_bl,
6529 b2r_it->second.front().logical_offset) < 0) {
6530 return -EIO;
6531 }
6532 bufferlist raw_bl;
6533 r = _decompress(compressed_bl, &raw_bl);
6534 if (r < 0)
6535 return r;
6536 if (buffered) {
6537 bptr->shared_blob->bc.did_read(bptr->shared_blob->get_cache(), 0,
6538 raw_bl);
6539 }
6540 for (auto& i : b2r_it->second) {
6541 ready_regions[i.logical_offset].substr_of(
6542 raw_bl, i.blob_xoffset, i.length);
6543 }
6544 } else {
6545 for (auto& reg : b2r_it->second) {
6546 if (_verify_csum(o, &bptr->get_blob(), reg.r_off, reg.bl,
6547 reg.logical_offset) < 0) {
6548 return -EIO;
6549 }
6550 if (buffered) {
6551 bptr->shared_blob->bc.did_read(bptr->shared_blob->get_cache(),
6552 reg.r_off, reg.bl);
6553 }
6554
6555 // prune and keep result
6556 ready_regions[reg.logical_offset].substr_of(
6557 reg.bl, reg.front, reg.length);
6558 }
6559 }
6560 ++b2r_it;
6561 }
6562
6563 // generate a resulting buffer
6564 auto pr = ready_regions.begin();
6565 auto pr_end = ready_regions.end();
6566 pos = 0;
6567 while (pos < length) {
6568 if (pr != pr_end && pr->first == pos + offset) {
6569 dout(30) << __func__ << " assemble 0x" << std::hex << pos
6570 << ": data from 0x" << pr->first << "~" << pr->second.length()
6571 << std::dec << dendl;
6572 pos += pr->second.length();
6573 bl.claim_append(pr->second);
6574 ++pr;
6575 } else {
6576 uint64_t l = length - pos;
6577 if (pr != pr_end) {
6578 assert(pr->first > pos + offset);
6579 l = pr->first - (pos + offset);
6580 }
6581 dout(30) << __func__ << " assemble 0x" << std::hex << pos
6582 << ": zeros for 0x" << (pos + offset) << "~" << l
6583 << std::dec << dendl;
6584 bl.append_zero(l);
6585 pos += l;
6586 }
6587 }
6588 assert(bl.length() == length);
6589 assert(pos == length);
6590 assert(pr == pr_end);
6591 r = bl.length();
6592 return r;
6593 }
6594
6595 int BlueStore::_verify_csum(OnodeRef& o,
6596 const bluestore_blob_t* blob, uint64_t blob_xoffset,
6597 const bufferlist& bl,
6598 uint64_t logical_offset) const
6599 {
6600 int bad;
6601 uint64_t bad_csum;
6602 utime_t start = ceph_clock_now();
6603 int r = blob->verify_csum(blob_xoffset, bl, &bad, &bad_csum);
6604 if (r < 0) {
6605 if (r == -1) {
6606 PExtentVector pex;
6607 blob->map(
6608 bad,
6609 blob->get_csum_chunk_size(),
6610 [&](uint64_t offset, uint64_t length) {
6611 pex.emplace_back(bluestore_pextent_t(offset, length));
6612 return 0;
6613 });
6614 derr << __func__ << " bad "
6615 << Checksummer::get_csum_type_string(blob->csum_type)
6616 << "/0x" << std::hex << blob->get_csum_chunk_size()
6617 << " checksum at blob offset 0x" << bad
6618 << ", got 0x" << bad_csum << ", expected 0x"
6619 << blob->get_csum_item(bad / blob->get_csum_chunk_size()) << std::dec
6620 << ", device location " << pex
6621 << ", logical extent 0x" << std::hex
6622 << (logical_offset + bad - blob_xoffset) << "~"
6623 << blob->get_csum_chunk_size() << std::dec
6624 << ", object " << o->oid
6625 << dendl;
6626 } else {
6627 derr << __func__ << " failed with exit code: " << cpp_strerror(r) << dendl;
6628 }
6629 }
6630 logger->tinc(l_bluestore_csum_lat, ceph_clock_now() - start);
6631 return r;
6632 }
6633
6634 int BlueStore::_decompress(bufferlist& source, bufferlist* result)
6635 {
6636 int r = 0;
6637 utime_t start = ceph_clock_now();
6638 bufferlist::iterator i = source.begin();
6639 bluestore_compression_header_t chdr;
6640 ::decode(chdr, i);
6641 int alg = int(chdr.type);
6642 CompressorRef cp = compressor;
6643 if (!cp || (int)cp->get_type() != alg) {
6644 cp = Compressor::create(cct, alg);
6645 }
6646
6647 if (!cp.get()) {
6648 // if the compressor isn't available we must fail: we have no way
6649 // to return the decompressed data
6650 derr << __func__ << " can't load decompressor " << alg << dendl;
6651 r = -EIO;
6652 } else {
6653 r = cp->decompress(i, chdr.length, *result);
6654 if (r < 0) {
6655 derr << __func__ << " decompression failed with exit code " << r << dendl;
6656 r = -EIO;
6657 }
6658 }
6659 logger->tinc(l_bluestore_decompress_lat, ceph_clock_now() - start);
6660 return r;
6661 }
6662
6663 // this stores the fiemap result into an interval_set; the other
6664 // fiemap() variants call it internally
6665 int BlueStore::_fiemap(
6666 CollectionHandle &c_,
6667 const ghobject_t& oid,
6668 uint64_t offset,
6669 size_t length,
6670 interval_set<uint64_t>& destset)
6671 {
6672 Collection *c = static_cast<Collection *>(c_.get());
6673 if (!c->exists)
6674 return -ENOENT;
6675 {
6676 RWLock::RLocker l(c->lock);
6677
6678 OnodeRef o = c->get_onode(oid, false);
6679 if (!o || !o->exists) {
6680 return -ENOENT;
6681 }
6682 _dump_onode(o);
6683
6684 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
6685 << " size 0x" << o->onode.size << std::dec << dendl;
6686
6687 boost::intrusive::set<Extent>::iterator ep, eend;
6688 if (offset >= o->onode.size)
6689 goto out;
6690
6691 if (offset + length > o->onode.size) {
6692 length = o->onode.size - offset;
6693 }
6694
6695 o->extent_map.fault_range(db, offset, length);
6696 eend = o->extent_map.extent_map.end();
6697 ep = o->extent_map.seek_lextent(offset);
6698 while (length > 0) {
6699 dout(20) << __func__ << " offset " << offset << dendl;
6700 if (ep != eend && ep->logical_offset + ep->length <= offset) {
6701 ++ep;
6702 continue;
6703 }
6704
6705 uint64_t x_len = length;
6706 if (ep != eend && ep->logical_offset <= offset) {
6707 uint64_t x_off = offset - ep->logical_offset;
6708 x_len = MIN(x_len, ep->length - x_off);
6709 dout(30) << __func__ << " lextent 0x" << std::hex << offset << "~"
6710 << x_len << std::dec << " blob " << ep->blob << dendl;
6711 destset.insert(offset, x_len);
6712 length -= x_len;
6713 offset += x_len;
6714 if (x_off + x_len == ep->length)
6715 ++ep;
6716 continue;
6717 }
6718 if (ep != eend &&
6719 ep->logical_offset > offset &&
6720 ep->logical_offset - offset < x_len) {
6721 x_len = ep->logical_offset - offset;
6722 }
6723 offset += x_len;
6724 length -= x_len;
6725 }
6726 }
6727
6728 out:
6729 dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
6730 << " size = 0x(" << destset << ")" << std::dec << dendl;
6731 return 0;
6732 }
6733
6734 int BlueStore::fiemap(
6735 const coll_t& cid,
6736 const ghobject_t& oid,
6737 uint64_t offset,
6738 size_t len,
6739 bufferlist& bl)
6740 {
6741 CollectionHandle c = _get_collection(cid);
6742 if (!c)
6743 return -ENOENT;
6744 return fiemap(c, oid, offset, len, bl);
6745 }
6746
6747 int BlueStore::fiemap(
6748 CollectionHandle &c_,
6749 const ghobject_t& oid,
6750 uint64_t offset,
6751 size_t length,
6752 bufferlist& bl)
6753 {
6754 interval_set<uint64_t> m;
6755 int r = _fiemap(c_, oid, offset, length, m);
6756 if (r >= 0) {
6757 ::encode(m, bl);
6758 }
6759 return r;
6760 }
6761
6762 int BlueStore::fiemap(
6763 const coll_t& cid,
6764 const ghobject_t& oid,
6765 uint64_t offset,
6766 size_t len,
6767 map<uint64_t, uint64_t>& destmap)
6768 {
6769 CollectionHandle c = _get_collection(cid);
6770 if (!c)
6771 return -ENOENT;
6772 return fiemap(c, oid, offset, len, destmap);
6773 }
6774
6775 int BlueStore::fiemap(
6776 CollectionHandle &c_,
6777 const ghobject_t& oid,
6778 uint64_t offset,
6779 size_t length,
6780 map<uint64_t, uint64_t>& destmap)
6781 {
6782 interval_set<uint64_t> m;
6783 int r = _fiemap(c_, oid, offset, length, m);
6784 if (r >= 0) {
6785 m.move_into(destmap);
6786 }
6787 return r;
6788 }
6789
6790 int BlueStore::getattr(
6791 const coll_t& cid,
6792 const ghobject_t& oid,
6793 const char *name,
6794 bufferptr& value)
6795 {
6796 CollectionHandle c = _get_collection(cid);
6797 if (!c)
6798 return -ENOENT;
6799 return getattr(c, oid, name, value);
6800 }
6801
6802 int BlueStore::getattr(
6803 CollectionHandle &c_,
6804 const ghobject_t& oid,
6805 const char *name,
6806 bufferptr& value)
6807 {
6808 Collection *c = static_cast<Collection *>(c_.get());
6809 dout(15) << __func__ << " " << c->cid << " " << oid << " " << name << dendl;
6810 if (!c->exists)
6811 return -ENOENT;
6812
6813 int r;
6814 {
6815 RWLock::RLocker l(c->lock);
6816 mempool::bluestore_cache_other::string k(name);
6817
6818 OnodeRef o = c->get_onode(oid, false);
6819 if (!o || !o->exists) {
6820 r = -ENOENT;
6821 goto out;
6822 }
6823
6824 if (!o->onode.attrs.count(k)) {
6825 r = -ENODATA;
6826 goto out;
6827 }
6828 value = o->onode.attrs[k];
6829 r = 0;
6830 }
6831 out:
6832 if (r == 0 && _debug_mdata_eio(oid)) {
6833 r = -EIO;
6834 derr << __func__ << " " << c->cid << " " << oid << " INJECT EIO" << dendl;
6835 }
6836 dout(10) << __func__ << " " << c->cid << " " << oid << " " << name
6837 << " = " << r << dendl;
6838 return r;
6839 }
6840
6841
6842 int BlueStore::getattrs(
6843 const coll_t& cid,
6844 const ghobject_t& oid,
6845 map<string,bufferptr>& aset)
6846 {
6847 CollectionHandle c = _get_collection(cid);
6848 if (!c)
6849 return -ENOENT;
6850 return getattrs(c, oid, aset);
6851 }
6852
6853 int BlueStore::getattrs(
6854 CollectionHandle &c_,
6855 const ghobject_t& oid,
6856 map<string,bufferptr>& aset)
6857 {
6858 Collection *c = static_cast<Collection *>(c_.get());
6859 dout(15) << __func__ << " " << c->cid << " " << oid << dendl;
6860 if (!c->exists)
6861 return -ENOENT;
6862
6863 int r;
6864 {
6865 RWLock::RLocker l(c->lock);
6866
6867 OnodeRef o = c->get_onode(oid, false);
6868 if (!o || !o->exists) {
6869 r = -ENOENT;
6870 goto out;
6871 }
6872 for (auto& i : o->onode.attrs) {
6873 aset.emplace(i.first.c_str(), i.second);
6874 }
6875 r = 0;
6876 }
6877
6878 out:
6879 if (r == 0 && _debug_mdata_eio(oid)) {
6880 r = -EIO;
6881 derr << __func__ << " " << c->cid << " " << oid << " INJECT EIO" << dendl;
6882 }
6883 dout(10) << __func__ << " " << c->cid << " " << oid
6884 << " = " << r << dendl;
6885 return r;
6886 }
6887
6888 int BlueStore::list_collections(vector<coll_t>& ls)
6889 {
6890 RWLock::RLocker l(coll_lock);
6891 for (ceph::unordered_map<coll_t, CollectionRef>::iterator p = coll_map.begin();
6892 p != coll_map.end();
6893 ++p)
6894 ls.push_back(p->first);
6895 return 0;
6896 }
6897
6898 bool BlueStore::collection_exists(const coll_t& c)
6899 {
6900 RWLock::RLocker l(coll_lock);
6901 return coll_map.count(c);
6902 }
6903
6904 int BlueStore::collection_empty(const coll_t& cid, bool *empty)
6905 {
6906 dout(15) << __func__ << " " << cid << dendl;
6907 vector<ghobject_t> ls;
6908 ghobject_t next;
6909 int r = collection_list(cid, ghobject_t(), ghobject_t::get_max(), 1,
6910 &ls, &next);
6911 if (r < 0) {
6912 derr << __func__ << " collection_list returned: " << cpp_strerror(r)
6913 << dendl;
6914 return r;
6915 }
6916 *empty = ls.empty();
6917 dout(10) << __func__ << " " << cid << " = " << (int)(*empty) << dendl;
6918 return 0;
6919 }
6920
6921 int BlueStore::collection_bits(const coll_t& cid)
6922 {
6923 dout(15) << __func__ << " " << cid << dendl;
6924 CollectionRef c = _get_collection(cid);
6925 if (!c)
6926 return -ENOENT;
6927 RWLock::RLocker l(c->lock);
6928 dout(10) << __func__ << " " << cid << " = " << c->cnode.bits << dendl;
6929 return c->cnode.bits;
6930 }
6931
6932 int BlueStore::collection_list(
6933 const coll_t& cid, const ghobject_t& start, const ghobject_t& end, int max,
6934 vector<ghobject_t> *ls, ghobject_t *pnext)
6935 {
6936 CollectionHandle c = _get_collection(cid);
6937 if (!c)
6938 return -ENOENT;
6939 return collection_list(c, start, end, max, ls, pnext);
6940 }
6941
6942 int BlueStore::collection_list(
6943 CollectionHandle &c_, const ghobject_t& start, const ghobject_t& end, int max,
6944 vector<ghobject_t> *ls, ghobject_t *pnext)
6945 {
6946 Collection *c = static_cast<Collection *>(c_.get());
6947 dout(15) << __func__ << " " << c->cid
6948 << " start " << start << " end " << end << " max " << max << dendl;
6949 int r;
6950 {
6951 RWLock::RLocker l(c->lock);
6952 r = _collection_list(c, start, end, max, ls, pnext);
6953 }
6954
6955 dout(10) << __func__ << " " << c->cid
6956 << " start " << start << " end " << end << " max " << max
6957 << " = " << r << ", ls.size() = " << ls->size()
6958 << ", next = " << (pnext ? *pnext : ghobject_t()) << dendl;
6959 return r;
6960 }
6961
6962 int BlueStore::_collection_list(
6963 Collection *c, const ghobject_t& start, const ghobject_t& end, int max,
6964 vector<ghobject_t> *ls, ghobject_t *pnext)
6965 {
6966
6967 if (!c->exists)
6968 return -ENOENT;
6969
6970 int r = 0;
6971 ghobject_t static_next;
6972 KeyValueDB::Iterator it;
6973 string temp_start_key, temp_end_key;
6974 string start_key, end_key;
6975 bool set_next = false;
6976 string pend;
6977 bool temp;
6978
6979 if (!pnext)
6980 pnext = &static_next;
6981
6982 if (start == ghobject_t::get_max() ||
6983 start.hobj.is_max()) {
6984 goto out;
6985 }
6986 get_coll_key_range(c->cid, c->cnode.bits, &temp_start_key, &temp_end_key,
6987 &start_key, &end_key);
6988 dout(20) << __func__
6989 << " range " << pretty_binary_string(temp_start_key)
6990 << " to " << pretty_binary_string(temp_end_key)
6991 << " and " << pretty_binary_string(start_key)
6992 << " to " << pretty_binary_string(end_key)
6993 << " start " << start << dendl;
6994 it = db->get_iterator(PREFIX_OBJ);
6995 if (start == ghobject_t() ||
6996 start.hobj == hobject_t() ||
6997 start == c->cid.get_min_hobj()) {
6998 it->upper_bound(temp_start_key);
6999 temp = true;
7000 } else {
7001 string k;
7002 get_object_key(cct, start, &k);
7003 if (start.hobj.is_temp()) {
7004 temp = true;
7005 assert(k >= temp_start_key && k < temp_end_key);
7006 } else {
7007 temp = false;
7008 assert(k >= start_key && k < end_key);
7009 }
7010 dout(20) << " start from " << pretty_binary_string(k)
7011 << " temp=" << (int)temp << dendl;
7012 it->lower_bound(k);
7013 }
7014 if (end.hobj.is_max()) {
7015 pend = temp ? temp_end_key : end_key;
7016 } else {
7017 get_object_key(cct, end, &end_key);
7018 if (end.hobj.is_temp()) {
7019 if (temp)
7020 pend = end_key;
7021 else
7022 goto out;
7023 } else {
7024 pend = temp ? temp_end_key : end_key;
7025 }
7026 }
7027 dout(20) << __func__ << " pend " << pretty_binary_string(pend) << dendl;
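// walk the temp key range first (when applicable), then fall through
// to the non-temp range once it is exhausted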
7028 while (true) {
7029 if (!it->valid() || it->key() >= pend) {
7030 if (!it->valid())
7031 dout(20) << __func__ << " iterator not valid (end of db?)" << dendl;
7032 else
7033 dout(20) << __func__ << " key " << pretty_binary_string(it->key())
7034 << " >= " << end << dendl;
7035 if (temp) {
7036 if (end.hobj.is_temp()) {
7037 break;
7038 }
7039 dout(30) << __func__ << " switch to non-temp namespace" << dendl;
7040 temp = false;
7041 it->upper_bound(start_key);
7042 pend = end_key;
7043 dout(30) << __func__ << " pend " << pretty_binary_string(pend) << dendl;
7044 continue;
7045 }
7046 break;
7047 }
7048 dout(30) << __func__ << " key " << pretty_binary_string(it->key()) << dendl;
7049 if (is_extent_shard_key(it->key())) {
7050 it->next();
7051 continue;
7052 }
7053 ghobject_t oid;
7054 int r = get_key_object(it->key(), &oid);
7055 assert(r == 0);
7056 dout(20) << __func__ << " oid " << oid << " end " << end << dendl;
7057 if (ls->size() >= (unsigned)max) {
7058 dout(20) << __func__ << " reached max " << max << dendl;
7059 *pnext = oid;
7060 set_next = true;
7061 break;
7062 }
7063 ls->push_back(oid);
7064 it->next();
7065 }
7066 out:
7067 if (!set_next) {
7068 *pnext = ghobject_t::get_max();
7069 }
7070
7071 return r;
7072 }
7073
7074 int BlueStore::omap_get(
7075 const coll_t& cid, ///< [in] Collection containing oid
7076 const ghobject_t &oid, ///< [in] Object containing omap
7077 bufferlist *header, ///< [out] omap header
7078 map<string, bufferlist> *out ///< [out] Key to value map
7079 )
7080 {
7081 CollectionHandle c = _get_collection(cid);
7082 if (!c)
7083 return -ENOENT;
7084 return omap_get(c, oid, header, out);
7085 }
7086
7087 int BlueStore::omap_get(
7088 CollectionHandle &c_, ///< [in] Collection containing oid
7089 const ghobject_t &oid, ///< [in] Object containing omap
7090 bufferlist *header, ///< [out] omap header
7091 map<string, bufferlist> *out ///< [out] Key to value map
7092 )
7093 {
7094 Collection *c = static_cast<Collection *>(c_.get());
7095 dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
7096 if (!c->exists)
7097 return -ENOENT;
7098 RWLock::RLocker l(c->lock);
7099 int r = 0;
7100 OnodeRef o = c->get_onode(oid, false);
7101 if (!o || !o->exists) {
7102 r = -ENOENT;
7103 goto out;
7104 }
7105 if (!o->onode.has_omap())
7106 goto out;
7107 o->flush();
7108 {
7109 KeyValueDB::Iterator it = db->get_iterator(PREFIX_OMAP);
7110 string head, tail;
7111 get_omap_header(o->onode.nid, &head);
7112 get_omap_tail(o->onode.nid, &tail);
7113 it->lower_bound(head);
7114 while (it->valid()) {
7115 if (it->key() == head) {
7116 dout(30) << __func__ << " got header" << dendl;
7117 *header = it->value();
7118 } else if (it->key() >= tail) {
7119 dout(30) << __func__ << " reached tail" << dendl;
7120 break;
7121 } else {
7122 string user_key;
7123 decode_omap_key(it->key(), &user_key);
7124 dout(30) << __func__ << " got " << pretty_binary_string(it->key())
7125 << " -> " << user_key << dendl;
7126 (*out)[user_key] = it->value();
7127 }
7128 it->next();
7129 }
7130 }
7131 out:
7132 dout(10) << __func__ << " " << c->get_cid() << " oid " << oid << " = " << r
7133 << dendl;
7134 return r;
7135 }
7136
7137 int BlueStore::omap_get_header(
7138 const coll_t& cid, ///< [in] Collection containing oid
7139 const ghobject_t &oid, ///< [in] Object containing omap
7140 bufferlist *header, ///< [out] omap header
7141 bool allow_eio ///< [in] don't assert on eio
7142 )
7143 {
7144 CollectionHandle c = _get_collection(cid);
7145 if (!c)
7146 return -ENOENT;
7147 return omap_get_header(c, oid, header, allow_eio);
7148 }
7149
7150 int BlueStore::omap_get_header(
7151 CollectionHandle &c_, ///< [in] Collection containing oid
7152 const ghobject_t &oid, ///< [in] Object containing omap
7153 bufferlist *header, ///< [out] omap header
7154 bool allow_eio ///< [in] don't assert on eio
7155 )
7156 {
7157 Collection *c = static_cast<Collection *>(c_.get());
7158 dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
7159 if (!c->exists)
7160 return -ENOENT;
7161 RWLock::RLocker l(c->lock);
7162 int r = 0;
7163 OnodeRef o = c->get_onode(oid, false);
7164 if (!o || !o->exists) {
7165 r = -ENOENT;
7166 goto out;
7167 }
7168 if (!o->onode.has_omap())
7169 goto out;
7170 o->flush();
7171 {
7172 string head;
7173 get_omap_header(o->onode.nid, &head);
7174 if (db->get(PREFIX_OMAP, head, header) >= 0) {
7175 dout(30) << __func__ << " got header" << dendl;
7176 } else {
7177 dout(30) << __func__ << " no header" << dendl;
7178 }
7179 }
7180 out:
7181 dout(10) << __func__ << " " << c->get_cid() << " oid " << oid << " = " << r
7182 << dendl;
7183 return r;
7184 }
7185
7186 int BlueStore::omap_get_keys(
7187 const coll_t& cid, ///< [in] Collection containing oid
7188 const ghobject_t &oid, ///< [in] Object containing omap
7189 set<string> *keys ///< [out] Keys defined on oid
7190 )
7191 {
7192 CollectionHandle c = _get_collection(cid);
7193 if (!c)
7194 return -ENOENT;
7195 return omap_get_keys(c, oid, keys);
7196 }
7197
7198 int BlueStore::omap_get_keys(
7199 CollectionHandle &c_, ///< [in] Collection containing oid
7200 const ghobject_t &oid, ///< [in] Object containing omap
7201 set<string> *keys ///< [out] Keys defined on oid
7202 )
7203 {
7204 Collection *c = static_cast<Collection *>(c_.get());
7205 dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
7206 if (!c->exists)
7207 return -ENOENT;
7208 RWLock::RLocker l(c->lock);
7209 int r = 0;
7210 OnodeRef o = c->get_onode(oid, false);
7211 if (!o || !o->exists) {
7212 r = -ENOENT;
7213 goto out;
7214 }
7215 if (!o->onode.has_omap())
7216 goto out;
7217 o->flush();
7218 {
7219 KeyValueDB::Iterator it = db->get_iterator(PREFIX_OMAP);
7220 string head, tail;
7221 get_omap_key(o->onode.nid, string(), &head);
7222 get_omap_tail(o->onode.nid, &tail);
7223 it->lower_bound(head);
7224 while (it->valid()) {
7225 if (it->key() >= tail) {
7226 dout(30) << __func__ << " reached tail" << dendl;
7227 break;
7228 }
7229 string user_key;
7230 decode_omap_key(it->key(), &user_key);
7231 dout(30) << __func__ << " got " << pretty_binary_string(it->key())
7232 << " -> " << user_key << dendl;
7233 keys->insert(user_key);
7234 it->next();
7235 }
7236 }
7237 out:
7238 dout(10) << __func__ << " " << c->get_cid() << " oid " << oid << " = " << r
7239 << dendl;
7240 return r;
7241 }
7242
7243 int BlueStore::omap_get_values(
7244 const coll_t& cid, ///< [in] Collection containing oid
7245 const ghobject_t &oid, ///< [in] Object containing omap
7246 const set<string> &keys, ///< [in] Keys to get
7247 map<string, bufferlist> *out ///< [out] Returned keys and values
7248 )
7249 {
7250 CollectionHandle c = _get_collection(cid);
7251 if (!c)
7252 return -ENOENT;
7253 return omap_get_values(c, oid, keys, out);
7254 }
7255
7256 int BlueStore::omap_get_values(
7257 CollectionHandle &c_, ///< [in] Collection containing oid
7258 const ghobject_t &oid, ///< [in] Object containing omap
7259 const set<string> &keys, ///< [in] Keys to get
7260 map<string, bufferlist> *out ///< [out] Returned keys and values
7261 )
7262 {
7263 Collection *c = static_cast<Collection *>(c_.get());
7264 dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
7265 if (!c->exists)
7266 return -ENOENT;
7267 RWLock::RLocker l(c->lock);
7268 int r = 0;
7269 string final_key;
7270 OnodeRef o = c->get_onode(oid, false);
7271 if (!o || !o->exists) {
7272 r = -ENOENT;
7273 goto out;
7274 }
7275 if (!o->onode.has_omap())
7276 goto out;
7277 o->flush();
7278 _key_encode_u64(o->onode.nid, &final_key);
7279 final_key.push_back('.');
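// final_key is now the 8-byte encoded nid plus a '.' separator (the
// 9-byte prefix preserved by the resize() below)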
7280 for (set<string>::const_iterator p = keys.begin(); p != keys.end(); ++p) {
7281 final_key.resize(9); // keep prefix
7282 final_key += *p;
7283 bufferlist val;
7284 if (db->get(PREFIX_OMAP, final_key, &val) >= 0) {
7285 dout(30) << __func__ << " got " << pretty_binary_string(final_key)
7286 << " -> " << *p << dendl;
7287 out->insert(make_pair(*p, val));
7288 }
7289 }
7290 out:
7291 dout(10) << __func__ << " " << c->get_cid() << " oid " << oid << " = " << r
7292 << dendl;
7293 return r;
7294 }
7295
7296 int BlueStore::omap_check_keys(
7297 const coll_t& cid, ///< [in] Collection containing oid
7298 const ghobject_t &oid, ///< [in] Object containing omap
7299 const set<string> &keys, ///< [in] Keys to check
7300 set<string> *out ///< [out] Subset of keys defined on oid
7301 )
7302 {
7303 CollectionHandle c = _get_collection(cid);
7304 if (!c)
7305 return -ENOENT;
7306 return omap_check_keys(c, oid, keys, out);
7307 }
7308
7309 int BlueStore::omap_check_keys(
7310 CollectionHandle &c_, ///< [in] Collection containing oid
7311 const ghobject_t &oid, ///< [in] Object containing omap
7312 const set<string> &keys, ///< [in] Keys to check
7313 set<string> *out ///< [out] Subset of keys defined on oid
7314 )
7315 {
7316 Collection *c = static_cast<Collection *>(c_.get());
7317 dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
7318 if (!c->exists)
7319 return -ENOENT;
7320 RWLock::RLocker l(c->lock);
7321 int r = 0;
7322 string final_key;
7323 OnodeRef o = c->get_onode(oid, false);
7324 if (!o || !o->exists) {
7325 r = -ENOENT;
7326 goto out;
7327 }
7328 if (!o->onode.has_omap())
7329 goto out;
7330 o->flush();
7331 _key_encode_u64(o->onode.nid, &final_key);
7332 final_key.push_back('.');
7333 for (set<string>::const_iterator p = keys.begin(); p != keys.end(); ++p) {
7334 final_key.resize(9); // keep prefix
7335 final_key += *p;
7336 bufferlist val;
7337 if (db->get(PREFIX_OMAP, final_key, &val) >= 0) {
7338 dout(30) << __func__ << " have " << pretty_binary_string(final_key)
7339 << " -> " << *p << dendl;
7340 out->insert(*p);
7341 } else {
7342 dout(30) << __func__ << " miss " << pretty_binary_string(final_key)
7343 << " -> " << *p << dendl;
7344 }
7345 }
7346 out:
7347 dout(10) << __func__ << " " << c->get_cid() << " oid " << oid << " = " << r
7348 << dendl;
7349 return r;
7350 }
7351
7352 ObjectMap::ObjectMapIterator BlueStore::get_omap_iterator(
7353 const coll_t& cid, ///< [in] collection
7354 const ghobject_t &oid ///< [in] object
7355 )
7356 {
7357 CollectionHandle c = _get_collection(cid);
7358 if (!c) {
7359 dout(10) << __func__ << " " << cid << " doesn't exist" << dendl;
7360 return ObjectMap::ObjectMapIterator();
7361 }
7362 return get_omap_iterator(c, oid);
7363 }
7364
7365 ObjectMap::ObjectMapIterator BlueStore::get_omap_iterator(
7366 CollectionHandle &c_, ///< [in] collection
7367 const ghobject_t &oid ///< [in] object
7368 )
7369 {
7370 Collection *c = static_cast<Collection *>(c_.get());
7371 dout(10) << __func__ << " " << c->get_cid() << " " << oid << dendl;
7372 if (!c->exists) {
7373 return ObjectMap::ObjectMapIterator();
7374 }
7375 RWLock::RLocker l(c->lock);
7376 OnodeRef o = c->get_onode(oid, false);
7377 if (!o || !o->exists) {
7378 dout(10) << __func__ << " " << oid << " doesn't exist" << dendl;
7379 return ObjectMap::ObjectMapIterator();
7380 }
7381 o->flush();
7382 dout(10) << __func__ << " has_omap = " << (int)o->onode.has_omap() <<dendl;
7383 KeyValueDB::Iterator it = db->get_iterator(PREFIX_OMAP);
7384 return ObjectMap::ObjectMapIterator(new OmapIteratorImpl(c, o, it));
7385 }
7386
7387 // -----------------
7388 // write helpers
7389
7390 void BlueStore::_prepare_ondisk_format_super(KeyValueDB::Transaction& t)
7391 {
7392 dout(10) << __func__ << " ondisk_format " << ondisk_format
7393 << " min_compat_ondisk_format " << min_compat_ondisk_format
7394 << dendl;
7395 assert(ondisk_format == latest_ondisk_format);
7396 {
7397 bufferlist bl;
7398 ::encode(ondisk_format, bl);
7399 t->set(PREFIX_SUPER, "ondisk_format", bl);
7400 }
7401 {
7402 bufferlist bl;
7403 ::encode(min_compat_ondisk_format, bl);
7404 t->set(PREFIX_SUPER, "min_compat_ondisk_format", bl);
7405 }
7406 }
7407
7408 int BlueStore::_open_super_meta()
7409 {
7410 // nid
7411 {
7412 nid_max = 0;
7413 bufferlist bl;
7414 db->get(PREFIX_SUPER, "nid_max", &bl);
7415 bufferlist::iterator p = bl.begin();
7416 try {
7417 uint64_t v;
7418 ::decode(v, p);
7419 nid_max = v;
7420 } catch (buffer::error& e) {
7421 derr << __func__ << " unable to read nid_max" << dendl;
7422 return -EIO;
7423 }
7424 dout(10) << __func__ << " old nid_max " << nid_max << dendl;
7425 nid_last = nid_max.load();
7426 }
7427
7428 // blobid
7429 {
7430 blobid_max = 0;
7431 bufferlist bl;
7432 db->get(PREFIX_SUPER, "blobid_max", &bl);
7433 bufferlist::iterator p = bl.begin();
7434 try {
7435 uint64_t v;
7436 ::decode(v, p);
7437 blobid_max = v;
7438 } catch (buffer::error& e) {
7439 derr << __func__ << " unable to read blobid_max" << dendl;
7440 return -EIO;
7441 }
7442 dout(10) << __func__ << " old blobid_max " << blobid_max << dendl;
7443 blobid_last = blobid_max.load();
7444 }
7445
7446 // freelist
7447 {
7448 bufferlist bl;
7449 db->get(PREFIX_SUPER, "freelist_type", &bl);
7450 if (bl.length()) {
7451 freelist_type = std::string(bl.c_str(), bl.length());
7452 dout(10) << __func__ << " freelist_type " << freelist_type << dendl;
7453 } else {
7454 assert("Not Support extent freelist manager" == 0);
7455 }
7456 }
7457
7458 // bluefs alloc
7459 if (cct->_conf->bluestore_bluefs) {
7460 bluefs_extents.clear();
7461 bufferlist bl;
7462 db->get(PREFIX_SUPER, "bluefs_extents", &bl);
7463 bufferlist::iterator p = bl.begin();
7464 try {
7465 ::decode(bluefs_extents, p);
7466 }
7467 catch (buffer::error& e) {
7468 derr << __func__ << " unable to read bluefs_extents" << dendl;
7469 return -EIO;
7470 }
7471 dout(10) << __func__ << " bluefs_extents 0x" << std::hex << bluefs_extents
7472 << std::dec << dendl;
7473 }
7474
7475 // ondisk format
7476 int32_t compat_ondisk_format = 0;
7477 {
7478 bufferlist bl;
7479 int r = db->get(PREFIX_SUPER, "ondisk_format", &bl);
7480 if (r < 0) {
7481 // base case: kraken bluestore is v1 and readable by v1
7482 dout(20) << __func__ << " missing ondisk_format; assuming kraken"
7483 << dendl;
7484 ondisk_format = 1;
7485 compat_ondisk_format = 1;
7486 } else {
7487 auto p = bl.begin();
7488 try {
7489 ::decode(ondisk_format, p);
7490 } catch (buffer::error& e) {
7491 derr << __func__ << " unable to read ondisk_format" << dendl;
7492 return -EIO;
7493 }
7494 bl.clear();
7495 {
7496 r = db->get(PREFIX_SUPER, "min_compat_ondisk_format", &bl);
7497 assert(!r);
7498 auto p = bl.begin();
7499 try {
7500 ::decode(compat_ondisk_format, p);
7501 } catch (buffer::error& e) {
7502 derr << __func__ << " unable to read compat_ondisk_format" << dendl;
7503 return -EIO;
7504 }
7505 }
7506 }
7507 dout(10) << __func__ << " ondisk_format " << ondisk_format
7508 << " compat_ondisk_format " << compat_ondisk_format
7509 << dendl;
7510 }
7511
7512 if (latest_ondisk_format < compat_ondisk_format) {
7513 derr << __func__ << " compat_ondisk_format is "
7514 << compat_ondisk_format << " but we only understand version "
7515 << latest_ondisk_format << dendl;
7516 return -EPERM;
7517 }
7518 if (ondisk_format < latest_ondisk_format) {
7519 int r = _upgrade_super();
7520 if (r < 0) {
7521 return r;
7522 }
7523 }
7524
7525 {
7526 bufferlist bl;
7527 db->get(PREFIX_SUPER, "min_alloc_size", &bl);
7528 auto p = bl.begin();
7529 try {
7530 uint64_t val;
7531 ::decode(val, p);
7532 min_alloc_size = val;
7533 min_alloc_size_order = ctz(val);
7534 assert(min_alloc_size == 1u << min_alloc_size_order);
7535 } catch (buffer::error& e) {
7536 derr << __func__ << " unable to read min_alloc_size" << dendl;
7537 return -EIO;
7538 }
7539 dout(10) << __func__ << " min_alloc_size 0x" << std::hex << min_alloc_size
7540 << std::dec << dendl;
7541 }
7542 _open_statfs();
7543 _set_alloc_sizes();
7544 _set_throttle_params();
7545
7546 _set_csum();
7547 _set_compression();
7548 _set_blob_size();
7549
7550 return 0;
7551 }
7552
7553 int BlueStore::_upgrade_super()
7554 {
7555 dout(1) << __func__ << " from " << ondisk_format << ", latest "
7556 << latest_ondisk_format << dendl;
7557 assert(ondisk_format > 0);
7558 assert(ondisk_format < latest_ondisk_format);
7559
7560 if (ondisk_format == 1) {
7561 // changes:
7562 // - super: added ondisk_format
7563 // - super: added min_readable_ondisk_format
7564 // - super: added min_compat_ondisk_format
7565 // - super: added min_alloc_size
7566 // - super: removed min_min_alloc_size
7567 KeyValueDB::Transaction t = db->get_transaction();
7568 {
7569 bufferlist bl;
7570 db->get(PREFIX_SUPER, "min_min_alloc_size", &bl);
7571 auto p = bl.begin();
7572 try {
7573 uint64_t val;
7574 ::decode(val, p);
7575 min_alloc_size = val;
7576 } catch (buffer::error& e) {
7577 derr << __func__ << " failed to read min_min_alloc_size" << dendl;
7578 return -EIO;
7579 }
7580 t->set(PREFIX_SUPER, "min_alloc_size", bl);
7581 t->rmkey(PREFIX_SUPER, "min_min_alloc_size");
7582 }
7583 ondisk_format = 2;
7584 _prepare_ondisk_format_super(t);
7585 int r = db->submit_transaction_sync(t);
7586 assert(r == 0);
7587 }
7588
7589 // done
7590 dout(1) << __func__ << " done" << dendl;
7591 return 0;
7592 }
7593
7594 void BlueStore::_assign_nid(TransContext *txc, OnodeRef o)
7595 {
7596 if (o->onode.nid) {
7597 assert(o->exists);
7598 return;
7599 }
7600 uint64_t nid = ++nid_last;
7601 dout(20) << __func__ << " " << nid << dendl;
7602 o->onode.nid = nid;
7603 txc->last_nid = nid;
7604 o->exists = true;
7605 }
7606
7607 uint64_t BlueStore::_assign_blobid(TransContext *txc)
7608 {
7609 uint64_t bid = ++blobid_last;
7610 dout(20) << __func__ << " " << bid << dendl;
7611 txc->last_blobid = bid;
7612 return bid;
7613 }
7614
7615 void BlueStore::get_db_statistics(Formatter *f)
7616 {
7617 db->get_statistics(f);
7618 }
7619
7620 BlueStore::TransContext *BlueStore::_txc_create(OpSequencer *osr)
7621 {
7622 TransContext *txc = new TransContext(cct, osr);
7623 txc->t = db->get_transaction();
7624 osr->queue_new(txc);
7625 dout(20) << __func__ << " osr " << osr << " = " << txc
7626 << " seq " << txc->seq << dendl;
7627 return txc;
7628 }
7629
7630 void BlueStore::_txc_calc_cost(TransContext *txc)
7631 {
7632 // this is about the simplest model for transaction cost you can
7633 // imagine. there is a fixed overhead cost, expressed as a minimum of
7634 // one "io"; each additional "io" has a configurable cost (with
7635 // different hdd and ssd defaults); and the transaction's byte count
7636 // is added on top of that.
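// illustrative example (the numbers are hypothetical, not the configured
// defaults): a txc whose pending aios carry 3 iovecs in total and 64 KiB
// of data is charged
//   cost = (1 + 3) * cost_per_io + 65536
// so with cost_per_io = 4000 that comes to 81536.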
7637 int ios = 1; // one "io" for the kv commit
7638 for (auto& p : txc->ioc.pending_aios) {
7639 ios += p.iov.size();
7640 }
7641 auto cost = throttle_cost_per_io.load();
7642 txc->cost = ios * cost + txc->bytes;
7643 dout(10) << __func__ << " " << txc << " cost " << txc->cost << " ("
7644 << ios << " ios * " << cost << " + " << txc->bytes
7645 << " bytes)" << dendl;
7646 }
7647
7648 void BlueStore::_txc_update_store_statfs(TransContext *txc)
7649 {
7650 if (txc->statfs_delta.is_empty())
7651 return;
7652
7653 logger->inc(l_bluestore_allocated, txc->statfs_delta.allocated());
7654 logger->inc(l_bluestore_stored, txc->statfs_delta.stored());
7655 logger->inc(l_bluestore_compressed, txc->statfs_delta.compressed());
7656 logger->inc(l_bluestore_compressed_allocated, txc->statfs_delta.compressed_allocated());
7657 logger->inc(l_bluestore_compressed_original, txc->statfs_delta.compressed_original());
7658
7659 {
7660 std::lock_guard<std::mutex> l(vstatfs_lock);
7661 vstatfs += txc->statfs_delta;
7662 }
7663
7664 bufferlist bl;
7665 txc->statfs_delta.encode(bl);
7666
7667 txc->t->merge(PREFIX_STAT, "bluestore_statfs", bl);
7668 txc->statfs_delta.reset();
7669 }
7670
7671 void BlueStore::_txc_state_proc(TransContext *txc)
7672 {
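// typical lifecycle of a txc, as driven by the switch below (some of the
// transitions happen from the kv threads):
//   PREPARE -> (AIO_WAIT, only if there are pending aios) -> IO_DONE ->
//   KV_QUEUED -> KV_SUBMITTED -> KV_DONE ->
//   (DEFERRED_QUEUED -> DEFERRED_CLEANUP, only if there is a deferred
//   txn) -> FINISHING; STATE_DONE is set later, in _txc_finish().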
7673 while (true) {
7674 dout(10) << __func__ << " txc " << txc
7675 << " " << txc->get_state_name() << dendl;
7676 switch (txc->state) {
7677 case TransContext::STATE_PREPARE:
7678 txc->log_state_latency(logger, l_bluestore_state_prepare_lat);
7679 if (txc->ioc.has_pending_aios()) {
7680 txc->state = TransContext::STATE_AIO_WAIT;
7681 txc->had_ios = true;
7682 _txc_aio_submit(txc);
7683 return;
7684 }
7685 // ** fall-thru **
7686
7687 case TransContext::STATE_AIO_WAIT:
7688 txc->log_state_latency(logger, l_bluestore_state_aio_wait_lat);
7689 _txc_finish_io(txc); // may trigger blocked txc's too
7690 return;
7691
7692 case TransContext::STATE_IO_DONE:
7693 //assert(txc->osr->qlock.is_locked()); // see _txc_finish_io
7694 if (txc->had_ios) {
7695 ++txc->osr->txc_with_unstable_io;
7696 }
7697 txc->log_state_latency(logger, l_bluestore_state_io_done_lat);
7698 txc->state = TransContext::STATE_KV_QUEUED;
7699 if (cct->_conf->bluestore_sync_submit_transaction) {
7700 if (txc->last_nid >= nid_max ||
7701 txc->last_blobid >= blobid_max) {
7702 dout(20) << __func__
7703 << " last_{nid,blobid} exceeds max, submit via kv thread"
7704 << dendl;
7705 } else if (txc->osr->kv_committing_serially) {
7706 dout(20) << __func__ << " prior txc submitted via kv thread, us too"
7707 << dendl;
7708 // note: this is starvation-prone. once we have a txc in a busy
7709 // sequencer that is committing serially it is possible to keep
7710 // submitting new transactions fast enough that we get stuck doing
7711 // so. the alternative is to block here... fixme?
7712 } else if (txc->osr->txc_with_unstable_io) {
7713 dout(20) << __func__ << " prior txc(s) with unstable ios "
7714 << txc->osr->txc_with_unstable_io.load() << dendl;
7715 } else if (cct->_conf->bluestore_debug_randomize_serial_transaction &&
7716 rand() % cct->_conf->bluestore_debug_randomize_serial_transaction
7717 == 0) {
7718 dout(20) << __func__ << " DEBUG randomly forcing submit via kv thread"
7719 << dendl;
7720 } else {
7721 txc->state = TransContext::STATE_KV_SUBMITTED;
7722 int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction(txc->t);
7723 assert(r == 0);
7724 _txc_applied_kv(txc);
7725 }
7726 }
7727 {
7728 std::lock_guard<std::mutex> l(kv_lock);
7729 kv_queue.push_back(txc);
7730 kv_cond.notify_one();
7731 if (txc->state != TransContext::STATE_KV_SUBMITTED) {
7732 kv_queue_unsubmitted.push_back(txc);
7733 ++txc->osr->kv_committing_serially;
7734 }
7735 if (txc->had_ios)
7736 kv_ios++;
7737 kv_throttle_costs += txc->cost;
7738 }
7739 return;
7740 case TransContext::STATE_KV_SUBMITTED:
7741 txc->log_state_latency(logger, l_bluestore_state_kv_committing_lat);
7742 txc->state = TransContext::STATE_KV_DONE;
7743 _txc_committed_kv(txc);
7744 // ** fall-thru **
7745
7746 case TransContext::STATE_KV_DONE:
7747 txc->log_state_latency(logger, l_bluestore_state_kv_done_lat);
7748 if (txc->deferred_txn) {
7749 txc->state = TransContext::STATE_DEFERRED_QUEUED;
7750 _deferred_queue(txc);
7751 return;
7752 }
7753 txc->state = TransContext::STATE_FINISHING;
7754 break;
7755
7756 case TransContext::STATE_DEFERRED_CLEANUP:
7757 txc->log_state_latency(logger, l_bluestore_state_deferred_cleanup_lat);
7758 txc->state = TransContext::STATE_FINISHING;
7759 // ** fall-thru **
7760
7761 case TransContext::STATE_FINISHING:
7762 txc->log_state_latency(logger, l_bluestore_state_finishing_lat);
7763 _txc_finish(txc);
7764 return;
7765
7766 default:
7767 derr << __func__ << " unexpected txc " << txc
7768 << " state " << txc->get_state_name() << dendl;
7769 assert(0 == "unexpected txc state");
7770 return;
7771 }
7772 }
7773 }
7774
7775 void BlueStore::_txc_finish_io(TransContext *txc)
7776 {
7777 dout(20) << __func__ << " " << txc << dendl;
7778
7779 /*
7780 * we need to preserve the order of kv transactions,
7781 * even though aio will complete in any order.
7782 */
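// illustrative example: if the sequencer queue holds, front to back,
//   T1 (KV_SUBMITTED), T2 (IO_DONE), T3 (this txc), T4 (AIO_WAIT)
// the backward scan below stops at T1 (already past IO_DONE), then the
// forward loop advances T2 and T3 in submission order; T4 stays put
// until its own aio completes.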
7783
7784 OpSequencer *osr = txc->osr.get();
7785 std::lock_guard<std::mutex> l(osr->qlock);
7786 txc->state = TransContext::STATE_IO_DONE;
7787
7788 // release aio contexts (including pinned buffers).
7789 txc->ioc.running_aios.clear();
7790
7791 OpSequencer::q_list_t::iterator p = osr->q.iterator_to(*txc);
7792 while (p != osr->q.begin()) {
7793 --p;
7794 if (p->state < TransContext::STATE_IO_DONE) {
7795 dout(20) << __func__ << " " << txc << " blocked by " << &*p << " "
7796 << p->get_state_name() << dendl;
7797 return;
7798 }
7799 if (p->state > TransContext::STATE_IO_DONE) {
7800 ++p;
7801 break;
7802 }
7803 }
7804 do {
7805 _txc_state_proc(&*p++);
7806 } while (p != osr->q.end() &&
7807 p->state == TransContext::STATE_IO_DONE);
7808
7809 if (osr->kv_submitted_waiters &&
7810 osr->_is_all_kv_submitted()) {
7811 osr->qcond.notify_all();
7812 }
7813 }
7814
7815 void BlueStore::_txc_write_nodes(TransContext *txc, KeyValueDB::Transaction t)
7816 {
7817 dout(20) << __func__ << " txc " << txc
7818 << " onodes " << txc->onodes
7819 << " shared_blobs " << txc->shared_blobs
7820 << dendl;
7821
7822 // finalize onodes
7823 for (auto o : txc->onodes) {
7824 // finalize extent_map shards
7825 o->extent_map.update(t, false);
7826 if (o->extent_map.needs_reshard()) {
7827 o->extent_map.reshard(db, t);
7828 o->extent_map.update(t, true);
7829 if (o->extent_map.needs_reshard()) {
7830 dout(20) << __func__ << " warning: still wants reshard, check options?"
7831 << dendl;
7832 o->extent_map.clear_needs_reshard();
7833 }
7834 logger->inc(l_bluestore_onode_reshard);
7835 }
7836
7837 // bound encode
7838 size_t bound = 0;
7839 denc(o->onode, bound);
7840 o->extent_map.bound_encode_spanning_blobs(bound);
7841 if (o->onode.extent_map_shards.empty()) {
7842 denc(o->extent_map.inline_bl, bound);
7843 }
7844
7845 // encode
7846 bufferlist bl;
7847 unsigned onode_part, blob_part, extent_part;
7848 {
7849 auto p = bl.get_contiguous_appender(bound, true);
7850 denc(o->onode, p);
7851 onode_part = p.get_logical_offset();
7852 o->extent_map.encode_spanning_blobs(p);
7853 blob_part = p.get_logical_offset() - onode_part;
7854 if (o->onode.extent_map_shards.empty()) {
7855 denc(o->extent_map.inline_bl, p);
7856 }
7857 extent_part = p.get_logical_offset() - onode_part - blob_part;
7858 }
7859
7860 dout(20) << " onode " << o->oid << " is " << bl.length()
7861 << " (" << onode_part << " bytes onode + "
7862 << blob_part << " bytes spanning blobs + "
7863 << extent_part << " bytes inline extents)"
7864 << dendl;
7865 t->set(PREFIX_OBJ, o->key.c_str(), o->key.size(), bl);
7866 o->flushing_count++;
7867 }
7868
7869 // objects we modified but didn't affect the onode
7870 auto p = txc->modified_objects.begin();
7871 while (p != txc->modified_objects.end()) {
7872 if (txc->onodes.count(*p) == 0) {
7873 (*p)->flushing_count++;
7874 ++p;
7875 } else {
7876 // remove dups with onodes list to avoid problems in _txc_finish
7877 p = txc->modified_objects.erase(p);
7878 }
7879 }
7880
7881 // finalize shared_blobs
7882 for (auto sb : txc->shared_blobs) {
7883 string key;
7884 auto sbid = sb->get_sbid();
7885 get_shared_blob_key(sbid, &key);
7886 if (sb->persistent->empty()) {
7887 dout(20) << " shared_blob 0x" << std::hex << sbid << std::dec
7888 << " is empty" << dendl;
7889 t->rmkey(PREFIX_SHARED_BLOB, key);
7890 } else {
7891 bufferlist bl;
7892 ::encode(*(sb->persistent), bl);
7893 dout(20) << " shared_blob 0x" << std::hex << sbid << std::dec
7894 << " is " << bl.length() << " " << *sb << dendl;
7895 t->set(PREFIX_SHARED_BLOB, key, bl);
7896 }
7897 }
7898 }
7899
7900 void BlueStore::BSPerfTracker::update_from_perfcounters(
7901 PerfCounters &logger)
7902 {
7903 os_commit_latency.consume_next(
7904 logger.get_tavg_ms(
7905 l_bluestore_commit_lat));
7906 os_apply_latency.consume_next(
7907 logger.get_tavg_ms(
7908 l_bluestore_commit_lat));
7909 }
7910
7911 void BlueStore::_txc_finalize_kv(TransContext *txc, KeyValueDB::Transaction t)
7912 {
7913 dout(20) << __func__ << " txc " << txc << std::hex
7914 << " allocated 0x" << txc->allocated
7915 << " released 0x" << txc->released
7916 << std::dec << dendl;
7917
7918 // We have to handle the case where we allocate *and* deallocate the
7919 // same region in this transaction. The freelist doesn't like that.
7920 // (Actually, the only thing that cares is the BitmapFreelistManager
7921 // debug check. But that's important.)
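// illustrative example: if this transaction has
//   allocated = {0x10000~0x8000} and released = {0x14000~0x8000}
// the overlap 0x14000~0x4000 is subtracted from both sides, so the
// freelist only sees allocate(0x10000~0x4000) and release(0x18000~0x4000).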
7922 interval_set<uint64_t> tmp_allocated, tmp_released;
7923 interval_set<uint64_t> *pallocated = &txc->allocated;
7924 interval_set<uint64_t> *preleased = &txc->released;
7925 if (!txc->allocated.empty() && !txc->released.empty()) {
7926 interval_set<uint64_t> overlap;
7927 overlap.intersection_of(txc->allocated, txc->released);
7928 if (!overlap.empty()) {
7929 tmp_allocated = txc->allocated;
7930 tmp_allocated.subtract(overlap);
7931 tmp_released = txc->released;
7932 tmp_released.subtract(overlap);
7933 dout(20) << __func__ << " overlap 0x" << std::hex << overlap
7934 << ", new allocated 0x" << tmp_allocated
7935 << " released 0x" << tmp_released << std::dec
7936 << dendl;
7937 pallocated = &tmp_allocated;
7938 preleased = &tmp_released;
7939 }
7940 }
7941
7942 // update freelist with non-overlap sets
7943 for (interval_set<uint64_t>::iterator p = pallocated->begin();
7944 p != pallocated->end();
7945 ++p) {
7946 fm->allocate(p.get_start(), p.get_len(), t);
7947 }
7948 for (interval_set<uint64_t>::iterator p = preleased->begin();
7949 p != preleased->end();
7950 ++p) {
7951 dout(20) << __func__ << " release 0x" << std::hex << p.get_start()
7952 << "~" << p.get_len() << std::dec << dendl;
7953 fm->release(p.get_start(), p.get_len(), t);
7954 }
7955
7956 _txc_update_store_statfs(txc);
7957 }
7958
7959 void BlueStore::_txc_applied_kv(TransContext *txc)
7960 {
7961 for (auto ls : { &txc->onodes, &txc->modified_objects }) {
7962 for (auto& o : *ls) {
7963 dout(20) << __func__ << " onode " << o << " had " << o->flushing_count
7964 << dendl;
7965 if (--o->flushing_count == 0) {
7966 std::lock_guard<std::mutex> l(o->flush_lock);
7967 o->flush_cond.notify_all();
7968 }
7969 }
7970 }
7971 }
7972
7973 void BlueStore::_txc_committed_kv(TransContext *txc)
7974 {
7975 dout(20) << __func__ << " txc " << txc << dendl;
7976
7977 // warning: we're calling onreadable_sync inside the sequencer lock
7978 if (txc->onreadable_sync) {
7979 txc->onreadable_sync->complete(0);
7980 txc->onreadable_sync = NULL;
7981 }
7982 unsigned n = txc->osr->parent->shard_hint.hash_to_shard(m_finisher_num);
7983 if (txc->oncommit) {
7984 logger->tinc(l_bluestore_commit_lat, ceph_clock_now() - txc->start);
7985 finishers[n]->queue(txc->oncommit);
7986 txc->oncommit = NULL;
7987 }
7988 if (txc->onreadable) {
7989 finishers[n]->queue(txc->onreadable);
7990 txc->onreadable = NULL;
7991 }
7992
7993 if (!txc->oncommits.empty()) {
7994 finishers[n]->queue(txc->oncommits);
7995 }
7996 }
7997
7998 void BlueStore::_txc_finish(TransContext *txc)
7999 {
8000 dout(20) << __func__ << " " << txc << " onodes " << txc->onodes << dendl;
8001 assert(txc->state == TransContext::STATE_FINISHING);
8002
8003 for (auto& sb : txc->shared_blobs_written) {
8004 sb->bc.finish_write(sb->get_cache(), txc->seq);
8005 }
8006 txc->shared_blobs_written.clear();
8007
8008 while (!txc->removed_collections.empty()) {
8009 _queue_reap_collection(txc->removed_collections.front());
8010 txc->removed_collections.pop_front();
8011 }
8012
8013 OpSequencerRef osr = txc->osr;
8014 CollectionRef c;
8015 bool empty = false;
8016 bool submit_deferred = false;
8017 OpSequencer::q_list_t releasing_txc;
8018 {
8019 std::lock_guard<std::mutex> l(osr->qlock);
8020 txc->state = TransContext::STATE_DONE;
8021 bool notify = false;
8022 while (!osr->q.empty()) {
8023 TransContext *txc = &osr->q.front();
8024 dout(20) << __func__ << " txc " << txc << " " << txc->get_state_name()
8025 << dendl;
8026 if (txc->state != TransContext::STATE_DONE) {
8027 if (txc->state == TransContext::STATE_PREPARE &&
8028 deferred_aggressive) {
8029 // for _osr_drain_preceding()
8030 notify = true;
8031 }
8032 if (txc->state == TransContext::STATE_DEFERRED_QUEUED &&
8033 osr->q.size() > g_conf->bluestore_max_deferred_txc) {
8034 submit_deferred = true;
8035 }
8036 break;
8037 }
8038
8039 if (!c && txc->first_collection) {
8040 c = txc->first_collection;
8041 }
8042 osr->q.pop_front();
8043 releasing_txc.push_back(*txc);
8044 notify = true;
8045 }
8046 if (notify) {
8047 osr->qcond.notify_all();
8048 }
8049 if (osr->q.empty()) {
8050 dout(20) << __func__ << " osr " << osr << " q now empty" << dendl;
8051 empty = true;
8052 }
8053 }
8054 while (!releasing_txc.empty()) {
8055 // release to allocator only after all preceding txc's have also
8056 // finished any deferred writes that potentially land in these
8057 // blocks
8058 auto txc = &releasing_txc.front();
8059 _txc_release_alloc(txc);
8060 releasing_txc.pop_front();
8061 txc->log_state_latency(logger, l_bluestore_state_done_lat);
8062 delete txc;
8063 }
8064
8065 if (submit_deferred) {
8066 // we're pinning memory; flush! we could be more fine-grained here but
8067 // i'm not sure it's worth the bother.
8068 deferred_try_submit();
8069 }
8070
8071 if (empty && osr->zombie) {
8072 dout(10) << __func__ << " reaping empty zombie osr " << osr << dendl;
8073 osr->_unregister();
8074 }
8075 }
8076
8077 void BlueStore::_txc_release_alloc(TransContext *txc)
8078 {
8079 // update allocator with full released set
8080 if (!cct->_conf->bluestore_debug_no_reuse_blocks) {
8081 dout(10) << __func__ << " " << txc << " " << txc->released << dendl;
8082 for (interval_set<uint64_t>::iterator p = txc->released.begin();
8083 p != txc->released.end();
8084 ++p) {
8085 alloc->release(p.get_start(), p.get_len());
8086 }
8087 }
8088
8089 txc->allocated.clear();
8090 txc->released.clear();
8091 }
8092
8093 void BlueStore::_osr_drain_preceding(TransContext *txc)
8094 {
8095 OpSequencer *osr = txc->osr.get();
8096 dout(10) << __func__ << " " << txc << " osr " << osr << dendl;
8097 ++deferred_aggressive; // FIXME: maybe osr-local aggressive flag?
8098 {
8099 // submit anything pending
8100 deferred_lock.lock();
8101 if (osr->deferred_pending) {
8102 _deferred_submit_unlock(osr);
8103 } else {
8104 deferred_lock.unlock();
8105 }
8106 }
8107 {
8108 // wake up any previously finished deferred events
8109 std::lock_guard<std::mutex> l(kv_lock);
8110 kv_cond.notify_one();
8111 }
8112 osr->drain_preceding(txc);
8113 --deferred_aggressive;
8114 dout(10) << __func__ << " " << osr << " done" << dendl;
8115 }
8116
8117 void BlueStore::_osr_drain_all()
8118 {
8119 dout(10) << __func__ << dendl;
8120
8121 set<OpSequencerRef> s;
8122 {
8123 std::lock_guard<std::mutex> l(osr_lock);
8124 s = osr_set;
8125 }
8126 dout(20) << __func__ << " osr_set " << s << dendl;
8127
8128 ++deferred_aggressive;
8129 {
8130 // submit anything pending
8131 deferred_try_submit();
8132 }
8133 {
8134 // wake up any previously finished deferred events
8135 std::lock_guard<std::mutex> l(kv_lock);
8136 kv_cond.notify_one();
8137 }
8138 {
8139 std::lock_guard<std::mutex> l(kv_finalize_lock);
8140 kv_finalize_cond.notify_one();
8141 }
8142 for (auto osr : s) {
8143 dout(20) << __func__ << " drain " << osr << dendl;
8144 osr->drain();
8145 }
8146 --deferred_aggressive;
8147
8148 dout(10) << __func__ << " done" << dendl;
8149 }
8150
8151 void BlueStore::_osr_unregister_all()
8152 {
8153 set<OpSequencerRef> s;
8154 {
8155 std::lock_guard<std::mutex> l(osr_lock);
8156 s = osr_set;
8157 }
8158 dout(10) << __func__ << " " << s << dendl;
8159 for (auto osr : s) {
8160 osr->_unregister();
8161
8162 if (!osr->zombie) {
8163 // break link from Sequencer to us so that this OpSequencer
8164 // instance can die with this mount/umount cycle. note that
8165 // we assume umount() will not race against ~Sequencer.
8166 assert(osr->parent);
8167 osr->parent->p.reset();
8168 }
8169 }
8170 // nobody should be creating sequencers during umount either.
8171 {
8172 std::lock_guard<std::mutex> l(osr_lock);
8173 assert(osr_set.empty());
8174 }
8175 }
8176
8177 void BlueStore::_kv_start()
8178 {
8179 dout(10) << __func__ << dendl;
8180
8181 if (cct->_conf->bluestore_shard_finishers) {
8182 if (cct->_conf->osd_op_num_shards) {
8183 m_finisher_num = cct->_conf->osd_op_num_shards;
8184 } else {
8185 assert(bdev);
8186 if (bdev->is_rotational()) {
8187 m_finisher_num = cct->_conf->osd_op_num_shards_hdd;
8188 } else {
8189 m_finisher_num = cct->_conf->osd_op_num_shards_ssd;
8190 }
8191 }
8192 }
8193
8194 assert(m_finisher_num != 0);
8195
8196 for (int i = 0; i < m_finisher_num; ++i) {
8197 ostringstream oss;
8198 oss << "finisher-" << i;
8199 Finisher *f = new Finisher(cct, oss.str(), "finisher");
8200 finishers.push_back(f);
8201 }
8202
8203 for (auto f : finishers) {
8204 f->start();
8205 }
8206 kv_sync_thread.create("bstore_kv_sync");
8207 kv_finalize_thread.create("bstore_kv_final");
8208 }
8209
8210 void BlueStore::_kv_stop()
8211 {
8212 dout(10) << __func__ << dendl;
8213 {
8214 std::unique_lock<std::mutex> l(kv_lock);
8215 while (!kv_sync_started) {
8216 kv_cond.wait(l);
8217 }
8218 kv_stop = true;
8219 kv_cond.notify_all();
8220 }
8221 {
8222 std::unique_lock<std::mutex> l(kv_finalize_lock);
8223 while (!kv_finalize_started) {
8224 kv_finalize_cond.wait(l);
8225 }
8226 kv_finalize_stop = true;
8227 kv_finalize_cond.notify_all();
8228 }
8229 kv_sync_thread.join();
8230 kv_finalize_thread.join();
8231 {
8232 std::lock_guard<std::mutex> l(kv_lock);
8233 kv_stop = false;
8234 }
8235 {
8236 std::lock_guard<std::mutex> l(kv_finalize_lock);
8237 kv_finalize_stop = false;
8238 }
8239 dout(10) << __func__ << " stopping finishers" << dendl;
8240 for (auto f : finishers) {
8241 f->wait_for_empty();
8242 f->stop();
8243 }
8244 dout(10) << __func__ << " stopped" << dendl;
8245 }
8246
8247 void BlueStore::_kv_sync_thread()
8248 {
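// each iteration of the loop below: swap out everything queued for
// commit, flush the block device if needed so finished deferred writes
// become stable, bump nid/blobid max if we are running low, submit any
// not-yet-submitted per-txc kv transactions, commit one final
// synchronous transaction (synct), and hand the batch off to the
// finalize thread.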
8249 dout(10) << __func__ << " start" << dendl;
8250 std::unique_lock<std::mutex> l(kv_lock);
8251 assert(!kv_sync_started);
8252 kv_sync_started = true;
8253 kv_cond.notify_all();
8254 while (true) {
8255 assert(kv_committing.empty());
8256 if (kv_queue.empty() &&
8257 ((deferred_done_queue.empty() && deferred_stable_queue.empty()) ||
8258 !deferred_aggressive)) {
8259 if (kv_stop)
8260 break;
8261 dout(20) << __func__ << " sleep" << dendl;
8262 kv_cond.wait(l);
8263 dout(20) << __func__ << " wake" << dendl;
8264 } else {
8265 deque<TransContext*> kv_submitting;
8266 deque<DeferredBatch*> deferred_done, deferred_stable;
8267 uint64_t aios = 0, costs = 0;
8268
8269 dout(20) << __func__ << " committing " << kv_queue.size()
8270 << " submitting " << kv_queue_unsubmitted.size()
8271 << " deferred done " << deferred_done_queue.size()
8272 << " stable " << deferred_stable_queue.size()
8273 << dendl;
8274 kv_committing.swap(kv_queue);
8275 kv_submitting.swap(kv_queue_unsubmitted);
8276 deferred_done.swap(deferred_done_queue);
8277 deferred_stable.swap(deferred_stable_queue);
8278 aios = kv_ios;
8279 costs = kv_throttle_costs;
8280 kv_ios = 0;
8281 kv_throttle_costs = 0;
8282 utime_t start = ceph_clock_now();
8283 l.unlock();
8284
8285 dout(30) << __func__ << " committing " << kv_committing << dendl;
8286 dout(30) << __func__ << " submitting " << kv_submitting << dendl;
8287 dout(30) << __func__ << " deferred_done " << deferred_done << dendl;
8288 dout(30) << __func__ << " deferred_stable " << deferred_stable << dendl;
8289
8290 bool force_flush = false;
8291 // if bluefs shares its (only) device with the data, the bluefs commit
8292 // below flushes the device and makes deferred aios stable; then we only
8293 // force a flush if this batch did ios, there is nothing else to commit,
8294 // or we are draining aggressively. on separate devices we always flush.
8295 if (bluefs_single_shared_device && bluefs) {
8296 if (aios) {
8297 force_flush = true;
8298 } else if (kv_committing.empty() && kv_submitting.empty() &&
8299 deferred_stable.empty()) {
8300 force_flush = true; // there's nothing else to commit!
8301 } else if (deferred_aggressive) {
8302 force_flush = true;
8303 }
8304 } else
8305 force_flush = true;
8306
8307 if (force_flush) {
8308 dout(20) << __func__ << " num_aios=" << aios
8309 << " force_flush=" << (int)force_flush
8310 << ", flushing, deferred done->stable" << dendl;
8311 // flush/barrier on block device
8312 bdev->flush();
8313
8314 // if we flush then deferred done are now deferred stable
8315 deferred_stable.insert(deferred_stable.end(), deferred_done.begin(),
8316 deferred_done.end());
8317 deferred_done.clear();
8318 }
8319 utime_t after_flush = ceph_clock_now();
8320
8321 // we will use one final transaction to force a sync
8322 KeyValueDB::Transaction synct = db->get_transaction();
8323
8324 // increase {nid,blobid}_max? note that this covers both the
8325 // case where we are approaching the max and the case we passed
8326 // it. in either case, we increase the max in the earlier txn
8327 // we submit.
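// illustrative example (the prealloc value here is hypothetical): with
// bluestore_nid_prealloc = 1024, once nid_last climbs past
// nid_max - 512 we persist a new nid_max of nid_last + 1024; this keeps
// last_nid below nid_max so _txc_state_proc() is not forced to route
// transactions through the kv thread.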
8328 uint64_t new_nid_max = 0, new_blobid_max = 0;
8329 if (nid_last + cct->_conf->bluestore_nid_prealloc/2 > nid_max) {
8330 KeyValueDB::Transaction t =
8331 kv_submitting.empty() ? synct : kv_submitting.front()->t;
8332 new_nid_max = nid_last + cct->_conf->bluestore_nid_prealloc;
8333 bufferlist bl;
8334 ::encode(new_nid_max, bl);
8335 t->set(PREFIX_SUPER, "nid_max", bl);
8336 dout(10) << __func__ << " new_nid_max " << new_nid_max << dendl;
8337 }
8338 if (blobid_last + cct->_conf->bluestore_blobid_prealloc/2 > blobid_max) {
8339 KeyValueDB::Transaction t =
8340 kv_submitting.empty() ? synct : kv_submitting.front()->t;
8341 new_blobid_max = blobid_last + cct->_conf->bluestore_blobid_prealloc;
8342 bufferlist bl;
8343 ::encode(new_blobid_max, bl);
8344 t->set(PREFIX_SUPER, "blobid_max", bl);
8345 dout(10) << __func__ << " new_blobid_max " << new_blobid_max << dendl;
8346 }
8347 for (auto txc : kv_submitting) {
8348 assert(txc->state == TransContext::STATE_KV_QUEUED);
8349 txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat);
8350 int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction(txc->t);
8351 assert(r == 0);
8352 _txc_applied_kv(txc);
8353 --txc->osr->kv_committing_serially;
8354 txc->state = TransContext::STATE_KV_SUBMITTED;
8355 if (txc->osr->kv_submitted_waiters) {
8356 std::lock_guard<std::mutex> l(txc->osr->qlock);
8357 if (txc->osr->_is_all_kv_submitted()) {
8358 txc->osr->qcond.notify_all();
8359 }
8360 }
8361 }
8362 for (auto txc : kv_committing) {
8363 if (txc->had_ios) {
8364 --txc->osr->txc_with_unstable_io;
8365 }
8366 txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat);
8367 }
8368
8369 // release throttle *before* we commit. this allows new ops
8370 // to be prepared and enter the pipeline while we are waiting on
8371 // the kv commit sync/flush. then hopefully on the next
8372 // iteration there will already be ops awake. otherwise, we
8373 // end up going to sleep, and then wake up when the very first
8374 // transaction is ready for commit.
8375 throttle_bytes.put(costs);
8376
8377 PExtentVector bluefs_gift_extents;
8378 if (bluefs &&
8379 after_flush - bluefs_last_balance >
8380 cct->_conf->bluestore_bluefs_balance_interval) {
8381 bluefs_last_balance = after_flush;
8382 int r = _balance_bluefs_freespace(&bluefs_gift_extents);
8383 assert(r >= 0);
8384 if (r > 0) {
8385 for (auto& p : bluefs_gift_extents) {
8386 bluefs_extents.insert(p.offset, p.length);
8387 }
8388 bufferlist bl;
8389 ::encode(bluefs_extents, bl);
8390 dout(10) << __func__ << " bluefs_extents now 0x" << std::hex
8391 << bluefs_extents << std::dec << dendl;
8392 synct->set(PREFIX_SUPER, "bluefs_extents", bl);
8393 }
8394 }
8395
8396 // cleanup sync deferred keys
8397 for (auto b : deferred_stable) {
8398 for (auto& txc : b->txcs) {
8399 bluestore_deferred_transaction_t& wt = *txc.deferred_txn;
8400 if (!wt.released.empty()) {
8401 // kraken replay compat only
8402 txc.released = wt.released;
8403 dout(10) << __func__ << " deferred txn has released "
8404 << txc.released
8405 << " (we just upgraded from kraken) on " << &txc << dendl;
8406 _txc_finalize_kv(&txc, synct);
8407 }
8408 // cleanup the deferred
8409 string key;
8410 get_deferred_key(wt.seq, &key);
8411 synct->rm_single_key(PREFIX_DEFERRED, key);
8412 }
8413 }
8414
8415 // submit synct synchronously (block and wait for it to commit)
8416 int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction_sync(synct);
8417 assert(r == 0);
8418
8419 if (new_nid_max) {
8420 nid_max = new_nid_max;
8421 dout(10) << __func__ << " nid_max now " << nid_max << dendl;
8422 }
8423 if (new_blobid_max) {
8424 blobid_max = new_blobid_max;
8425 dout(10) << __func__ << " blobid_max now " << blobid_max << dendl;
8426 }
8427
8428 {
8429 utime_t finish = ceph_clock_now();
8430 utime_t dur_flush = after_flush - start;
8431 utime_t dur_kv = finish - after_flush;
8432 utime_t dur = finish - start;
8433 dout(20) << __func__ << " committed " << kv_committing.size()
8434 << " cleaned " << deferred_stable.size()
8435 << " in " << dur
8436 << " (" << dur_flush << " flush + " << dur_kv << " kv commit)"
8437 << dendl;
8438 logger->tinc(l_bluestore_kv_flush_lat, dur_flush);
8439 logger->tinc(l_bluestore_kv_commit_lat, dur_kv);
8440 logger->tinc(l_bluestore_kv_lat, dur);
8441 }
8442
8443 if (bluefs) {
8444 if (!bluefs_gift_extents.empty()) {
8445 _commit_bluefs_freespace(bluefs_gift_extents);
8446 }
8447 for (auto p = bluefs_extents_reclaiming.begin();
8448 p != bluefs_extents_reclaiming.end();
8449 ++p) {
8450 dout(20) << __func__ << " releasing old bluefs 0x" << std::hex
8451 << p.get_start() << "~" << p.get_len() << std::dec
8452 << dendl;
8453 alloc->release(p.get_start(), p.get_len());
8454 }
8455 bluefs_extents_reclaiming.clear();
8456 }
8457
8458 {
8459 std::unique_lock<std::mutex> m(kv_finalize_lock);
8460 if (kv_committing_to_finalize.empty()) {
8461 kv_committing_to_finalize.swap(kv_committing);
8462 } else {
8463 kv_committing_to_finalize.insert(
8464 kv_committing_to_finalize.end(),
8465 kv_committing.begin(),
8466 kv_committing.end());
8467 kv_committing.clear();
8468 }
8469 if (deferred_stable_to_finalize.empty()) {
8470 deferred_stable_to_finalize.swap(deferred_stable);
8471 } else {
8472 deferred_stable_to_finalize.insert(
8473 deferred_stable_to_finalize.end(),
8474 deferred_stable.begin(),
8475 deferred_stable.end());
8476 deferred_stable.clear();
8477 }
8478 kv_finalize_cond.notify_one();
8479 }
8480
8481 l.lock();
8482 // previously deferred "done" are now "stable" by virtue of this
8483 // commit cycle.
8484 deferred_stable_queue.swap(deferred_done);
8485 }
8486 }
8487 dout(10) << __func__ << " finish" << dendl;
8488 kv_sync_started = false;
8489 }
8490
8491 void BlueStore::_kv_finalize_thread()
8492 {
8493 deque<TransContext*> kv_committed;
8494 deque<DeferredBatch*> deferred_stable;
8495 dout(10) << __func__ << " start" << dendl;
8496 std::unique_lock<std::mutex> l(kv_finalize_lock);
8497 assert(!kv_finalize_started);
8498 kv_finalize_started = true;
8499 kv_finalize_cond.notify_all();
8500 while (true) {
8501 assert(kv_committed.empty());
8502 assert(deferred_stable.empty());
8503 if (kv_committing_to_finalize.empty() &&
8504 deferred_stable_to_finalize.empty()) {
8505 if (kv_finalize_stop)
8506 break;
8507 dout(20) << __func__ << " sleep" << dendl;
8508 kv_finalize_cond.wait(l);
8509 dout(20) << __func__ << " wake" << dendl;
8510 } else {
8511 kv_committed.swap(kv_committing_to_finalize);
8512 deferred_stable.swap(deferred_stable_to_finalize);
8513 l.unlock();
8514 dout(20) << __func__ << " kv_committed " << kv_committed << dendl;
8515 dout(20) << __func__ << " deferred_stable " << deferred_stable << dendl;
8516
8517 while (!kv_committed.empty()) {
8518 TransContext *txc = kv_committed.front();
8519 assert(txc->state == TransContext::STATE_KV_SUBMITTED);
8520 _txc_state_proc(txc);
8521 kv_committed.pop_front();
8522 }
8523
8524 for (auto b : deferred_stable) {
8525 auto p = b->txcs.begin();
8526 while (p != b->txcs.end()) {
8527 TransContext *txc = &*p;
8528 p = b->txcs.erase(p); // unlink here because
8529 _txc_state_proc(txc); // this may destroy txc
8530 }
8531 delete b;
8532 }
8533 deferred_stable.clear();
8534
8535 if (!deferred_aggressive) {
8536 if (deferred_queue_size >= deferred_batch_ops.load() ||
8537 throttle_deferred_bytes.past_midpoint()) {
8538 deferred_try_submit();
8539 }
8540 }
8541
8542 // this is as good a place as any ...
8543 _reap_collections();
8544
8545 l.lock();
8546 }
8547 }
8548 dout(10) << __func__ << " finish" << dendl;
8549 kv_finalize_started = false;
8550 }
8551
8552 bluestore_deferred_op_t *BlueStore::_get_deferred_op(
8553 TransContext *txc, OnodeRef o)
8554 {
8555 if (!txc->deferred_txn) {
8556 txc->deferred_txn = new bluestore_deferred_transaction_t;
8557 }
8558 txc->deferred_txn->ops.push_back(bluestore_deferred_op_t());
8559 return &txc->deferred_txn->ops.back();
8560 }
8561
8562 void BlueStore::_deferred_queue(TransContext *txc)
8563 {
8564 dout(20) << __func__ << " txc " << txc << " osr " << txc->osr << dendl;
8565 deferred_lock.lock();
8566 if (!txc->osr->deferred_pending &&
8567 !txc->osr->deferred_running) {
8568 deferred_queue.push_back(*txc->osr);
8569 }
8570 if (!txc->osr->deferred_pending) {
8571 txc->osr->deferred_pending = new DeferredBatch(cct, txc->osr.get());
8572 }
8573 ++deferred_queue_size;
8574 txc->osr->deferred_pending->txcs.push_back(*txc);
8575 bluestore_deferred_transaction_t& wt = *txc->deferred_txn;
8576 for (auto opi = wt.ops.begin(); opi != wt.ops.end(); ++opi) {
8577 const auto& op = *opi;
8578 assert(op.op == bluestore_deferred_op_t::OP_WRITE);
8579 bufferlist::const_iterator p = op.data.begin();
8580 for (auto e : op.extents) {
8581 txc->osr->deferred_pending->prepare_write(
8582 cct, wt.seq, e.offset, e.length, p);
8583 }
8584 }
8585 if (deferred_aggressive &&
8586 !txc->osr->deferred_running) {
8587 _deferred_submit_unlock(txc->osr.get());
8588 } else {
8589 deferred_lock.unlock();
8590 }
8591 }
8592
8593 void BlueStore::deferred_try_submit()
8594 {
8595 dout(20) << __func__ << " " << deferred_queue.size() << " osrs, "
8596 << deferred_queue_size << " txcs" << dendl;
8597 std::lock_guard<std::mutex> l(deferred_lock);
8598 vector<OpSequencerRef> osrs;
8599 osrs.reserve(deferred_queue.size());
8600 for (auto& osr : deferred_queue) {
8601 osrs.push_back(&osr);
8602 }
8603 for (auto& osr : osrs) {
8604 if (osr->deferred_pending && !osr->deferred_running) {
8605 _deferred_submit_unlock(osr.get());
8606 deferred_lock.lock();
8607 }
8608 }
8609 }
8610
8611 void BlueStore::_deferred_submit_unlock(OpSequencer *osr)
8612 {
8613 dout(10) << __func__ << " osr " << osr
8614 << " " << osr->deferred_pending->iomap.size() << " ios pending "
8615 << dendl;
8616 assert(osr->deferred_pending);
8617 assert(!osr->deferred_running);
8618
8619 auto b = osr->deferred_pending;
8620 deferred_queue_size -= b->seq_bytes.size();
8621 assert(deferred_queue_size >= 0);
8622
8623 osr->deferred_running = osr->deferred_pending;
8624 osr->deferred_pending = nullptr;
8625
8626 uint64_t start = 0, pos = 0;
8627 bufferlist bl;
8628 auto i = b->iomap.begin();
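// walk the iomap and coalesce contiguous regions: consecutive entries
// whose offsets are adjacent are appended to bl, and the accumulated run
// is submitted as a single aio_write when a gap (or the end of the map)
// is reached.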
8629 while (true) {
8630 if (i == b->iomap.end() || i->first != pos) {
8631 if (bl.length()) {
8632 dout(20) << __func__ << " write 0x" << std::hex
8633 << start << "~" << bl.length()
8634 << " crc " << bl.crc32c(-1) << std::dec << dendl;
8635 if (!g_conf->bluestore_debug_omit_block_device_write) {
8636 logger->inc(l_bluestore_deferred_write_ops);
8637 logger->inc(l_bluestore_deferred_write_bytes, bl.length());
8638 int r = bdev->aio_write(start, bl, &b->ioc, false);
8639 assert(r == 0);
8640 }
8641 }
8642 if (i == b->iomap.end()) {
8643 break;
8644 }
8645 start = 0;
8646 pos = i->first;
8647 bl.clear();
8648 }
8649 dout(20) << __func__ << " seq " << i->second.seq << " 0x"
8650 << std::hex << pos << "~" << i->second.bl.length() << std::dec
8651 << dendl;
8652 if (!bl.length()) {
8653 start = pos;
8654 }
8655 pos += i->second.bl.length();
8656 bl.claim_append(i->second.bl);
8657 ++i;
8658 }
8659
8660 // demote to deferred_submit_lock, then drop that too
8661 std::lock_guard<std::mutex> l(deferred_submit_lock);
8662 deferred_lock.unlock();
8663 bdev->aio_submit(&b->ioc);
8664 }
8665
8666 void BlueStore::_deferred_aio_finish(OpSequencer *osr)
8667 {
8668 dout(10) << __func__ << " osr " << osr << dendl;
8669 assert(osr->deferred_running);
8670 DeferredBatch *b = osr->deferred_running;
8671
8672 {
8673 std::lock_guard<std::mutex> l(deferred_lock);
8674 assert(osr->deferred_running == b);
8675 osr->deferred_running = nullptr;
8676 if (!osr->deferred_pending) {
8677 auto q = deferred_queue.iterator_to(*osr);
8678 deferred_queue.erase(q);
8679 } else if (deferred_aggressive) {
8680 dout(20) << __func__ << " queuing async deferred_try_submit" << dendl;
8681 finishers[0]->queue(new FunctionContext([&](int) {
8682 deferred_try_submit();
8683 }));
8684 }
8685 }
8686
8687 {
8688 uint64_t costs = 0;
8689 std::lock_guard<std::mutex> l2(osr->qlock);
8690 for (auto& i : b->txcs) {
8691 TransContext *txc = &i;
8692 txc->state = TransContext::STATE_DEFERRED_CLEANUP;
8693 costs += txc->cost;
8694 }
8695 osr->qcond.notify_all();
8696 throttle_deferred_bytes.put(costs);
8697 std::lock_guard<std::mutex> l(kv_lock);
8698 deferred_done_queue.emplace_back(b);
8699 }
8700
8701 // in the normal case, do not bother waking up the kv thread; it will
8702 // catch us on the next commit anyway.
8703 if (deferred_aggressive) {
8704 std::lock_guard<std::mutex> l(kv_lock);
8705 kv_cond.notify_one();
8706 }
8707 }
8708
8709 int BlueStore::_deferred_replay()
8710 {
8711 dout(10) << __func__ << " start" << dendl;
8712 OpSequencerRef osr = new OpSequencer(cct, this);
8713 int count = 0;
8714 int r = 0;
8715 KeyValueDB::Iterator it = db->get_iterator(PREFIX_DEFERRED);
8716 for (it->lower_bound(string()); it->valid(); it->next(), ++count) {
8717 dout(20) << __func__ << " replay " << pretty_binary_string(it->key())
8718 << dendl;
8719 bluestore_deferred_transaction_t *deferred_txn =
8720 new bluestore_deferred_transaction_t;
8721 bufferlist bl = it->value();
8722 bufferlist::iterator p = bl.begin();
8723 try {
8724 ::decode(*deferred_txn, p);
8725 } catch (buffer::error& e) {
8726 derr << __func__ << " failed to decode deferred txn "
8727 << pretty_binary_string(it->key()) << dendl;
8728 delete deferred_txn;
8729 r = -EIO;
8730 goto out;
8731 }
8732 TransContext *txc = _txc_create(osr.get());
8733 txc->deferred_txn = deferred_txn;
8734 txc->state = TransContext::STATE_KV_DONE;
8735 _txc_state_proc(txc);
8736 }
8737 out:
8738 dout(20) << __func__ << " draining osr" << dendl;
8739 _osr_drain_all();
8740 osr->discard();
8741 dout(10) << __func__ << " completed " << count << " events" << dendl;
8742 return r;
8743 }
8744
8745 // ---------------------------
8746 // transactions
8747
8748 int BlueStore::queue_transactions(
8749 Sequencer *posr,
8750 vector<Transaction>& tls,
8751 TrackedOpRef op,
8752 ThreadPool::TPHandle *handle)
8753 {
8754 FUNCTRACE();
8755 Context *onreadable;
8756 Context *ondisk;
8757 Context *onreadable_sync;
8758 ObjectStore::Transaction::collect_contexts(
8759 tls, &onreadable, &ondisk, &onreadable_sync);
8760
8761 if (cct->_conf->objectstore_blackhole) {
8762 dout(0) << __func__ << " objectstore_blackhole = TRUE, dropping transaction"
8763 << dendl;
8764 delete ondisk;
8765 delete onreadable;
8766 delete onreadable_sync;
8767 return 0;
8768 }
8769 utime_t start = ceph_clock_now();
8770 // set up the sequencer
8771 OpSequencer *osr;
8772 assert(posr);
8773 if (posr->p) {
8774 osr = static_cast<OpSequencer *>(posr->p.get());
8775 dout(10) << __func__ << " existing " << osr << " " << *osr << dendl;
8776 } else {
8777 osr = new OpSequencer(cct, this);
8778 osr->parent = posr;
8779 posr->p = osr;
8780 dout(10) << __func__ << " new " << osr << " " << *osr << dendl;
8781 }
8782
8783 // prepare
8784 TransContext *txc = _txc_create(osr);
8785 txc->onreadable = onreadable;
8786 txc->onreadable_sync = onreadable_sync;
8787 txc->oncommit = ondisk;
8788
8789 for (vector<Transaction>::iterator p = tls.begin(); p != tls.end(); ++p) {
8790 (*p).set_osr(osr);
8791 txc->bytes += (*p).get_num_bytes();
8792 _txc_add_transaction(txc, &(*p));
8793 }
8794 _txc_calc_cost(txc);
8795
8796 _txc_write_nodes(txc, txc->t);
8797
8798 // journal deferred items
8799 if (txc->deferred_txn) {
8800 txc->deferred_txn->seq = ++deferred_seq;
8801 bufferlist bl;
8802 ::encode(*txc->deferred_txn, bl);
8803 string key;
8804 get_deferred_key(txc->deferred_txn->seq, &key);
8805 txc->t->set(PREFIX_DEFERRED, key, bl);
8806 }
8807
8808 _txc_finalize_kv(txc, txc->t);
8809 if (handle)
8810 handle->suspend_tp_timeout();
8811
8812 utime_t tstart = ceph_clock_now();
8813 throttle_bytes.get(txc->cost);
8814 if (txc->deferred_txn) {
8815 // ensure we do not block here because of deferred writes
8816 if (!throttle_deferred_bytes.get_or_fail(txc->cost)) {
8817 deferred_try_submit();
8818 throttle_deferred_bytes.get(txc->cost);
8819 }
8820 }
8821 utime_t tend = ceph_clock_now();
8822
8823 if (handle)
8824 handle->reset_tp_timeout();
8825
8826 logger->inc(l_bluestore_txc);
8827
8828 // execute (start)
8829 _txc_state_proc(txc);
8830
8831 logger->tinc(l_bluestore_submit_lat, ceph_clock_now() - start);
8832 logger->tinc(l_bluestore_throttle_lat, tend - tstart);
8833 return 0;
8834 }
8835
8836 void BlueStore::_txc_aio_submit(TransContext *txc)
8837 {
8838 dout(10) << __func__ << " txc " << txc << dendl;
8839 bdev->aio_submit(&txc->ioc);
8840 }
8841
8842 void BlueStore::_txc_add_transaction(TransContext *txc, Transaction *t)
8843 {
8844 Transaction::iterator i = t->begin();
8845
8846 _dump_transaction(t);
8847
8848 vector<CollectionRef> cvec(i.colls.size());
8849 unsigned j = 0;
8850 for (vector<coll_t>::iterator p = i.colls.begin(); p != i.colls.end();
8851 ++p, ++j) {
8852 cvec[j] = _get_collection(*p);
8853
8854 // note first collection we reference
8855 if (!txc->first_collection)
8856 txc->first_collection = cvec[j];
8857 }
8858 vector<OnodeRef> ovec(i.objects.size());
8859
8860 for (int pos = 0; i.have_op(); ++pos) {
8861 Transaction::Op *op = i.decode_op();
8862 int r = 0;
8863
8864 // no coll or obj
8865 if (op->op == Transaction::OP_NOP)
8866 continue;
8867
8868 // collection operations
8869 CollectionRef &c = cvec[op->cid];
8870 switch (op->op) {
8871 case Transaction::OP_RMCOLL:
8872 {
8873 const coll_t &cid = i.get_cid(op->cid);
8874 r = _remove_collection(txc, cid, &c);
8875 if (!r)
8876 continue;
8877 }
8878 break;
8879
8880 case Transaction::OP_MKCOLL:
8881 {
8882 assert(!c);
8883 const coll_t &cid = i.get_cid(op->cid);
8884 r = _create_collection(txc, cid, op->split_bits, &c);
8885 if (!r)
8886 continue;
8887 }
8888 break;
8889
8890 case Transaction::OP_SPLIT_COLLECTION:
8891 assert(0 == "deprecated");
8892 break;
8893
8894 case Transaction::OP_SPLIT_COLLECTION2:
8895 {
8896 uint32_t bits = op->split_bits;
8897 uint32_t rem = op->split_rem;
8898 r = _split_collection(txc, c, cvec[op->dest_cid], bits, rem);
8899 if (!r)
8900 continue;
8901 }
8902 break;
8903
8904 case Transaction::OP_COLL_HINT:
8905 {
8906 uint32_t type = op->hint_type;
8907 bufferlist hint;
8908 i.decode_bl(hint);
8909 bufferlist::iterator hiter = hint.begin();
8910 if (type == Transaction::COLL_HINT_EXPECTED_NUM_OBJECTS) {
8911 uint32_t pg_num;
8912 uint64_t num_objs;
8913 ::decode(pg_num, hiter);
8914 ::decode(num_objs, hiter);
8915 dout(10) << __func__ << " expected_num_objects collection hint is a no-op,"
8916 << " pg_num " << pg_num << " num_objects " << num_objs
8917 << dendl;
8918 } else {
8919 // Ignore the hint
8920 dout(10) << __func__ << " unknown collection hint " << type << dendl;
8921 }
8922 continue;
8923 }
8924 break;
8925
8926 case Transaction::OP_COLL_SETATTR:
8927 r = -EOPNOTSUPP;
8928 break;
8929
8930 case Transaction::OP_COLL_RMATTR:
8931 r = -EOPNOTSUPP;
8932 break;
8933
8934 case Transaction::OP_COLL_RENAME:
8935 assert(0 == "not implemented");
8936 break;
8937 }
8938 if (r < 0) {
8939 derr << __func__ << " error " << cpp_strerror(r)
8940 << " not handled on operation " << op->op
8941 << " (op " << pos << ", counting from 0)" << dendl;
8942 _dump_transaction(t, 0);
8943 assert(0 == "unexpected error");
8944 }
8945
8946 // these operations implicitly create the object
8947 bool create = false;
8948 if (op->op == Transaction::OP_TOUCH ||
8949 op->op == Transaction::OP_WRITE ||
8950 op->op == Transaction::OP_ZERO) {
8951 create = true;
8952 }
8953
8954 // object operations
8955 RWLock::WLocker l(c->lock);
8956 OnodeRef &o = ovec[op->oid];
8957 if (!o) {
8958 ghobject_t oid = i.get_oid(op->oid);
8959 o = c->get_onode(oid, create);
8960 }
8961 if (!create && (!o || !o->exists)) {
8962 dout(10) << __func__ << " op " << op->op << " got ENOENT on "
8963 << i.get_oid(op->oid) << dendl;
8964 r = -ENOENT;
8965 goto endop;
8966 }
8967
8968 switch (op->op) {
8969 case Transaction::OP_TOUCH:
8970 r = _touch(txc, c, o);
8971 break;
8972
8973 case Transaction::OP_WRITE:
8974 {
8975 uint64_t off = op->off;
8976 uint64_t len = op->len;
8977 uint32_t fadvise_flags = i.get_fadvise_flags();
8978 bufferlist bl;
8979 i.decode_bl(bl);
8980 r = _write(txc, c, o, off, len, bl, fadvise_flags);
8981 }
8982 break;
8983
8984 case Transaction::OP_ZERO:
8985 {
8986 uint64_t off = op->off;
8987 uint64_t len = op->len;
8988 r = _zero(txc, c, o, off, len);
8989 }
8990 break;
8991
8992 case Transaction::OP_TRIMCACHE:
8993 {
8994 // deprecated, no-op
8995 }
8996 break;
8997
8998 case Transaction::OP_TRUNCATE:
8999 {
9000 uint64_t off = op->off;
9001 _truncate(txc, c, o, off);
9002 }
9003 break;
9004
9005 case Transaction::OP_REMOVE:
9006 {
9007 r = _remove(txc, c, o);
9008 }
9009 break;
9010
9011 case Transaction::OP_SETATTR:
9012 {
9013 string name = i.decode_string();
9014 bufferptr bp;
9015 i.decode_bp(bp);
9016 r = _setattr(txc, c, o, name, bp);
9017 }
9018 break;
9019
9020 case Transaction::OP_SETATTRS:
9021 {
9022 map<string, bufferptr> aset;
9023 i.decode_attrset(aset);
9024 r = _setattrs(txc, c, o, aset);
9025 }
9026 break;
9027
9028 case Transaction::OP_RMATTR:
9029 {
9030 string name = i.decode_string();
9031 r = _rmattr(txc, c, o, name);
9032 }
9033 break;
9034
9035 case Transaction::OP_RMATTRS:
9036 {
9037 r = _rmattrs(txc, c, o);
9038 }
9039 break;
9040
9041 case Transaction::OP_CLONE:
9042 {
9043 OnodeRef& no = ovec[op->dest_oid];
9044 if (!no) {
9045 const ghobject_t& noid = i.get_oid(op->dest_oid);
9046 no = c->get_onode(noid, true);
9047 }
9048 r = _clone(txc, c, o, no);
9049 }
9050 break;
9051
9052 case Transaction::OP_CLONERANGE:
9053 assert(0 == "deprecated");
9054 break;
9055
9056 case Transaction::OP_CLONERANGE2:
9057 {
9058 OnodeRef& no = ovec[op->dest_oid];
9059 if (!no) {
9060 const ghobject_t& noid = i.get_oid(op->dest_oid);
9061 no = c->get_onode(noid, true);
9062 }
9063 uint64_t srcoff = op->off;
9064 uint64_t len = op->len;
9065 uint64_t dstoff = op->dest_off;
9066 r = _clone_range(txc, c, o, no, srcoff, len, dstoff);
9067 }
9068 break;
9069
9070 case Transaction::OP_COLL_ADD:
9071 assert(0 == "not implemented");
9072 break;
9073
9074 case Transaction::OP_COLL_REMOVE:
9075 assert(0 == "not implemented");
9076 break;
9077
9078 case Transaction::OP_COLL_MOVE:
9079 assert(0 == "deprecated");
9080 break;
9081
9082 case Transaction::OP_COLL_MOVE_RENAME:
9083 case Transaction::OP_TRY_RENAME:
9084 {
9085 assert(op->cid == op->dest_cid);
9086 const ghobject_t& noid = i.get_oid(op->dest_oid);
9087 OnodeRef& no = ovec[op->dest_oid];
9088 if (!no) {
9089 no = c->get_onode(noid, false);
9090 }
9091 r = _rename(txc, c, o, no, noid);
9092 }
9093 break;
9094
9095 case Transaction::OP_OMAP_CLEAR:
9096 {
9097 r = _omap_clear(txc, c, o);
9098 }
9099 break;
9100 case Transaction::OP_OMAP_SETKEYS:
9101 {
9102 bufferlist aset_bl;
9103 i.decode_attrset_bl(&aset_bl);
9104 r = _omap_setkeys(txc, c, o, aset_bl);
9105 }
9106 break;
9107 case Transaction::OP_OMAP_RMKEYS:
9108 {
9109 bufferlist keys_bl;
9110 i.decode_keyset_bl(&keys_bl);
9111 r = _omap_rmkeys(txc, c, o, keys_bl);
9112 }
9113 break;
9114 case Transaction::OP_OMAP_RMKEYRANGE:
9115 {
9116 string first, last;
9117 first = i.decode_string();
9118 last = i.decode_string();
9119 r = _omap_rmkey_range(txc, c, o, first, last);
9120 }
9121 break;
9122 case Transaction::OP_OMAP_SETHEADER:
9123 {
9124 bufferlist bl;
9125 i.decode_bl(bl);
9126 r = _omap_setheader(txc, c, o, bl);
9127 }
9128 break;
9129
9130 case Transaction::OP_SETALLOCHINT:
9131 {
9132 r = _set_alloc_hint(txc, c, o,
9133 op->expected_object_size,
9134 op->expected_write_size,
9135 op->alloc_hint_flags);
9136 }
9137 break;
9138
9139 default:
9140 derr << __func__ << " bad op " << op->op << dendl;
9141 ceph_abort();
9142 }
9143
9144 endop:
9145 if (r < 0) {
9146 bool ok = false;
9147
9148 if (r == -ENOENT && !(op->op == Transaction::OP_CLONERANGE ||
9149 op->op == Transaction::OP_CLONE ||
9150 op->op == Transaction::OP_CLONERANGE2 ||
9151 op->op == Transaction::OP_COLL_ADD ||
9152 op->op == Transaction::OP_SETATTR ||
9153 op->op == Transaction::OP_SETATTRS ||
9154 op->op == Transaction::OP_RMATTR ||
9155 op->op == Transaction::OP_OMAP_SETKEYS ||
9156 op->op == Transaction::OP_OMAP_RMKEYS ||
9157 op->op == Transaction::OP_OMAP_RMKEYRANGE ||
9158 op->op == Transaction::OP_OMAP_SETHEADER))
9159 // -ENOENT is usually okay
9160 ok = true;
9161 if (r == -ENODATA)
9162 ok = true;
9163
9164 if (!ok) {
9165 const char *msg = "unexpected error code";
9166
9167 if (r == -ENOENT && (op->op == Transaction::OP_CLONERANGE ||
9168 op->op == Transaction::OP_CLONE ||
9169 op->op == Transaction::OP_CLONERANGE2))
9170 msg = "ENOENT on clone suggests osd bug";
9171
9172 if (r == -ENOSPC)
9173 // For now, if we hit _any_ ENOSPC, crash, before we do any damage
9174 // by partially applying transactions.
9175 msg = "ENOSPC from bluestore, misconfigured cluster";
9176
9177 if (r == -ENOTEMPTY) {
9178 msg = "ENOTEMPTY suggests garbage data in osd data dir";
9179 }
9180
9181 derr << __func__ << " error " << cpp_strerror(r)
9182 << " not handled on operation " << op->op
9183 << " (op " << pos << ", counting from 0)"
9184 << dendl;
9185 derr << msg << dendl;
9186 _dump_transaction(t, 0);
9187 assert(0 == "unexpected error");
9188 }
9189 }
9190 }
9191 }
9192
9193
9194
9195 // -----------------
9196 // write operations
9197
9198 int BlueStore::_touch(TransContext *txc,
9199 CollectionRef& c,
9200 OnodeRef &o)
9201 {
9202 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
9203 int r = 0;
9204 _assign_nid(txc, o);
9205 txc->write_onode(o);
9206 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
9207 return r;
9208 }
9209
9210 void BlueStore::_dump_onode(OnodeRef o, int log_level)
9211 {
9212 if (!cct->_conf->subsys.should_gather(ceph_subsys_bluestore, log_level))
9213 return;
9214 dout(log_level) << __func__ << " " << o << " " << o->oid
9215 << " nid " << o->onode.nid
9216 << " size 0x" << std::hex << o->onode.size
9217 << " (" << std::dec << o->onode.size << ")"
9218 << " expected_object_size " << o->onode.expected_object_size
9219 << " expected_write_size " << o->onode.expected_write_size
9220 << " in " << o->onode.extent_map_shards.size() << " shards"
9221 << ", " << o->extent_map.spanning_blob_map.size()
9222 << " spanning blobs"
9223 << dendl;
9224 for (auto p = o->onode.attrs.begin();
9225 p != o->onode.attrs.end();
9226 ++p) {
9227 dout(log_level) << __func__ << " attr " << p->first
9228 << " len " << p->second.length() << dendl;
9229 }
9230 _dump_extent_map(o->extent_map, log_level);
9231 }
9232
9233 void BlueStore::_dump_extent_map(ExtentMap &em, int log_level)
9234 {
9235 uint64_t pos = 0;
9236 for (auto& s : em.shards) {
9237 dout(log_level) << __func__ << " shard " << *s.shard_info
9238 << (s.loaded ? " (loaded)" : "")
9239 << (s.dirty ? " (dirty)" : "")
9240 << dendl;
9241 }
9242 for (auto& e : em.extent_map) {
9243 dout(log_level) << __func__ << " " << e << dendl;
9244 assert(e.logical_offset >= pos);
9245 pos = e.logical_offset + e.length;
9246 const bluestore_blob_t& blob = e.blob->get_blob();
9247 if (blob.has_csum()) {
9248 vector<uint64_t> v;
9249 unsigned n = blob.get_csum_count();
9250 for (unsigned i = 0; i < n; ++i)
9251 v.push_back(blob.get_csum_item(i));
9252 dout(log_level) << __func__ << " csum: " << std::hex << v << std::dec
9253 << dendl;
9254 }
9255 std::lock_guard<std::recursive_mutex> l(e.blob->shared_blob->get_cache()->lock);
9256 for (auto& i : e.blob->shared_blob->bc.buffer_map) {
9257 dout(log_level) << __func__ << " 0x" << std::hex << i.first
9258 << "~" << i.second->length << std::dec
9259 << " " << *i.second << dendl;
9260 }
9261 }
9262 }
9263
9264 void BlueStore::_dump_transaction(Transaction *t, int log_level)
9265 {
9266 dout(log_level) << " transaction dump:\n";
9267 JSONFormatter f(true);
9268 f.open_object_section("transaction");
9269 t->dump(&f);
9270 f.close_section();
9271 f.flush(*_dout);
9272 *_dout << dendl;
9273 }
9274
9275 void BlueStore::_pad_zeros(
9276 bufferlist *bl, uint64_t *offset,
9277 uint64_t chunk_size)
9278 {
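// pad *bl with zeros on both sides so it begins and ends on chunk_size
// boundaries; *offset is moved back to the padded start. illustrative
// example: offset=0x1200, length=0xe00, chunk_size=0x1000 pads 0x200
// zero bytes at the front, yielding offset=0x1000, length=0x1000.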
9279 auto length = bl->length();
9280 dout(30) << __func__ << " 0x" << std::hex << *offset << "~" << length
9281 << " chunk_size 0x" << chunk_size << std::dec << dendl;
9282 dout(40) << "before:\n";
9283 bl->hexdump(*_dout);
9284 *_dout << dendl;
9285 // front
9286 size_t front_pad = *offset % chunk_size;
9287 size_t back_pad = 0;
9288 size_t pad_count = 0;
9289 if (front_pad) {
9290 size_t front_copy = MIN(chunk_size - front_pad, length);
9291 bufferptr z = buffer::create_page_aligned(chunk_size);
9292 z.zero(0, front_pad, false);
9293 pad_count += front_pad;
9294 bl->copy(0, front_copy, z.c_str() + front_pad);
9295 if (front_copy + front_pad < chunk_size) {
9296 back_pad = chunk_size - (length + front_pad);
9297 z.zero(front_pad + length, back_pad, false);
9298 pad_count += back_pad;
9299 }
9300 bufferlist old, t;
9301 old.swap(*bl);
9302 t.substr_of(old, front_copy, length - front_copy);
9303 bl->append(z);
9304 bl->claim_append(t);
9305 *offset -= front_pad;
9306 length += pad_count;
9307 }
9308
9309 // back
9310 uint64_t end = *offset + length;
9311 unsigned back_copy = end % chunk_size;
9312 if (back_copy) {
9313 assert(back_pad == 0);
9314 back_pad = chunk_size - back_copy;
9315 assert(back_copy <= length);
9316 bufferptr tail(chunk_size);
9317 bl->copy(length - back_copy, back_copy, tail.c_str());
9318 tail.zero(back_copy, back_pad, false);
9319 bufferlist old;
9320 old.swap(*bl);
9321 bl->substr_of(old, 0, length - back_copy);
9322 bl->append(tail);
9323 length += back_pad;
9324 pad_count += back_pad;
9325 }
9326 dout(20) << __func__ << " pad 0x" << std::hex << front_pad << " + 0x"
9327 << back_pad << " on front/back, now 0x" << *offset << "~"
9328 << length << std::dec << dendl;
9329 dout(40) << "after:\n";
9330 bl->hexdump(*_dout);
9331 *_dout << dendl;
9332 if (pad_count)
9333 logger->inc(l_bluestore_write_pad_bytes, pad_count);
9334 assert(bl->length() == length);
9335 }
9336
9337 void BlueStore::_do_write_small(
9338 TransContext *txc,
9339 CollectionRef &c,
9340 OnodeRef o,
9341 uint64_t offset, uint64_t length,
9342 bufferlist::iterator& blp,
9343 WriteContext *wctx)
9344 {
9345 dout(10) << __func__ << " 0x" << std::hex << offset << "~" << length
9346 << std::dec << dendl;
9347 assert(length < min_alloc_size);
9348 uint64_t end_offs = offset + length;
9349
9350 logger->inc(l_bluestore_write_small);
9351 logger->inc(l_bluestore_write_small_bytes, length);
9352
9353 bufferlist bl;
9354 blp.copy(length, bl);
9355
9356 // Look for an existing mutable blob we can use.
9357 auto begin = o->extent_map.extent_map.begin();
9358 auto end = o->extent_map.extent_map.end();
9359 auto ep = o->extent_map.seek_lextent(offset);
9360 if (ep != begin) {
9361 --ep;
9362 if (ep->blob_end() <= offset) {
9363 ++ep;
9364 }
9365 }
9366 auto prev_ep = ep;
9367 if (prev_ep != begin) {
9368 --prev_ep;
9369 } else {
9370 prev_ep = end; // no previous extent; skip the backward check (it would duplicate ep)
9371 }
9372
9373 auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size);
9374 auto min_off = offset >= max_bsize ? offset - max_bsize : 0;
9375 uint32_t alloc_len = min_alloc_size;
9376 auto offset0 = P2ALIGN(offset, alloc_len);
9377
9378 bool any_change;
9379
9380 // search suitable extent in both forward and reverse direction in
9381 // [offset - target_max_blob_size, offset + target_max_blob_size] range
9382 // then check if blob can be reused via can_reuse_blob func or apply
9383 // direct/deferred write (the latter for extents including or higher
9384 // than 'offset' only).
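// illustrative example: for a 4 KiB write at offset 0x13000 with a
// target blob size of 0x10000, ep scans lextents forward from 0x13000
// while prev_ep scans backward, and the loop stops once both cursors
// leave the window [0x3000, 0x23000).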
9385 do {
9386 any_change = false;
9387
9388 if (ep != end && ep->logical_offset < offset + max_bsize) {
9389 BlobRef b = ep->blob;
9390 auto bstart = ep->blob_start();
9391 dout(20) << __func__ << " considering " << *b
9392 << " bstart 0x" << std::hex << bstart << std::dec << dendl;
9393 if (bstart >= end_offs) {
9394 dout(20) << __func__ << " ignoring distant " << *b << dendl;
9395 } else if (!b->get_blob().is_mutable()) {
9396 dout(20) << __func__ << " ignoring immutable " << *b << dendl;
9397 } else if (ep->logical_offset % min_alloc_size !=
9398 ep->blob_offset % min_alloc_size) {
9399 dout(20) << __func__ << " ignoring offset-skewed " << *b << dendl;
9400 } else {
9401 uint64_t chunk_size = b->get_blob().get_chunk_size(block_size);
9402 // can we pad our head/tail out with zeros?
9403 uint64_t head_pad, tail_pad;
9404 head_pad = P2PHASE(offset, chunk_size);
9405 tail_pad = P2NPHASE(end_offs, chunk_size);
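// illustrative example: offset=0x2300, end_offs=0x2b00 and
// chunk_size=0x1000 give head_pad=0x300 and tail_pad=0x500, so the
// write becomes chunk-aligned at 0x2000~0x1000.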
9406 if (head_pad || tail_pad) {
9407 o->extent_map.fault_range(db, offset - head_pad,
9408 end_offs - offset + head_pad + tail_pad);
9409 }
9410 if (head_pad &&
9411 o->extent_map.has_any_lextents(offset - head_pad, chunk_size)) {
9412 head_pad = 0;
9413 }
9414 if (tail_pad && o->extent_map.has_any_lextents(end_offs, tail_pad)) {
9415 tail_pad = 0;
9416 }
9417
9418 uint64_t b_off = offset - head_pad - bstart;
9419 uint64_t b_len = length + head_pad + tail_pad;
9420
9421 // direct write into unused blocks of an existing mutable blob?
9422 if ((b_off % chunk_size == 0 && b_len % chunk_size == 0) &&
9423 b->get_blob().get_ondisk_length() >= b_off + b_len &&
9424 b->get_blob().is_unused(b_off, b_len) &&
9425 b->get_blob().is_allocated(b_off, b_len)) {
9426 _apply_padding(head_pad, tail_pad, bl);
9427
9428 dout(20) << __func__ << " write to unused 0x" << std::hex
9429 << b_off << "~" << b_len
9430 << " pad 0x" << head_pad << " + 0x" << tail_pad
9431 << std::dec << " of mutable " << *b << dendl;
9432 _buffer_cache_write(txc, b, b_off, bl,
9433 wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
9434
9435 if (!g_conf->bluestore_debug_omit_block_device_write) {
9436 if (b_len <= prefer_deferred_size) {
9437 dout(20) << __func__ << " deferring small 0x" << std::hex
9438 << b_len << std::dec << " unused write via deferred" << dendl;
9439 bluestore_deferred_op_t *op = _get_deferred_op(txc, o);
9440 op->op = bluestore_deferred_op_t::OP_WRITE;
9441 b->get_blob().map(
9442 b_off, b_len,
9443 [&](uint64_t offset, uint64_t length) {
9444 op->extents.emplace_back(bluestore_pextent_t(offset, length));
9445 return 0;
9446 });
9447 op->data = bl;
9448 } else {
9449 b->get_blob().map_bl(
9450 b_off, bl,
9451 [&](uint64_t offset, bufferlist& t) {
9452 bdev->aio_write(offset, t,
9453 &txc->ioc, wctx->buffered);
9454 });
9455 }
9456 }
9457 b->dirty_blob().calc_csum(b_off, bl);
9458 dout(20) << __func__ << " lex old " << *ep << dendl;
9459 Extent *le = o->extent_map.set_lextent(c, offset, b_off + head_pad, length,
9460 b,
9461 &wctx->old_extents);
9462 b->dirty_blob().mark_used(le->blob_offset, le->length);
9463 txc->statfs_delta.stored() += le->length;
9464 dout(20) << __func__ << " lex " << *le << dendl;
9465 logger->inc(l_bluestore_write_small_unused);
9466 return;
9467 }
9468 // read some data to fill out the chunk?
9469 uint64_t head_read = P2PHASE(b_off, chunk_size);
9470 uint64_t tail_read = P2NPHASE(b_off + b_len, chunk_size);
9471 if ((head_read || tail_read) &&
9472 (b->get_blob().get_ondisk_length() >= b_off + b_len + tail_read) &&
9473 head_read + tail_read < min_alloc_size) {
9474 b_off -= head_read;
9475 b_len += head_read + tail_read;
9476
9477 } else {
9478 head_read = tail_read = 0;
9479 }
9480
9481 // chunk-aligned deferred overwrite?
9482 if (b->get_blob().get_ondisk_length() >= b_off + b_len &&
9483 b_off % chunk_size == 0 &&
9484 b_len % chunk_size == 0 &&
9485 b->get_blob().is_allocated(b_off, b_len)) {
9486
9487 _apply_padding(head_pad, tail_pad, bl);
9488
9489 dout(20) << __func__ << " reading head 0x" << std::hex << head_read
9490 << " and tail 0x" << tail_read << std::dec << dendl;
9491 if (head_read) {
9492 bufferlist head_bl;
9493 int r = _do_read(c.get(), o, offset - head_pad - head_read, head_read,
9494 head_bl, 0);
9495 assert(r >= 0 && r <= (int)head_read);
9496 size_t zlen = head_read - r;
9497 if (zlen) {
9498 head_bl.append_zero(zlen);
9499 logger->inc(l_bluestore_write_pad_bytes, zlen);
9500 }
9501 bl.claim_prepend(head_bl);
9502 logger->inc(l_bluestore_write_penalty_read_ops);
9503 }
9504 if (tail_read) {
9505 bufferlist tail_bl;
9506 int r = _do_read(c.get(), o, offset + length + tail_pad, tail_read,
9507 tail_bl, 0);
9508 assert(r >= 0 && r <= (int)tail_read);
9509 size_t zlen = tail_read - r;
9510 if (zlen) {
9511 tail_bl.append_zero(zlen);
9512 logger->inc(l_bluestore_write_pad_bytes, zlen);
9513 }
9514 bl.claim_append(tail_bl);
9515 logger->inc(l_bluestore_write_penalty_read_ops);
9516 }
9517 logger->inc(l_bluestore_write_small_pre_read);
9518
9519 bluestore_deferred_op_t *op = _get_deferred_op(txc, o);
9520 op->op = bluestore_deferred_op_t::OP_WRITE;
9521 _buffer_cache_write(txc, b, b_off, bl,
9522 wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
9523
9524 int r = b->get_blob().map(
9525 b_off, b_len,
9526 [&](uint64_t offset, uint64_t length) {
9527 op->extents.emplace_back(bluestore_pextent_t(offset, length));
9528 return 0;
9529 });
9530 assert(r == 0);
9531 if (b->get_blob().csum_type) {
9532 b->dirty_blob().calc_csum(b_off, bl);
9533 }
9534 op->data.claim(bl);
9535 dout(20) << __func__ << " deferred write 0x" << std::hex << b_off << "~"
9536 << b_len << std::dec << " of mutable " << *b
9537 << " at " << op->extents << dendl;
9538 Extent *le = o->extent_map.set_lextent(c, offset, offset - bstart, length,
9539 b, &wctx->old_extents);
9540 b->dirty_blob().mark_used(le->blob_offset, le->length);
9541 txc->statfs_delta.stored() += le->length;
9542 dout(20) << __func__ << " lex " << *le << dendl;
9543 logger->inc(l_bluestore_write_small_deferred);
9544 return;
9545 }
9546 // try to reuse blob if we can
9547 if (b->can_reuse_blob(min_alloc_size,
9548 max_bsize,
9549 offset0 - bstart,
9550 &alloc_len)) {
9551 assert(alloc_len == min_alloc_size); // expecting data always
9552 // fit into reused blob
9553 // Need to check for pending writes that want to
9554 // reuse the same pextent. The rationale is that during GC two chunks
9555 // from garbage blobs (compressed?) can share logical space within the same
9556 // AU. That in turn might be caused by an unaligned len in clone_range2.
9557 // Hence the second write would fail when attempting to reuse the blob in
9558 // do_alloc_write().
9559 if (!wctx->has_conflict(b,
9560 offset0,
9561 offset0 + alloc_len,
9562 min_alloc_size)) {
9563
9564 // we can't reuse head_pad/tail_pad since they might have been truncated
9565 // due to existing extents
9566 uint64_t b_off = offset - bstart;
9567 uint64_t b_off0 = b_off;
9568 _pad_zeros(&bl, &b_off0, chunk_size);
9569
9570 dout(20) << __func__ << " reuse blob " << *b << std::hex
9571 << " (0x" << b_off0 << "~" << bl.length() << ")"
9572 << " (0x" << b_off << "~" << length << ")"
9573 << std::dec << dendl;
9574
9575 o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
9576 wctx->write(offset, b, alloc_len, b_off0, bl, b_off, length,
9577 false, false);
9578 logger->inc(l_bluestore_write_small_unused);
9579 return;
9580 }
9581 }
9582 }
9583 ++ep;
9584 any_change = true;
9585 } // if (ep != end && ep->logical_offset < offset + max_bsize)
9586
9587 // check extent for reuse in reverse order
9588 if (prev_ep != end && prev_ep->logical_offset >= min_off) {
9589 BlobRef b = prev_ep->blob;
9590 auto bstart = prev_ep->blob_start();
9591 dout(20) << __func__ << " considering " << *b
9592 << " bstart 0x" << std::hex << bstart << std::dec << dendl;
9593 if (b->can_reuse_blob(min_alloc_size,
9594 max_bsize,
9595 offset0 - bstart,
9596 &alloc_len)) {
9597 assert(alloc_len == min_alloc_size); // expecting data always
9598 // fit into reused blob
9599 // Need to check for pending writes that want to
9600 // reuse the same pextent. The rationale is that during GC two chunks
9601 // from garbage blobs (compressed?) can share logical space within the same
9602 // AU. That in turn might be caused by an unaligned len in clone_range2.
9603 // Hence the second write would fail when attempting to reuse the blob in
9604 // do_alloc_write().
9605 if (!wctx->has_conflict(b,
9606 offset0,
9607 offset0 + alloc_len,
9608 min_alloc_size)) {
9609
9610 uint64_t chunk_size = b->get_blob().get_chunk_size(block_size);
9611 uint64_t b_off = offset - bstart;
9612 uint64_t b_off0 = b_off;
9613 _pad_zeros(&bl, &b_off0, chunk_size);
9614
9615 dout(20) << __func__ << " reuse blob " << *b << std::hex
9616 << " (0x" << b_off0 << "~" << bl.length() << ")"
9617 << " (0x" << b_off << "~" << length << ")"
9618 << std::dec << dendl;
9619
9620 o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
9621 wctx->write(offset, b, alloc_len, b_off0, bl, b_off, length,
9622 false, false);
9623 logger->inc(l_bluestore_write_small_unused);
9624 return;
9625 }
9626 }
9627 if (prev_ep != begin) {
9628 --prev_ep;
9629 any_change = true;
9630 } else {
9631 prev_ep = end; // to avoid useless first extent re-check
9632 }
9633 } // if (prev_ep != end && prev_ep->logical_offset >= min_off)
9634 } while (any_change);
9635
9636 // new blob.
9637
9638 BlobRef b = c->new_blob();
9639 uint64_t b_off = P2PHASE(offset, alloc_len);
9640 uint64_t b_off0 = b_off;
9641 _pad_zeros(&bl, &b_off0, block_size);
9642 o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
9643 wctx->write(offset, b, alloc_len, b_off0, bl, b_off, length, true, true);
9644 logger->inc(l_bluestore_write_small_new);
9645
9646 return;
9647 }
9648
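// Handle the min_alloc_size-aligned middle portion of a write.  The whole
// range is punched out of the extent map up front; then each chunk of up to
// target_blob_size is queued.  For uncompressed writes we first try to
// append to an adjacent mutable blob via can_reuse_blob(), otherwise a new
// blob is created.  Actual allocation and block device I/O happen later in
// _do_alloc_write().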
9649 void BlueStore::_do_write_big(
9650 TransContext *txc,
9651 CollectionRef &c,
9652 OnodeRef o,
9653 uint64_t offset, uint64_t length,
9654 bufferlist::iterator& blp,
9655 WriteContext *wctx)
9656 {
9657 dout(10) << __func__ << " 0x" << std::hex << offset << "~" << length
9658 << " target_blob_size 0x" << wctx->target_blob_size << std::dec
9659 << " compress " << (int)wctx->compress
9660 << dendl;
9661 logger->inc(l_bluestore_write_big);
9662 logger->inc(l_bluestore_write_big_bytes, length);
9663 o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
9664 auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size);
9665 while (length > 0) {
9666 bool new_blob = false;
9667 uint32_t l = MIN(max_bsize, length);
9668 BlobRef b;
9669 uint32_t b_off = 0;
9670
9671 // attempt to reuse an existing blob
9672 if (!wctx->compress) {
9673 // look for an existing mutable blob we can reuse
9674 auto begin = o->extent_map.extent_map.begin();
9675 auto end = o->extent_map.extent_map.end();
9676 auto ep = o->extent_map.seek_lextent(offset);
9677 auto prev_ep = ep;
9678 if (prev_ep != begin) {
9679 --prev_ep;
9680 } else {
9681 prev_ep = end; // to avoid this extent check as it's a duplicate
9682 }
9683 auto min_off = offset >= max_bsize ? offset - max_bsize : 0;
9684 // search for a suitable extent in both the forward and reverse directions
9685 // within the [offset - target_max_blob_size, offset + target_max_blob_size]
9686 // range, then check whether the blob can be reused via can_reuse_blob().
9687 bool any_change;
9688 do {
9689 any_change = false;
9690 if (ep != end && ep->logical_offset < offset + max_bsize) {
9691 if (offset >= ep->blob_start() &&
9692 ep->blob->can_reuse_blob(min_alloc_size, max_bsize,
9693 offset - ep->blob_start(),
9694 &l)) {
9695 b = ep->blob;
9696 b_off = offset - ep->blob_start();
9697 prev_ep = end; // to avoid check below
9698 dout(20) << __func__ << " reuse blob " << *b << std::hex
9699 << " (0x" << b_off << "~" << l << ")" << std::dec << dendl;
9700 } else {
9701 ++ep;
9702 any_change = true;
9703 }
9704 }
9705
9706 if (prev_ep != end && prev_ep->logical_offset >= min_off) {
9707 if (prev_ep->blob->can_reuse_blob(min_alloc_size, max_bsize,
9708 offset - prev_ep->blob_start(),
9709 &l)) {
9710 b = prev_ep->blob;
9711 b_off = offset - prev_ep->blob_start();
9712 dout(20) << __func__ << " reuse blob " << *b << std::hex
9713 << " (0x" << b_off << "~" << l << ")" << std::dec << dendl;
9714 } else if (prev_ep != begin) {
9715 --prev_ep;
9716 any_change = true;
9717 } else {
9718 prev_ep = end; // to avoid useless first extent re-check
9719 }
9720 }
9721 } while (b == nullptr && any_change);
9722 }
9723 if (b == nullptr) {
9724 b = c->new_blob();
9725 b_off = 0;
9726 new_blob = true;
9727 }
9728
9729 bufferlist t;
9730 blp.copy(l, t);
9731 wctx->write(offset, b, l, b_off, t, b_off, l, false, new_blob);
9732 offset += l;
9733 length -= l;
9734 logger->inc(l_bluestore_write_big_blobs);
9735 }
9736 }
9737
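// Allocate space and queue I/O for every blob staged in the WriteContext:
// reserve the total length from the allocator, optionally compress each
// blob (keeping the result only if it meets the required ratio), allocate
// pextents, initialize/compute checksums, record unused regions, update the
// lextent map and buffer cache, and finally queue either a deferred write
// or a direct aio_write depending on prefer_deferred_size.  Any reservation
// left over (e.g. due to compression) is returned at the end.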
9738 int BlueStore::_do_alloc_write(
9739 TransContext *txc,
9740 CollectionRef coll,
9741 OnodeRef o,
9742 WriteContext *wctx)
9743 {
9744 dout(20) << __func__ << " txc " << txc
9745 << " " << wctx->writes.size() << " blobs"
9746 << dendl;
9747
9748 uint64_t need = 0;
9749 auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size);
9750 for (auto &wi : wctx->writes) {
9751 need += wi.blob_length;
9752 }
9753 int r = alloc->reserve(need);
9754 if (r < 0) {
9755 derr << __func__ << " failed to reserve 0x" << std::hex << need << std::dec
9756 << dendl;
9757 return r;
9758 }
9759
9760 uint64_t hint = 0;
9761 CompressorRef c;
9762 double crr = 0;
9763 if (wctx->compress) {
9764 c = select_option(
9765 "compression_algorithm",
9766 compressor,
9767 [&]() {
9768 string val;
9769 if (coll->pool_opts.get(pool_opts_t::COMPRESSION_ALGORITHM, &val)) {
9770 CompressorRef cp = compressor;
9771 if (!cp || cp->get_type_name() != val) {
9772 cp = Compressor::create(cct, val);
9773 }
9774 return boost::optional<CompressorRef>(cp);
9775 }
9776 return boost::optional<CompressorRef>();
9777 }
9778 );
9779
9780 crr = select_option(
9781 "compression_required_ratio",
9782 cct->_conf->bluestore_compression_required_ratio,
9783 [&]() {
9784 double val;
9785 if(coll->pool_opts.get(pool_opts_t::COMPRESSION_REQUIRED_RATIO, &val)) {
9786 return boost::optional<double>(val);
9787 }
9788 return boost::optional<double>();
9789 }
9790 );
9791 }
9792
9793 // checksum
9794 int csum = csum_type.load();
9795 csum = select_option(
9796 "csum_type",
9797 csum,
9798 [&]() {
9799 int val;
9800 if(coll->pool_opts.get(pool_opts_t::CSUM_TYPE, &val)) {
9801 return boost::optional<int>(val);
9802 }
9803 return boost::optional<int>();
9804 }
9805 );
9806
9807 for (auto& wi : wctx->writes) {
9808 BlobRef b = wi.b;
9809 bluestore_blob_t& dblob = b->dirty_blob();
9810 uint64_t b_off = wi.b_off;
9811 bufferlist *l = &wi.bl;
9812 uint64_t final_length = wi.blob_length;
9813 uint64_t csum_length = wi.blob_length;
9814 unsigned csum_order = block_size_order;
9815 bufferlist compressed_bl;
9816 bool compressed = false;
9817 if(c && wi.blob_length > min_alloc_size) {
9818
9819 utime_t start = ceph_clock_now();
9820
9821 // compress
9822 assert(b_off == 0);
9823 assert(wi.blob_length == l->length());
9824 bluestore_compression_header_t chdr;
9825 chdr.type = c->get_type();
9826 // FIXME: memory alignment here is bad
9827 bufferlist t;
9828
9829 r = c->compress(*l, t);
9830 assert(r == 0);
9831
9832 chdr.length = t.length();
9833 ::encode(chdr, compressed_bl);
9834 compressed_bl.claim_append(t);
9835 uint64_t rawlen = compressed_bl.length();
9836 uint64_t newlen = P2ROUNDUP(rawlen, min_alloc_size);
9837 uint64_t want_len_raw = final_length * crr;
9838 uint64_t want_len = P2ROUNDUP(want_len_raw, min_alloc_size);
9839 if (newlen <= want_len && newlen < final_length) {
9840 // Cool. We compressed at least as much as we were hoping to.
9841 // pad out to min_alloc_size
9842 compressed_bl.append_zero(newlen - rawlen);
9843 logger->inc(l_bluestore_write_pad_bytes, newlen - rawlen);
9844 dout(20) << __func__ << std::hex << " compressed 0x" << wi.blob_length
9845 << " -> 0x" << rawlen << " => 0x" << newlen
9846 << " with " << c->get_type()
9847 << std::dec << dendl;
9848 txc->statfs_delta.compressed() += rawlen;
9849 txc->statfs_delta.compressed_original() += l->length();
9850 txc->statfs_delta.compressed_allocated() += newlen;
9851 l = &compressed_bl;
9852 final_length = newlen;
9853 csum_length = newlen;
9854 csum_order = ctz(newlen);
9855 dblob.set_compressed(wi.blob_length, rawlen);
9856 compressed = true;
9857 logger->inc(l_bluestore_compress_success_count);
9858 } else {
9859 dout(20) << __func__ << std::hex << " 0x" << l->length()
9860 << " compressed to 0x" << rawlen << " -> 0x" << newlen
9861 << " with " << c->get_type()
9862 << ", which is more than required 0x" << want_len_raw
9863 << " -> 0x" << want_len
9864 << ", leaving uncompressed"
9865 << std::dec << dendl;
9866 logger->inc(l_bluestore_compress_rejected_count);
9867 }
9868 logger->tinc(l_bluestore_compress_lat,
9869 ceph_clock_now() - start);
9870 }
9871 if (!compressed && wi.new_blob) {
9872 // initialize newly created blob only
9873 assert(dblob.is_mutable());
9874 if (l->length() != wi.blob_length) {
9875 // hrm, maybe we could do better here, but let's not bother.
9876 dout(20) << __func__ << " forcing csum_order to block_size_order "
9877 << block_size_order << dendl;
9878 csum_order = block_size_order;
9879 } else {
9880 csum_order = std::min(wctx->csum_order, ctz(l->length()));
9881 }
9882 // try to align the blob with max_blob_size to improve
9883 // its reuse ratio, e.g. in the case of a reverse write
9884 uint32_t suggested_boff =
9885 (wi.logical_offset - (wi.b_off0 - wi.b_off)) % max_bsize;
9886 if ((suggested_boff % (1 << csum_order)) == 0 &&
9887 suggested_boff + final_length <= max_bsize &&
9888 suggested_boff > b_off) {
9889 dout(20) << __func__ << " forcing blob_offset to "
9890 << std::hex << suggested_boff << std::dec << dendl;
9891 assert(suggested_boff >= b_off);
9892 csum_length += suggested_boff - b_off;
9893 b_off = suggested_boff;
9894 }
9895 }
9896
9897 AllocExtentVector extents;
9898 extents.reserve(4); // 4 should be (more than) enough for most allocations
9899 int64_t got = alloc->allocate(final_length, min_alloc_size,
9900 max_alloc_size.load(),
9901 hint, &extents);
9902 assert(got == (int64_t)final_length);
9903 need -= got;
9904 txc->statfs_delta.allocated() += got;
9905 for (auto& p : extents) {
9906 bluestore_pextent_t e = bluestore_pextent_t(p);
9907 txc->allocated.insert(e.offset, e.length);
9908 hint = p.end();
9909 }
9910 dblob.allocated(P2ALIGN(b_off, min_alloc_size), final_length, extents);
9911
9912 dout(20) << __func__ << " blob " << *b
9913 << " csum_type " << Checksummer::get_csum_type_string(csum)
9914 << " csum_order " << csum_order
9915 << " csum_length 0x" << std::hex << csum_length << std::dec
9916 << dendl;
9917
9918 if (csum != Checksummer::CSUM_NONE) {
9919 if (!dblob.has_csum()) {
9920 dblob.init_csum(csum, csum_order, csum_length);
9921 }
9922 dblob.calc_csum(b_off, *l);
9923 }
9924 if (wi.mark_unused) {
9925 auto b_end = b_off + wi.bl.length();
9926 if (b_off) {
9927 dblob.add_unused(0, b_off);
9928 }
9929 if (b_end < wi.blob_length) {
9930 dblob.add_unused(b_end, wi.blob_length - b_end);
9931 }
9932 }
9933
9934 Extent *le = o->extent_map.set_lextent(coll, wi.logical_offset,
9935 b_off + (wi.b_off0 - wi.b_off),
9936 wi.length0,
9937 wi.b,
9938 nullptr);
9939 wi.b->dirty_blob().mark_used(le->blob_offset, le->length);
9940 txc->statfs_delta.stored() += le->length;
9941 dout(20) << __func__ << " lex " << *le << dendl;
9942 _buffer_cache_write(txc, wi.b, b_off, wi.bl,
9943 wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
9944
9945 // queue io
9946 if (!g_conf->bluestore_debug_omit_block_device_write) {
9947 if (l->length() <= prefer_deferred_size.load()) {
9948 dout(20) << __func__ << " deferring small 0x" << std::hex
9949 << l->length() << std::dec << " write via deferred" << dendl;
9950 bluestore_deferred_op_t *op = _get_deferred_op(txc, o);
9951 op->op = bluestore_deferred_op_t::OP_WRITE;
9952 int r = b->get_blob().map(
9953 b_off, l->length(),
9954 [&](uint64_t offset, uint64_t length) {
9955 op->extents.emplace_back(bluestore_pextent_t(offset, length));
9956 return 0;
9957 });
9958 assert(r == 0);
9959 op->data = *l;
9960 } else {
9961 b->get_blob().map_bl(
9962 b_off, *l,
9963 [&](uint64_t offset, bufferlist& t) {
9964 bdev->aio_write(offset, t, &txc->ioc, false);
9965 });
9966 }
9967 }
9968 }
9969 if (need > 0) {
9970 alloc->unreserve(need);
9971 }
9972 return 0;
9973 }
9974
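// Release the old extents displaced by a write: adjust statfs counters,
// drop shared-blob references (collecting candidates for unsharing in
// maybe_unshared_blobs), discard buffers for space that is no longer
// allocated, record the freed pextents for release at commit time, and
// prune spanning blobs that are no longer referenced.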
9975 void BlueStore::_wctx_finish(
9976 TransContext *txc,
9977 CollectionRef& c,
9978 OnodeRef o,
9979 WriteContext *wctx,
9980 set<SharedBlob*> *maybe_unshared_blobs)
9981 {
9982 auto oep = wctx->old_extents.begin();
9983 while (oep != wctx->old_extents.end()) {
9984 auto &lo = *oep;
9985 oep = wctx->old_extents.erase(oep);
9986 dout(20) << __func__ << " lex_old " << lo.e << dendl;
9987 BlobRef b = lo.e.blob;
9988 const bluestore_blob_t& blob = b->get_blob();
9989 if (blob.is_compressed()) {
9990 if (lo.blob_empty) {
9991 txc->statfs_delta.compressed() -= blob.get_compressed_payload_length();
9992 }
9993 txc->statfs_delta.compressed_original() -= lo.e.length;
9994 }
9995 auto& r = lo.r;
9996 txc->statfs_delta.stored() -= lo.e.length;
9997 if (!r.empty()) {
9998 dout(20) << __func__ << " blob release " << r << dendl;
9999 if (blob.is_shared()) {
10000 PExtentVector final;
10001 c->load_shared_blob(b->shared_blob);
10002 for (auto e : r) {
10003 b->shared_blob->put_ref(
10004 e.offset, e.length, &final,
10005 b->is_referenced() ? nullptr : maybe_unshared_blobs);
10006 }
10007 dout(20) << __func__ << " shared_blob release " << final
10008 << " from " << *b->shared_blob << dendl;
10009 txc->write_shared_blob(b->shared_blob);
10010 r.clear();
10011 r.swap(final);
10012 }
10013 }
10014 // we can't invalidate our logical extents as we drop them because
10015 // other lextents (either in our onode or others) may still
10016 // reference them, but we can throw out anything that is no
10017 // longer allocated. Note that this will leave behind edge bits
10018 // that are no longer referenced but not deallocated (until they
10019 // age out of the cache naturally).
10020 b->discard_unallocated(c.get());
10021 for (auto e : r) {
10022 dout(20) << __func__ << " release " << e << dendl;
10023 txc->released.insert(e.offset, e.length);
10024 txc->statfs_delta.allocated() -= e.length;
10025 if (blob.is_compressed()) {
10026 txc->statfs_delta.compressed_allocated() -= e.length;
10027 }
10028 }
10029 delete &lo;
10030 if (b->is_spanning() && !b->is_referenced()) {
10031 dout(20) << __func__ << " spanning_blob_map removing empty " << *b
10032 << dendl;
10033 o->extent_map.spanning_blob_map.erase(b->id);
10034 }
10035 }
10036 }
10037
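// Split a write at min_alloc_size boundaries: an unaligned head and tail go
// through _do_write_small(), the aligned middle through _do_write_big().
// A write that stays inside a single min_alloc_size unit, unless it covers
// the whole unit exactly, is handled as one small write.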
10038 void BlueStore::_do_write_data(
10039 TransContext *txc,
10040 CollectionRef& c,
10041 OnodeRef o,
10042 uint64_t offset,
10043 uint64_t length,
10044 bufferlist& bl,
10045 WriteContext *wctx)
10046 {
10047 uint64_t end = offset + length;
10048 bufferlist::iterator p = bl.begin();
10049
10050 if (offset / min_alloc_size == (end - 1) / min_alloc_size &&
10051 (length != min_alloc_size)) {
10052 // the write falls entirely within a single min_alloc_size unit
10053 _do_write_small(txc, c, o, offset, length, p, wctx);
10054 } else {
10055 uint64_t head_offset, head_length;
10056 uint64_t middle_offset, middle_length;
10057 uint64_t tail_offset, tail_length;
10058
10059 head_offset = offset;
10060 head_length = P2NPHASE(offset, min_alloc_size);
10061
10062 tail_offset = P2ALIGN(end, min_alloc_size);
10063 tail_length = P2PHASE(end, min_alloc_size);
10064
10065 middle_offset = head_offset + head_length;
10066 middle_length = length - head_length - tail_length;
10067
10068 if (head_length) {
10069 _do_write_small(txc, c, o, head_offset, head_length, p, wctx);
10070 }
10071
10072 if (middle_length) {
10073 _do_write_big(txc, c, o, middle_offset, middle_length, p, wctx);
10074 }
10075
10076 if (tail_length) {
10077 _do_write_small(txc, c, o, tail_offset, tail_length, p, wctx);
10078 }
10079 }
10080 }
10081
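// Decide buffered vs. direct writeback, checksum granularity, whether to
// compress, and the target blob size for this write, based on the fadvise
// flags, the object's allocation hints, per-pool options, and the global
// configuration.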
10082 void BlueStore::_choose_write_options(
10083 CollectionRef& c,
10084 OnodeRef o,
10085 uint32_t fadvise_flags,
10086 WriteContext *wctx)
10087 {
10088 if (fadvise_flags & CEPH_OSD_OP_FLAG_FADVISE_WILLNEED) {
10089 dout(20) << __func__ << " will do buffered write" << dendl;
10090 wctx->buffered = true;
10091 } else if (cct->_conf->bluestore_default_buffered_write &&
10092 (fadvise_flags & (CEPH_OSD_OP_FLAG_FADVISE_DONTNEED |
10093 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE)) == 0) {
10094 dout(20) << __func__ << " defaulting to buffered write" << dendl;
10095 wctx->buffered = true;
10096 }
10097
10098 // apply basic csum block size
10099 wctx->csum_order = block_size_order;
10100
10101 // compression parameters
10102 unsigned alloc_hints = o->onode.alloc_hint_flags;
10103 auto cm = select_option(
10104 "compression_mode",
10105 comp_mode.load(),
10106 [&]() {
10107 string val;
10108 if(c->pool_opts.get(pool_opts_t::COMPRESSION_MODE, &val)) {
10109 return boost::optional<Compressor::CompressionMode>(
10110 Compressor::get_comp_mode_type(val));
10111 }
10112 return boost::optional<Compressor::CompressionMode>();
10113 }
10114 );
10115
10116 wctx->compress = (cm != Compressor::COMP_NONE) &&
10117 ((cm == Compressor::COMP_FORCE) ||
10118 (cm == Compressor::COMP_AGGRESSIVE &&
10119 (alloc_hints & CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE) == 0) ||
10120 (cm == Compressor::COMP_PASSIVE &&
10121 (alloc_hints & CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE)));
10122
10123 if ((alloc_hints & CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ) &&
10124 (alloc_hints & CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ) == 0 &&
10125 (alloc_hints & (CEPH_OSD_ALLOC_HINT_FLAG_IMMUTABLE |
10126 CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY)) &&
10127 (alloc_hints & CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE) == 0) {
10128
10129 dout(20) << __func__ << " will prefer large blob and csum sizes" << dendl;
10130
10131 if (o->onode.expected_write_size) {
10132 wctx->csum_order = std::max(min_alloc_size_order,
10133 (uint8_t)ctz(o->onode.expected_write_size));
10134 } else {
10135 wctx->csum_order = min_alloc_size_order;
10136 }
10137
10138 if (wctx->compress) {
10139 wctx->target_blob_size = select_option(
10140 "compression_max_blob_size",
10141 comp_max_blob_size.load(),
10142 [&]() {
10143 int val;
10144 if(c->pool_opts.get(pool_opts_t::COMPRESSION_MAX_BLOB_SIZE, &val)) {
10145 return boost::optional<uint64_t>((uint64_t)val);
10146 }
10147 return boost::optional<uint64_t>();
10148 }
10149 );
10150 }
10151 } else {
10152 if (wctx->compress) {
10153 wctx->target_blob_size = select_option(
10154 "compression_min_blob_size",
10155 comp_min_blob_size.load(),
10156 [&]() {
10157 int val;
10158 if(c->pool_opts.get(pool_opts_t::COMPRESSION_MIN_BLOB_SIZE, &val)) {
10159 return boost::optional<uint64_t>((uint64_t)val);
10160 }
10161 return boost::optional<uint64_t>();
10162 }
10163 );
10164 }
10165 }
10166
10167 uint64_t max_bsize = max_blob_size.load();
10168 if (wctx->target_blob_size == 0 || wctx->target_blob_size > max_bsize) {
10169 wctx->target_blob_size = max_bsize;
10170 }
10171
10172 // set the min blob size floor at 2x the min_alloc_size, or else we
10173 // won't be able to allocate a smaller extent for the compressed
10174 // data.
10175 if (wctx->compress &&
10176 wctx->target_blob_size < min_alloc_size * 2) {
10177 wctx->target_blob_size = min_alloc_size * 2;
10178 }
10179
10180 dout(20) << __func__ << " prefer csum_order " << wctx->csum_order
10181 << " target_blob_size 0x" << std::hex << wctx->target_blob_size
10182 << std::dec << dendl;
10183 }
10184
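// Rewrite the ranges selected by the GarbageCollector: read each extent
// back, push it through a WriteContext forked from the original write's
// context, and widen [dirty_start, dirty_end) so the caller re-dirties the
// affected portion of the extent map.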
10185 int BlueStore::_do_gc(
10186 TransContext *txc,
10187 CollectionRef& c,
10188 OnodeRef o,
10189 const GarbageCollector& gc,
10190 const WriteContext& wctx,
10191 uint64_t *dirty_start,
10192 uint64_t *dirty_end)
10193 {
10194 auto& extents_to_collect = gc.get_extents_to_collect();
10195
10196 WriteContext wctx_gc;
10197 wctx_gc.fork(wctx); // make a clone for garbage collection
10198
10199 for (auto it = extents_to_collect.begin();
10200 it != extents_to_collect.end();
10201 ++it) {
10202 bufferlist bl;
10203 int r = _do_read(c.get(), o, it->offset, it->length, bl, 0);
10204 assert(r == (int)it->length);
10205
10206 o->extent_map.fault_range(db, it->offset, it->length);
10207 _do_write_data(txc, c, o, it->offset, it->length, bl, &wctx_gc);
10208 logger->inc(l_bluestore_gc_merged, it->length);
10209
10210 if (*dirty_start > it->offset) {
10211 *dirty_start = it->offset;
10212 }
10213
10214 if (*dirty_end < it->offset + it->length) {
10215 *dirty_end = it->offset + it->length;
10216 }
10217 }
10218
10219 dout(30) << __func__ << " alloc write" << dendl;
10220 int r = _do_alloc_write(txc, c, o, &wctx_gc);
10221 if (r < 0) {
10222 derr << __func__ << " _do_alloc_write failed with " << cpp_strerror(r)
10223 << dendl;
10224 return r;
10225 }
10226
10227 _wctx_finish(txc, c, o, &wctx_gc);
10228 return 0;
10229 }
10230
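// Top-level write path: choose write options, fault in the affected extent
// map range, stage the data via _do_write_data(), allocate and queue I/O
// via _do_alloc_write(), estimate the garbage collection benefit before
// _wctx_finish() consumes old_extents, extend the object size if needed,
// optionally run GC, and finally compress and dirty the extent map.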
10231 int BlueStore::_do_write(
10232 TransContext *txc,
10233 CollectionRef& c,
10234 OnodeRef o,
10235 uint64_t offset,
10236 uint64_t length,
10237 bufferlist& bl,
10238 uint32_t fadvise_flags)
10239 {
10240 int r = 0;
10241
10242 dout(20) << __func__
10243 << " " << o->oid
10244 << " 0x" << std::hex << offset << "~" << length
10245 << " - have 0x" << o->onode.size
10246 << " (" << std::dec << o->onode.size << ")"
10247 << " bytes"
10248 << " fadvise_flags 0x" << std::hex << fadvise_flags << std::dec
10249 << dendl;
10250 _dump_onode(o);
10251
10252 if (length == 0) {
10253 return 0;
10254 }
10255
10256 uint64_t end = offset + length;
10257
10258 GarbageCollector gc(c->store->cct);
10259 int64_t benefit;
10260 auto dirty_start = offset;
10261 auto dirty_end = end;
10262
10263 WriteContext wctx;
10264 _choose_write_options(c, o, fadvise_flags, &wctx);
10265 o->extent_map.fault_range(db, offset, length);
10266 _do_write_data(txc, c, o, offset, length, bl, &wctx);
10267 r = _do_alloc_write(txc, c, o, &wctx);
10268 if (r < 0) {
10269 derr << __func__ << " _do_alloc_write failed with " << cpp_strerror(r)
10270 << dendl;
10271 goto out;
10272 }
10273
10274 // NB: _wctx_finish() will empty old_extents,
10275 // so we must do the GC estimation before calling it
10276 benefit = gc.estimate(offset,
10277 length,
10278 o->extent_map,
10279 wctx.old_extents,
10280 min_alloc_size);
10281
10282 _wctx_finish(txc, c, o, &wctx);
10283 if (end > o->onode.size) {
10284 dout(20) << __func__ << " extending size to 0x" << std::hex << end
10285 << std::dec << dendl;
10286 o->onode.size = end;
10287 }
10288
10289 if (benefit >= g_conf->bluestore_gc_enable_total_threshold) {
10290 if (!gc.get_extents_to_collect().empty()) {
10291 dout(20) << __func__ << " perform garbage collection, "
10292 << "expected benefit = " << benefit << " AUs" << dendl;
10293 r = _do_gc(txc, c, o, gc, wctx, &dirty_start, &dirty_end);
10294 if (r < 0) {
10295 derr << __func__ << " _do_gc failed with " << cpp_strerror(r)
10296 << dendl;
10297 goto out;
10298 }
10299 }
10300 }
10301
10302 o->extent_map.compress_extent_map(dirty_start, dirty_end - dirty_start);
10303 o->extent_map.dirty_range(dirty_start, dirty_end - dirty_start);
10304
10305 r = 0;
10306
10307 out:
10308 return r;
10309 }
10310
10311 int BlueStore::_write(TransContext *txc,
10312 CollectionRef& c,
10313 OnodeRef& o,
10314 uint64_t offset, size_t length,
10315 bufferlist& bl,
10316 uint32_t fadvise_flags)
10317 {
10318 dout(15) << __func__ << " " << c->cid << " " << o->oid
10319 << " 0x" << std::hex << offset << "~" << length << std::dec
10320 << dendl;
10321 _assign_nid(txc, o);
10322 int r = _do_write(txc, c, o, offset, length, bl, fadvise_flags);
10323 txc->write_onode(o);
10324
10325 dout(10) << __func__ << " " << c->cid << " " << o->oid
10326 << " 0x" << std::hex << offset << "~" << length << std::dec
10327 << " = " << r << dendl;
10328 return r;
10329 }
10330
10331 int BlueStore::_zero(TransContext *txc,
10332 CollectionRef& c,
10333 OnodeRef& o,
10334 uint64_t offset, size_t length)
10335 {
10336 dout(15) << __func__ << " " << c->cid << " " << o->oid
10337 << " 0x" << std::hex << offset << "~" << length << std::dec
10338 << dendl;
10339 _assign_nid(txc, o);
10340 int r = _do_zero(txc, c, o, offset, length);
10341 dout(10) << __func__ << " " << c->cid << " " << o->oid
10342 << " 0x" << std::hex << offset << "~" << length << std::dec
10343 << " = " << r << dendl;
10344 return r;
10345 }
10346
10347 int BlueStore::_do_zero(TransContext *txc,
10348 CollectionRef& c,
10349 OnodeRef& o,
10350 uint64_t offset, size_t length)
10351 {
10352 dout(15) << __func__ << " " << c->cid << " " << o->oid
10353 << " 0x" << std::hex << offset << "~" << length << std::dec
10354 << dendl;
10355 int r = 0;
10356
10357 _dump_onode(o);
10358
10359 WriteContext wctx;
10360 o->extent_map.fault_range(db, offset, length);
10361 o->extent_map.punch_hole(c, offset, length, &wctx.old_extents);
10362 o->extent_map.dirty_range(offset, length);
10363 _wctx_finish(txc, c, o, &wctx);
10364
10365 if (offset + length > o->onode.size) {
10366 o->onode.size = offset + length;
10367 dout(20) << __func__ << " extending size to " << offset + length
10368 << dendl;
10369 }
10370 txc->write_onode(o);
10371
10372 dout(10) << __func__ << " " << c->cid << " " << o->oid
10373 << " 0x" << std::hex << offset << "~" << length << std::dec
10374 << " = " << r << dendl;
10375 return r;
10376 }
10377
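// Set the object size to 'offset'.  When shrinking, punch out everything
// beyond the new size, release the old extents (optionally collecting blobs
// that may now be unshareable), and request a reshard if extent map shards
// now lie past EOF.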
10378 void BlueStore::_do_truncate(
10379 TransContext *txc, CollectionRef& c, OnodeRef o, uint64_t offset,
10380 set<SharedBlob*> *maybe_unshared_blobs)
10381 {
10382 dout(15) << __func__ << " " << c->cid << " " << o->oid
10383 << " 0x" << std::hex << offset << std::dec << dendl;
10384
10385 _dump_onode(o, 30);
10386
10387 if (offset == o->onode.size)
10388 return;
10389
10390 if (offset < o->onode.size) {
10391 WriteContext wctx;
10392 uint64_t length = o->onode.size - offset;
10393 o->extent_map.fault_range(db, offset, length);
10394 o->extent_map.punch_hole(c, offset, length, &wctx.old_extents);
10395 o->extent_map.dirty_range(offset, length);
10396 _wctx_finish(txc, c, o, &wctx, maybe_unshared_blobs);
10397
10398 // if we have shards past EOF, ask for a reshard
10399 if (!o->onode.extent_map_shards.empty() &&
10400 o->onode.extent_map_shards.back().offset >= offset) {
10401 dout(10) << __func__ << " request reshard past EOF" << dendl;
10402 if (offset) {
10403 o->extent_map.request_reshard(offset - 1, offset + length);
10404 } else {
10405 o->extent_map.request_reshard(0, length);
10406 }
10407 }
10408 }
10409
10410 o->onode.size = offset;
10411
10412 txc->write_onode(o);
10413 }
10414
10415 void BlueStore::_truncate(TransContext *txc,
10416 CollectionRef& c,
10417 OnodeRef& o,
10418 uint64_t offset)
10419 {
10420 dout(15) << __func__ << " " << c->cid << " " << o->oid
10421 << " 0x" << std::hex << offset << std::dec
10422 << dendl;
10423 _do_truncate(txc, c, o, offset);
10424 }
10425
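// Remove an object: truncate to zero, clear omap data, delete the extent
// shard keys and the onode key.  If the object is a clone (has a generation)
// and some of its shared blobs are no longer referenced elsewhere, check the
// head object and, where the head now holds all remaining references, clear
// the SHARED flag and drop the shared blob record.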
10426 int BlueStore::_do_remove(
10427 TransContext *txc,
10428 CollectionRef& c,
10429 OnodeRef o)
10430 {
10431 set<SharedBlob*> maybe_unshared_blobs;
10432 bool is_gen = !o->oid.is_no_gen();
10433 _do_truncate(txc, c, o, 0, is_gen ? &maybe_unshared_blobs : nullptr);
10434 if (o->onode.has_omap()) {
10435 o->flush();
10436 _do_omap_clear(txc, o->onode.nid);
10437 }
10438 o->exists = false;
10439 string key;
10440 for (auto &s : o->extent_map.shards) {
10441 dout(20) << __func__ << " removing shard 0x" << std::hex
10442 << s.shard_info->offset << std::dec << dendl;
10443 generate_extent_shard_key_and_apply(o->key, s.shard_info->offset, &key,
10444 [&](const string& final_key) {
10445 txc->t->rmkey(PREFIX_OBJ, final_key);
10446 }
10447 );
10448 }
10449 txc->t->rmkey(PREFIX_OBJ, o->key.c_str(), o->key.size());
10450 txc->removed(o);
10451 o->extent_map.clear();
10452 o->onode = bluestore_onode_t();
10453 _debug_obj_on_delete(o->oid);
10454
10455 if (!is_gen || maybe_unshared_blobs.empty()) {
10456 return 0;
10457 }
10458
10459 // see if we can unshare blobs still referenced by the head
10460 dout(10) << __func__ << " gen and maybe_unshared_blobs "
10461 << maybe_unshared_blobs << dendl;
10462 ghobject_t nogen = o->oid;
10463 nogen.generation = ghobject_t::NO_GEN;
10464 OnodeRef h = c->onode_map.lookup(nogen);
10465
10466 if (!h || !h->exists) {
10467 return 0;
10468 }
10469
10470 dout(20) << __func__ << " checking for unshareable blobs on " << h
10471 << " " << h->oid << dendl;
10472 map<SharedBlob*,bluestore_extent_ref_map_t> expect;
10473 for (auto& e : h->extent_map.extent_map) {
10474 const bluestore_blob_t& b = e.blob->get_blob();
10475 SharedBlob *sb = e.blob->shared_blob.get();
10476 if (b.is_shared() &&
10477 sb->loaded &&
10478 maybe_unshared_blobs.count(sb)) {
10479 b.map(e.blob_offset, e.length, [&](uint64_t off, uint64_t len) {
10480 expect[sb].get(off, len);
10481 return 0;
10482 });
10483 }
10484 }
10485
10486 vector<SharedBlob*> unshared_blobs;
10487 unshared_blobs.reserve(maybe_unshared_blobs.size());
10488 for (auto& p : expect) {
10489 dout(20) << " ? " << *p.first << " vs " << p.second << dendl;
10490 if (p.first->persistent->ref_map == p.second) {
10491 SharedBlob *sb = p.first;
10492 dout(20) << __func__ << " unsharing " << *sb << dendl;
10493 unshared_blobs.push_back(sb);
10494 txc->unshare_blob(sb);
10495 uint64_t sbid = c->make_blob_unshared(sb);
10496 string key;
10497 get_shared_blob_key(sbid, &key);
10498 txc->t->rmkey(PREFIX_SHARED_BLOB, key);
10499 }
10500 }
10501
10502 if (unshared_blobs.empty()) {
10503 return 0;
10504 }
10505
10506 uint32_t b_start = OBJECT_MAX_SIZE;
10507 uint32_t b_end = 0;
10508 for (auto& e : h->extent_map.extent_map) {
10509 const bluestore_blob_t& b = e.blob->get_blob();
10510 SharedBlob *sb = e.blob->shared_blob.get();
10511 if (b.is_shared() &&
10512 std::find(unshared_blobs.begin(), unshared_blobs.end(),
10513 sb) != unshared_blobs.end()) {
10514 dout(20) << __func__ << " unsharing " << e << dendl;
10515 bluestore_blob_t& blob = e.blob->dirty_blob();
10516 blob.clear_flag(bluestore_blob_t::FLAG_SHARED);
10517 if (e.logical_offset < b_start) {
10518 b_start = e.logical_offset;
10519 }
10520 if (e.logical_end() > b_end) {
10521 b_end = e.logical_end();
10522 }
10523 }
10524 }
10525
10526 assert(b_end > b_start);
10527 h->extent_map.dirty_range(b_start, b_end - b_start);
10528 txc->write_onode(h);
10529
10530 return 0;
10531 }
10532
10533 int BlueStore::_remove(TransContext *txc,
10534 CollectionRef& c,
10535 OnodeRef &o)
10536 {
10537 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10538 int r = _do_remove(txc, c, o);
10539 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10540 return r;
10541 }
10542
10543 int BlueStore::_setattr(TransContext *txc,
10544 CollectionRef& c,
10545 OnodeRef& o,
10546 const string& name,
10547 bufferptr& val)
10548 {
10549 dout(15) << __func__ << " " << c->cid << " " << o->oid
10550 << " " << name << " (" << val.length() << " bytes)"
10551 << dendl;
10552 int r = 0;
10553 if (val.is_partial())
10554 o->onode.attrs[name.c_str()] = bufferptr(val.c_str(), val.length());
10555 else
10556 o->onode.attrs[name.c_str()] = val;
10557 txc->write_onode(o);
10558 dout(10) << __func__ << " " << c->cid << " " << o->oid
10559 << " " << name << " (" << val.length() << " bytes)"
10560 << " = " << r << dendl;
10561 return r;
10562 }
10563
10564 int BlueStore::_setattrs(TransContext *txc,
10565 CollectionRef& c,
10566 OnodeRef& o,
10567 const map<string,bufferptr>& aset)
10568 {
10569 dout(15) << __func__ << " " << c->cid << " " << o->oid
10570 << " " << aset.size() << " keys"
10571 << dendl;
10572 int r = 0;
10573 for (map<string,bufferptr>::const_iterator p = aset.begin();
10574 p != aset.end(); ++p) {
10575 if (p->second.is_partial())
10576 o->onode.attrs[p->first.c_str()] =
10577 bufferptr(p->second.c_str(), p->second.length());
10578 else
10579 o->onode.attrs[p->first.c_str()] = p->second;
10580 }
10581 txc->write_onode(o);
10582 dout(10) << __func__ << " " << c->cid << " " << o->oid
10583 << " " << aset.size() << " keys"
10584 << " = " << r << dendl;
10585 return r;
10586 }
10587
10588
10589 int BlueStore::_rmattr(TransContext *txc,
10590 CollectionRef& c,
10591 OnodeRef& o,
10592 const string& name)
10593 {
10594 dout(15) << __func__ << " " << c->cid << " " << o->oid
10595 << " " << name << dendl;
10596 int r = 0;
10597 auto it = o->onode.attrs.find(name.c_str());
10598 if (it == o->onode.attrs.end())
10599 goto out;
10600
10601 o->onode.attrs.erase(it);
10602 txc->write_onode(o);
10603
10604 out:
10605 dout(10) << __func__ << " " << c->cid << " " << o->oid
10606 << " " << name << " = " << r << dendl;
10607 return r;
10608 }
10609
10610 int BlueStore::_rmattrs(TransContext *txc,
10611 CollectionRef& c,
10612 OnodeRef& o)
10613 {
10614 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10615 int r = 0;
10616
10617 if (o->onode.attrs.empty())
10618 goto out;
10619
10620 o->onode.attrs.clear();
10621 txc->write_onode(o);
10622
10623 out:
10624 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10625 return r;
10626 }
10627
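// Delete every omap key belonging to the given nid, i.e. all keys between
// the omap header key and the omap tail key for that id.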
10628 void BlueStore::_do_omap_clear(TransContext *txc, uint64_t id)
10629 {
10630 KeyValueDB::Iterator it = db->get_iterator(PREFIX_OMAP);
10631 string prefix, tail;
10632 get_omap_header(id, &prefix);
10633 get_omap_tail(id, &tail);
10634 it->lower_bound(prefix);
10635 while (it->valid()) {
10636 if (it->key() >= tail) {
10637 dout(30) << __func__ << " stop at " << pretty_binary_string(tail)
10638 << dendl;
10639 break;
10640 }
10641 txc->t->rmkey(PREFIX_OMAP, it->key());
10642 dout(30) << __func__ << " rm " << pretty_binary_string(it->key()) << dendl;
10643 it->next();
10644 }
10645 }
10646
10647 int BlueStore::_omap_clear(TransContext *txc,
10648 CollectionRef& c,
10649 OnodeRef& o)
10650 {
10651 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10652 int r = 0;
10653 if (o->onode.has_omap()) {
10654 o->flush();
10655 _do_omap_clear(txc, o->onode.nid);
10656 o->onode.clear_omap_flag();
10657 txc->write_onode(o);
10658 }
10659 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10660 return r;
10661 }
10662
10663 int BlueStore::_omap_setkeys(TransContext *txc,
10664 CollectionRef& c,
10665 OnodeRef& o,
10666 bufferlist &bl)
10667 {
10668 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10669 int r;
10670 bufferlist::iterator p = bl.begin();
10671 __u32 num;
10672 if (!o->onode.has_omap()) {
10673 o->onode.set_omap_flag();
10674 txc->write_onode(o);
10675 } else {
10676 txc->note_modified_object(o);
10677 }
10678 string final_key;
10679 _key_encode_u64(o->onode.nid, &final_key);
10680 final_key.push_back('.');
10681 ::decode(num, p);
10682 while (num--) {
10683 string key;
10684 bufferlist value;
10685 ::decode(key, p);
10686 ::decode(value, p);
10687 final_key.resize(9); // keep prefix
10688 final_key += key;
10689 dout(30) << __func__ << " " << pretty_binary_string(final_key)
10690 << " <- " << key << dendl;
10691 txc->t->set(PREFIX_OMAP, final_key, value);
10692 }
10693 r = 0;
10694 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10695 return r;
10696 }
10697
10698 int BlueStore::_omap_setheader(TransContext *txc,
10699 CollectionRef& c,
10700 OnodeRef &o,
10701 bufferlist& bl)
10702 {
10703 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10704 int r;
10705 string key;
10706 if (!o->onode.has_omap()) {
10707 o->onode.set_omap_flag();
10708 txc->write_onode(o);
10709 } else {
10710 txc->note_modified_object(o);
10711 }
10712 get_omap_header(o->onode.nid, &key);
10713 txc->t->set(PREFIX_OMAP, key, bl);
10714 r = 0;
10715 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10716 return r;
10717 }
10718
10719 int BlueStore::_omap_rmkeys(TransContext *txc,
10720 CollectionRef& c,
10721 OnodeRef& o,
10722 bufferlist& bl)
10723 {
10724 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10725 int r = 0;
10726 bufferlist::iterator p = bl.begin();
10727 __u32 num;
10728 string final_key;
10729
10730 if (!o->onode.has_omap()) {
10731 goto out;
10732 }
10733 _key_encode_u64(o->onode.nid, &final_key);
10734 final_key.push_back('.');
10735 ::decode(num, p);
10736 while (num--) {
10737 string key;
10738 ::decode(key, p);
10739 final_key.resize(9); // keep prefix
10740 final_key += key;
10741 dout(30) << __func__ << " rm " << pretty_binary_string(final_key)
10742 << " <- " << key << dendl;
10743 txc->t->rmkey(PREFIX_OMAP, final_key);
10744 }
10745 txc->note_modified_object(o);
10746
10747 out:
10748 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10749 return r;
10750 }
10751
10752 int BlueStore::_omap_rmkey_range(TransContext *txc,
10753 CollectionRef& c,
10754 OnodeRef& o,
10755 const string& first, const string& last)
10756 {
10757 dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
10758 KeyValueDB::Iterator it;
10759 string key_first, key_last;
10760 int r = 0;
10761 if (!o->onode.has_omap()) {
10762 goto out;
10763 }
10764 o->flush();
10765 it = db->get_iterator(PREFIX_OMAP);
10766 get_omap_key(o->onode.nid, first, &key_first);
10767 get_omap_key(o->onode.nid, last, &key_last);
10768 it->lower_bound(key_first);
10769 while (it->valid()) {
10770 if (it->key() >= key_last) {
10771 dout(30) << __func__ << " stop at " << pretty_binary_string(key_last)
10772 << dendl;
10773 break;
10774 }
10775 txc->t->rmkey(PREFIX_OMAP, it->key());
10776 dout(30) << __func__ << " rm " << pretty_binary_string(it->key()) << dendl;
10777 it->next();
10778 }
10779 txc->note_modified_object(o);
10780
10781 out:
10782 dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
10783 return r;
10784 }
10785
10786 int BlueStore::_set_alloc_hint(
10787 TransContext *txc,
10788 CollectionRef& c,
10789 OnodeRef& o,
10790 uint64_t expected_object_size,
10791 uint64_t expected_write_size,
10792 uint32_t flags)
10793 {
10794 dout(15) << __func__ << " " << c->cid << " " << o->oid
10795 << " object_size " << expected_object_size
10796 << " write_size " << expected_write_size
10797 << " flags " << ceph_osd_alloc_hint_flag_string(flags)
10798 << dendl;
10799 int r = 0;
10800 o->onode.expected_object_size = expected_object_size;
10801 o->onode.expected_write_size = expected_write_size;
10802 o->onode.alloc_hint_flags = flags;
10803 txc->write_onode(o);
10804 dout(10) << __func__ << " " << c->cid << " " << o->oid
10805 << " object_size " << expected_object_size
10806 << " write_size " << expected_write_size
10807 << " flags " << ceph_osd_alloc_hint_flag_string(flags)
10808 << " = " << r << dendl;
10809 return r;
10810 }
10811
10812 int BlueStore::_clone(TransContext *txc,
10813 CollectionRef& c,
10814 OnodeRef& oldo,
10815 OnodeRef& newo)
10816 {
10817 dout(15) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
10818 << newo->oid << dendl;
10819 int r = 0;
10820 if (oldo->oid.hobj.get_hash() != newo->oid.hobj.get_hash()) {
10821 derr << __func__ << " mismatched hash on " << oldo->oid
10822 << " and " << newo->oid << dendl;
10823 return -EINVAL;
10824 }
10825
10826 _assign_nid(txc, newo);
10827
10828 // clone data
10829 oldo->flush();
10830 _do_truncate(txc, c, newo, 0);
10831 if (cct->_conf->bluestore_clone_cow) {
10832 _do_clone_range(txc, c, oldo, newo, 0, oldo->onode.size, 0);
10833 } else {
10834 bufferlist bl;
10835 r = _do_read(c.get(), oldo, 0, oldo->onode.size, bl, 0);
10836 if (r < 0)
10837 goto out;
10838 r = _do_write(txc, c, newo, 0, oldo->onode.size, bl, 0);
10839 if (r < 0)
10840 goto out;
10841 }
10842
10843 // clone attrs
10844 newo->onode.attrs = oldo->onode.attrs;
10845
10846 // clone omap
10847 if (newo->onode.has_omap()) {
10848 dout(20) << __func__ << " clearing old omap data" << dendl;
10849 newo->flush();
10850 _do_omap_clear(txc, newo->onode.nid);
10851 }
10852 if (oldo->onode.has_omap()) {
10853 dout(20) << __func__ << " copying omap data" << dendl;
10854 if (!newo->onode.has_omap()) {
10855 newo->onode.set_omap_flag();
10856 }
10857 KeyValueDB::Iterator it = db->get_iterator(PREFIX_OMAP);
10858 string head, tail;
10859 get_omap_header(oldo->onode.nid, &head);
10860 get_omap_tail(oldo->onode.nid, &tail);
10861 it->lower_bound(head);
10862 while (it->valid()) {
10863 if (it->key() >= tail) {
10864 dout(30) << __func__ << " reached tail" << dendl;
10865 break;
10866 } else {
10867 dout(30) << __func__ << " got header/data "
10868 << pretty_binary_string(it->key()) << dendl;
10869 string key;
10870 rewrite_omap_key(newo->onode.nid, it->key(), &key);
10871 txc->t->set(PREFIX_OMAP, key, it->value());
10872 }
10873 it->next();
10874 }
10875 } else {
10876 newo->onode.clear_omap_flag();
10877 }
10878
10879 txc->write_onode(newo);
10880 r = 0;
10881
10882 out:
10883 dout(10) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
10884 << newo->oid << " = " << r << dendl;
10885 return r;
10886 }
10887
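// Copy-on-write clone of [srcoff, srcoff+length) into newo at dstoff: mark
// each overlapping source blob shared (assigning a shared blob id if
// needed), duplicate the blob metadata into the destination onode, and bump
// the shared-blob reference counts.  No object data is copied; both onodes
// end up pointing at the same physical extents.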
10888 int BlueStore::_do_clone_range(
10889 TransContext *txc,
10890 CollectionRef& c,
10891 OnodeRef& oldo,
10892 OnodeRef& newo,
10893 uint64_t srcoff,
10894 uint64_t length,
10895 uint64_t dstoff)
10896 {
10897 dout(15) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
10898 << newo->oid
10899 << " 0x" << std::hex << srcoff << "~" << length << " -> "
10900 << " 0x" << dstoff << "~" << length << std::dec << dendl;
10901 oldo->extent_map.fault_range(db, srcoff, length);
10902 newo->extent_map.fault_range(db, dstoff, length);
10903 _dump_onode(oldo);
10904 _dump_onode(newo);
10905
10906 // hmm, this could go into an ExtentMap::dup() method.
10907 vector<BlobRef> id_to_blob(oldo->extent_map.extent_map.size());
10908 for (auto &e : oldo->extent_map.extent_map) {
10909 e.blob->last_encoded_id = -1;
10910 }
10911 int n = 0;
10912 uint64_t end = srcoff + length;
10913 uint32_t dirty_range_begin = 0;
10914 uint32_t dirty_range_end = 0;
10915 for (auto ep = oldo->extent_map.seek_lextent(srcoff);
10916 ep != oldo->extent_map.extent_map.end();
10917 ++ep) {
10918 auto& e = *ep;
10919 if (e.logical_offset >= end) {
10920 break;
10921 }
10922 dout(20) << __func__ << " src " << e << dendl;
10923 BlobRef cb;
10924 bool blob_duped = true;
10925 if (e.blob->last_encoded_id >= 0) {
10926 // blob is already duped
10927 cb = id_to_blob[e.blob->last_encoded_id];
10928 blob_duped = false;
10929 } else {
10930 // dup the blob
10931 const bluestore_blob_t& blob = e.blob->get_blob();
10932 // make sure it is shared
10933 if (!blob.is_shared()) {
10934 c->make_blob_shared(_assign_blobid(txc), e.blob);
10935 if (dirty_range_begin == 0) {
10936 dirty_range_begin = e.logical_offset;
10937 }
10938 assert(e.logical_end() > 0);
10939 // -1 to exclude next potential shard
10940 dirty_range_end = e.logical_end() - 1;
10941 } else {
10942 c->load_shared_blob(e.blob->shared_blob);
10943 }
10944 cb = new Blob();
10945 e.blob->last_encoded_id = n;
10946 id_to_blob[n] = cb;
10947 e.blob->dup(*cb);
10948 // bump the extent refs on the copied blob's extents
10949 for (auto p : blob.get_extents()) {
10950 if (p.is_valid()) {
10951 e.blob->shared_blob->get_ref(p.offset, p.length);
10952 }
10953 }
10954 txc->write_shared_blob(e.blob->shared_blob);
10955 dout(20) << __func__ << " new " << *cb << dendl;
10956 }
10957 // dup extent
10958 int skip_front, skip_back;
10959 if (e.logical_offset < srcoff) {
10960 skip_front = srcoff - e.logical_offset;
10961 } else {
10962 skip_front = 0;
10963 }
10964 if (e.logical_end() > end) {
10965 skip_back = e.logical_end() - end;
10966 } else {
10967 skip_back = 0;
10968 }
10969 Extent *ne = new Extent(e.logical_offset + skip_front + dstoff - srcoff,
10970 e.blob_offset + skip_front,
10971 e.length - skip_front - skip_back, cb);
10972 newo->extent_map.extent_map.insert(*ne);
10973 ne->blob->get_ref(c.get(), ne->blob_offset, ne->length);
10974 // fixme: we may leave parts of new blob unreferenced that could
10975 // be freed (relative to the shared_blob).
10976 txc->statfs_delta.stored() += ne->length;
10977 if (e.blob->get_blob().is_compressed()) {
10978 txc->statfs_delta.compressed_original() += ne->length;
10979 if (blob_duped){
10980 txc->statfs_delta.compressed() +=
10981 cb->get_blob().get_compressed_payload_length();
10982 }
10983 }
10984 dout(20) << __func__ << " dst " << *ne << dendl;
10985 ++n;
10986 }
10987 if (dirty_range_end > dirty_range_begin) {
10988 oldo->extent_map.dirty_range(dirty_range_begin,
10989 dirty_range_end - dirty_range_begin);
10990 txc->write_onode(oldo);
10991 }
10992 txc->write_onode(newo);
10993
10994 if (dstoff + length > newo->onode.size) {
10995 newo->onode.size = dstoff + length;
10996 }
10997 newo->extent_map.dirty_range(dstoff, length);
10998 _dump_onode(oldo);
10999 _dump_onode(newo);
11000 return 0;
11001 }
11002
11003 int BlueStore::_clone_range(TransContext *txc,
11004 CollectionRef& c,
11005 OnodeRef& oldo,
11006 OnodeRef& newo,
11007 uint64_t srcoff, uint64_t length, uint64_t dstoff)
11008 {
11009 dout(15) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
11010 << newo->oid << " from 0x" << std::hex << srcoff << "~" << length
11011 << " to offset 0x" << dstoff << std::dec << dendl;
11012 int r = 0;
11013
11014 if (srcoff + length > oldo->onode.size) {
11015 r = -EINVAL;
11016 goto out;
11017 }
11018
11019 _assign_nid(txc, newo);
11020
11021 if (length > 0) {
11022 if (cct->_conf->bluestore_clone_cow) {
11023 _do_zero(txc, c, newo, dstoff, length);
11024 _do_clone_range(txc, c, oldo, newo, srcoff, length, dstoff);
11025 } else {
11026 bufferlist bl;
11027 r = _do_read(c.get(), oldo, srcoff, length, bl, 0);
11028 if (r < 0)
11029 goto out;
11030 r = _do_write(txc, c, newo, dstoff, bl.length(), bl, 0);
11031 if (r < 0)
11032 goto out;
11033 }
11034 }
11035
11036 txc->write_onode(newo);
11037 r = 0;
11038
11039 out:
11040 dout(10) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
11041 << newo->oid << " from 0x" << std::hex << srcoff << "~" << length
11042 << " to offset 0x" << dstoff << std::dec
11043 << " = " << r << dendl;
11044 return r;
11045 }
11046
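// Rename by re-keying: delete the old onode and extent shard keys, mark all
// shards dirty so they are rewritten under the new object key, point newo at
// the existing Onode, and rename the entry in the onode cache.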
11047 int BlueStore::_rename(TransContext *txc,
11048 CollectionRef& c,
11049 OnodeRef& oldo,
11050 OnodeRef& newo,
11051 const ghobject_t& new_oid)
11052 {
11053 dout(15) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
11054 << new_oid << dendl;
11055 int r;
11056 ghobject_t old_oid = oldo->oid;
11057 mempool::bluestore_cache_other::string new_okey;
11058
11059 if (newo) {
11060 if (newo->exists) {
11061 r = -EEXIST;
11062 goto out;
11063 }
11064 assert(txc->onodes.count(newo) == 0);
11065 }
11066
11067 txc->t->rmkey(PREFIX_OBJ, oldo->key.c_str(), oldo->key.size());
11068
11069 // rewrite shards
11070 {
11071 oldo->extent_map.fault_range(db, 0, oldo->onode.size);
11072 get_object_key(cct, new_oid, &new_okey);
11073 string key;
11074 for (auto &s : oldo->extent_map.shards) {
11075 generate_extent_shard_key_and_apply(oldo->key, s.shard_info->offset, &key,
11076 [&](const string& final_key) {
11077 txc->t->rmkey(PREFIX_OBJ, final_key);
11078 }
11079 );
11080 s.dirty = true;
11081 }
11082 }
11083
11084 newo = oldo;
11085 txc->write_onode(newo);
11086
11087 // this adjusts oldo->{oid,key} and resets oldo to a fresh empty
11088 // Onode in the old slot
11089 c->onode_map.rename(oldo, old_oid, new_oid, new_okey);
11090 r = 0;
11091
11092 out:
11093 dout(10) << __func__ << " " << c->cid << " " << old_oid << " -> "
11094 << new_oid << " = " << r << dendl;
11095 return r;
11096 }
11097
11098 // collections
11099
11100 int BlueStore::_create_collection(
11101 TransContext *txc,
11102 const coll_t &cid,
11103 unsigned bits,
11104 CollectionRef *c)
11105 {
11106 dout(15) << __func__ << " " << cid << " bits " << bits << dendl;
11107 int r;
11108 bufferlist bl;
11109
11110 {
11111 RWLock::WLocker l(coll_lock);
11112 if (*c) {
11113 r = -EEXIST;
11114 goto out;
11115 }
11116 c->reset(
11117 new Collection(
11118 this,
11119 cache_shards[cid.hash_to_shard(cache_shards.size())],
11120 cid));
11121 (*c)->cnode.bits = bits;
11122 coll_map[cid] = *c;
11123 }
11124 ::encode((*c)->cnode, bl);
11125 txc->t->set(PREFIX_COLL, stringify(cid), bl);
11126 r = 0;
11127
11128 out:
11129 dout(10) << __func__ << " " << cid << " bits " << bits << " = " << r << dendl;
11130 return r;
11131 }
11132
11133 int BlueStore::_remove_collection(TransContext *txc, const coll_t &cid,
11134 CollectionRef *c)
11135 {
11136 dout(15) << __func__ << " " << cid << dendl;
11137 int r;
11138
11139 {
11140 RWLock::WLocker l(coll_lock);
11141 if (!*c) {
11142 r = -ENOENT;
11143 goto out;
11144 }
11145 size_t nonexistent_count = 0;
11146 assert((*c)->exists);
11147 if ((*c)->onode_map.map_any([&](OnodeRef o) {
11148 if (o->exists) {
11149 dout(10) << __func__ << " " << o->oid << " " << o
11150 << " exists in onode_map" << dendl;
11151 return true;
11152 }
11153 ++nonexistent_count;
11154 return false;
11155 })) {
11156 r = -ENOTEMPTY;
11157 goto out;
11158 }
11159
11160 vector<ghobject_t> ls;
11161 ghobject_t next;
11162 // Enumerate onodes in the db, up to nonexistent_count + 1,
11163 // then check whether all of them are marked as non-existent.
11164 // Bypass the check if the returned number is greater than nonexistent_count.
11165 r = _collection_list(c->get(), ghobject_t(), ghobject_t::get_max(),
11166 nonexistent_count + 1, &ls, &next);
11167 if (r >= 0) {
11168 bool exists = false; //ls.size() > nonexistent_count;
11169 for (auto it = ls.begin(); !exists && it < ls.end(); ++it) {
11170 dout(10) << __func__ << " oid " << *it << dendl;
11171 auto onode = (*c)->onode_map.lookup(*it);
11172 exists = !onode || onode->exists;
11173 if (exists) {
11174 dout(10) << __func__ << " " << *it
11175 << " exists in db" << dendl;
11176 }
11177 }
11178 if (!exists) {
11179 coll_map.erase(cid);
11180 txc->removed_collections.push_back(*c);
11181 (*c)->exists = false;
11182 c->reset();
11183 txc->t->rmkey(PREFIX_COLL, stringify(cid));
11184 r = 0;
11185 } else {
11186 dout(10) << __func__ << " " << cid
11187 << " is non-empty" << dendl;
11188 r = -ENOTEMPTY;
11189 }
11190 }
11191 }
11192
11193 out:
11194 dout(10) << __func__ << " " << cid << " = " << r << dendl;
11195 return r;
11196 }
11197
11198 int BlueStore::_split_collection(TransContext *txc,
11199 CollectionRef& c,
11200 CollectionRef& d,
11201 unsigned bits, int rem)
11202 {
11203 dout(15) << __func__ << " " << c->cid << " to " << d->cid << " "
11204 << " bits " << bits << dendl;
11205 RWLock::WLocker l(c->lock);
11206 RWLock::WLocker l2(d->lock);
11207 int r;
11208
11209 // flush all previous deferred writes on this sequencer. this is a bit
11210 // heavyweight, but we need to make sure all deferred writes complete
11211 // before we split as the new collection's sequencer may need to order
11212 // this after those writes, and we don't bother with the complexity of
11213 // moving those TransContexts over to the new osr.
11214 _osr_drain_preceding(txc);
11215
11216 // move any cached items (onodes and referenced shared blobs) that will
11217 // belong to the child collection post-split. leave everything else behind.
11218 // this may include things that don't strictly belong to the now-smaller
11219 // parent split, but the OSD will always send us a split for every new
11220 // child.
11221
11222 spg_t pgid, dest_pgid;
11223 bool is_pg = c->cid.is_pg(&pgid);
11224 assert(is_pg);
11225 is_pg = d->cid.is_pg(&dest_pgid);
11226 assert(is_pg);
11227
11228 // the destination should initially be empty.
11229 assert(d->onode_map.empty());
11230 assert(d->shared_blob_set.empty());
11231 assert(d->cnode.bits == bits);
11232
11233 c->split_cache(d.get());
11234
11235 // adjust bits. note that this will be redundant for all but the first
11236 // split call for this parent (first child).
11237 c->cnode.bits = bits;
11238 assert(d->cnode.bits == bits);
11239 r = 0;
11240
11241 bufferlist bl;
11242 ::encode(c->cnode, bl);
11243 txc->t->set(PREFIX_COLL, stringify(c->cid), bl);
11244
11245   dout(10) << __func__ << " " << c->cid << " to " << d->cid
11246 	   << " bits " << bits << " = " << r << dendl;
11247 return r;
11248 }
11249
11250 // DB key value Histogram
11251 #define KEY_SLAB 32
11252 #define VALUE_SLAB 64
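// Keys are bucketed into 32-byte slabs and values into 64-byte slabs;
// e.g. a 70-byte key falls in slab 2, reported as the range [64,96).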
11253
11254 const string prefix_onode = "o";
11255 const string prefix_onode_shard = "x";
11256 const string prefix_other = "Z";
11257
11258 int BlueStore::DBHistogram::get_key_slab(size_t sz)
11259 {
11260 return (sz/KEY_SLAB);
11261 }
11262
11263 string BlueStore::DBHistogram::get_key_slab_to_range(int slab)
11264 {
11265 int lower_bound = slab * KEY_SLAB;
11266 int upper_bound = (slab + 1) * KEY_SLAB;
11267 string ret = "[" + stringify(lower_bound) + "," + stringify(upper_bound) + ")";
11268 return ret;
11269 }
11270
11271 int BlueStore::DBHistogram::get_value_slab(size_t sz)
11272 {
11273 return (sz/VALUE_SLAB);
11274 }
11275
11276 string BlueStore::DBHistogram::get_value_slab_to_range(int slab)
11277 {
11278 int lower_bound = slab * VALUE_SLAB;
11279 int upper_bound = (slab + 1) * VALUE_SLAB;
11280 string ret = "[" + stringify(lower_bound) + "," + stringify(upper_bound) + ")";
11281 return ret;
11282 }
11283
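// Record one key/value pair under the given prefix: bump the count for its
// key-size slab, track the largest key seen there, and do the same for the
// value-size slab nested beneath it.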
11284 void BlueStore::DBHistogram::update_hist_entry(map<string, map<int, struct key_dist> > &key_hist,
11285 const string &prefix, size_t key_size, size_t value_size)
11286 {
11287 uint32_t key_slab = get_key_slab(key_size);
11288 uint32_t value_slab = get_value_slab(value_size);
11289 key_hist[prefix][key_slab].count++;
11290 key_hist[prefix][key_slab].max_len = MAX(key_size, key_hist[prefix][key_slab].max_len);
11291 key_hist[prefix][key_slab].val_map[value_slab].count++;
11292 key_hist[prefix][key_slab].val_map[value_slab].max_len =
11293 MAX(value_size, key_hist[prefix][key_slab].val_map[value_slab].max_len);
11294 }
11295
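// Emit the collected data: first the global value-size distribution, then a
// per-prefix breakdown of key-size slabs, each with its nested value-size
// histogram.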
11296 void BlueStore::DBHistogram::dump(Formatter *f)
11297 {
11298 f->open_object_section("rocksdb_value_distribution");
11299 for (auto i : value_hist) {
11300 f->dump_unsigned(get_value_slab_to_range(i.first).data(), i.second);
11301 }
11302 f->close_section();
11303
11304 f->open_object_section("rocksdb_key_value_histogram");
11305 for (auto i : key_hist) {
11306 f->dump_string("prefix", i.first);
11307 f->open_object_section("key_hist");
11308     for (auto k : i.second) {
11309 f->dump_unsigned(get_key_slab_to_range(k.first).data(), k.second.count);
11310 f->dump_unsigned("max_len", k.second.max_len);
11311 f->open_object_section("value_hist");
11312       for (auto j : k.second.val_map) {
11313 f->dump_unsigned(get_value_slab_to_range(j.first).data(), j.second.count);
11314 f->dump_unsigned("max_len", j.second.max_len);
11315 }
11316 f->close_section();
11317 }
11318 f->close_section();
11319 }
11320 f->close_section();
11321 }
11322
11323 // Iterates through the db and collects the stats
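// Also reports per-prefix record counts and overall max/total key and value
// sizes.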
11324 void BlueStore::generate_db_histogram(Formatter *f)
11325 {
11326 //globals
11327 uint64_t num_onodes = 0;
11328 uint64_t num_shards = 0;
11329 uint64_t num_super = 0;
11330 uint64_t num_coll = 0;
11331 uint64_t num_omap = 0;
11332 uint64_t num_deferred = 0;
11333 uint64_t num_alloc = 0;
11334 uint64_t num_stat = 0;
11335 uint64_t num_others = 0;
11336 uint64_t num_shared_shards = 0;
11337   size_t max_key_size = 0, max_value_size = 0;
11338 uint64_t total_key_size = 0, total_value_size = 0;
11339 size_t key_size = 0, value_size = 0;
11340 DBHistogram hist;
11341
11342 utime_t start = ceph_clock_now();
11343
11344 KeyValueDB::WholeSpaceIterator iter = db->get_iterator();
11345 iter->seek_to_first();
11346 while (iter->valid()) {
11347 dout(30) << __func__ << " Key: " << iter->key() << dendl;
11348 key_size = iter->key_size();
11349 value_size = iter->value_size();
11350 hist.value_hist[hist.get_value_slab(value_size)]++;
11351 max_key_size = MAX(max_key_size, key_size);
11352 max_value_size = MAX(max_value_size, value_size);
11353 total_key_size += key_size;
11354 total_value_size += value_size;
11355
11356 pair<string,string> key(iter->raw_key());
11357
11358 if (key.first == PREFIX_SUPER) {
11359 hist.update_hist_entry(hist.key_hist, PREFIX_SUPER, key_size, value_size);
11360 num_super++;
11361 } else if (key.first == PREFIX_STAT) {
11362 hist.update_hist_entry(hist.key_hist, PREFIX_STAT, key_size, value_size);
11363 num_stat++;
11364 } else if (key.first == PREFIX_COLL) {
11365 hist.update_hist_entry(hist.key_hist, PREFIX_COLL, key_size, value_size);
11366 num_coll++;
11367 } else if (key.first == PREFIX_OBJ) {
11368 if (key.second.back() == ONODE_KEY_SUFFIX) {
11369 hist.update_hist_entry(hist.key_hist, prefix_onode, key_size, value_size);
11370 num_onodes++;
11371 } else {
11372 hist.update_hist_entry(hist.key_hist, prefix_onode_shard, key_size, value_size);
11373 num_shards++;
11374 }
11375 } else if (key.first == PREFIX_OMAP) {
11376 hist.update_hist_entry(hist.key_hist, PREFIX_OMAP, key_size, value_size);
11377 num_omap++;
11378 } else if (key.first == PREFIX_DEFERRED) {
11379 hist.update_hist_entry(hist.key_hist, PREFIX_DEFERRED, key_size, value_size);
11380 num_deferred++;
11381 } else if (key.first == PREFIX_ALLOC || key.first == "b" ) {
11382 hist.update_hist_entry(hist.key_hist, PREFIX_ALLOC, key_size, value_size);
11383 num_alloc++;
11384 } else if (key.first == PREFIX_SHARED_BLOB) {
11385 hist.update_hist_entry(hist.key_hist, PREFIX_SHARED_BLOB, key_size, value_size);
11386 num_shared_shards++;
11387 } else {
11388 hist.update_hist_entry(hist.key_hist, prefix_other, key_size, value_size);
11389 num_others++;
11390 }
11391 iter->next();
11392 }
11393
11394 utime_t duration = ceph_clock_now() - start;
11395 f->open_object_section("rocksdb_key_value_stats");
11396 f->dump_unsigned("num_onodes", num_onodes);
11397 f->dump_unsigned("num_shards", num_shards);
11398 f->dump_unsigned("num_super", num_super);
11399 f->dump_unsigned("num_coll", num_coll);
11400 f->dump_unsigned("num_omap", num_omap);
11401 f->dump_unsigned("num_deferred", num_deferred);
11402 f->dump_unsigned("num_alloc", num_alloc);
11403 f->dump_unsigned("num_stat", num_stat);
11404 f->dump_unsigned("num_shared_shards", num_shared_shards);
11405 f->dump_unsigned("num_others", num_others);
11406 f->dump_unsigned("max_key_size", max_key_size);
11407 f->dump_unsigned("max_value_size", max_value_size);
11408 f->dump_unsigned("total_key_size", total_key_size);
11409 f->dump_unsigned("total_value_size", total_value_size);
11410 f->close_section();
11411
11412 hist.dump(f);
11413
11414 dout(20) << __func__ << " finished in " << duration << " seconds" << dendl;
11415
11416 }
11417
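// Strict variant: trim every cache shard and assert both the shards and all
// per-collection onode maps / shared-blob sets end up empty before clearing
// coll_map.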
11418 void BlueStore::_flush_cache()
11419 {
11420 dout(10) << __func__ << dendl;
11421 for (auto i : cache_shards) {
11422 i->trim_all();
11423 assert(i->empty());
11424 }
11425 for (auto& p : coll_map) {
11426 assert(p.second->onode_map.empty());
11427 assert(p.second->shared_blob_set.empty());
11428 }
11429 coll_map.clear();
11430 }
11431
11432 // For external callers.
11433 // Unlike _flush_cache(), this is best-effort: we don't care if some
11434 // pinned onodes/data are still in the cache after this command
11435 // completes.
11436 void BlueStore::flush_cache()
11437 {
11438 dout(10) << __func__ << dendl;
11439 for (auto i : cache_shards) {
11440 i->trim_all();
11441 }
11442 }
11443
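// Zero-fill the head and/or tail of the buffer by the requested amounts and
// account the padded bytes in l_bluestore_write_pad_bytes.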
11444 void BlueStore::_apply_padding(uint64_t head_pad,
11445 uint64_t tail_pad,
11446 bufferlist& padded)
11447 {
11448 if (head_pad) {
11449 padded.prepend_zero(head_pad);
11450 }
11451 if (tail_pad) {
11452 padded.append_zero(tail_pad);
11453 }
11454 if (head_pad || tail_pad) {
11455 dout(20) << __func__ << " can pad head 0x" << std::hex << head_pad
11456 << " tail 0x" << tail_pad << std::dec << dendl;
11457 logger->inc(l_bluestore_write_pad_bytes, head_pad + tail_pad);
11458 }
11459 }
11460
11461 // ===========================================