template<typename S>
static void append_escaped(const string &in, S *out)
{
- char hexbyte[8];
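+ // worst case: every input byte escapes to three chars ("#xx"/"~xx"), plus the trailing '!'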
+ char hexbyte[in.length() * 3 + 1];
+ char* ptr = &hexbyte[0];
for (string::const_iterator i = in.begin(); i != in.end(); ++i) {
if (*i <= '#') {
- snprintf(hexbyte, sizeof(hexbyte), "#%02x", (uint8_t)*i);
- out->append(hexbyte);
+ *ptr++ = '#';
+ *ptr++ = "0123456789abcdef"[(*i >> 4) & 0x0f];
+ *ptr++ = "0123456789abcdef"[*i & 0x0f];
} else if (*i >= '~') {
- snprintf(hexbyte, sizeof(hexbyte), "~%02x", (uint8_t)*i);
- out->append(hexbyte);
+ *ptr++ = '~';
+ *ptr++ = "0123456789abcdef"[(*i >> 4) & 0x0f];
+ *ptr++ = "0123456789abcdef"[*i & 0x0f];
} else {
- out->push_back(*i);
+ *ptr++ = *i;
}
}
- out->push_back('!');
+ *ptr++ = '!';
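+ // append the fully escaped string in one call instead of byte by byte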
+ out->append(hexbyte, ptr - &hexbyte[0]);
+}
+
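+// hex digit to value; returns 256 (out of byte range) for non-hex input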
+inline unsigned h2i(char c)
+{
+ if ((c >= '0') && (c <= '9')) {
+ return c - '0';
+ } else if ((c >= 'a') && (c <= 'f')) {
+ return c - 'a' + 10;
+ } else if ((c >= 'A') && (c <= 'F')) {
+ return c - 'A' + 10;
+ } else {
+ return 256; // not a hex digit: always larger than 255
+ }
}
static int decode_escaped(const char *p, string *out)
{
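+ // stage decoded bytes on the stack and append to *out in batches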
+ char buff[256];
+ char* ptr = &buff[0];
+ char* max = &buff[252];
const char *orig_p = p;
while (*p && *p != '!') {
if (*p == '#' || *p == '~') {
- unsigned hex;
- int r = sscanf(++p, "%2x", &hex);
- if (r < 1)
- return -EINVAL;
- out->push_back((char)hex);
- p += 2;
+ unsigned hex = 0;
+ p++;
+ hex = h2i(*p++) << 4;
+ if (hex > 255) {
+ return -EINVAL;
+ }
+ hex |= h2i(*p++);
+ if (hex > 255) {
+ return -EINVAL;
+ }
+ *ptr++ = hex;
} else {
- out->push_back(*p++);
+ *ptr++ = *p++;
}
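+ // flush the staging buffer before it can fill up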
+ if (ptr > max) {
+ out->append(buff, ptr-buff);
+ ptr = &buff[0];
+ }
+ }
+ if (ptr != buff) {
+ out->append(buff, ptr-buff);
}
return p - orig_p;
}
const char *p = key.c_str();
if (key.length() < sizeof(uint64_t))
return -1;
- p = _key_decode_u64(p, sbid);
+ _key_decode_u64(p, sbid);
return 0;
}
int okey_len = key.size() - sizeof(uint32_t) - 1;
*onode_key = key.substr(0, okey_len);
const char *p = key.data() + okey_len;
- p = _key_decode_u32(p, offset);
+ _key_decode_u32(p, offset);
return 0;
}
void BlueStore::BufferSpace::read(
Cache* cache,
- uint32_t offset, uint32_t length,
+ uint32_t offset,
+ uint32_t length,
BlueStore::ready_regions_t& res,
interval_set<uint32_t>& res_intervals)
{
- std::lock_guard<std::recursive_mutex> l(cache->lock);
res.clear();
res_intervals.clear();
uint32_t want_bytes = length;
uint32_t end = offset + length;
- for (auto i = _data_lower_bound(offset);
- i != buffer_map.end() && offset < end && i->first < end;
- ++i) {
- Buffer *b = i->second.get();
- assert(b->end() > offset);
- if (b->is_writing() || b->is_clean()) {
- if (b->offset < offset) {
- uint32_t skip = offset - b->offset;
- uint32_t l = MIN(length, b->length - skip);
- res[offset].substr_of(b->data, skip, l);
- res_intervals.insert(offset, l);
- offset += l;
- length -= l;
- if (!b->is_writing()) {
+
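+ // take the cache lock only for the buffer_map scan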
+ {
+ std::lock_guard<std::recursive_mutex> l(cache->lock);
+ for (auto i = _data_lower_bound(offset);
+ i != buffer_map.end() && offset < end && i->first < end;
+ ++i) {
+ Buffer *b = i->second.get();
+ assert(b->end() > offset);
+ if (b->is_writing() || b->is_clean()) {
+ if (b->offset < offset) {
+ uint32_t skip = offset - b->offset;
+ uint32_t l = MIN(length, b->length - skip);
+ res[offset].substr_of(b->data, skip, l);
+ res_intervals.insert(offset, l);
+ offset += l;
+ length -= l;
+ if (!b->is_writing()) {
+ cache->_touch_buffer(b);
+ }
+ continue;
+ }
+ if (b->offset > offset) {
+ uint32_t gap = b->offset - offset;
+ if (length <= gap) {
+ break;
+ }
+ offset += gap;
+ length -= gap;
+ }
+ if (!b->is_writing()) {
cache->_touch_buffer(b);
- }
- continue;
- }
- if (b->offset > offset) {
- uint32_t gap = b->offset - offset;
- if (length <= gap) {
- break;
- }
- offset += gap;
- length -= gap;
- }
- if (!b->is_writing()) {
- cache->_touch_buffer(b);
- }
- if (b->length > length) {
- res[offset].substr_of(b->data, 0, length);
- res_intervals.insert(offset, length);
- break;
- } else {
- res[offset].append(b->data);
- res_intervals.insert(offset, b->length);
- if (b->length == length)
+ }
+ if (b->length > length) {
+ res[offset].substr_of(b->data, 0, length);
+ res_intervals.insert(offset, length);
break;
- offset += b->length;
- length -= b->length;
+ } else {
+ res[offset].append(b->data);
+ res_intervals.insert(offset, b->length);
+ if (b->length == length)
+ break;
+ offset += b->length;
+ length -= b->length;
+ }
}
}
}
BlueStore::OnodeRef BlueStore::OnodeSpace::lookup(const ghobject_t& oid)
{
- std::lock_guard<std::recursive_mutex> l(cache->lock);
ldout(cache->cct, 30) << __func__ << dendl;
- ceph::unordered_map<ghobject_t,OnodeRef>::iterator p = onode_map.find(oid);
- if (p == onode_map.end()) {
- ldout(cache->cct, 30) << __func__ << " " << oid << " miss" << dendl;
+ OnodeRef o;
+ bool hit = false;
+
+ {
+ std::lock_guard<std::recursive_mutex> l(cache->lock);
+ ceph::unordered_map<ghobject_t,OnodeRef>::iterator p = onode_map.find(oid);
+ if (p == onode_map.end()) {
+ ldout(cache->cct, 30) << __func__ << " " << oid << " miss" << dendl;
+ } else {
+ ldout(cache->cct, 30) << __func__ << " " << oid << " hit " << p->second
+ << dendl;
+ cache->_touch_onode(p->second);
+ hit = true;
+ o = p->second;
+ }
+ }
+
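+ // update the perf counters outside the cache lock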
+ if (hit) {
+ cache->logger->inc(l_bluestore_onode_hits);
+ } else {
cache->logger->inc(l_bluestore_onode_misses);
- return OnodeRef();
}
- ldout(cache->cct, 30) << __func__ << " " << oid << " hit " << p->second
- << dendl;
- cache->_touch_onode(p->second);
- cache->logger->inc(l_bluestore_onode_hits);
- return p->second;
+ return o;
}
void BlueStore::OnodeSpace::clear()
return false;
}
+void BlueStore::OnodeSpace::dump(CephContext *cct, int lvl)
+{
+ for (auto& i : onode_map) {
+ ldout(cct, lvl) << i.first << " : " << i.second << dendl;
+ }
+}
// SharedBlob
<< " removing self from set " << get_parent()
<< dendl;
if (get_parent()) {
- if (get_parent()->remove(this)) {
+ if (get_parent()->try_remove(this)) {
delete this;
} else {
ldout(coll->store->cct, 20)
}
}
+// SharedBlobSet
+
+#undef dout_prefix
+#define dout_prefix *_dout << "bluestore.sharedblobset(" << this << ") "
+
+void BlueStore::SharedBlobSet::dump(CephContext *cct, int lvl)
+{
+ std::lock_guard<std::mutex> l(lock);
+ for (auto& i : sb_map) {
+ ldout(cct, lvl) << i.first << " : " << *i.second << dendl;
+ }
+}
+
// Blob
#undef dout_prefix
if (b.is_spanning()) {
out << " spanning " << b.id;
}
- out << " " << b.get_blob() << " " << b.get_blob_use_tracker()
- << " " << *b.shared_blob
- << ")";
+ out << " " << b.get_blob() << " " << b.get_blob_use_tracker();
+ if (b.shared_blob) {
+ out << " " << *b.shared_blob;
+ } else {
+ out << " (shared_blob=NULL)";
+ }
+ out << ")";
return out;
}
void BlueStore::Blob::discard_unallocated(Collection *coll)
{
- if (blob.is_shared()) {
+ if (get_blob().is_shared()) {
return;
}
- if (blob.is_compressed()) {
+ if (get_blob().is_compressed()) {
bool discard = false;
bool all_invalid = true;
- for (auto e : blob.get_extents()) {
+ for (auto e : get_blob().get_extents()) {
if (!e.is_valid()) {
discard = true;
} else {
assert(discard == all_invalid); // in a compressed blob, either all
// or none of the pextents are invalid.
if (discard) {
- shared_blob->bc.discard(shared_blob->get_cache(), 0, blob.get_logical_length());
+ shared_blob->bc.discard(shared_blob->get_cache(), 0,
+ get_blob().get_logical_length());
}
} else {
size_t pos = 0;
- for (auto e : blob.get_extents()) {
+ for (auto e : get_blob().get_extents()) {
if (!e.is_valid()) {
ldout(coll->store->cct, 20) << __func__ << " 0x" << std::hex << pos
<< "~" << e.length
}
pos += e.length;
}
- if (blob.can_prune_tail()) {
- dirty_blob();
- blob.prune_tail();
- used_in_blob.prune_tail(blob.get_ondisk_length());
+ if (get_blob().can_prune_tail()) {
+ dirty_blob().prune_tail();
+ used_in_blob.prune_tail(get_blob().get_ondisk_length());
auto cct = coll->store->cct; //used by dout
- dout(20) << __func__ << " pruned tail, now " << blob << dendl;
+ dout(20) << __func__ << " pruned tail, now " << get_blob() << dendl;
}
}
}
if (used_in_blob.is_empty()) {
uint32_t min_release_size =
- blob.get_release_size(coll->store->min_alloc_size);
- uint64_t l = blob.get_logical_length();
- dout(20) << __func__ << " init 0x" << std::hex << l << ", " << min_release_size
- << std::dec << dendl;
+ get_blob().get_release_size(coll->store->min_alloc_size);
+ uint64_t l = get_blob().get_logical_length();
+ dout(20) << __func__ << " init 0x" << std::hex << l << ", "
+ << min_release_size << std::dec << dendl;
used_in_blob.init(l, min_release_size);
}
used_in_blob.get(
return b.release_extents(empty, logical, r);
}
-bool BlueStore::Blob::try_reuse_blob(uint32_t min_alloc_size,
+bool BlueStore::Blob::can_reuse_blob(uint32_t min_alloc_size,
uint32_t target_blob_size,
uint32_t b_offset,
uint32_t *length0) {
target_blob_size = MAX(blen, target_blob_size);
if (b_offset >= blen) {
- //new data totally stands out of the existing blob
- new_blen = b_offset + length;
+ // new data lies entirely beyond the existing blob
+ new_blen = end;
} else {
- //new data overlaps with the existing blob
- new_blen = MAX(blen, length + b_offset);
- if (!get_blob().is_unallocated(
- b_offset,
- new_blen > blen ? blen - b_offset : length)) {
- return false;
+ // new data overlaps with the existing blob
+ new_blen = MAX(blen, end);
+
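+ // length of the new data that falls within the existing blob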
+ uint32_t overlap = 0;
+ if (new_blen > blen) {
+ overlap = blen - b_offset;
+ } else {
+ overlap = length;
+ }
+
+ if (!get_blob().is_unallocated(b_offset, overlap)) {
+ // abort if any piece of the overlap has already been allocated
+ return false;
}
}
+
if (new_blen > blen) {
int64_t overflow = int64_t(new_blen) - target_blob_size;
// Unable to decrease the provided length to fit into max_blob_size
length -= overflow;
*length0 = length;
}
+
if (new_blen > blen) {
dirty_blob().add_tail(new_blen);
used_in_blob.add_tail(new_blen,
- blob.get_release_size(min_alloc_size));
+ get_blob().get_release_size(min_alloc_size));
}
}
return true;
unsigned n;
// we need to encode inline_bl to measure encoded length
bool never_happen = encode_some(0, OBJECT_MAX_SIZE, inline_bl, &n);
+ inline_bl.reassign_to_mempool(mempool::mempool_bluestore_cache_other);
assert(!never_happen);
size_t len = inline_bl.length();
dout(20) << __func__ << " inline shard " << len << " bytes from " << n
<< needs_reshard_end << ")" << std::dec << dendl;
}
- fault_range(db, needs_reshard_begin, needs_reshard_end);
+ fault_range(db, needs_reshard_begin, (needs_reshard_end - needs_reshard_begin));
// we may need to fault in a larger interval later; must have all
// referring extents for spanning blobs loaded in order to have
for (auto w : writes) {
if (b == w.b) {
auto loffs2 = P2ALIGN(w.logical_offset, min_alloc_size);
- auto loffs2_end = ROUND_UP_TO( w.logical_offset + w.length0, min_alloc_size);
+ auto loffs2_end = P2ROUNDUP(w.logical_offset + w.length0, min_alloc_size);
if ((loffs <= loffs2 && loffs_end > loffs2) ||
- (loffs >= loffs2 && loffs < loffs2_end)) {
+ (loffs >= loffs2 && loffs < loffs2_end)) {
return true;
}
}
<< " 0x" << std::hex << p->first << "~" << p->second.bl.length()
<< " -> 0x" << head.length() << std::dec << dendl;
auto i = seq_bytes.find(p->second.seq);
+ assert(i != seq_bytes.end());
if (end > offset + length) {
bufferlist tail;
tail.substr_of(p->second.bl, offset + length - p->first,
} else {
i->second -= end - offset;
}
+ assert(i->second >= 0);
p->second.bl.swap(head);
}
++p;
break;
}
auto i = seq_bytes.find(p->second.seq);
+ assert(i != seq_bytes.end());
auto end = p->first + p->second.bl.length();
if (end > offset + length) {
unsigned drop_front = offset + length - p->first;
<< std::dec << dendl;
i->second -= p->second.bl.length();
}
+ assert(i->second >= 0);
p = iomap.erase(p);
}
}
on->exists = true;
bufferptr::iterator p = v.front().begin_deep();
on->onode.decode(p);
+ for (auto& i : on->onode.attrs) {
+ i.second.reassign_to_mempool(mempool::mempool_bluestore_cache_other);
+ }
// initialize extent_map
on->extent_map.decode_spanning_blobs(p);
if (on->onode.extent_map_shards.empty()) {
denc(on->extent_map.inline_bl, p);
on->extent_map.decode_some(on->extent_map.inline_bl);
+ on->extent_map.inline_bl.reassign_to_mempool(
+ mempool::mempool_bluestore_cache_other);
} else {
on->extent_map.init_shards(false, false);
}
continue;
}
ldout(store->cct, 20) << __func__ << " moving " << *sb << dendl;
- sb->coll = dest;
if (sb->get_sbid()) {
ldout(store->cct, 20) << __func__
<< " moving registration " << *sb << dendl;
shared_blob_set.remove(sb);
dest->shared_blob_set.add(dest, sb);
}
+ sb->coll = dest;
if (dest->cache != cache) {
for (auto& i : sb->bc.buffer_map) {
if (!i.second->is_writing()) {
size_t num_shards = store->cache_shards.size();
float target_ratio = store->cache_meta_ratio + store->cache_data_ratio;
// A little sloppy but should be close enough
- uint64_t shard_target = target_ratio * (store->cct->_conf->bluestore_cache_size / num_shards);
+ uint64_t shard_target = target_ratio * (store->cache_size / num_shards);
for (auto i : store->cache_shards) {
i->trim(shard_target,
throttle_deferred_bytes(cct, "bluestore_throttle_deferred_bytes",
cct->_conf->bluestore_throttle_bytes +
cct->_conf->bluestore_throttle_deferred_bytes),
+ deferred_finisher(cct, "deferred_finisher", "dfin"),
kv_sync_thread(this),
kv_finalize_thread(this),
mempool_thread(this)
throttle_deferred_bytes(cct, "bluestore_throttle_deferred_bytes",
cct->_conf->bluestore_throttle_bytes +
cct->_conf->bluestore_throttle_deferred_bytes),
+ deferred_finisher(cct, "deferred_finisher", "dfin"),
kv_sync_thread(this),
kv_finalize_thread(this),
min_alloc_size(_min_alloc_size),
_init_logger();
cct->_conf->add_observer(this);
set_cache_shards(1);
-
- if (cct->_conf->bluestore_shard_finishers) {
- m_finisher_num = cct->_conf->osd_op_num_shards;
- }
-
- for (int i = 0; i < m_finisher_num; ++i) {
- ostringstream oss;
- oss << "finisher-" << i;
- Finisher *f = new Finisher(cct, oss.str(), "finisher");
- finishers.push_back(f);
- }
}
BlueStore::~BlueStore()
"bluestore_compression_max_blob_size",
"bluestore_compression_max_blob_size_ssd",
"bluestore_compression_max_blob_size_hdd",
+ "bluestore_compression_required_ratio",
"bluestore_max_alloc_size",
"bluestore_prefer_deferred_size",
+ "bluestore_prefer_deferred_size_hdd",
+ "bluestore_prefer_deferred_size_ssd",
"bluestore_deferred_batch_ops",
"bluestore_deferred_batch_ops_hdd",
"bluestore_deferred_batch_ops_ssd",
}
}
if (changed.count("bluestore_prefer_deferred_size") ||
+ changed.count("bluestore_prefer_deferred_size_hdd") ||
+ changed.count("bluestore_prefer_deferred_size_ssd") ||
changed.count("bluestore_max_alloc_size") ||
changed.count("bluestore_deferred_batch_ops") ||
changed.count("bluestore_deferred_batch_ops_hdd") ||
void BlueStore::_set_compression()
{
- if (cct->_conf->bluestore_compression_max_blob_size) {
- comp_min_blob_size = cct->_conf->bluestore_compression_max_blob_size;
+ auto m = Compressor::get_comp_mode_type(cct->_conf->bluestore_compression_mode);
+ if (m) {
+ comp_mode = *m;
+ } else {
+ derr << __func__ << " unrecognized value '"
+ << cct->_conf->bluestore_compression_mode
+ << "' for bluestore_compression_mode, reverting to 'none'"
+ << dendl;
+ comp_mode = Compressor::COMP_NONE;
+ }
+
+ compressor = nullptr;
+
+ if (comp_mode == Compressor::COMP_NONE) {
+ dout(10) << __func__ << " compression mode set to 'none', "
+ << "ignoring other compression settings" << dendl;
+ return;
+ }
+
+ if (cct->_conf->bluestore_compression_min_blob_size) {
+ comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size;
} else {
assert(bdev);
if (bdev->is_rotational()) {
}
}
- auto m = Compressor::get_comp_mode_type(cct->_conf->bluestore_compression_mode);
- if (m) {
- comp_mode = *m;
- } else {
- derr << __func__ << " unrecognized value '"
- << cct->_conf->bluestore_compression_mode
- << "' for bluestore_compression_mode, reverting to 'none'"
- << dendl;
- comp_mode = Compressor::COMP_NONE;
- }
-
- compressor = nullptr;
-
auto& alg_name = cct->_conf->bluestore_compression_algorithm;
if (!alg_name.empty()) {
compressor = Compressor::create(cct, alg_name);
int BlueStore::_set_cache_sizes()
{
+ assert(bdev);
+ if (cct->_conf->bluestore_cache_size) {
+ cache_size = cct->_conf->bluestore_cache_size;
+ } else {
+ // choose global cache size based on backend type
+ if (bdev->is_rotational()) {
+ cache_size = cct->_conf->bluestore_cache_size_hdd;
+ } else {
+ cache_size = cct->_conf->bluestore_cache_size_ssd;
+ }
+ }
cache_meta_ratio = cct->_conf->bluestore_cache_meta_ratio;
cache_kv_ratio = cct->_conf->bluestore_cache_kv_ratio;
+
+ double cache_kv_max = cct->_conf->bluestore_cache_kv_max;
+ double cache_kv_max_ratio = 0;
+
+ // if cache_kv_max is negative, disable it
+ if (cache_size > 0 && cache_kv_max >= 0) {
+ cache_kv_max_ratio = (double) cache_kv_max / (double) cache_size;
+ if (cache_kv_max_ratio < 1.0 && cache_kv_max_ratio < cache_kv_ratio) {
+ dout(1) << __func__ << " max " << cache_kv_max_ratio
+ << " < ratio " << cache_kv_ratio
+ << dendl;
+ cache_meta_ratio = cache_meta_ratio + cache_kv_ratio - cache_kv_max_ratio;
+ cache_kv_ratio = cache_kv_max_ratio;
+ }
+ }
+
cache_data_ratio =
(double)1.0 - (double)cache_meta_ratio - (double)cache_kv_ratio;
- if (cache_meta_ratio <= 0 || cache_meta_ratio > 1.0) {
- derr << __func__ << "bluestore_cache_meta_ratio (" << cache_meta_ratio
- << ") must be in range (0,1.0]" << dendl;
+ if (cache_meta_ratio < 0 || cache_meta_ratio > 1.0) {
+ derr << __func__ << " bluestore_cache_meta_ratio (" << cache_meta_ratio
+ << ") must be in range [0,1.0]" << dendl;
return -EINVAL;
}
- if (cache_kv_ratio <= 0 || cache_kv_ratio > 1.0) {
- derr << __func__ << "bluestore_cache_kv_ratio (" << cache_kv_ratio
- << ") must be in range (0,1.0]" << dendl;
+ if (cache_kv_ratio < 0 || cache_kv_ratio > 1.0) {
+ derr << __func__ << " bluestore_cache_kv_ratio (" << cache_kv_ratio
+ << ") must be in range [0,1.0]" << dendl;
return -EINVAL;
}
if (cache_meta_ratio + cache_kv_ratio > 1.0) {
- derr << __func__ << "bluestore_cache_meta_ratio (" << cache_meta_ratio
+ derr << __func__ << " bluestore_cache_meta_ratio (" << cache_meta_ratio
<< ") + bluestore_cache_kv_ratio (" << cache_kv_ratio
<< ") = " << cache_meta_ratio + cache_kv_ratio << "; must be <= 1.0"
<< dendl;
// deal with floating point imprecision
cache_data_ratio = 0;
}
- dout(1) << __func__ << " meta " << cache_meta_ratio
+ dout(1) << __func__ << " cache_size " << cache_size
+ << " meta " << cache_meta_ratio
<< " kv " << cache_kv_ratio
<< " data " << cache_data_ratio
<< dendl;
return 0;
}
+int BlueStore::write_meta(const std::string& key, const std::string& value)
+{
+ bluestore_bdev_label_t label;
+ string p = path + "/block";
+ int r = _read_bdev_label(cct, p, &label);
+ if (r < 0) {
+ return ObjectStore::write_meta(key, value);
+ }
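+ // mirror the value into the bdev label, then fall through to the default meta store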
+ label.meta[key] = value;
+ r = _write_bdev_label(cct, p, label);
+ assert(r == 0);
+ return ObjectStore::write_meta(key, value);
+}
+
+int BlueStore::read_meta(const std::string& key, std::string *value)
+{
+ bluestore_bdev_label_t label;
+ string p = path + "/block";
+ int r = _read_bdev_label(cct, p, &label);
+ if (r < 0) {
+ return ObjectStore::read_meta(key, value);
+ }
+ auto i = label.meta.find(key);
+ if (i == label.meta.end()) {
+ return ObjectStore::read_meta(key, value);
+ }
+ *value = i->second;
+ return 0;
+}
+
void BlueStore::_init_logger()
{
PerfCountersBuilder b(cct, "bluestore",
int BlueStore::_open_path()
{
- // initial sanity check
- int r = _set_cache_sizes();
- if (r < 0) {
- return r;
- }
-
assert(path_fd < 0);
- path_fd = ::open(path.c_str(), O_DIRECTORY);
+ path_fd = TEMP_FAILURE_RETRY(::open(path.c_str(), O_DIRECTORY));
if (path_fd < 0) {
int r = -errno;
derr << __func__ << " unable to open " << path << ": " << cpp_strerror(r)
path_fd = -1;
}
-int BlueStore::_write_bdev_label(string path, bluestore_bdev_label_t label)
+int BlueStore::_write_bdev_label(CephContext *cct,
+ string path, bluestore_bdev_label_t label)
{
dout(10) << __func__ << " path " << path << " label " << label << dendl;
bufferlist bl;
z.zero();
bl.append(std::move(z));
- int fd = ::open(path.c_str(), O_WRONLY);
+ int fd = TEMP_FAILURE_RETRY(::open(path.c_str(), O_WRONLY));
if (fd < 0) {
fd = -errno;
derr << __func__ << " failed to open " << path << ": " << cpp_strerror(fd)
derr << __func__ << " failed to write to " << path
<< ": " << cpp_strerror(r) << dendl;
}
+ r = ::fsync(fd);
+ if (r < 0) {
+ derr << __func__ << " failed to fsync " << path
+ << ": " << cpp_strerror(r) << dendl;
+ }
VOID_TEMP_FAILURE_RETRY(::close(fd));
return r;
}
bluestore_bdev_label_t *label)
{
dout(10) << __func__ << dendl;
- int fd = ::open(path.c_str(), O_RDONLY);
+ int fd = TEMP_FAILURE_RETRY(::open(path.c_str(), O_RDONLY));
if (fd < 0) {
fd = -errno;
derr << __func__ << " failed to open " << path << ": " << cpp_strerror(fd)
label.size = size;
label.btime = ceph_clock_now();
label.description = desc;
- int r = _write_bdev_label(path, label);
+ int r = _write_bdev_label(cct, path, label);
if (r < 0)
return r;
} else {
void BlueStore::_set_alloc_sizes(void)
{
- min_alloc_size_order = ctz(min_alloc_size);
- assert(min_alloc_size == 1u << min_alloc_size_order);
-
max_alloc_size = cct->_conf->bluestore_max_alloc_size;
if (cct->_conf->bluestore_prefer_deferred_size) {
block_mask = ~(block_size - 1);
block_size_order = ctz(block_size);
assert(block_size == 1u << block_size_order);
+ // and set cache_size based on device type
+ r = _set_cache_sizes();
+ if (r < 0) {
+ goto fail_close;
+ }
return 0;
fail_close:
bl.append(freelist_type);
t->set(PREFIX_SUPER, "freelist_type", bl);
}
- fm->create(bdev->get_size(), t);
+ fm->create(bdev->get_size(), min_alloc_size, t);
// allocate superblock reserved space. note that we do not mark
// bluefs space as allocated in the freelist; we instead rely on
// bluefs_extents.
- fm->allocate(0, SUPER_RESERVED, t);
+ uint64_t reserved = ROUND_UP_TO(MAX(SUPER_RESERVED, min_alloc_size),
+ min_alloc_size);
+ fm->allocate(0, reserved, t);
- uint64_t reserved = 0;
if (cct->_conf->bluestore_bluefs) {
assert(bluefs_extents.num_intervals() == 1);
interval_set<uint64_t>::iterator p = bluefs_extents.begin();
- reserved = p.get_start() + p.get_len();
+ reserved = ROUND_UP_TO(p.get_start() + p.get_len(), min_alloc_size);
dout(20) << __func__ << " reserved 0x" << std::hex << reserved << std::dec
<< " for bluefs" << dendl;
bufferlist bl;
t->set(PREFIX_SUPER, "bluefs_extents", bl);
dout(20) << __func__ << " bluefs_extents 0x" << std::hex << bluefs_extents
<< std::dec << dendl;
- } else {
- reserved = SUPER_RESERVED;
}
if (cct->_conf->bluestore_debug_prefill > 0) {
db->submit_transaction_sync(t);
}
- int r = fm->init();
+ int r = fm->init(bdev->get_size());
if (r < 0) {
derr << __func__ << " freelist init failed: " << cpp_strerror(r) << dendl;
delete fm;
++num;
bytes += length;
}
+ fm->enumerate_reset();
dout(1) << __func__ << " loaded " << pretty_si_t(bytes)
<< " in " << num << " extents"
<< dendl;
return rotational;
}
+bool BlueStore::is_journal_rotational()
+{
+ if (!bluefs) {
+ dout(5) << __func__ << " bluefs disabled, default to store media type"
+ << dendl;
+ return is_rotational();
+ }
+ dout(10) << __func__ << " " << (int)bluefs->wal_is_rotational() << dendl;
+ return bluefs->wal_is_rotational();
+}
+
bool BlueStore::test_mount_in_use()
{
// most error conditions mean the mount is not in use (e.g., because
string bfn;
struct stat st;
- bfn = path + "/block.db";
+ if (read_meta("path_block.db", &bfn) < 0) {
+ bfn = path + "/block.db";
+ }
if (::stat(bfn.c_str(), &st) == 0) {
r = bluefs->add_block_device(BlueFS::BDEV_DB, bfn);
if (r < 0) {
}
// shared device
- bfn = path + "/block";
+ if (read_meta("path_block", &bfn) < 0) {
+ bfn = path + "/block";
+ }
r = bluefs->add_block_device(bluefs_shared_bdev, bfn);
if (r < 0) {
derr << __func__ << " add block device(" << bfn << ") returned: "
bdev->get_size() * (cct->_conf->bluestore_bluefs_min_ratio +
cct->_conf->bluestore_bluefs_gift_ratio);
initial = MAX(initial, cct->_conf->bluestore_bluefs_min);
+ if (cct->_conf->bluefs_alloc_size % min_alloc_size) {
+ derr << __func__ << " bluefs_alloc_size 0x" << std::hex
+ << cct->_conf->bluefs_alloc_size << " is not a multiple of "
+ << "min_alloc_size 0x" << min_alloc_size << std::dec << dendl;
+ r = -EINVAL;
+ goto free_bluefs;
+ }
// align to bluefs's alloc_size
initial = P2ROUNDUP(initial, cct->_conf->bluefs_alloc_size);
// put bluefs in the middle of the device in case it is an HDD
bluefs_extents.insert(start, initial);
}
- bfn = path + "/block.wal";
+ if (read_meta("path_block.wal", &bfn) < 0) {
+ bfn = path + "/block.wal";
+ }
if (::stat(bfn.c_str(), &st) == 0) {
r = bluefs->add_block_device(BlueFS::BDEV_WAL, bfn);
if (r < 0) {
FreelistManager::setup_merge_operators(db);
db->set_merge_operator(PREFIX_STAT, merge_op);
- db->set_cache_size(cct->_conf->bluestore_cache_size * cache_kv_ratio);
+ db->set_cache_size(cache_size * cache_kv_ratio);
if (kv_backend == "rocksdb")
options = cct->_conf->bluestore_rocksdb_options;
<< " > max_ratio " << cct->_conf->bluestore_bluefs_max_ratio
<< ", should reclaim " << pretty_si_t(reclaim) << dendl;
}
+
+ // don't take over too much of the freespace
+ uint64_t free_cap = cct->_conf->bluestore_bluefs_max_ratio * total_free;
if (bluefs_total < cct->_conf->bluestore_bluefs_min &&
- cct->_conf->bluestore_bluefs_min <
- (uint64_t)(cct->_conf->bluestore_bluefs_max_ratio * total_free)) {
+ cct->_conf->bluestore_bluefs_min < free_cap) {
uint64_t g = cct->_conf->bluestore_bluefs_min - bluefs_total;
dout(10) << __func__ << " bluefs_total " << bluefs_total
<< " < min " << cct->_conf->bluestore_bluefs_min
gift = g;
reclaim = 0;
}
+ uint64_t min_free = cct->_conf->get_val<uint64_t>("bluestore_bluefs_min_free");
+ if (bluefs_free < min_free &&
+ min_free < free_cap) {
+ uint64_t g = min_free - bluefs_free;
+ dout(10) << __func__ << " bluefs_free " << bluefs_free
+ << " < min " << min_free
+ << ", should gift " << pretty_si_t(g) << dendl;
+ if (g > gift)
+ gift = g;
+ reclaim = 0;
+ }
if (gift) {
// round up to alloc size
return 0;
}
-void BlueStore::open_statfs()
+void BlueStore::_open_statfs()
{
bufferlist bl;
int r = db->get(PREFIX_STAT, "bluestore_statfs", &bl);
if (size_t(bl.length()) >= sizeof(vstatfs.values)) {
auto it = bl.begin();
vstatfs.decode(it);
- }
- else {
+ } else {
dout(10) << __func__ << " store_statfs is corrupt, using empty" << dendl;
}
}
if (r < 0)
goto out_close_fsid;
+ {
+ string wal_path = cct->_conf->get_val<string>("bluestore_block_wal_path");
+ if (wal_path.size()) {
+ write_meta("path_block.wal", wal_path);
+ }
+ string db_path = cct->_conf->get_val<string>("bluestore_block_db_path");
+ if (db_path.size()) {
+ write_meta("path_block.db", db_path);
+ }
+ }
+
+ // choose min_alloc_size
+ if (cct->_conf->bluestore_min_alloc_size) {
+ min_alloc_size = cct->_conf->bluestore_min_alloc_size;
+ } else {
+ assert(bdev);
+ if (bdev->is_rotational()) {
+ min_alloc_size = cct->_conf->bluestore_min_alloc_size_hdd;
+ } else {
+ min_alloc_size = cct->_conf->bluestore_min_alloc_size_ssd;
+ }
+ }
+
+ // make sure min_alloc_size is a power of 2.
+ if (!ISP2(min_alloc_size)) {
+ derr << __func__ << " min_alloc_size 0x"
+ << std::hex << min_alloc_size << std::dec
+ << " is not a power of 2!"
+ << dendl;
+ r = -EINVAL;
+ goto out_close_bdev;
+ }
+
r = _open_db(true);
if (r < 0)
goto out_close_bdev;
t->set(PREFIX_SUPER, "blobid_max", bl);
}
- // choose min_alloc_size
- if (cct->_conf->bluestore_min_alloc_size) {
- min_alloc_size = cct->_conf->bluestore_min_alloc_size;
- } else {
- assert(bdev);
- if (bdev->is_rotational()) {
- min_alloc_size = cct->_conf->bluestore_min_alloc_size_hdd;
- } else {
- min_alloc_size = cct->_conf->bluestore_min_alloc_size_ssd;
- }
- }
- _set_alloc_sizes();
{
bufferlist bl;
::encode((uint64_t)min_alloc_size, bl);
db->submit_transaction_sync(t);
}
- r = _open_alloc();
- if (r < 0)
- goto out_close_fm;
r = write_meta("kv_backend", cct->_conf->bluestore_kvbackend);
if (r < 0)
- goto out_close_alloc;
- r = write_meta("bluefs", stringify((int)cct->_conf->bluestore_bluefs));
+ goto out_close_fm;
+
+ r = write_meta("bluefs", stringify(bluefs ? 1 : 0));
if (r < 0)
- goto out_close_alloc;
+ goto out_close_fm;
if (fsid != old_fsid) {
r = _write_fsid();
if (r < 0) {
derr << __func__ << " error writing fsid: " << cpp_strerror(r) << dendl;
- goto out_close_alloc;
+ goto out_close_fm;
}
}
- out_close_alloc:
- _close_alloc();
out_close_fm:
_close_fm();
out_close_db:
{
dout(1) << __func__ << " path " << path << dendl;
+ _kv_only = kv_only;
+
{
string type;
int r = read_meta("type", &type);
int BlueStore::umount()
{
- assert(mounted);
+ assert(_kv_only || mounted);
dout(1) << __func__ << dendl;
_osr_drain_all();
_osr_unregister_all();
- mempool_thread.shutdown();
-
- dout(20) << __func__ << " stopping kv thread" << dendl;
- _kv_stop();
- _reap_collections();
- _flush_cache();
- dout(20) << __func__ << " closing" << dendl;
-
mounted = false;
- _close_alloc();
- _close_fm();
+ if (!_kv_only) {
+ mempool_thread.shutdown();
+ dout(20) << __func__ << " stopping kv thread" << dendl;
+ _kv_stop();
+ _reap_collections();
+ _flush_cache();
+ dout(20) << __func__ << " closing" << dendl;
+
+ _close_alloc();
+ _close_fm();
+ }
_close_db();
_close_bdev();
_close_fsid();
}
bool already = false;
apply(
- e.offset, e.length, block_size, used_blocks, __func__,
+ e.offset, e.length, min_alloc_size, used_blocks, __func__,
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
if (bs.test(pos))
already = true;
return errors;
}
-int BlueStore::fsck(bool deep)
+int BlueStore::_fsck(bool deep, bool repair)
{
- dout(1) << __func__ << (deep ? " (deep)" : " (shallow)") << " start" << dendl;
+ dout(1) << __func__
+ << (repair ? " repair" : " fsck")
+ << (deep ? " (deep)" : " (shallow)") << " start" << dendl;
int errors = 0;
+ int repaired = 0;
typedef btree::btree_set<
uint64_t,std::less<uint64_t>,
if (r < 0)
goto out_scan;
- used_blocks.resize(bdev->get_size() / block_size);
+ used_blocks.resize(bdev->get_size() / min_alloc_size);
apply(
- 0, SUPER_RESERVED, block_size, used_blocks, "0~SUPER_RESERVED",
+ 0, MAX(min_alloc_size, SUPER_RESERVED), min_alloc_size, used_blocks,
+ "0~SUPER_RESERVED",
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
bs.set(pos);
}
if (bluefs) {
for (auto e = bluefs_extents.begin(); e != bluefs_extents.end(); ++e) {
apply(
- e.get_start(), e.get_len(), block_size, used_blocks, "bluefs",
+ e.get_start(), e.get_len(), min_alloc_size, used_blocks, "bluefs",
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
bs.set(pos);
}
if (is_extent_shard_key(it->key())) {
while (!expecting_shards.empty() &&
expecting_shards.front() < it->key()) {
- derr << __func__ << " error: missing shard key "
+ derr << "fsck error: missing shard key "
<< pretty_binary_string(expecting_shards.front())
<< dendl;
++errors;
uint32_t offset;
string okey;
get_key_extent_shard(it->key(), &okey, &offset);
- derr << __func__ << " error: stray shard 0x" << std::hex << offset
+ derr << "fsck error: stray shard 0x" << std::hex << offset
<< std::dec << dendl;
if (expecting_shards.empty()) {
- derr << __func__ << " error: " << pretty_binary_string(it->key())
+ derr << "fsck error: " << pretty_binary_string(it->key())
<< " is unexpected" << dendl;
++errors;
continue;
}
while (expecting_shards.front() > it->key()) {
- derr << __func__ << " error: saw " << pretty_binary_string(it->key())
+ derr << "fsck error: saw " << pretty_binary_string(it->key())
<< dendl;
- derr << __func__ << " error: exp "
+ derr << "fsck error: exp "
<< pretty_binary_string(expecting_shards.front()) << dendl;
++errors;
expecting_shards.pop_front();
ghobject_t oid;
int r = get_key_object(it->key(), &oid);
if (r < 0) {
- derr << __func__ << " error: bad object key "
+ derr << "fsck error: bad object key "
<< pretty_binary_string(it->key()) << dendl;
++errors;
continue;
}
}
if (!c) {
- derr << __func__ << " error: stray object " << oid
+ derr << "fsck error: stray object " << oid
<< " not owned by any collection" << dendl;
++errors;
continue;
if (!expecting_shards.empty()) {
for (auto &k : expecting_shards) {
- derr << __func__ << " error: missing shard key "
+ derr << "fsck error: missing shard key "
<< pretty_binary_string(k) << dendl;
}
++errors;
OnodeRef o = c->get_onode(oid, false);
if (o->onode.nid) {
if (o->onode.nid > nid_max) {
- derr << __func__ << " error: " << oid << " nid " << o->onode.nid
+ derr << "fsck error: " << oid << " nid " << o->onode.nid
<< " > nid_max " << nid_max << dendl;
++errors;
}
if (used_nids.count(o->onode.nid)) {
- derr << __func__ << " error: " << oid << " nid " << o->onode.nid
+ derr << "fsck error: " << oid << " nid " << o->onode.nid
<< " already in use" << dendl;
++errors;
continue; // go for next object
get_extent_shard_key(o->key, s.shard_info->offset,
&expecting_shards.back());
if (s.shard_info->offset >= o->onode.size) {
- derr << __func__ << " error: " << oid << " shard 0x" << std::hex
+ derr << "fsck error: " << oid << " shard 0x" << std::hex
<< s.shard_info->offset << " past EOF at 0x" << o->onode.size
<< std::dec << dendl;
++errors;
for (auto& l : o->extent_map.extent_map) {
dout(20) << __func__ << " " << l << dendl;
if (l.logical_offset < pos) {
- derr << __func__ << " error: " << oid << " lextent at 0x"
+ derr << "fsck error: " << oid << " lextent at 0x"
<< std::hex << l.logical_offset
<< " overlaps with the previous, which ends at 0x" << pos
<< std::dec << dendl;
++errors;
}
if (o->extent_map.spans_shard(l.logical_offset, l.length)) {
- derr << __func__ << " error: " << oid << " lextent at 0x"
+ derr << "fsck error: " << oid << " lextent at 0x"
<< std::hex << l.logical_offset << "~" << l.length
<< " spans a shard boundary"
<< std::dec << dendl;
<< std::dec << " for " << *i.first << dendl;
const bluestore_blob_t& blob = i.first->get_blob();
if (i.second & blob.unused) {
- derr << __func__ << " error: " << oid << " blob claims unused 0x"
+ derr << "fsck error: " << oid << " blob claims unused 0x"
<< std::hex << blob.unused
<< " but extents reference 0x" << i.second
<< " on blob " << *i.first << dendl;
if ((blob.unused & mask) == mask) {
// this csum chunk region is marked unused
if (blob.get_csum_item(p) != 0) {
- derr << __func__ << " error: " << oid
+ derr << "fsck error: " << oid
<< " blob claims csum chunk 0x" << std::hex << pos
<< "~" << csum_chunk_size
<< " is unused (mask 0x" << mask << " of unused 0x"
const bluestore_blob_t& blob = i.first->get_blob();
bool equal = i.first->get_blob_use_tracker().equal(i.second);
if (!equal) {
- derr << __func__ << " error: " << oid << " blob " << *i.first
+ derr << "fsck error: " << oid << " blob " << *i.first
<< " doesn't match expected ref_map " << i.second << dendl;
++errors;
}
}
if (blob.is_shared()) {
if (i.first->shared_blob->get_sbid() > blobid_max) {
- derr << __func__ << " error: " << oid << " blob " << blob
+ derr << "fsck error: " << oid << " blob " << blob
<< " sbid " << i.first->shared_blob->get_sbid() << " > blobid_max "
<< blobid_max << dendl;
++errors;
} else if (i.first->shared_blob->get_sbid() == 0) {
- derr << __func__ << " error: " << oid << " blob " << blob
+ derr << "fsck error: " << oid << " blob " << blob
<< " marked as shared but has uninitialized sbid"
<< dendl;
++errors;
int r = _do_read(c.get(), o, 0, o->onode.size, bl, 0);
if (r < 0) {
++errors;
- derr << __func__ << " error: " << oid << " error during read: "
+ derr << "fsck error: " << oid << " error during read: "
<< cpp_strerror(r) << dendl;
}
}
// omap
if (o->onode.has_omap()) {
if (used_omap_head.count(o->onode.nid)) {
- derr << __func__ << " error: " << oid << " omap_head " << o->onode.nid
+ derr << "fsck error: " << oid << " omap_head " << o->onode.nid
<< " already in use" << dendl;
++errors;
} else {
string key = it->key();
uint64_t sbid;
if (get_key_shared_blob(key, &sbid)) {
- derr << __func__ << " error: bad key '" << key
+ derr << "fsck error: bad key '" << key
<< "' in shared blob namespace" << dendl;
++errors;
continue;
}
auto p = sb_info.find(sbid);
if (p == sb_info.end()) {
- derr << __func__ << " error: found stray shared blob data for sbid 0x"
+ derr << "fsck error: found stray shared blob data for sbid 0x"
<< std::hex << sbid << std::dec << dendl;
++errors;
} else {
::decode(shared_blob, blp);
dout(20) << __func__ << " " << *sbi.sb << " " << shared_blob << dendl;
if (shared_blob.ref_map != sbi.ref_map) {
- derr << __func__ << " error: shared blob 0x" << std::hex << sbid
+ derr << "fsck error: shared blob 0x" << std::hex << sbid
<< std::dec << " ref_map " << shared_blob.ref_map
<< " != expected " << sbi.ref_map << dendl;
++errors;
}
}
for (auto &p : sb_info) {
- derr << __func__ << " error: shared_blob 0x" << p.first
+ derr << "fsck error: shared_blob 0x" << p.first
<< " key is missing (" << *p.second.sb << ")" << dendl;
++errors;
}
if (!(actual_statfs == expected_statfs)) {
- derr << __func__ << " error: actual " << actual_statfs
+ derr << "fsck error: actual " << actual_statfs
<< " != expected " << expected_statfs << dendl;
++errors;
}
uint64_t omap_head;
_key_decode_u64(it->key().c_str(), &omap_head);
if (used_omap_head.count(omap_head) == 0) {
- derr << __func__ << " error: found stray omap data on omap_head "
+ derr << "fsck error: found stray omap data on omap_head "
<< omap_head << dendl;
++errors;
}
try {
::decode(wt, p);
} catch (buffer::error& e) {
- derr << __func__ << " error: failed to decode deferred txn "
+ derr << "fsck error: failed to decode deferred txn "
<< pretty_binary_string(it->key()) << dendl;
r = -EIO;
goto out_scan;
<< " released 0x" << std::hex << wt.released << std::dec << dendl;
for (auto e = wt.released.begin(); e != wt.released.end(); ++e) {
apply(
- e.get_start(), e.get_len(), block_size, used_blocks, "deferred",
+ e.get_start(), e.get_len(), min_alloc_size, used_blocks, "deferred",
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
bs.set(pos);
}
// know they are allocated.
for (auto e = bluefs_extents.begin(); e != bluefs_extents.end(); ++e) {
apply(
- e.get_start(), e.get_len(), block_size, used_blocks, "bluefs_extents",
+ e.get_start(), e.get_len(), min_alloc_size, used_blocks,
+ "bluefs_extents",
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
bs.reset(pos);
}
while (fm->enumerate_next(&offset, &length)) {
bool intersects = false;
apply(
- offset, length, block_size, used_blocks, "free",
+ offset, length, min_alloc_size, used_blocks, "free",
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
if (bs.test(pos)) {
intersects = true;
}
);
if (intersects) {
- derr << __func__ << " error: free extent 0x" << std::hex << offset
- << "~" << length << std::dec
- << " intersects allocated blocks" << dendl;
- ++errors;
+ if (offset == SUPER_RESERVED &&
+ length == min_alloc_size - SUPER_RESERVED) {
+ // this is due to the change just after luminous to min_alloc_size
+ // granularity allocations, and our baked in assumption at the top
+ // of _fsck that 0~ROUND_UP_TO(SUPER_RESERVED,min_alloc_size) is used
+ // (vs luminous's ROUND_UP_TO(SUPER_RESERVED,block_size)). harmless,
+ // since we will never allocate this region below min_alloc_size.
+ dout(10) << __func__ << " ignoring free extent between SUPER_RESERVED"
+ << " and min_alloc_size, 0x" << std::hex << offset << "~"
+ << length << std::dec << dendl;
+ } else {
+ derr << "fsck error: free extent 0x" << std::hex << offset
+ << "~" << length << std::dec
+ << " intersects allocated blocks" << dendl;
+ ++errors;
+ }
}
}
+ fm->enumerate_reset();
size_t count = used_blocks.count();
if (used_blocks.size() != count) {
assert(used_blocks.size() > count);
- derr << __func__ << " error: leaked some space;"
- << (used_blocks.size() - count) * min_alloc_size
- << " bytes leaked" << dendl;
++errors;
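+ // flip so set bits mark the leaked (allocated but unreferenced) blocks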
+ used_blocks.flip();
+ size_t start = used_blocks.find_first();
+ while (start != decltype(used_blocks)::npos) {
+ size_t cur = start;
+ while (true) {
+ size_t next = used_blocks.find_next(cur);
+ if (next != cur + 1) {
+ derr << "fsck error: leaked extent 0x" << std::hex
+ << ((uint64_t)start * min_alloc_size) << "~"
+ << ((cur + 1 - start) * min_alloc_size) << std::dec
+ << dendl;
+ start = next;
+ break;
+ }
+ cur = next;
+ }
+ }
+ used_blocks.flip();
}
}
<< dendl;
utime_t duration = ceph_clock_now() - start;
- dout(1) << __func__ << " finish with " << errors << " errors in "
+ dout(1) << __func__ << " finish with " << errors << " errors, " << repaired
+ << " repaired, " << (errors - repaired) << " remaining in "
<< duration << " seconds" << dendl;
- return errors;
+ return errors - repaired;
}
void BlueStore::collect_metadata(map<string,string> *pm)
uint64_t offset,
size_t length,
bufferlist& bl,
- uint32_t op_flags,
- bool allow_eio)
+ uint32_t op_flags)
{
CollectionHandle c = _get_collection(cid);
if (!c)
return -ENOENT;
- return read(c, oid, offset, length, bl, op_flags, allow_eio);
+ return read(c, oid, offset, length, bl, op_flags);
}
int BlueStore::read(
uint64_t offset,
size_t length,
bufferlist& bl,
- uint32_t op_flags,
- bool allow_eio)
+ uint32_t op_flags)
{
utime_t start = ceph_clock_now();
Collection *c = static_cast<Collection *>(c_.get());
}
out:
- assert(allow_eio || r != -EIO);
if (r == 0 && _debug_data_eio(oid)) {
r = -EIO;
derr << __func__ << " " << c->cid << " " << oid << " INJECT EIO" << dendl;
+ } else if (cct->_conf->bluestore_debug_random_read_err &&
+ (rand() % (int)(cct->_conf->bluestore_debug_random_read_err * 100.0)) == 0) {
+ dout(0) << __func__ << ": inject random EIO" << dendl;
+ r = -EIO;
}
dout(10) << __func__ << " " << cid << " " << oid
<< " 0x" << std::hex << offset << "~" << length << std::dec
uint64_t val;
::decode(val, p);
min_alloc_size = val;
+ min_alloc_size_order = ctz(val);
+ assert(min_alloc_size == 1u << min_alloc_size_order);
} catch (buffer::error& e) {
derr << __func__ << " unable to read min_alloc_size" << dendl;
return -EIO;
dout(10) << __func__ << " min_alloc_size 0x" << std::hex << min_alloc_size
<< std::dec << dendl;
}
- open_statfs();
+ _open_statfs();
_set_alloc_sizes();
_set_throttle_params();
void BlueStore::_assign_nid(TransContext *txc, OnodeRef o)
{
- if (o->onode.nid)
+ if (o->onode.nid) {
+ assert(o->exists);
return;
+ }
uint64_t nid = ++nid_last;
dout(20) << __func__ << " " << nid << dendl;
o->onode.nid = nid;
txc->last_nid = nid;
+ o->exists = true;
}
uint64_t BlueStore::_assign_blobid(TransContext *txc)
}
OpSequencerRef osr = txc->osr;
- CollectionRef c;
bool empty = false;
bool submit_deferred = false;
OpSequencer::q_list_t releasing_txc;
break;
}
- if (!c && txc->first_collection) {
- c = txc->first_collection;
- }
osr->q.pop_front();
releasing_txc.push_back(*txc);
notify = true;
++deferred_aggressive; // FIXME: maybe osr-local aggressive flag?
{
// submit anything pending
- std::lock_guard<std::mutex> l(deferred_lock);
+ deferred_lock.lock();
if (osr->deferred_pending) {
- _deferred_submit(osr);
+ _deferred_submit_unlock(osr);
+ } else {
+ deferred_lock.unlock();
}
}
{
++deferred_aggressive;
{
// submit anything pending
- std::lock_guard<std::mutex> l(deferred_lock);
- _deferred_try_submit();
+ deferred_try_submit();
}
{
// wake up any previously finished deferred events
finishers.push_back(f);
}
+ deferred_finisher.start();
for (auto f : finishers) {
f->start();
}
kv_finalize_stop = false;
}
dout(10) << __func__ << " stopping finishers" << dendl;
+ deferred_finisher.wait_for_empty();
+ deferred_finisher.stop();
for (auto f : finishers) {
f->wait_for_empty();
f->stop();
t->set(PREFIX_SUPER, "blobid_max", bl);
dout(10) << __func__ << " new_blobid_max " << new_blobid_max << dendl;
}
- for (auto txc : kv_submitting) {
- assert(txc->state == TransContext::STATE_KV_QUEUED);
- txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat);
- int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction(txc->t);
- assert(r == 0);
- _txc_applied_kv(txc);
- --txc->osr->kv_committing_serially;
- txc->state = TransContext::STATE_KV_SUBMITTED;
- if (txc->osr->kv_submitted_waiters) {
- std::lock_guard<std::mutex> l(txc->osr->qlock);
- if (txc->osr->_is_all_kv_submitted()) {
- txc->osr->qcond.notify_all();
+
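+ // a single pass over kv_committing now handles both queued and already-submitted txcs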
+ for (auto txc : kv_committing) {
+ if (txc->state == TransContext::STATE_KV_QUEUED) {
+ txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat);
+ int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction(txc->t);
+ assert(r == 0);
+ _txc_applied_kv(txc);
+ --txc->osr->kv_committing_serially;
+ txc->state = TransContext::STATE_KV_SUBMITTED;
+ if (txc->osr->kv_submitted_waiters) {
+ std::lock_guard<std::mutex> l(txc->osr->qlock);
+ if (txc->osr->_is_all_kv_submitted()) {
+ txc->osr->qcond.notify_all();
+ }
}
+
+ } else {
+ assert(txc->state == TransContext::STATE_KV_SUBMITTED);
+ txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat);
}
- }
- for (auto txc : kv_committing) {
if (txc->had_ios) {
--txc->osr->txc_with_unstable_io;
}
- txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat);
}
// release throttle *before* we commit. this allows new ops
dout(10) << __func__ << " blobid_max now " << blobid_max << dendl;
}
- utime_t finish = ceph_clock_now();
- utime_t dur_flush = after_flush - start;
- utime_t dur_kv = finish - after_flush;
- utime_t dur = finish - start;
- dout(20) << __func__ << " committed " << kv_committing.size()
- << " cleaned " << deferred_stable.size()
- << " in " << dur
- << " (" << dur_flush << " flush + " << dur_kv << " kv commit)"
- << dendl;
- if (logger) {
+ {
+ utime_t finish = ceph_clock_now();
+ utime_t dur_flush = after_flush - start;
+ utime_t dur_kv = finish - after_flush;
+ utime_t dur = finish - start;
+ dout(20) << __func__ << " committed " << kv_committing.size()
+ << " cleaned " << deferred_stable.size()
+ << " in " << dur
+ << " (" << dur_flush << " flush + " << dur_kv << " kv commit)"
+ << dendl;
logger->tinc(l_bluestore_kv_flush_lat, dur_flush);
logger->tinc(l_bluestore_kv_commit_lat, dur_kv);
logger->tinc(l_bluestore_kv_lat, dur);
deferred_stable.clear();
if (!deferred_aggressive) {
- std::lock_guard<std::mutex> l(deferred_lock);
if (deferred_queue_size >= deferred_batch_ops.load() ||
throttle_deferred_bytes.past_midpoint()) {
- _deferred_try_submit();
+ deferred_try_submit();
}
}
void BlueStore::_deferred_queue(TransContext *txc)
{
dout(20) << __func__ << " txc " << txc << " osr " << txc->osr << dendl;
- std::lock_guard<std::mutex> l(deferred_lock);
+ deferred_lock.lock();
if (!txc->osr->deferred_pending &&
!txc->osr->deferred_running) {
deferred_queue.push_back(*txc->osr);
}
if (deferred_aggressive &&
!txc->osr->deferred_running) {
- _deferred_submit(txc->osr.get());
+ _deferred_submit_unlock(txc->osr.get());
+ } else {
+ deferred_lock.unlock();
}
}
-void BlueStore::_deferred_try_submit()
+void BlueStore::deferred_try_submit()
{
dout(20) << __func__ << " " << deferred_queue.size() << " osrs, "
<< deferred_queue_size << " txcs" << dendl;
+ std::lock_guard<std::mutex> l(deferred_lock);
+ vector<OpSequencerRef> osrs;
+ osrs.reserve(deferred_queue.size());
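+ // take refs to each osr: _deferred_submit_unlock drops deferred_lock mid-iteration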
for (auto& osr : deferred_queue) {
- if (!osr.deferred_running) {
- _deferred_submit(&osr);
+ osrs.push_back(&osr);
+ }
+ for (auto& osr : osrs) {
+ if (osr->deferred_pending) {
+ if (!osr->deferred_running) {
+ _deferred_submit_unlock(osr.get());
+ deferred_lock.lock();
+ } else {
+ dout(20) << __func__ << " osr " << osr << " already has running"
+ << dendl;
+ }
+ } else {
+ dout(20) << __func__ << " osr " << osr << " has no pending" << dendl;
}
}
}
-void BlueStore::_deferred_submit(OpSequencer *osr)
+void BlueStore::_deferred_submit_unlock(OpSequencer *osr)
{
dout(10) << __func__ << " osr " << osr
<< " " << osr->deferred_pending->iomap.size() << " ios pending "
bl.claim_append(i->second.bl);
++i;
}
+
+ deferred_lock.unlock();
bdev->aio_submit(&b->ioc);
}
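+// context queued on deferred_finisher so submission is retried outside the aio completion path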
+struct C_DeferredTrySubmit : public Context {
+ BlueStore *store;
+ C_DeferredTrySubmit(BlueStore *s) : store(s) {}
+ void finish(int r) {
+ store->deferred_try_submit();
+ }
+};
+
void BlueStore::_deferred_aio_finish(OpSequencer *osr)
{
dout(10) << __func__ << " osr " << osr << dendl;
assert(osr->deferred_running == b);
osr->deferred_running = nullptr;
if (!osr->deferred_pending) {
+ dout(20) << __func__ << " dequeueing" << dendl;
auto q = deferred_queue.iterator_to(*osr);
deferred_queue.erase(q);
} else if (deferred_aggressive) {
- _deferred_submit(osr);
+ dout(20) << __func__ << " queuing async deferred_try_submit" << dendl;
+ deferred_finisher.queue(new C_DeferredTrySubmit(this));
+ } else {
+ dout(20) << __func__ << " leaving queued, more pending" << dendl;
}
}
if (txc->deferred_txn) {
// ensure we do not block here because of deferred writes
if (!throttle_deferred_bytes.get_or_fail(txc->cost)) {
+ dout(10) << __func__ << " failed to get throttle_deferred_bytes, going aggressive"
+ << dendl;
+ ++deferred_aggressive;
deferred_try_submit();
+ {
+ // wake up any previously finished deferred events
+ std::lock_guard<std::mutex> l(kv_lock);
+ kv_cond.notify_one();
+ }
throttle_deferred_bytes.get(txc->cost);
- }
+ --deferred_aggressive;
+ }
}
utime_t tend = ceph_clock_now();
for (vector<coll_t>::iterator p = i.colls.begin(); p != i.colls.end();
++p, ++j) {
cvec[j] = _get_collection(*p);
-
- // note first collection we reference
- if (!txc->first_collection)
- txc->first_collection = cvec[j];
}
vector<OnodeRef> ovec(i.objects.size());
case Transaction::OP_TRUNCATE:
{
uint64_t off = op->off;
- _truncate(txc, c, o, off);
+ r = _truncate(txc, c, o, off);
}
break;
{
dout(15) << __func__ << " " << c->cid << " " << o->oid << dendl;
int r = 0;
- o->exists = true;
_assign_nid(txc, o);
txc->write_onode(o);
dout(10) << __func__ << " " << c->cid << " " << o->oid << " = " << r << dendl;
if (front_pad) {
size_t front_copy = MIN(chunk_size - front_pad, length);
bufferptr z = buffer::create_page_aligned(chunk_size);
- memset(z.c_str(), 0, front_pad);
+ z.zero(0, front_pad, false);
pad_count += front_pad;
- memcpy(z.c_str() + front_pad, bl->get_contiguous(0, front_copy), front_copy);
+ bl->copy(0, front_copy, z.c_str() + front_pad);
if (front_copy + front_pad < chunk_size) {
back_pad = chunk_size - (length + front_pad);
- memset(z.c_str() + front_pad + length, 0, back_pad);
+ z.zero(front_pad + length, back_pad, false);
pad_count += back_pad;
}
bufferlist old, t;
bl->append(z);
bl->claim_append(t);
*offset -= front_pad;
- length += front_pad + back_pad;
+ length += pad_count;
}
// back
back_pad = chunk_size - back_copy;
assert(back_copy <= length);
bufferptr tail(chunk_size);
- memcpy(tail.c_str(), bl->get_contiguous(length - back_copy, back_copy),
- back_copy);
- memset(tail.c_str() + back_copy, 0, back_pad);
+ bl->copy(length - back_copy, back_copy, tail.c_str());
+ tail.zero(back_copy, back_pad, false);
bufferlist old;
old.swap(*bl);
bl->substr_of(old, 0, length - back_copy);
// search suitable extent in both forward and reverse direction in
// [offset - target_max_blob_size, offset + target_max_blob_size] range
- // then check if blob can be reused via try_reuse_blob func or apply
+ // then check if blob can be reused via can_reuse_blob func or apply
// direct/deferred write (the latter for extents including or higher
// than 'offset' only).
do {
b->get_blob().get_ondisk_length() >= b_off + b_len &&
b->get_blob().is_unused(b_off, b_len) &&
b->get_blob().is_allocated(b_off, b_len)) {
- bufferlist padded;
- _apply_padding(head_pad, tail_pad, bl, padded);
+ _apply_padding(head_pad, tail_pad, bl);
dout(20) << __func__ << " write to unused 0x" << std::hex
<< b_off << "~" << b_len
<< " pad 0x" << head_pad << " + 0x" << tail_pad
<< std::dec << " of mutable " << *b << dendl;
- _buffer_cache_write(txc, b, b_off, padded,
+ _buffer_cache_write(txc, b, b_off, bl,
wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
if (!g_conf->bluestore_debug_omit_block_device_write) {
op->extents.emplace_back(bluestore_pextent_t(offset, length));
return 0;
});
- op->data = padded;
+ op->data = bl;
} else {
b->get_blob().map_bl(
- b_off, padded,
+ b_off, bl,
[&](uint64_t offset, bufferlist& t) {
bdev->aio_write(offset, t,
&txc->ioc, wctx->buffered);
});
}
}
- b->dirty_blob().calc_csum(b_off, padded);
+ b->dirty_blob().calc_csum(b_off, bl);
dout(20) << __func__ << " lex old " << *ep << dendl;
Extent *le = o->extent_map.set_lextent(c, offset, b_off + head_pad, length,
b,
b_len % chunk_size == 0 &&
b->get_blob().is_allocated(b_off, b_len)) {
- bufferlist padded;
- _apply_padding(head_pad, tail_pad, bl, padded);
+ _apply_padding(head_pad, tail_pad, bl);
dout(20) << __func__ << " reading head 0x" << std::hex << head_read
<< " and tail 0x" << tail_read << std::dec << dendl;
head_bl.append_zero(zlen);
logger->inc(l_bluestore_write_pad_bytes, zlen);
}
- head_bl.claim_append(padded);
- padded.swap(head_bl);
+ bl.claim_prepend(head_bl);
logger->inc(l_bluestore_write_penalty_read_ops);
}
if (tail_read) {
tail_bl.append_zero(zlen);
logger->inc(l_bluestore_write_pad_bytes, zlen);
}
- padded.claim_append(tail_bl);
+ bl.claim_append(tail_bl);
logger->inc(l_bluestore_write_penalty_read_ops);
}
logger->inc(l_bluestore_write_small_pre_read);
bluestore_deferred_op_t *op = _get_deferred_op(txc, o);
op->op = bluestore_deferred_op_t::OP_WRITE;
- _buffer_cache_write(txc, b, b_off, padded,
+ _buffer_cache_write(txc, b, b_off, bl,
wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
int r = b->get_blob().map(
});
assert(r == 0);
if (b->get_blob().csum_type) {
- b->dirty_blob().calc_csum(b_off, padded);
+ b->dirty_blob().calc_csum(b_off, bl);
}
- op->data.claim(padded);
+ op->data.claim(bl);
dout(20) << __func__ << " deferred write 0x" << std::hex << b_off << "~"
<< b_len << std::dec << " of mutable " << *b
<< " at " << op->extents << dendl;
logger->inc(l_bluestore_write_small_deferred);
return;
}
- //try to reuse blob
- if (b->try_reuse_blob(min_alloc_size,
+ // try to reuse blob if we can
+ if (b->can_reuse_blob(min_alloc_size,
max_bsize,
offset0 - bstart,
&alloc_len)) {
_pad_zeros(&bl, &b_off0, chunk_size);
dout(20) << __func__ << " reuse blob " << *b << std::hex
- << " (" << b_off0 << "~" << bl.length() << ")"
- << " (" << b_off << "~" << length << ")"
+ << " (0x" << b_off0 << "~" << bl.length() << ")"
+ << " (0x" << b_off << "~" << length << ")"
<< std::dec << dendl;
o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
auto bstart = prev_ep->blob_start();
dout(20) << __func__ << " considering " << *b
<< " bstart 0x" << std::hex << bstart << std::dec << dendl;
- if (b->try_reuse_blob(min_alloc_size,
+ if (b->can_reuse_blob(min_alloc_size,
max_bsize,
offset0 - bstart,
&alloc_len)) {
_pad_zeros(&bl, &b_off0, chunk_size);
dout(20) << __func__ << " reuse blob " << *b << std::hex
- << " (" << b_off0 << "~" << bl.length() << ")"
- << " (" << b_off << "~" << length << ")"
+ << " (0x" << b_off0 << "~" << bl.length() << ")"
+ << " (0x" << b_off << "~" << length << ")"
<< std::dec << dendl;
o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
auto min_off = offset >= max_bsize ? offset - max_bsize : 0;
// search suitable extent in both forward and reverse direction in
// [offset - target_max_blob_size, offset + target_max_blob_size] range
- // then check if blob can be reused via try_reuse_blob func.
+ // then check if blob can be reused via can_reuse_blob func.
bool any_change;
do {
any_change = false;
if (ep != end && ep->logical_offset < offset + max_bsize) {
if (offset >= ep->blob_start() &&
- ep->blob->try_reuse_blob(min_alloc_size, max_bsize,
+ ep->blob->can_reuse_blob(min_alloc_size, max_bsize,
offset - ep->blob_start(),
&l)) {
b = ep->blob;
b_off = offset - ep->blob_start();
prev_ep = end; // to avoid check below
dout(20) << __func__ << " reuse blob " << *b << std::hex
- << " (" << b_off << "~" << l << ")" << std::dec << dendl;
+ << " (0x" << b_off << "~" << l << ")" << std::dec << dendl;
} else {
++ep;
any_change = true;
}
if (prev_ep != end && prev_ep->logical_offset >= min_off) {
- if (prev_ep->blob->try_reuse_blob(min_alloc_size, max_bsize,
+ if (prev_ep->blob->can_reuse_blob(min_alloc_size, max_bsize,
offset - prev_ep->blob_start(),
&l)) {
b = prev_ep->blob;
b_off = offset - prev_ep->blob_start();
dout(20) << __func__ << " reuse blob " << *b << std::hex
- << " (" << b_off << "~" << l << ")" << std::dec << dendl;
+ << " (0x" << b_off << "~" << l << ")" << std::dec << dendl;
} else if (prev_ep != begin) {
--prev_ep;
any_change = true;
dout(20) << __func__ << " txc " << txc
<< " " << wctx->writes.size() << " blobs"
<< dendl;
-
- uint64_t need = 0;
- auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size);
- for (auto &wi : wctx->writes) {
- need += wi.blob_length;
- }
- int r = alloc->reserve(need);
- if (r < 0) {
- derr << __func__ << " failed to reserve 0x" << std::hex << need << std::dec
- << dendl;
- return r;
+ if (wctx->writes.empty()) {
+ return 0;
}
- uint64_t hint = 0;
CompressorRef c;
double crr = 0;
if (wctx->compress) {
cct->_conf->bluestore_compression_required_ratio,
[&]() {
double val;
- if(coll->pool_opts.get(pool_opts_t::COMPRESSION_REQUIRED_RATIO, &val)) {
+ if (coll->pool_opts.get(pool_opts_t::COMPRESSION_REQUIRED_RATIO, &val)) {
return boost::optional<double>(val);
}
return boost::optional<double>();
csum,
[&]() {
int val;
- if(coll->pool_opts.get(pool_opts_t::CSUM_TYPE, &val)) {
+ if (coll->pool_opts.get(pool_opts_t::CSUM_TYPE, &val)) {
return boost::optional<int>(val);
}
return boost::optional<int>();
}
);
+ // compress (as needed) and calc needed space
+ uint64_t need = 0;
+ auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size);
for (auto& wi : wctx->writes) {
- BlobRef b = wi.b;
- bluestore_blob_t& dblob = b->dirty_blob();
- uint64_t b_off = wi.b_off;
- bufferlist *l = &wi.bl;
- uint64_t final_length = wi.blob_length;
- uint64_t csum_length = wi.blob_length;
- unsigned csum_order = block_size_order;
- bufferlist compressed_bl;
- bool compressed = false;
- if(c && wi.blob_length > min_alloc_size) {
-
+ if (c && wi.blob_length > min_alloc_size) {
utime_t start = ceph_clock_now();
// compress
- assert(b_off == 0);
- assert(wi.blob_length == l->length());
- bluestore_compression_header_t chdr;
- chdr.type = c->get_type();
+ assert(wi.b_off == 0);
+ assert(wi.blob_length == wi.bl.length());
+
// FIXME: memory alignment here is bad
bufferlist t;
-
- r = c->compress(*l, t);
+ int r = c->compress(wi.bl, t);
assert(r == 0);
+ bluestore_compression_header_t chdr;
+ chdr.type = c->get_type();
chdr.length = t.length();
- ::encode(chdr, compressed_bl);
- compressed_bl.claim_append(t);
- uint64_t rawlen = compressed_bl.length();
- uint64_t newlen = P2ROUNDUP(rawlen, min_alloc_size);
- uint64_t want_len_raw = final_length * crr;
+ ::encode(chdr, wi.compressed_bl);
+ wi.compressed_bl.claim_append(t);
+
+ wi.compressed_len = wi.compressed_bl.length();
+ uint64_t newlen = P2ROUNDUP(wi.compressed_len, min_alloc_size);
+ uint64_t want_len_raw = wi.blob_length * crr;
uint64_t want_len = P2ROUNDUP(want_len_raw, min_alloc_size);
- if (newlen <= want_len && newlen < final_length) {
- // Cool. We compressed at least as much as we were hoping to.
- // pad out to min_alloc_size
- compressed_bl.append_zero(newlen - rawlen);
- logger->inc(l_bluestore_write_pad_bytes, newlen - rawlen);
+ if (newlen <= want_len && newlen < wi.blob_length) {
+ // Cool. We compressed at least as much as we were hoping to.
+ // pad out to min_alloc_size
+ wi.compressed_bl.append_zero(newlen - wi.compressed_len);
+ logger->inc(l_bluestore_write_pad_bytes, newlen - wi.compressed_len);
dout(20) << __func__ << std::hex << " compressed 0x" << wi.blob_length
- << " -> 0x" << rawlen << " => 0x" << newlen
+ << " -> 0x" << wi.compressed_len << " => 0x" << newlen
<< " with " << c->get_type()
<< std::dec << dendl;
- txc->statfs_delta.compressed() += rawlen;
- txc->statfs_delta.compressed_original() += l->length();
+ txc->statfs_delta.compressed() += wi.compressed_len;
+ txc->statfs_delta.compressed_original() += wi.blob_length;
txc->statfs_delta.compressed_allocated() += newlen;
- l = &compressed_bl;
- final_length = newlen;
- csum_length = newlen;
- csum_order = ctz(newlen);
- dblob.set_compressed(wi.blob_length, rawlen);
- compressed = true;
- logger->inc(l_bluestore_compress_success_count);
+ logger->inc(l_bluestore_compress_success_count);
+ wi.compressed = true;
+ need += newlen;
} else {
- dout(20) << __func__ << std::hex << " 0x" << l->length()
- << " compressed to 0x" << rawlen << " -> 0x" << newlen
- << " with " << c->get_type()
- << ", which is more than required 0x" << want_len_raw
+ dout(20) << __func__ << std::hex << " 0x" << wi.blob_length
+ << " compressed to 0x" << wi.compressed_len << " -> 0x" << newlen
+ << " with " << c->get_type()
+ << ", which is more than required 0x" << want_len_raw
<< " -> 0x" << want_len
- << ", leaving uncompressed"
- << std::dec << dendl;
- logger->inc(l_bluestore_compress_rejected_count);
+ << ", leaving uncompressed"
+ << std::dec << dendl;
+ logger->inc(l_bluestore_compress_rejected_count);
+ need += wi.blob_length;
}
logger->tinc(l_bluestore_compress_lat,
ceph_clock_now() - start);
+ } else {
+ need += wi.blob_length;
}
- if (!compressed && wi.new_blob) {
+ }
+ int r = alloc->reserve(need);
+ if (r < 0) {
+ derr << __func__ << " failed to reserve 0x" << std::hex << need << std::dec
+ << dendl;
+ return r;
+ }
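+ // do a single bulk allocation for the whole transaction and carve it
+ // up per blob below, rather than calling into the allocator per blob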
+ AllocExtentVector prealloc;
+ prealloc.reserve(2 * wctx->writes.size());
+ int64_t prealloc_left = alloc->allocate(
+ need, min_alloc_size, need,
+ 0, &prealloc);
+ assert(prealloc_left == (int64_t)need);
+ dout(20) << __func__ << " prealloc " << prealloc << dendl;
+ auto prealloc_pos = prealloc.begin();
+
+ for (auto& wi : wctx->writes) {
+ BlobRef b = wi.b;
+ bluestore_blob_t& dblob = b->dirty_blob();
+ uint64_t b_off = wi.b_off;
+ bufferlist *l = &wi.bl;
+ uint64_t final_length = wi.blob_length;
+ uint64_t csum_length = wi.blob_length;
+ unsigned csum_order = block_size_order;
+ if (wi.compressed) {
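+ // use the compressed payload and lengths computed in the first pass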
+ final_length = wi.compressed_bl.length();
+ csum_length = final_length;
+ csum_order = ctz(csum_length);
+ l = &wi.compressed_bl;
+ dblob.set_compressed(wi.blob_length, wi.compressed_len);
+ } else if (wi.new_blob) {
// initialize newly created blob only
assert(dblob.is_mutable());
if (l->length() != wi.blob_length) {
if ((suggested_boff % (1 << csum_order)) == 0 &&
suggested_boff + final_length <= max_bsize &&
suggested_boff > b_off) {
- dout(20) << __func__ << " forcing blob_offset to "
+ dout(20) << __func__ << " forcing blob_offset to 0x"
<< std::hex << suggested_boff << std::dec << dendl;
assert(suggested_boff >= b_off);
csum_length += suggested_boff - b_off;
b_off = suggested_boff;
}
+ if (csum != Checksummer::CSUM_NONE) {
+ dout(20) << __func__ << " initialize csum setting for new blob " << *b
+ << " csum_type " << Checksummer::get_csum_type_string(csum)
+ << " csum_order " << csum_order
+ << " csum_length 0x" << std::hex << csum_length << std::dec
+ << dendl;
+ dblob.init_csum(csum, csum_order, csum_length);
+ }
}
AllocExtentVector extents;
- extents.reserve(4); // 4 should be (more than) enough for most allocations
- int64_t got = alloc->allocate(final_length, min_alloc_size,
- max_alloc_size.load(),
- hint, &extents);
- assert(got == (int64_t)final_length);
- need -= got;
- txc->statfs_delta.allocated() += got;
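+ // carve this blob's space out of the shared preallocation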
+ int64_t left = final_length;
+ while (left > 0) {
+ assert(prealloc_left > 0);
+ if (prealloc_pos->length <= left) {
+ prealloc_left -= prealloc_pos->length;
+ left -= prealloc_pos->length;
+ txc->statfs_delta.allocated() += prealloc_pos->length;
+ extents.push_back(*prealloc_pos);
+ ++prealloc_pos;
+ } else {
+ extents.emplace_back(prealloc_pos->offset, left);
+ prealloc_pos->offset += left;
+ prealloc_pos->length -= left;
+ prealloc_left -= left;
+ txc->statfs_delta.allocated() += left;
+ left = 0;
+ break;
+ }
+ }
for (auto& p : extents) {
- bluestore_pextent_t e = bluestore_pextent_t(p);
- txc->allocated.insert(e.offset, e.length);
- hint = p.end();
+ txc->allocated.insert(p.offset, p.length);
}
dblob.allocated(P2ALIGN(b_off, min_alloc_size), final_length, extents);
- dout(20) << __func__ << " blob " << *b
- << " csum_type " << Checksummer::get_csum_type_string(csum)
- << " csum_order " << csum_order
- << " csum_length 0x" << std::hex << csum_length << std::dec
- << dendl;
-
- if (csum != Checksummer::CSUM_NONE) {
- if (!dblob.has_csum()) {
- dblob.init_csum(csum, csum_order, csum_length);
- }
+ dout(20) << __func__ << " blob " << *b << dendl;
+ if (dblob.has_csum()) {
dblob.calc_csum(b_off, *l);
}
+
if (wi.mark_unused) {
auto b_end = b_off + wi.bl.length();
if (b_off) {
}
}
}
- if (need > 0) {
- alloc->unreserve(need);
- }
+ assert(prealloc_pos == prealloc.end());
+ assert(prealloc_left == 0);
return 0;
}
dout(20) << __func__ << " will prefer large blob and csum sizes" << dendl;
- auto order = min_alloc_size_order.load();
if (o->onode.expected_write_size) {
- wctx->csum_order = std::max(order,
+ wctx->csum_order = std::max(min_alloc_size_order,
(uint8_t)ctz(o->onode.expected_write_size));
} else {
- wctx->csum_order = order;
+ wctx->csum_order = min_alloc_size_order;
}
if (wctx->compress) {
dout(15) << __func__ << " " << c->cid << " " << o->oid
<< " 0x" << std::hex << offset << "~" << length << std::dec
<< dendl;
- o->exists = true;
- _assign_nid(txc, o);
- int r = _do_write(txc, c, o, offset, length, bl, fadvise_flags);
- txc->write_onode(o);
-
+ int r = 0;
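+ // reject writes that would extend past the maximum object size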
+ if (offset + length >= OBJECT_MAX_SIZE) {
+ r = -E2BIG;
+ } else {
+ _assign_nid(txc, o);
+ r = _do_write(txc, c, o, offset, length, bl, fadvise_flags);
+ txc->write_onode(o);
+ }
dout(10) << __func__ << " " << c->cid << " " << o->oid
<< " 0x" << std::hex << offset << "~" << length << std::dec
<< " = " << r << dendl;
dout(15) << __func__ << " " << c->cid << " " << o->oid
<< " 0x" << std::hex << offset << "~" << length << std::dec
<< dendl;
- o->exists = true;
- _assign_nid(txc, o);
- int r = _do_zero(txc, c, o, offset, length);
+ int r = 0;
+ if (offset + length >= OBJECT_MAX_SIZE) {
+ r = -E2BIG;
+ } else {
+ _assign_nid(txc, o);
+ r = _do_zero(txc, c, o, offset, length);
+ }
dout(10) << __func__ << " " << c->cid << " " << o->oid
<< " 0x" << std::hex << offset << "~" << length << std::dec
<< " = " << r << dendl;
txc->write_onode(o);
}
-void BlueStore::_truncate(TransContext *txc,
+int BlueStore::_truncate(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
uint64_t offset)
dout(15) << __func__ << " " << c->cid << " " << o->oid
<< " 0x" << std::hex << offset << std::dec
<< dendl;
- _do_truncate(txc, c, o, offset);
+ int r = 0;
+ if (offset >= OBJECT_MAX_SIZE) {
+ r = -E2BIG;
+ } else {
+ _do_truncate(txc, c, o, offset);
+ }
+ dout(10) << __func__ << " " << c->cid << " " << o->oid
+ << " 0x" << std::hex << offset << std::dec
+ << " = " << r << dendl;
+ return r;
}
int BlueStore::_do_remove(
OnodeRef o)
{
set<SharedBlob*> maybe_unshared_blobs;
- _do_truncate(txc, c, o, 0, &maybe_unshared_blobs);
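+ // only generation (clone) objects can leave blobs shared with the head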
+ bool is_gen = !o->oid.is_no_gen();
+ _do_truncate(txc, c, o, 0, is_gen ? &maybe_unshared_blobs : nullptr);
if (o->onode.has_omap()) {
o->flush();
_do_omap_clear(txc, o->onode.nid);
o->onode = bluestore_onode_t();
_debug_obj_on_delete(o->oid);
- if (!o->oid.is_no_gen() &&
- !maybe_unshared_blobs.empty()) {
- // see if we can unshare blobs still referenced by the head
- dout(10) << __func__ << " gen and maybe_unshared_blobs "
- << maybe_unshared_blobs << dendl;
- ghobject_t nogen = o->oid;
- nogen.generation = ghobject_t::NO_GEN;
- OnodeRef h = c->onode_map.lookup(nogen);
- if (h && h->exists) {
- dout(20) << __func__ << " checking for unshareable blobs on " << h
- << " " << h->oid << dendl;
- map<SharedBlob*,bluestore_extent_ref_map_t> expect;
- for (auto& e : h->extent_map.extent_map) {
- const bluestore_blob_t& b = e.blob->get_blob();
- SharedBlob *sb = e.blob->shared_blob.get();
- if (b.is_shared() &&
- sb->loaded &&
- maybe_unshared_blobs.count(sb)) {
- b.map(e.blob_offset, e.length, [&](uint64_t off, uint64_t len) {
- expect[sb].get(off, len);
- return 0;
- });
- }
- }
- vector<SharedBlob*> unshared_blobs;
- unshared_blobs.reserve(maybe_unshared_blobs.size());
- for (auto& p : expect) {
- dout(20) << " ? " << *p.first << " vs " << p.second << dendl;
- if (p.first->persistent->ref_map == p.second) {
- SharedBlob *sb = p.first;
- dout(20) << __func__ << " unsharing " << *sb << dendl;
- unshared_blobs.push_back(sb);
- txc->unshare_blob(sb);
- uint64_t sbid = c->make_blob_unshared(sb);
- string key;
- get_shared_blob_key(sbid, &key);
- txc->t->rmkey(PREFIX_SHARED_BLOB, key);
- }
- }
+ if (!is_gen || maybe_unshared_blobs.empty()) {
+ return 0;
+ }
- if (!unshared_blobs.empty()) {
- uint32_t b_start = OBJECT_MAX_SIZE;
- uint32_t b_end = 0;
- for (auto& e : h->extent_map.extent_map) {
- const bluestore_blob_t& b = e.blob->get_blob();
- SharedBlob *sb = e.blob->shared_blob.get();
- if (b.is_shared() &&
- std::find(unshared_blobs.begin(), unshared_blobs.end(),
- sb) != unshared_blobs.end()) {
- dout(20) << __func__ << " unsharing " << e << dendl;
- bluestore_blob_t& blob = e.blob->dirty_blob();
- blob.clear_flag(bluestore_blob_t::FLAG_SHARED);
- if (e.logical_offset < b_start) {
- b_start = e.logical_offset;
- }
- if (e.logical_end() > b_end) {
- b_end = e.logical_end();
- }
- }
- }
+ // see if we can unshare blobs still referenced by the head
+ dout(10) << __func__ << " gen and maybe_unshared_blobs "
+ << maybe_unshared_blobs << dendl;
+ ghobject_t nogen = o->oid;
+ nogen.generation = ghobject_t::NO_GEN;
+ OnodeRef h = c->onode_map.lookup(nogen);
+
+ if (!h || !h->exists) {
+ return 0;
+ }
- h->extent_map.dirty_range(b_start, b_end - b_start);
- txc->write_onode(h);
+ dout(20) << __func__ << " checking for unshareable blobs on " << h
+ << " " << h->oid << dendl;
+ map<SharedBlob*,bluestore_extent_ref_map_t> expect;
+ for (auto& e : h->extent_map.extent_map) {
+ const bluestore_blob_t& b = e.blob->get_blob();
+ SharedBlob *sb = e.blob->shared_blob.get();
+ if (b.is_shared() &&
+ sb->loaded &&
+ maybe_unshared_blobs.count(sb)) {
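+ // a compressed blob is referenced as a whole, not per extent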
+ if (b.is_compressed()) {
+ expect[sb].get(0, b.get_ondisk_length());
+ } else {
+ b.map(e.blob_offset, e.length, [&](uint64_t off, uint64_t len) {
+ expect[sb].get(off, len);
+ return 0;
+ });
}
}
}
+
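+ // a blob whose expected references exactly match its persistent
+ // ref_map is no longer shared with any other object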
+ vector<SharedBlob*> unshared_blobs;
+ unshared_blobs.reserve(maybe_unshared_blobs.size());
+ for (auto& p : expect) {
+ dout(20) << " ? " << *p.first << " vs " << p.second << dendl;
+ if (p.first->persistent->ref_map == p.second) {
+ SharedBlob *sb = p.first;
+ dout(20) << __func__ << " unsharing " << *sb << dendl;
+ unshared_blobs.push_back(sb);
+ txc->unshare_blob(sb);
+ uint64_t sbid = c->make_blob_unshared(sb);
+ string key;
+ get_shared_blob_key(sbid, &key);
+ txc->t->rmkey(PREFIX_SHARED_BLOB, key);
+ }
+ }
+
+ if (unshared_blobs.empty()) {
+ return 0;
+ }
+
+ for (auto& e : h->extent_map.extent_map) {
+ const bluestore_blob_t& b = e.blob->get_blob();
+ SharedBlob *sb = e.blob->shared_blob.get();
+ if (b.is_shared() &&
+ std::find(unshared_blobs.begin(), unshared_blobs.end(),
+ sb) != unshared_blobs.end()) {
+ dout(20) << __func__ << " unsharing " << e << dendl;
+ bluestore_blob_t& blob = e.blob->dirty_blob();
+ blob.clear_flag(bluestore_blob_t::FLAG_SHARED);
+ h->extent_map.dirty_range(e.logical_offset, 1);
+ }
+ }
+ txc->write_onode(h);
+
return 0;
}
<< " " << name << " (" << val.length() << " bytes)"
<< dendl;
int r = 0;
- if (val.is_partial())
- o->onode.attrs[name.c_str()] = bufferptr(val.c_str(), val.length());
- else
- o->onode.attrs[name.c_str()] = val;
+ if (val.is_partial()) {
+ auto& b = o->onode.attrs[name.c_str()] = bufferptr(val.c_str(),
+ val.length());
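+ // keep attr buffers accounted in the bluestore cache mempool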
+ b.reassign_to_mempool(mempool::mempool_bluestore_cache_other);
+ } else {
+ auto& b = o->onode.attrs[name.c_str()] = val;
+ b.reassign_to_mempool(mempool::mempool_bluestore_cache_other);
+ }
txc->write_onode(o);
dout(10) << __func__ << " " << c->cid << " " << o->oid
<< " " << name << " (" << val.length() << " bytes)"
int r = 0;
for (map<string,bufferptr>::const_iterator p = aset.begin();
p != aset.end(); ++p) {
- if (p->second.is_partial())
- o->onode.attrs[p->first.c_str()] =
+ if (p->second.is_partial()) {
+ auto& b = o->onode.attrs[p->first.c_str()] =
bufferptr(p->second.c_str(), p->second.length());
- else
- o->onode.attrs[p->first.c_str()] = p->second;
+ b.reassign_to_mempool(mempool::mempool_bluestore_cache_other);
+ } else {
+ auto& b = o->onode.attrs[p->first.c_str()] = p->second;
+ b.reassign_to_mempool(mempool::mempool_bluestore_cache_other);
+ }
}
txc->write_onode(o);
dout(10) << __func__ << " " << c->cid << " " << o->oid
return -EINVAL;
}
- newo->exists = true;
_assign_nid(txc, newo);
// clone data
CollectionRef& c,
OnodeRef& oldo,
OnodeRef& newo,
- uint64_t srcoff, uint64_t length, uint64_t dstoff)
+ uint64_t srcoff,
+ uint64_t length,
+ uint64_t dstoff)
{
dout(15) << __func__ << " " << c->cid << " " << oldo->oid << " -> "
<< newo->oid
e.blob->last_encoded_id = -1;
}
int n = 0;
- bool dirtied_oldo = false;
uint64_t end = srcoff + length;
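+ // track the narrowest source range that actually becomes dirty so
+ // only the affected shards get re-encoded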
+ uint32_t dirty_range_begin = 0;
+ uint32_t dirty_range_end = 0;
+ bool src_dirty = false;
for (auto ep = oldo->extent_map.seek_lextent(srcoff);
ep != oldo->extent_map.extent_map.end();
++ep) {
// make sure it is shared
if (!blob.is_shared()) {
c->make_blob_shared(_assign_blobid(txc), e.blob);
- dirtied_oldo = true; // fixme: overkill
+ if (!src_dirty) {
+ src_dirty = true;
+ dirty_range_begin = e.logical_offset;
+ }
+ assert(e.logical_end() > 0);
+ // subtract 1 so we don't dirty the next potential shard
+ dirty_range_end = e.logical_end() - 1;
} else {
c->load_shared_blob(e.blob->shared_blob);
}
dout(20) << __func__ << " dst " << *ne << dendl;
++n;
}
- if (dirtied_oldo) {
- oldo->extent_map.dirty_range(srcoff, length); // overkill
+ if (src_dirty) {
+ oldo->extent_map.dirty_range(dirty_range_begin,
+ dirty_range_end - dirty_range_begin);
txc->write_onode(oldo);
}
txc->write_onode(newo);
<< " to offset 0x" << dstoff << std::dec << dendl;
int r = 0;
+ if (srcoff + length >= OBJECT_MAX_SIZE ||
+ dstoff + length >= OBJECT_MAX_SIZE) {
+ r = -E2BIG;
+ goto out;
+ }
if (srcoff + length > oldo->onode.size) {
r = -EINVAL;
goto out;
}
- newo->exists = true;
_assign_nid(txc, newo);
if (length > 0) {
assert(i->empty());
}
for (auto& p : coll_map) {
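+ // report any leftovers before the asserts below fire, so shutdown
+ // failures are debuggable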
+ if (!p.second->onode_map.empty()) {
+ derr << __func__ << " stray onodes on " << p.first << dendl;
+ p.second->onode_map.dump(cct, 0);
+ }
+ if (!p.second->shared_blob_set.empty()) {
+ derr << __func__ << " stray shared blobs on " << p.first << dendl;
+ p.second->shared_blob_set.dump(cct, 0);
+ }
assert(p.second->onode_map.empty());
assert(p.second->shared_blob_set.empty());
}
void BlueStore::_apply_padding(uint64_t head_pad,
uint64_t tail_pad,
- bufferlist& bl,
bufferlist& padded)
{
- padded = bl;
if (head_pad) {
- bufferlist z;
- z.append_zero(head_pad);
- z.claim_append(padded);
- padded.claim(z);
+ padded.prepend_zero(head_pad);
}
if (tail_pad) {
padded.append_zero(tail_pad);