]> git.proxmox.com Git - ceph.git/blob - ceph/src/os/bluestore/ZonedAllocator.h
import quincy beta 17.1.0
[ceph.git] / ceph / src / os / bluestore / ZonedAllocator.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

//
// A simple allocator that just hands out space from the next empty zone. This
// is temporary, just to get the simplest append-only write workload to work.
//
// Copyright (C) 2020 Abutalib Aghayev
//

#ifndef CEPH_OS_BLUESTORE_ZONEDALLOCATOR_H
#define CEPH_OS_BLUESTORE_ZONEDALLOCATOR_H

#include <limits>
#include <mutex>

#include "Allocator.h"
#include "common/ceph_mutex.h"
#include "include/btree_map.h"
#include "include/interval_set.h"
#include "include/mempool.h"
#include "bluestore_types.h"
#include "zoned_types.h"

24 class ZonedAllocator : public Allocator {
25 CephContext* cct;
26
27 // Currently only one thread at a time calls into ZonedAllocator due to
28 // atomic_alloc_and_submit_lock in BlueStore.cc, but we do locking anyway
29 // because eventually ZONE_APPEND support will land and
30 // atomic_alloc_and_submit_lock will be removed.
31 ceph::mutex lock = ceph::make_mutex("ZonedAllocator::lock");
32
33 uint64_t size;
34 uint64_t conventional_size, sequential_size;
35 std::atomic<int64_t> num_sequential_free; ///< total bytes in freelist
36 uint64_t block_size;
37 uint64_t zone_size;
38 uint64_t first_seq_zone_num;
39 uint64_t starting_zone_num;
40 uint64_t num_zones;
41 std::atomic<uint32_t> cleaning_zone = -1;
42 std::vector<zone_state_t> zone_states;
43
44 inline uint64_t get_offset(uint64_t zone_num) const {
45 return zone_num * zone_size + get_write_pointer(zone_num);
46 }
47
48 public:
49 inline uint64_t get_write_pointer(uint64_t zone_num) const {
50 return zone_states[zone_num].get_write_pointer();
51 }
52 private:
53 inline uint64_t get_remaining_space(uint64_t zone_num) const {
54 return zone_size - get_write_pointer(zone_num);
55 }
56
57 inline void increment_write_pointer(uint64_t zone_num, uint64_t want_size) {
58 zone_states[zone_num].increment_write_pointer(want_size);
59 }
60
61 inline void increment_num_dead_bytes(uint64_t zone_num, uint64_t length) {
62 zone_states[zone_num].increment_num_dead_bytes(length);
63 }
64
65 inline bool fits(uint64_t want_size, uint64_t zone_num) const {
66 return want_size <= get_remaining_space(zone_num);
67 }
68
69 public:
70 ZonedAllocator(CephContext* cct, int64_t size, int64_t block_size,
71 int64_t _zone_size,
72 int64_t _first_sequential_zone,
73 std::string_view name);
74 ~ZonedAllocator() override;
75
76 const char *get_type() const override {
77 return "zoned";
78 }
79
80 uint64_t get_dead_bytes(uint32_t zone) {
81 return zone_states[zone].num_dead_bytes;
82 }
83 uint64_t get_live_bytes(uint32_t zone) {
84 std::scoped_lock l(lock);
85 return zone_states[zone].write_pointer - zone_states[zone].num_dead_bytes;
86 }
87
88 int64_t allocate(
89 uint64_t want_size, uint64_t alloc_unit, uint64_t max_alloc_size,
90 int64_t hint, PExtentVector *extents) override;
91
92 void release(const interval_set<uint64_t>& release_set) override;
93
94 uint64_t get_free() override;
95
96 void dump() override;
97 void dump(std::function<void(uint64_t offset,
98 uint64_t length)> notify) override;
99
100 int64_t pick_zone_to_clean(float min_score, uint64_t min_saved);
101 void set_cleaning_zone(uint32_t zone) {
102 cleaning_zone = zone;
103 }
104 void clear_cleaning_zone(uint32_t zone) {
105 cleaning_zone = -1;
106 }
107 void reset_zone(uint32_t zone);
108
109 void init_from_zone_pointers(
110 std::vector<zone_state_t> &&_zone_states);
111 void init_add_free(uint64_t offset, uint64_t length) override {}
112 void init_rm_free(uint64_t offset, uint64_t length) override {}
113
114 void shutdown() override;
115
116 private:
117 bool low_on_space(void);
118 };
119
120 #endif