]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/dashboard/controllers/pool.py
386d584404c88ce64ab781c043eecfcc7db572cf
[ceph.git] / ceph / src / pybind / mgr / dashboard / controllers / pool.py
1 # -*- coding: utf-8 -*-
2
3 import time
4 from typing import Any, Dict, Iterable, List, Optional, Union, cast
5
6 import cherrypy
7
8 from .. import mgr
9 from ..security import Scope
10 from ..services.ceph_service import CephService
11 from ..services.exception import handle_send_command_error
12 from ..services.rbd import RbdConfiguration
13 from ..tools import TaskManager, str_to_bool
14 from . import APIDoc, APIRouter, Endpoint, EndpointDoc, ReadPermission, \
15 RESTController, Task, UIRouter
16
# Response schema for the pool listing endpoints, consumed by @EndpointDoc to
# generate the REST API documentation. It mirrors the pool documents of the
# OSD map (a list of dicts); each entry maps a field name to a
# (type, description) tuple. Many descriptions are intentionally empty —
# the field names match the raw OSD map keys.
POOL_SCHEMA = ([{
    "pool": (int, "pool id"),
    "pool_name": (str, "pool name"),
    "flags": (int, ""),
    "flags_names": (str, "flags name"),
    "type": (str, "type of pool"),
    "size": (int, "pool size"),
    "min_size": (int, ""),
    "crush_rule": (str, ""),
    "object_hash": (int, ""),
    "pg_autoscale_mode": (str, ""),
    "pg_num": (int, ""),
    "pg_placement_num": (int, ""),
    "pg_placement_num_target": (int, ""),
    "pg_num_target": (int, ""),
    "pg_num_pending": (int, ""),
    "last_pg_merge_meta": ({
        "ready_epoch": (int, ""),
        "last_epoch_started": (int, ""),
        "last_epoch_clean": (int, ""),
        "source_pgid": (str, ""),
        "source_version": (str, ""),
        "target_version": (str, ""),
    }, ""),
    "auid": (int, ""),
    "snap_mode": (str, ""),
    "snap_seq": (int, ""),
    "snap_epoch": (int, ""),
    "pool_snaps": ([str], ""),
    "quota_max_bytes": (int, ""),
    "quota_max_objects": (int, ""),
    "tiers": ([str], ""),
    "tier_of": (int, ""),
    "read_tier": (int, ""),
    "write_tier": (int, ""),
    "cache_mode": (str, ""),
    "target_max_bytes": (int, ""),
    "target_max_objects": (int, ""),
    "cache_target_dirty_ratio_micro": (int, ""),
    "cache_target_dirty_high_ratio_micro": (int, ""),
    "cache_target_full_ratio_micro": (int, ""),
    "cache_min_flush_age": (int, ""),
    "cache_min_evict_age": (int, ""),
    "erasure_code_profile": (str, ""),
    "hit_set_params": ({
        "type": (str, "")
    }, ""),
    "hit_set_period": (int, ""),
    "hit_set_count": (int, ""),
    "use_gmt_hitset": (bool, ""),
    "min_read_recency_for_promote": (int, ""),
    "min_write_recency_for_promote": (int, ""),
    "hit_set_grade_decay_rate": (int, ""),
    "hit_set_search_last_n": (int, ""),
    "grade_table": ([str], ""),
    "stripe_width": (int, ""),
    "expected_num_objects": (int, ""),
    "fast_read": (bool, ""),
    "options": ({
        "pg_num_min": (int, ""),
        "pg_num_max": (int, "")
    }, ""),
    "application_metadata": ([str], ""),
    "create_time": (str, ""),
    "last_change": (str, ""),
    "last_force_op_resend": (str, ""),
    "last_force_op_resend_prenautilus": (str, ""),
    "last_force_op_resend_preluminous": (str, ""),
    "removed_snaps": ([str], "")
}])
87
88
def pool_task(name, metadata, wait_for=2.0):
    """Return a ``Task`` decorator namespaced under ``pool/``.

    :param name: the pool operation name (e.g. 'create', 'edit', 'delete').
    :param metadata: task metadata forwarded to ``Task``.
    :param wait_for: seconds the task may run before being backgrounded.
    """
    task_name = 'pool/{}'.format(name)
    return Task(task_name, metadata, wait_for)
91
92
@APIRouter('/pool', Scope.POOL)
@APIDoc("Get pool details by pool name", "Pool")
class Pool(RESTController):
    """REST controller for listing, creating, editing and deleting Ceph pools.

    Mutating operations are issued as 'mon' commands via ``CephService`` and
    are wrapped in dashboard tasks so the frontend can track their progress.
    """

    @staticmethod
    def _serialize_pool(pool, attrs):
        """Project an OSD-map pool dict onto the requested attribute names.

        Raw values are translated into display form: the numeric pool ``type``
        becomes 'replicated'/'erasure', ``crush_rule`` is resolved from rule id
        to rule name, and ``application_metadata`` is flattened to a list of
        application names. ``pool_name`` is always present in the result.

        :param pool: a pool dict as returned by the mgr OSD map.
        :param attrs: list of attribute names to include, or a falsy/non-list
            value to include every key of *pool*.
        """
        if not attrs or not isinstance(attrs, list):
            attrs = pool.keys()

        # Rule-id -> rule-name lookup so 'crush_rule' can be reported by name.
        crush_rules = {r['rule_id']: r["rule_name"] for r in mgr.get('osd_map_crush')['rules']}

        res: Dict[Union[int, str], Union[str, List[Any]]] = {}
        for attr in attrs:
            if attr not in pool:
                continue
            if attr == 'type':
                # OSD map encodes the pool type numerically: 1=replicated, 3=erasure.
                res[attr] = {1: 'replicated', 3: 'erasure'}[pool[attr]]
            elif attr == 'crush_rule':
                res[attr] = crush_rules[pool[attr]]
            elif attr == 'application_metadata':
                res[attr] = list(pool[attr].keys())
            else:
                res[attr] = pool[attr]

        # pool_name is mandatory for consumers, even when not in *attrs*.
        res['pool_name'] = pool['pool_name']
        return res

    @classmethod
    def _pool_list(cls, attrs=None, stats=False):
        """Return all pools, serialized to *attrs*.

        :param attrs: comma-separated attribute names (or None for all).
        :param stats: bool or bool-like string; when true, include IO stats.
        """
        if attrs:
            attrs = attrs.split(',')

        # *stats* may arrive as a query-string value, hence str_to_bool.
        if str_to_bool(stats):
            pools = CephService.get_pool_list_with_stats()
        else:
            pools = CephService.get_pool_list()

        return [cls._serialize_pool(pool, attrs) for pool in pools]

    @EndpointDoc("Display Pool List",
                 parameters={
                     'attrs': (str, 'Pool Attributes'),
                     'stats': (bool, 'Pool Stats')
                 },
                 responses={200: POOL_SCHEMA})
    def list(self, attrs=None, stats=False):
        """GET /api/pool — list all pools."""
        return self._pool_list(attrs, stats)

    @classmethod
    def _get(cls, pool_name: str, attrs: Optional[str] = None, stats: bool = False) -> dict:
        """Return a single serialized pool by name.

        :raises cherrypy.NotFound: if no pool with that name exists.
        """
        pools = cls._pool_list(attrs, stats)
        pool = [p for p in pools if p['pool_name'] == pool_name]
        if not pool:
            raise cherrypy.NotFound('No such pool')
        return pool[0]

    def get(self, pool_name: str, attrs: Optional[str] = None, stats: bool = False) -> dict:
        """GET /api/pool/{pool_name} — pool details plus its RBD configuration."""
        pool = self._get(pool_name, attrs, stats)
        pool['configuration'] = RbdConfiguration(pool_name).list()
        return pool

    @pool_task('delete', ['{pool_name}'])
    @handle_send_command_error('pool')
    def delete(self, pool_name):
        """DELETE /api/pool/{pool_name} — remove the pool.

        The mon command requires the name twice plus the confirmation flag
        as a safety interlock against accidental deletion.
        """
        return CephService.send_command('mon', 'osd pool delete', pool=pool_name, pool2=pool_name,
                                        yes_i_really_really_mean_it=True)

    @pool_task('edit', ['{pool_name}'])
    def set(self, pool_name, flags=None, application_metadata=None, configuration=None, **kwargs):
        """PUT /api/pool/{pool_name} — update pool settings.

        ``kwargs`` may carry a 'pool' key holding a new name; in that case the
        pool is renamed by ``_set_pool_values`` and all follow-up operations
        must use the new name.
        """
        self._set_pool_values(pool_name, application_metadata, flags, True, kwargs)
        if kwargs.get('pool'):
            pool_name = kwargs['pool']
        RbdConfiguration(pool_name).set_configuration(configuration)
        self._wait_for_pgs(pool_name)

    @pool_task('create', {'pool_name': '{pool}'})
    @handle_send_command_error('pool')
    def create(self, pool, pg_num, pool_type, erasure_code_profile=None, flags=None,
               application_metadata=None, rule_name=None, configuration=None, **kwargs):
        """POST /api/pool — create a pool, then apply settings and RBD config."""
        # Normalize empty string / falsy profile to None so the mon command
        # falls back to its default erasure code profile.
        ecp = erasure_code_profile if erasure_code_profile else None
        CephService.send_command('mon', 'osd pool create', pool=pool, pg_num=int(pg_num),
                                 pgp_num=int(pg_num), pool_type=pool_type, erasure_code_profile=ecp,
                                 rule=rule_name)
        self._set_pool_values(pool, application_metadata, flags, False, kwargs)
        RbdConfiguration(pool).set_configuration(configuration)
        self._wait_for_pgs(pool)

    def _set_pool_values(self, pool, application_metadata, flags, update_existing, kwargs):
        """Apply flags, application metadata, quotas and arbitrary pool vars.

        :param pool: current pool name.
        :param application_metadata: desired set of enabled applications, or
            None to leave applications untouched.
        :param flags: iterable of flag names; only 'ec_overwrites' is handled.
        :param update_existing: True when editing an existing pool (enables
            compression-removal handling and application-metadata diffing).
        :param kwargs: remaining key/value pairs sent as 'osd pool set';
            a 'pool' key is treated as a rename target instead.
        """
        update_name = False
        current_pool = self._get(pool)
        if update_existing and kwargs.get('compression_mode') == 'unset':
            self._prepare_compression_removal(current_pool.get('options'), kwargs)
        if flags and 'ec_overwrites' in flags:
            CephService.send_command('mon', 'osd pool set', pool=pool, var='allow_ec_overwrites',
                                     val='true')
        if application_metadata is not None:
            def set_app(what, app):
                CephService.send_command('mon', 'osd pool application ' + what, pool=pool, app=app,
                                         yes_i_really_mean_it=True)
            if update_existing:
                original_app_metadata = set(
                    cast(Iterable[Any], current_pool.get('application_metadata')))
            else:
                original_app_metadata = set()

            # Diff desired vs. current applications; disable removed, enable added.
            for app in original_app_metadata - set(application_metadata):
                set_app('disable', app)
            for app in set(application_metadata) - original_app_metadata:
                set_app('enable', app)

        def set_key(key, value):
            CephService.send_command('mon', 'osd pool set', pool=pool, var=key, val=str(value))

        # Quotas use a dedicated mon command, so pop them out of kwargs first.
        quotas = {}
        quotas['max_objects'] = kwargs.pop('quota_max_objects', None)
        quotas['max_bytes'] = kwargs.pop('quota_max_bytes', None)
        self._set_quotas(pool, quotas)

        for key, value in kwargs.items():
            if key == 'pool':
                # Defer the rename until every other value is applied, since
                # set_key still targets the old pool name.
                update_name = True
                destpool = value
            else:
                set_key(key, value)
                if key == 'pg_num':
                    # Keep placement group count in sync with pg_num.
                    set_key('pgp_num', value)
        if update_name:
            CephService.send_command('mon', 'osd pool rename', srcpool=pool, destpool=destpool)

    def _set_quotas(self, pool, quotas):
        """Set each non-None quota ('max_objects' / 'max_bytes') on *pool*."""
        for field, value in quotas.items():
            if value is not None:
                CephService.send_command('mon', 'osd pool set-quota',
                                         pool=pool, field=field, val=str(value))

    def _prepare_compression_removal(self, options, kwargs):
        """
        Presets payload with values to remove compression attributes in case they are not
        needed anymore.

        In case compression is not needed the dashboard will send 'compression_mode' with the
        value 'unset'.

        :param options: All set options for the current pool.
        :param kwargs: Payload of the PUT / POST call
        """
        if options is not None:
            def reset_arg(arg, value):
                # Only reset attributes that are actually set on the pool.
                if options.get(arg):
                    kwargs[arg] = value
            for arg in ['compression_min_blob_size', 'compression_max_blob_size',
                        'compression_required_ratio']:
                reset_arg(arg, '0')
            reset_arg('compression_algorithm', 'unset')

    @classmethod
    def _wait_for_pgs(cls, pool_name):
        """
        Keep the task waiting for until all pg changes are complete
        :param pool_name: The name of the pool.
        :type pool_name: string
        """
        current_pool = cls._get(pool_name)
        # Baseline pg count (pg_num + pgp_num) used for progress reporting.
        initial_pgs = int(current_pool['pg_placement_num']) + int(current_pool['pg_num'])
        cls._pg_wait_loop(current_pool, initial_pgs)

    @classmethod
    def _pg_wait_loop(cls, pool, initial_pgs):
        """
        Compares if all pg changes are completed, if not it will call itself
        until all changes are completed.

        Polls every 4 seconds via recursion, updating the current task's
        progress percentage on each pass.

        :param pool: The dict that represents a pool.
        :type pool: dict
        :param initial_pgs: The pg and pg_num count before any change happened.
        :type initial_pgs: int
        """
        # 'pg_num_target' only exists on Ceph versions that support pg merging;
        # without it there is nothing to wait for.
        if 'pg_num_target' in pool:
            target = int(pool['pg_num_target']) + int(pool['pg_placement_num_target'])
            current = int(pool['pg_placement_num']) + int(pool['pg_num'])
            if current != target:
                max_diff = abs(target - initial_pgs)
                diff = max_diff - abs(target - current)
                percentage = int(round(diff / float(max_diff) * 100))
                TaskManager.current_task().set_progress(percentage)
                time.sleep(4)
                cls._pg_wait_loop(cls._get(pool['pool_name']), initial_pgs)

    @RESTController.Resource()
    @ReadPermission
    def configuration(self, pool_name):
        """GET /api/pool/{pool_name}/configuration — RBD config of the pool."""
        return RbdConfiguration(pool_name).list()
285
286
@UIRouter('/pool', Scope.POOL)
@APIDoc("Dashboard UI helper function; not part of the public API", "PoolUi")
class PoolUi(Pool):
    @Endpoint()
    @ReadPermission
    def info(self):
        """Used by the create-pool dialog"""
        crush_map = mgr.get('osd_map_crush')
        config_options = mgr.get('config_options')['options']

        def rules_of_type(pool_type):
            # CRUSH rules matching the given numeric pool type.
            return [rule for rule in crush_map['rules'] if rule['type'] == pool_type]

        def all_bluestore():
            # True when every known OSD runs the bluestore objectstore.
            osd_metadata = mgr.get('osd_metadata').values()
            return all(osd['osd_objectstore'] == 'bluestore' for osd in osd_metadata)

        def get_config_option_enum(conf_name):
            # Non-empty enum values of the first config option with that name;
            # raises IndexError if the option is unknown.
            matches = [opt for opt in config_options if opt['name'] == conf_name]
            return [value for value in matches[0]['enum_values'] if len(value) > 0]

        profiles = CephService.get_erasure_code_profiles()
        used_rules: Dict[str, List[str]] = {}
        used_profiles: Dict[str, List[str]] = {}
        pool_names = []
        # Group existing pool names by the CRUSH rule and EC profile they use.
        for pool in self._pool_list():
            name = pool['pool_name']
            pool_names.append(name)
            used_rules.setdefault(pool['crush_rule'], []).append(name)
            used_profiles.setdefault(pool['erasure_code_profile'], []).append(name)

        mgr_config = mgr.get('config')
        return {
            "pool_names": pool_names,
            "crush_rules_replicated": rules_of_type(1),
            "crush_rules_erasure": rules_of_type(3),
            "is_all_bluestore": all_bluestore(),
            "osd_count": len(mgr.get('osd_map')['osds']),
            "bluestore_compression_algorithm": mgr_config['bluestore_compression_algorithm'],
            "compression_algorithms": get_config_option_enum('bluestore_compression_algorithm'),
            "compression_modes": get_config_option_enum('bluestore_compression_mode'),
            "pg_autoscale_default_mode": mgr_config['osd_pool_default_pg_autoscale_mode'],
            "pg_autoscale_modes": get_config_option_enum('osd_pool_default_pg_autoscale_mode'),
            "erasure_code_profiles": profiles,
            "used_rules": used_rules,
            "used_profiles": used_profiles,
            'nodes': mgr.get('osd_map_tree')['nodes']
        }