*pveceph* `<COMMAND> [ARGS] [OPTIONS]`

*pveceph createmgr*

An alias for 'pveceph mgr create'.

*pveceph createmon*

An alias for 'pveceph mon create'.

*pveceph createosd*

An alias for 'pveceph osd create'.

*pveceph createpool*

An alias for 'pveceph pool create'.

*pveceph destroymgr*

An alias for 'pveceph mgr destroy'.

*pveceph destroymon*

An alias for 'pveceph mon destroy'.

*pveceph destroyosd*

An alias for 'pveceph osd destroy'.

*pveceph destroypool*

An alias for 'pveceph pool destroy'.

*pveceph fs create* `[OPTIONS]`

Create a Ceph filesystem.

`--add-storage` `<boolean>` ('default =' `0`)::

Configure the created CephFS as storage for this cluster.

`--name` `<string>` ('default =' `cephfs`)::

The ceph filesystem name.

`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::

Number of placement groups for the backing data pool. The metadata pool will use a quarter of this.

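For example, a CephFS with a smaller data pool and immediate storage integration could be created like this (the `pg_num` value is illustrative, not a recommendation):

----
# pveceph fs create --name cephfs --pg_num 64 --add-storage 1
----
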
*pveceph fs destroy* `<name>` `[OPTIONS]`

Destroy a Ceph filesystem.

`<name>`: `<string>` ::

The ceph filesystem name.

`--remove-pools` `<boolean>` ('default =' `0`)::

Remove data and metadata pools configured for this fs.

`--remove-storages` `<boolean>` ('default =' `0`)::

Remove all pveceph-managed storages configured for this fs.

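A possible teardown that also removes the backing pools and the matching storage entries, sketched here for a filesystem named `cephfs` (destructive, and it assumes the MDS daemons serving it are already stopped):

----
# pveceph fs destroy cephfs --remove-storages 1 --remove-pools 1
----
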
*pveceph help* `[OPTIONS]`

Get help about specified command.

`--extra-args` `<array>` ::

Shows help for a specific command.

`--verbose` `<boolean>` ::

Verbose output format.

*pveceph init* `[OPTIONS]`

Create initial ceph default configuration and setup symlinks.

`--cluster-network` `<string>` ::

Declare a separate cluster network. OSDs will route heartbeat, object replication and recovery traffic over it.
+
NOTE: Requires option(s): `network`

`--disable_cephx` `<boolean>` ('default =' `0`)::

Disable cephx authentication.
+
WARNING: cephx is a security feature protecting against man-in-the-middle attacks. Only consider disabling cephx if your network is private!

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of available replicas per object to allow I/O.

`--network` `<string>` ::

Use specific network for all ceph related traffic.

`--pg_bits` `<integer> (6 - 14)` ('default =' `6`)::

Placement group bits, used to specify the default number of placement groups.
+
NOTE: 'osd pool default pg num' does not work for default pools.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Targeted number of replicas per object.

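As a sketch, an initial configuration that puts all Ceph traffic on a dedicated network and splits off a separate cluster network might look like this (both subnets are placeholders):

----
# pveceph init --network 10.10.10.0/24 --cluster-network 10.10.20.0/24
----
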
*pveceph install* `[OPTIONS]`

Install ceph related packages.

`--allow-experimental` `<boolean>` ('default =' `0`)::

Allow experimental versions. Use with care!

`--repository` `<enterprise | no-subscription | test>` ('default =' `enterprise`)::

Ceph repository to use.

`--version` `<quincy | reef>` ('default =' `quincy`)::

Ceph version to install.

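For instance, to install from the no-subscription repository and pick a specific release (both values taken from the enumerations above):

----
# pveceph install --repository no-subscription --version reef
----
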
*pveceph lspools*

An alias for 'pveceph pool ls'.

*pveceph mds create* `[OPTIONS]`

Create Ceph Metadata Server (MDS).

`--hotstandby` `<boolean>` ('default =' `0`)::

Determines whether a ceph-mds daemon should poll and replay the log of an active MDS. Faster switch on MDS failure, but needs more idle resources.

`--name` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ('default =' `nodename`)::

The ID for the MDS; when omitted, the nodename is used.

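A minimal example that creates an MDS named after the local node and enables hot-standby replay:

----
# pveceph mds create --hotstandby 1
----
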
*pveceph mds destroy* `<name>`

Destroy Ceph Metadata Server.

`<name>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The name (ID) of the MDS.

*pveceph mgr create* `[OPTIONS]`

Create Ceph Manager.

`--id` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the manager; when omitted, the nodename is used.

*pveceph mgr destroy* `<id>`

Destroy Ceph Manager.

`<id>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID of the manager.

*pveceph mon create* `[OPTIONS]`

Create Ceph Monitor and Manager.

`--mon-address` `<string>` ::

Overwrites autodetected monitor IP address(es). Must be in the public network(s) of Ceph.

`--monid` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the monitor; when omitted, the nodename is used.

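For example, to pin the new monitor to a specific address inside Ceph's public network (the IP is a placeholder):

----
# pveceph mon create --mon-address 10.10.10.11
----
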
*pveceph mon destroy* `<monid>`

Destroy Ceph Monitor and Manager.

`<monid>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

Monitor ID.

*pveceph osd create* `<dev>` `[OPTIONS]`

Create OSD.

`<dev>`: `<string>` ::

Block device name.

`--crush-device-class` `<string>` ::

Set the device class of the OSD in crush.

`--db_dev` `<string>` ::

Block device name for block.db.

`--db_dev_size` `<number> (1 - N)` ('default =' `bluestore_block_db_size or 10% of OSD size`)::

Size in GiB for block.db.
+
NOTE: Requires option(s): `db_dev`

`--encrypted` `<boolean>` ('default =' `0`)::

Enables encryption of the OSD.

`--wal_dev` `<string>` ::

Block device name for block.wal.

`--wal_dev_size` `<number> (0.5 - N)` ('default =' `bluestore_block_wal_size or 1% of OSD size`)::

Size in GiB for block.wal.
+
NOTE: Requires option(s): `wal_dev`

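A sketch of an OSD whose block.db lives on a faster device, with encryption enabled (the device paths and the 64 GiB DB size are placeholders):

----
# pveceph osd create /dev/sdb --db_dev /dev/nvme0n1 --db_dev_size 64 --encrypted 1
----
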
*pveceph osd destroy* `<osdid>` `[OPTIONS]`

Destroy OSD.

`<osdid>`: `<integer>` ::

OSD ID.

`--cleanup` `<boolean>` ('default =' `0`)::

If set, we remove partition table entries.

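For example, assuming OSD 2 has already been marked out and its service stopped, it could be removed together with its partition table entries:

----
# pveceph osd destroy 2 --cleanup 1
----
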
*pveceph osd details* `<osdid>` `[OPTIONS]` `[FORMAT_OPTIONS]`

Get OSD details.

`<osdid>`: `<string>` ::

ID of the OSD.

`--verbose` `<boolean>` ('default =' `0`)::

Print verbose information, same as json-pretty output format.

*pveceph pool create* `<name>` `[OPTIONS]`

Create Ceph pool.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--add_storages` `<boolean>` ('default =' `0; for erasure coded pools: 1`)::

Configure VM and CT storage using the new pool.

`--application` `<cephfs | rbd | rgw>` ('default =' `rbd`)::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--erasure-coding` `k=<integer> ,m=<integer> [,device-class=<class>] [,failure-domain=<domain>] [,profile=<profile>]` ::

Create an erasure coded pool for RBD with an accompanying replicated pool for metadata storage. With EC, the common Ceph options 'size', 'min_size' and 'crush_rule' will be applied to the metadata pool.

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of replicas per object.

`--pg_autoscale_mode` `<off | on | warn>` ('default =' `warn`)::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (1 - 32768)` ('default =' `128`)::

Number of placement groups.

`--pg_num_min` `<integer> (-N - 32768)` ::

Minimal number of placement groups.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Number of replicas per object.

`--target_size` `^(\d+(\.\d+)?)([KMGT])?$` ::

The estimated target size of the pool for the PG autoscaler.

`--target_size_ratio` `<number>` ::

The estimated target ratio of the pool for the PG autoscaler.

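Two illustrative invocations, one replicated pool added as VM/CT storage and one erasure coded pool (the pool names and the k/m values are placeholders):

----
# pveceph pool create vm-pool --size 3 --min_size 2 --pg_autoscale_mode on --add_storages 1
# pveceph pool create ec-pool --erasure-coding k=4,m=2
----
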
*pveceph pool destroy* `<name>` `[OPTIONS]`

Destroy pool.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--force` `<boolean>` ('default =' `0`)::

If true, destroys pool even if in use.

`--remove_ecprofile` `<boolean>` ('default =' `1`)::

Remove the erasure code profile. Defaults to true, if applicable.

`--remove_storages` `<boolean>` ('default =' `0`)::

Remove all pveceph-managed storages configured for this pool.

*pveceph pool get* `<name>` `[OPTIONS]` `[FORMAT_OPTIONS]`

Show the current pool status.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--verbose` `<boolean>` ('default =' `0`)::

If enabled, will display additional data (e.g. statistics).

*pveceph pool ls* `[FORMAT_OPTIONS]`

List all pools and their settings (which are settable by the POST/PUT
endpoints).

*pveceph pool set* `<name>` `[OPTIONS]`

Change POOL settings.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--application` `<cephfs | rbd | rgw>` ::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--min_size` `<integer> (1 - 7)` ::

Minimum number of replicas per object.

`--pg_autoscale_mode` `<off | on | warn>` ::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (1 - 32768)` ::

Number of placement groups.

`--pg_num_min` `<integer> (-N - 32768)` ::

Minimal number of placement groups.

`--size` `<integer> (1 - 7)` ::

Number of replicas per object.

`--target_size` `^(\d+(\.\d+)?)([KMGT])?$` ::

The estimated target size of the pool for the PG autoscaler.

`--target_size_ratio` `<number>` ::

The estimated target ratio of the pool for the PG autoscaler.

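For instance, to let the autoscaler manage an existing pool's placement groups and adjust its replication settings (the pool name and values are placeholders):

----
# pveceph pool set vm-pool --pg_autoscale_mode on --size 3 --min_size 2
----
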
*pveceph purge* `[OPTIONS]`

Destroy ceph related data and configuration files.

`--crash` `<boolean>` ::

Additionally purge Ceph crash logs, /var/lib/ceph/crash.

`--logs` `<boolean>` ::

Additionally purge Ceph logs, /var/log/ceph.

*pveceph start* `[OPTIONS]`

Start ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.

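For example, to start only a single daemon instead of the whole `ceph.target` (the OSD id is a placeholder; the same `--service` syntax applies to 'pveceph stop'):

----
# pveceph start --service osd.2
----
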
*pveceph status*

Get Ceph Status.

*pveceph stop* `[OPTIONS]`

Stop ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.
