*pveceph* `<COMMAND> [ARGS] [OPTIONS]`

*pveceph createmgr*

An alias for 'pveceph mgr create'.

*pveceph createmon*

An alias for 'pveceph mon create'.

*pveceph createosd*

An alias for 'pveceph osd create'.

*pveceph createpool*

An alias for 'pveceph pool create'.

*pveceph destroymgr*

An alias for 'pveceph mgr destroy'.

*pveceph destroymon*

An alias for 'pveceph mon destroy'.

*pveceph destroyosd*

An alias for 'pveceph osd destroy'.

*pveceph destroypool*

An alias for 'pveceph pool destroy'.

*pveceph fs create* `[OPTIONS]`

Create a Ceph filesystem.

`--add-storage` `<boolean>` ('default =' `0`)::

Configure the created CephFS as storage for this cluster.

`--name` `<string>` ('default =' `cephfs`)::

The ceph filesystem name.

`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::

Number of placement groups for the backing data pool. The metadata pool will use a quarter of this.

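For example, a CephFS can be created and registered as storage in one step. The name and PG count below are illustrative values, not recommendations:

----
# Create a CephFS called 'cephfs' with 64 PGs for the data pool
# and configure it as a storage for the cluster (example values)
pveceph fs create --name cephfs --pg_num 64 --add-storage 1
----
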
*pveceph fs destroy* `<name>` `[OPTIONS]`

Destroy a Ceph filesystem.

`<name>`: `<string>` ::

The ceph filesystem name.

`--remove-pools` `<boolean>` ('default =' `0`)::

Remove data and metadata pools configured for this fs.

`--remove-storages` `<boolean>` ('default =' `0`)::

Remove all pveceph-managed storages configured for this fs.

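A sketch of a full teardown that also removes the backing pools and any pveceph-managed storage entries; the filesystem name is an example:

----
# Destroy the CephFS 'cephfs' including its data/metadata pools
# and the matching storage definitions (irreversible!)
pveceph fs destroy cephfs --remove-pools 1 --remove-storages 1
----
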
*pveceph help* `[OPTIONS]`

Get help about specified command.

`--extra-args` `<array>` ::

Shows help for a specific command.

`--verbose` `<boolean>` ::

Verbose output format.

*pveceph init* `[OPTIONS]`

Create initial ceph default configuration and setup symlinks.

`--cluster-network` `<string>` ::

Declare a separate cluster network. OSDs will route heartbeat, object replication, and recovery traffic over it.
+
NOTE: Requires option(s): `network`

`--disable_cephx` `<boolean>` ('default =' `0`)::

Disable cephx authentication.
+
WARNING: cephx is a security feature protecting against man-in-the-middle attacks. Only consider disabling cephx if your network is private!

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of available replicas per object to allow I/O.

`--network` `<string>` ::

Use specific network for all ceph related traffic.

`--pg_bits` `<integer> (6 - 14)` ('default =' `6`)::

Placement group bits, used to specify the default number of placement groups.
+
Deprecated. This setting was deprecated in recent Ceph versions.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Targeted number of replicas per object.

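As a sketch, an initialization that separates public and cluster traffic could look as follows; the two subnets are placeholders for your own networks:

----
# Initialize Ceph with a dedicated public and cluster network
# (10.10.10.0/24 and 10.10.20.0/24 are example subnets)
pveceph init --network 10.10.10.0/24 --cluster-network 10.10.20.0/24
----
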
*pveceph install* `[OPTIONS]`

Install ceph related packages.

`--allow-experimental` `<boolean>` ('default =' `0`)::

Allow experimental versions. Use with care!

`--repository` `<enterprise | no-subscription | test>` ('default =' `enterprise`)::

Ceph repository to use.

`--version` `<quincy | reef>` ('default =' `quincy`)::

Ceph version to install.

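For instance, to install a specific release from the no-subscription repository (just one possible combination of the values listed above):

----
# Install Ceph Reef from the no-subscription repository
pveceph install --repository no-subscription --version reef
----
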
*pveceph lspools*

An alias for 'pveceph pool ls'.

*pveceph mds create* `[OPTIONS]`

Create Ceph Metadata Server (MDS).

`--hotstandby` `<boolean>` ('default =' `0`)::

Determines whether a ceph-mds daemon should poll and replay the log of an active MDS. Allows a faster switch on MDS failure, but needs more idle resources.

`--name` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ('default =' `nodename`)::

The ID for the MDS; if omitted, the node name is used.

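A minimal sketch; the explicit ID 'mds1' is a made-up example, since the ID defaults to the node name anyway:

----
# Create an MDS with an explicit ID and hot-standby log replay
pveceph mds create --name mds1 --hotstandby 1
----
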
*pveceph mds destroy* `<name>`

Destroy Ceph Metadata Server.

`<name>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The name (ID) of the MDS.

*pveceph mgr create* `[OPTIONS]`

Create Ceph Manager.

`--id` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the manager; if omitted, the node name is used.

*pveceph mgr destroy* `<id>`

Destroy Ceph Manager.

`<id>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID of the manager.

*pveceph mon create* `[OPTIONS]`

Create Ceph Monitor and Manager.

`--mon-address` `<string>` ::

Overwrites autodetected monitor IP address(es). Must be in the public network(s) of Ceph.

`--monid` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the monitor; if omitted, the node name is used.

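For example, assuming 10.10.10.11 lies in the Ceph public network, the autodetected address could be overridden like this (ID and IP are placeholders):

----
# Create a monitor with an explicit ID and address
pveceph mon create --monid mon0 --mon-address 10.10.10.11
----
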
*pveceph mon destroy* `<monid>`

Destroy Ceph Monitor and Manager.

`<monid>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

Monitor ID.

*pveceph osd create* `<dev>` `[OPTIONS]`

Create OSD.

`<dev>`: `<string>` ::

Block device name.

`--crush-device-class` `<string>` ::

Set the device class of the OSD in crush.

`--db_dev` `<string>` ::

Block device name for block.db.

`--db_dev_size` `<number> (1 - N)` ('default =' `bluestore_block_db_size or 10% of OSD size`)::

Size in GiB for block.db.
+
NOTE: Requires option(s): `db_dev`

`--encrypted` `<boolean>` ('default =' `0`)::

Enables encryption of the OSD.

`--osds-per-device` `<integer> (1 - N)` ::

OSD services per physical device. Only useful for fast NVMe devices to utilize their performance better.

`--wal_dev` `<string>` ::

Block device name for block.wal.

`--wal_dev_size` `<number> (0.5 - N)` ('default =' `bluestore_block_wal_size or 1% of OSD size`)::

Size in GiB for block.wal.
+
NOTE: Requires option(s): `wal_dev`

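As an illustration, an OSD with a separate DB device might be created as follows; the device paths and the 20 GiB DB size are assumptions for this example:

----
# Create an encrypted OSD on /dev/sdb with a 20 GiB block.db
# on a faster device (paths are examples)
pveceph osd create /dev/sdb --encrypted 1 \
    --db_dev /dev/nvme0n1 --db_dev_size 20
----
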
*pveceph osd destroy* `<osdid>` `[OPTIONS]`

Destroy OSD.

`<osdid>`: `<integer>` ::

OSD ID.

`--cleanup` `<boolean>` ('default =' `0`)::

If set, the partition table entries of the device are removed as well.

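A sketch of a full removal, including the on-disk cleanup; the OSD ID is an example:

----
# Destroy OSD 3 and clean up its partition table entries
# (stop the OSD and mark it 'out' beforehand)
pveceph osd destroy 3 --cleanup 1
----
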
*pveceph osd details* `<osdid>` `[OPTIONS]` `[FORMAT_OPTIONS]`

Get OSD details.

`<osdid>`: `<string>` ::

ID of the OSD.

`--verbose` `<boolean>` ('default =' `0`)::

Print verbose information, same as json-pretty output format.

*pveceph pool create* `<name>` `[OPTIONS]`

Create Ceph pool.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--add_storages` `<boolean>` ('default =' `0; for erasure coded pools: 1`)::

Configure VM and CT storage using the new pool.

`--application` `<cephfs | rbd | rgw>` ('default =' `rbd`)::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--erasure-coding` `k=<integer> ,m=<integer> [,device-class=<class>] [,failure-domain=<domain>] [,profile=<profile>]` ::

Create an erasure coded pool for RBD with an accompanying replicated pool for metadata storage. With EC, the common Ceph options 'size', 'min_size' and 'crush_rule' will be applied to the metadata pool.

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of replicas per object.

`--pg_autoscale_mode` `<off | on | warn>` ('default =' `warn`)::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (1 - 32768)` ('default =' `128`)::

Number of placement groups.

`--pg_num_min` `<integer> (-N - 32768)` ::

Minimal number of placement groups.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Number of replicas per object.

`--target_size` `^(\d+(\.\d+)?)([KMGT])?$` ::

The estimated target size of the pool for the PG autoscaler.

`--target_size_ratio` `<number>` ::

The estimated target ratio of the pool for the PG autoscaler.

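Two illustrative invocations, one replicated and one erasure coded; pool names and numbers are examples only:

----
# Replicated pool with 3 copies, autoscaler on, added as storage
pveceph pool create vm-pool --size 3 --min_size 2 \
    --pg_autoscale_mode on --add_storages 1

# Erasure coded RBD pool with k=2 data and m=1 coding chunks
pveceph pool create ec-pool --erasure-coding k=2,m=1
----
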
*pveceph pool destroy* `<name>` `[OPTIONS]`

Destroy pool.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--force` `<boolean>` ('default =' `0`)::

If true, destroys the pool even if it is in use.

`--remove_ecprofile` `<boolean>` ('default =' `1`)::

Remove the erasure code profile. Defaults to true, if applicable.

`--remove_storages` `<boolean>` ('default =' `0`)::

Remove all pveceph-managed storages configured for this pool.

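For example, a pool can be removed together with its storage definitions; 'vm-pool' is the example name from above:

----
# Destroy 'vm-pool' and the pveceph-managed storages using it
pveceph pool destroy vm-pool --remove_storages 1
----
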
*pveceph pool get* `<name>` `[OPTIONS]` `[FORMAT_OPTIONS]`

Show the current pool status.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--verbose` `<boolean>` ('default =' `0`)::

If enabled, will display additional data (e.g. statistics).

*pveceph pool ls* `[FORMAT_OPTIONS]`

List all pools and their settings (which are settable by the POST/PUT
endpoints).

*pveceph pool set* `<name>` `[OPTIONS]`

Change pool settings.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--application` `<cephfs | rbd | rgw>` ::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--min_size` `<integer> (1 - 7)` ::

Minimum number of replicas per object.

`--pg_autoscale_mode` `<off | on | warn>` ::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (1 - 32768)` ::

Number of placement groups.

`--pg_num_min` `<integer> (-N - 32768)` ::

Minimal number of placement groups.

`--size` `<integer> (1 - 7)` ::

Number of replicas per object.

`--target_size` `^(\d+(\.\d+)?)([KMGT])?$` ::

The estimated target size of the pool for the PG autoscaler.

`--target_size_ratio` `<number>` ::

The estimated target ratio of the pool for the PG autoscaler.

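For example, pool parameters can be adjusted after creation; the 2T target size below is a made-up figure handed to the PG autoscaler:

----
# Raise the replica counts and give the autoscaler a size estimate
pveceph pool set vm-pool --size 3 --min_size 2 --target_size 2T
----
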
*pveceph purge* `[OPTIONS]`

Destroy ceph related data and configuration files.

`--crash` `<boolean>` ::

Additionally purge Ceph crash logs, /var/lib/ceph/crash.

`--logs` `<boolean>` ::

Additionally purge Ceph logs, /var/log/ceph.

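A sketch of a complete purge; the assumption here is that all Ceph services and OSDs on the node have already been destroyed:

----
# Remove Ceph configuration plus logs and crash dumps
pveceph purge --logs 1 --crash 1
----
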
*pveceph start* `[OPTIONS]`

Start ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.

*pveceph status*

Get Ceph Status.

*pveceph stop* `[OPTIONS]`

Stop ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.

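As an example, a single service instance can be addressed with the `type.id` notation; `mon.pve1` assumes a node called 'pve1':

----
# Restart only the monitor on node 'pve1' (name is an example)
pveceph stop --service mon.pve1
pveceph start --service mon.pve1
----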