*pveceph* `<COMMAND> [ARGS] [OPTIONS]`

*pveceph createmgr*

An alias for 'pveceph mgr create'.

*pveceph createmon*

An alias for 'pveceph mon create'.

*pveceph createosd*

An alias for 'pveceph osd create'.

*pveceph createpool*

An alias for 'pveceph pool create'.

*pveceph destroymgr*

An alias for 'pveceph mgr destroy'.

*pveceph destroymon*

An alias for 'pveceph mon destroy'.

*pveceph destroyosd*

An alias for 'pveceph osd destroy'.

*pveceph destroypool*

An alias for 'pveceph pool destroy'.

*pveceph fs create* `[OPTIONS]`

Create a Ceph filesystem.

`--add-storage` `<boolean>` ('default =' `0`)::

Configure the created CephFS as storage for this cluster.

`--name` `<string>` ('default =' `cephfs`)::

The Ceph filesystem name.

`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::

Number of placement groups for the backing data pool. The metadata pool will use a quarter of this.

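For illustration, a possible invocation that creates a CephFS and registers it as storage (the name and PG count are just examples):

----
# pveceph fs create --name cephfs --pg_num 128 --add-storage 1
----
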
*pveceph fs destroy* `<name>` `[OPTIONS]`

Destroy a Ceph filesystem.

`<name>`: `<string>` ::

The Ceph filesystem name.

`--remove-pools` `<boolean>` ('default =' `0`)::

Remove data and metadata pools configured for this fs.

`--remove-storages` `<boolean>` ('default =' `0`)::

Remove all pveceph-managed storages configured for this fs.

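A sketch of a full teardown, including the backing pools and storage entries (the filesystem name is hypothetical):

----
# pveceph fs destroy cephfs --remove-storages 1 --remove-pools 1
----
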
*pveceph help* `[OPTIONS]`

Get help about specified command.

`--extra-args` `<array>` ::

Shows help for a specific command.

`--verbose` `<boolean>` ::

Verbose output format.

*pveceph init* `[OPTIONS]`

Create initial Ceph default configuration and set up symlinks.

`--cluster-network` `<string>` ::

Declare a separate cluster network; OSDs will route heartbeat, object replication and recovery traffic over it.
+
NOTE: Requires option(s): `network`

`--disable_cephx` `<boolean>` ('default =' `0`)::

Disable cephx authentication.
+
WARNING: cephx is a security feature protecting against man-in-the-middle attacks. Only consider disabling cephx if your network is private!

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of available replicas per object to allow I/O.

`--network` `<string>` ::

Use a specific network for all Ceph-related traffic.

`--pg_bits` `<integer> (6 - 14)` ('default =' `6`)::

Placement group bits, used to specify the default number of placement groups.
+
NOTE: 'osd pool default pg num' does not work for default pools.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Targeted number of replicas per object.

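A possible initialization with separate public and cluster networks (the CIDRs are placeholders for your own subnets):

----
# pveceph init --network 10.10.10.0/24 --cluster-network 10.10.20.0/24
----
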
*pveceph install* `[OPTIONS]`

Install Ceph-related packages.

`--allow-experimental` `<boolean>` ('default =' `0`)::

Allow experimental versions. Use with care!

`--test-repository` `<boolean>` ('default =' `0`)::

Use the test, not the main repository. Use with care!

`--version` `<octopus | pacific | quincy>` ('default =' `pacific`)::

Ceph version to install.

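For example, to install a specific release rather than the default:

----
# pveceph install --version quincy
----
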
*pveceph lspools*

An alias for 'pveceph pool ls'.

*pveceph mds create* `[OPTIONS]`

Create Ceph Metadata Server (MDS).

`--hotstandby` `<boolean>` ('default =' `0`)::

Determines whether a ceph-mds daemon should poll and replay the log of an active MDS. Faster switchover on MDS failure, but needs more idle resources.

`--name` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ('default =' `nodename`)::

The ID for the MDS; when omitted, the same as the nodename.

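A possible invocation with an explicit ID and a hot standby (the ID is illustrative):

----
# pveceph mds create --name mds1 --hotstandby 1
----
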
*pveceph mds destroy* `<name>`

Destroy Ceph Metadata Server.

`<name>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The name (ID) of the MDS.

*pveceph mgr create* `[OPTIONS]`

Create Ceph Manager.

`--id` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the manager; when omitted, the same as the nodename.

*pveceph mgr destroy* `<id>`

Destroy Ceph Manager.

`<id>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID of the manager.

*pveceph mon create* `[OPTIONS]`

Create Ceph Monitor and Manager.

`--mon-address` `<string>` ::

Overwrites autodetected monitor IP address(es). Must be in the public network(s) of Ceph.

`--monid` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the monitor; when omitted, the same as the nodename.

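A sketch pinning the new monitor to a specific address in Ceph's public network (the IP is hypothetical):

----
# pveceph mon create --mon-address 10.10.10.11
----
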
*pveceph mon destroy* `<monid>`

Destroy Ceph Monitor and Manager.

`<monid>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

Monitor ID.

*pveceph osd create* `<dev>` `[OPTIONS]`

Create OSD.

`<dev>`: `<string>` ::

Block device name.

`--crush-device-class` `<string>` ::

Set the device class of the OSD in CRUSH.

`--db_dev` `<string>` ::

Block device name for block.db.

`--db_dev_size` `<number> (1 - N)` ('default =' `bluestore_block_db_size or 10% of OSD size`)::

Size in GiB for block.db.
+
NOTE: Requires option(s): `db_dev`

`--encrypted` `<boolean>` ('default =' `0`)::

Enables encryption of the OSD.

`--wal_dev` `<string>` ::

Block device name for block.wal.

`--wal_dev_size` `<number> (0.5 - N)` ('default =' `bluestore_block_wal_size or 1% of OSD size`)::

Size in GiB for block.wal.
+
NOTE: Requires option(s): `wal_dev`

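For illustration, an encrypted OSD with its block.db on a separate device (device paths and size are placeholders):

----
# pveceph osd create /dev/sdb --encrypted 1 --db_dev /dev/nvme0n1 --db_dev_size 32
----
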
*pveceph osd destroy* `<osdid>` `[OPTIONS]`

Destroy OSD.

`<osdid>`: `<integer>` ::

OSD ID.

`--cleanup` `<boolean>` ('default =' `0`)::

If set, the partition table entries are removed as well.

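A possible teardown of a single OSD, including its partition table entries (the ID is an example):

----
# pveceph osd destroy 2 --cleanup 1
----
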
*pveceph pool create* `<name>` `[OPTIONS]`

Create Ceph pool.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--add_storages` `<boolean>` ('default =' `0; for erasure coded pools: 1`)::

Configure VM and CT storage using the new pool.

`--application` `<cephfs | rbd | rgw>` ('default =' `rbd`)::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--erasure-coding` `k=<integer> ,m=<integer> [,device-class=<class>] [,failure-domain=<domain>] [,profile=<profile>]` ::

Create an erasure coded pool for RBD with an accompanying replicated pool for metadata storage. With EC, the common Ceph options 'size', 'min_size' and 'crush_rule' will be applied to the metadata pool.

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of replicas per object.

`--pg_autoscale_mode` `<off | on | warn>` ('default =' `warn`)::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (1 - 32768)` ('default =' `128`)::

Number of placement groups.

`--pg_num_min` `<integer> (-N - 32768)` ::

Minimal number of placement groups.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Number of replicas per object.

`--target_size` `^(\d+(\.\d+)?)([KMGT])?$` ::

The estimated target size of the pool for the PG autoscaler.

`--target_size_ratio` `<number>` ::

The estimated target ratio of the pool for the PG autoscaler.

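Two possible invocations, one replicated and one erasure coded (pool names and k/m values are illustrative):

----
# pveceph pool create rbdpool --size 3 --min_size 2 --pg_autoscale_mode on
# pveceph pool create ecpool --erasure-coding k=4,m=2
----
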
*pveceph pool destroy* `<name>` `[OPTIONS]`

Destroy pool.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--force` `<boolean>` ('default =' `0`)::

If true, destroys the pool even if it is in use.

`--remove_ecprofile` `<boolean>` ('default =' `1`)::

Remove the erasure code profile. Defaults to true, if applicable.

`--remove_storages` `<boolean>` ('default =' `0`)::

Remove all pveceph-managed storages configured for this pool.

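A sketch removing a pool along with its pveceph-managed storage entries (the pool name is hypothetical):

----
# pveceph pool destroy rbdpool --remove_storages 1
----
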
*pveceph pool get* `<name>` `[OPTIONS]` `[FORMAT_OPTIONS]`

List pool settings.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--verbose` `<boolean>` ('default =' `0`)::

If enabled, will display additional data (e.g. statistics).

*pveceph pool ls* `[FORMAT_OPTIONS]`

List all pools.

*pveceph pool set* `<name>` `[OPTIONS]`

Change pool settings.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--application` `<cephfs | rbd | rgw>` ::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--min_size` `<integer> (1 - 7)` ::

Minimum number of replicas per object.

`--pg_autoscale_mode` `<off | on | warn>` ::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (1 - 32768)` ::

Number of placement groups.

`--pg_num_min` `<integer> (-N - 32768)` ::

Minimal number of placement groups.

`--size` `<integer> (1 - 7)` ::

Number of replicas per object.

`--target_size` `^(\d+(\.\d+)?)([KMGT])?$` ::

The estimated target size of the pool for the PG autoscaler.

`--target_size_ratio` `<number>` ::

The estimated target ratio of the pool for the PG autoscaler.

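For example, enabling the PG autoscaler and giving it a target size hint (the values are placeholders):

----
# pveceph pool set rbdpool --pg_autoscale_mode on --target_size 500G
----
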
*pveceph purge* `[OPTIONS]`

Destroy Ceph-related data and configuration files.

`--crash` `<boolean>` ::

Additionally purge Ceph crash logs, /var/lib/ceph/crash.

`--logs` `<boolean>` ::

Additionally purge Ceph logs, /var/log/ceph.

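A possible full purge that also drops logs and crash dumps; destructive, shown only as a sketch:

----
# pveceph purge --logs 1 --crash 1
----
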
*pveceph start* `[OPTIONS]`

Start Ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.

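For example, starting a single OSD service instead of the whole ceph.target (the OSD ID is illustrative):

----
# pveceph start --service osd.2
----
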
*pveceph status*

Get Ceph status.

*pveceph stop* `[OPTIONS]`

Stop Ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.