*pveceph* `<COMMAND> [ARGS] [OPTIONS]`

*pveceph createmgr*

An alias for 'pveceph mgr create'.

*pveceph createmon*

An alias for 'pveceph mon create'.

*pveceph createosd*

An alias for 'pveceph osd create'.

*pveceph createpool*

An alias for 'pveceph pool create'.

*pveceph destroymgr*

An alias for 'pveceph mgr destroy'.

*pveceph destroymon*

An alias for 'pveceph mon destroy'.

*pveceph destroyosd*

An alias for 'pveceph osd destroy'.

*pveceph destroypool*

An alias for 'pveceph pool destroy'.

*pveceph fs create* `[OPTIONS]`

Create a Ceph filesystem

`--add-storage` `<boolean>` ('default =' `0`)::

Configure the created CephFS as storage for this cluster.

`--name` `<string>` ('default =' `cephfs`)::

The ceph filesystem name.

`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::

Number of placement groups for the backing data pool. The metadata pool will use a quarter of this.

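For example, to create a CephFS and directly register it as storage for the cluster (the `pg_num` value shown is illustrative):

----
# pveceph fs create --name cephfs --pg_num 64 --add-storage 1
----
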
*pveceph help* `[OPTIONS]`

Get help about specified command.

`--extra-args` `<array>` ::

Shows help for a specific command

`--verbose` `<boolean>` ::

Verbose output format.

*pveceph init* `[OPTIONS]`

Create initial ceph default configuration and set up symlinks.

`--cluster-network` `<string>` ::

Declare a separate cluster network; OSDs will route heartbeat, object replication and recovery traffic over it
+
NOTE: Requires option(s): `network`

`--disable_cephx` `<boolean>` ('default =' `0`)::

Disable cephx authentication.
+
WARNING: cephx is a security feature protecting against man-in-the-middle attacks. Only consider disabling cephx if your network is private!

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of available replicas per object to allow I/O

`--network` `<string>` ::

Use a specific network for all ceph-related traffic

`--pg_bits` `<integer> (6 - 14)` ('default =' `6`)::

Placement group bits, used to specify the default number of placement groups.
+
NOTE: 'osd pool default pg num' does not work for default pools.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Targeted number of replicas per object

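For example, to initialize Ceph with a dedicated public network and a separate cluster network (the subnets shown are illustrative):

----
# pveceph init --network 10.10.10.0/24 --cluster-network 10.10.20.0/24
----
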
*pveceph install* `[OPTIONS]`

Install ceph related packages.

`--allow-experimental` `<boolean>` ('default =' `0`)::

Allow experimental versions. Use with care!

`--test-repository` `<boolean>` ('default =' `0`)::

Use the test, not the main repository. Use with care!

`--version` `<octopus | pacific>` ('default =' `pacific`)::

Ceph version to install.

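For example, to install the Pacific release from the main repository:

----
# pveceph install --version pacific
----
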
*pveceph lspools*

An alias for 'pveceph pool ls'.

*pveceph mds create* `[OPTIONS]`

Create Ceph Metadata Server (MDS)

`--hotstandby` `<boolean>` ('default =' `0`)::

Determines whether a ceph-mds daemon should poll and replay the log of an active MDS. Faster switch on MDS failure, but needs more idle resources.

`--name` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ('default =' `nodename`)::

The ID for the MDS; when omitted, the nodename is used.

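For example, to create a hot-standby MDS with an explicit ID (the ID shown is illustrative):

----
# pveceph mds create --name backup-mds --hotstandby 1
----
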
*pveceph mds destroy* `<name>`

Destroy Ceph Metadata Server

`<name>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The name (ID) of the MDS

*pveceph mgr create* `[OPTIONS]`

Create Ceph Manager

`--id` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the manager; when omitted, the nodename is used.

*pveceph mgr destroy* `<id>`

Destroy Ceph Manager.

`<id>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID of the manager

*pveceph mon create* `[OPTIONS]`

Create Ceph Monitor and Manager

`--mon-address` `<string>` ::

Overwrites autodetected monitor IP address(es). Must be in the public network(s) of Ceph.

`--monid` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the monitor; when omitted, the nodename is used.

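For example, to create a monitor with an explicitly chosen ID and address (both values shown are illustrative):

----
# pveceph mon create --monid mon0 --mon-address 10.10.10.11
----
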
*pveceph mon destroy* `<monid>`

Destroy Ceph Monitor and Manager.

`<monid>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

Monitor ID

*pveceph osd create* `<dev>` `[OPTIONS]`

Create OSD

`<dev>`: `<string>` ::

Block device name.

`--crush-device-class` `<string>` ::

Set the device class of the OSD in crush.

`--db_dev` `<string>` ::

Block device name for block.db.

`--db_dev_size` `<number> (1 - N)` ('default =' `bluestore_block_db_size or 10% of OSD size`)::

Size in GiB for block.db.
+
NOTE: Requires option(s): `db_dev`

`--encrypted` `<boolean>` ('default =' `0`)::

Enables encryption of the OSD.

`--wal_dev` `<string>` ::

Block device name for block.wal.

`--wal_dev_size` `<number> (0.5 - N)` ('default =' `bluestore_block_wal_size or 1% of OSD size`)::

Size in GiB for block.wal.
+
NOTE: Requires option(s): `wal_dev`

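For example, to create an encrypted OSD that keeps its block.db on a separate, faster device (the device names and size are illustrative):

----
# pveceph osd create /dev/sdb --encrypted 1 --db_dev /dev/nvme0n1 --db_dev_size 20
----
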
*pveceph osd destroy* `<osdid>` `[OPTIONS]`

Destroy OSD

`<osdid>`: `<integer>` ::

OSD ID

`--cleanup` `<boolean>` ('default =' `0`)::

If set, we remove partition table entries.

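For example, to destroy an OSD and also remove its partition table entries (the OSD ID is illustrative):

----
# pveceph osd destroy 4 --cleanup 1
----
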
*pveceph pool create* `<name>` `[OPTIONS]`

Create POOL

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--add_storages` `<boolean>` ::

Configure VM and CT storage using the new pool.

`--application` `<cephfs | rbd | rgw>` ('default =' `rbd`)::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of replicas per object

`--pg_autoscale_mode` `<off | on | warn>` ('default =' `warn`)::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (1 - 32768)` ('default =' `128`)::

Number of placement groups.

`--pg_num_min` `<integer> (-N - 32768)` ::

Minimal number of placement groups.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Number of replicas per object

`--target_size` `^(\d+(\.\d+)?)([KMGT])?$` ::

The estimated target size of the pool for the PG autoscaler.

`--target_size_ratio` `<number>` ::

The estimated target ratio of the pool for the PG autoscaler.

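For example, to create a replicated RBD pool with PG autoscaling enabled and register it as VM and CT storage (the pool name is illustrative):

----
# pveceph pool create vmpool --size 3 --min_size 2 --pg_autoscale_mode on --add_storages 1
----
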
*pveceph pool destroy* `<name>` `[OPTIONS]`

Destroy pool

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--force` `<boolean>` ('default =' `0`)::

If true, destroys the pool even if it is in use

`--remove_storages` `<boolean>` ('default =' `0`)::

Remove all pveceph-managed storages configured for this pool

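For example, to destroy a pool together with the pveceph-managed storages configured for it (the pool name is illustrative):

----
# pveceph pool destroy vmpool --remove_storages 1
----
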
*pveceph pool get* `<name>` `[OPTIONS]` `[FORMAT_OPTIONS]`

List pool settings.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--verbose` `<boolean>` ('default =' `0`)::

If enabled, will display additional data (e.g. statistics).

*pveceph pool ls* `[FORMAT_OPTIONS]`

List all pools.

*pveceph pool set* `<name>` `[OPTIONS]`

Change POOL settings

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--application` `<cephfs | rbd | rgw>` ::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--min_size` `<integer> (1 - 7)` ::

Minimum number of replicas per object

`--pg_autoscale_mode` `<off | on | warn>` ::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (1 - 32768)` ::

Number of placement groups.

`--pg_num_min` `<integer> (-N - 32768)` ::

Minimal number of placement groups.

`--size` `<integer> (1 - 7)` ::

Number of replicas per object

`--target_size` `^(\d+(\.\d+)?)([KMGT])?$` ::

The estimated target size of the pool for the PG autoscaler.

`--target_size_ratio` `<number>` ::

The estimated target ratio of the pool for the PG autoscaler.

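For example, to hand an existing pool over to the PG autoscaler with an estimated target size (the pool name and size are illustrative):

----
# pveceph pool set vmpool --pg_autoscale_mode on --target_size 500G
----
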
*pveceph purge* `[OPTIONS]`

Destroy ceph related data and configuration files.

`--crash` `<boolean>` ::

Additionally purge Ceph crash logs, /var/lib/ceph/crash.

`--logs` `<boolean>` ::

Additionally purge Ceph logs, /var/log/ceph.

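For example, to purge a node's Ceph data and configuration, including log files and crash logs:

----
# pveceph purge --logs 1 --crash 1
----
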
*pveceph start* `[OPTIONS]`

Start ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.

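For example, to start only a specific monitor instance instead of the whole `ceph.target` (the instance name is illustrative):

----
# pveceph start --service mon.node1
----
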
*pveceph status*

Get Ceph Status.

*pveceph stop* `[OPTIONS]`

Stop ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.