*pveceph* `<COMMAND> [ARGS] [OPTIONS]`

*pveceph createmgr*

An alias for 'pveceph mgr create'.

*pveceph createmon*

An alias for 'pveceph mon create'.

*pveceph createosd*

An alias for 'pveceph osd create'.

*pveceph createpool*

An alias for 'pveceph pool create'.

*pveceph destroymgr*

An alias for 'pveceph mgr destroy'.

*pveceph destroymon*

An alias for 'pveceph mon destroy'.

*pveceph destroyosd*

An alias for 'pveceph osd destroy'.

*pveceph destroypool*

An alias for 'pveceph pool destroy'.

*pveceph fs create* `[OPTIONS]`

Create a Ceph filesystem

`--add-storage` `<boolean>` ('default =' `0`)::

Configure the created CephFS as storage for this cluster.

`--name` `<string>` ('default =' `cephfs`)::

The ceph filesystem name.

`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::

Number of placement groups for the backing data pool. The metadata pool will use a quarter of this.

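For example, a minimal sketch of creating a CephFS and registering it as storage; the PG count of 64 is only an illustrative value, the metadata pool then gets a quarter of it:

----
# 64 PGs for the data pool is a placeholder value
pveceph fs create --name cephfs --pg_num 64 --add-storage 1
----
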
*pveceph help* `[OPTIONS]`

Get help about specified command.

`--extra-args` `<array>` ::

Shows help for a specific command

`--verbose` `<boolean>` ::

Verbose output format.

*pveceph init* `[OPTIONS]`

Create initial ceph default configuration and setup symlinks.

`--cluster-network` `<string>` ::

Declare a separate cluster network; OSDs will route heartbeat, object replication and recovery traffic over it.
+
NOTE: Requires option(s): `network`

`--disable_cephx` `<boolean>` ('default =' `0`)::

Disable cephx authentication.
+
WARNING: cephx is a security feature protecting against man-in-the-middle attacks. Only consider disabling cephx if your network is private!

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of available replicas per object to allow I/O.

`--network` `<string>` ::

Use a specific network for all Ceph-related traffic.

`--pg_bits` `<integer> (6 - 14)` ('default =' `6`)::

Placement group bits, used to specify the default number of placement groups.
+
NOTE: 'osd pool default pg num' does not work for default pools.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Targeted number of replicas per object.

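For example, a minimal sketch of initializing Ceph with a dedicated public and cluster network; the CIDR ranges below are placeholders for your own subnets:

----
# hypothetical subnets; replace with the networks used in your cluster
pveceph init --network 10.10.10.0/24 --cluster-network 10.10.20.0/24
----
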
*pveceph install* `[OPTIONS]`

Install ceph related packages.

`--allow-experimental` `<boolean>` ('default =' `0`)::

Allow experimental versions. Use with care!

`--version` `<luminous | nautilus | octopus>` ('default =' `nautilus`)::

Ceph version to install.

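For example, installing a specific Ceph release could look like the following; the chosen release is just an illustration, pick one of the values listed above:

----
# install the Nautilus release explicitly (also the current default)
pveceph install --version nautilus
----
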
*pveceph lspools*

An alias for 'pveceph pool ls'.

*pveceph mds create* `[OPTIONS]`

Create Ceph Metadata Server (MDS)

`--hotstandby` `<boolean>` ('default =' `0`)::

Determines whether a ceph-mds daemon should poll and replay the log of an active MDS. Faster switchover on MDS failure, but needs more idle resources.

`--name` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ('default =' `nodename`)::

The ID for the MDS; when omitted, the nodename is used.

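For example, a sketch of creating an MDS with hot-standby log replay enabled; the explicit ID is a made-up value, by default the nodename is used:

----
# 'mds-a' is a hypothetical MDS ID for illustration
pveceph mds create --name mds-a --hotstandby 1
----
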
*pveceph mds destroy* `<name>`

Destroy Ceph Metadata Server

`<name>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The name (ID) of the mds

*pveceph mgr create* `[OPTIONS]`

Create Ceph Manager

`--id` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the manager; when omitted, the nodename is used.

*pveceph mgr destroy* `<id>`

Destroy Ceph Manager.

`<id>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID of the manager

*pveceph mon create* `[OPTIONS]`

Create Ceph Monitor and Manager

`--mon-address` `<string>` ::

Overwrites the autodetected monitor IP address. Must be in the public network of Ceph.

`--monid` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the monitor; when omitted, the nodename is used.

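For example, a sketch of creating a monitor with an explicitly chosen address; the IP below is a placeholder that must lie in the Ceph public network:

----
# 10.10.10.11 is a placeholder address in the (assumed) public network
pveceph mon create --mon-address 10.10.10.11
----
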
*pveceph mon destroy* `<monid>`

Destroy Ceph Monitor and Manager.

`<monid>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

Monitor ID

*pveceph osd create* `<dev>` `[OPTIONS]`

Create OSD

`<dev>`: `<string>` ::

Block device name.

`--crush-device-class` `<string>` ::

Set the device class of the OSD in crush.

`--db_dev` `<string>` ::

Block device name for block.db.

`--db_size` `<number> (1 - N)` ('default =' `bluestore_block_db_size or 10% of OSD size`)::

Size in GiB for block.db.
+
NOTE: Requires option(s): `db_dev`

`--encrypted` `<boolean>` ('default =' `0`)::

Enables encryption of the OSD.

`--wal_dev` `<string>` ::

Block device name for block.wal.

`--wal_size` `<number> (0.5 - N)` ('default =' `bluestore_block_wal_size or 1% of OSD size`)::

Size in GiB for block.wal.
+
NOTE: Requires option(s): `wal_dev`

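For example, a sketch of creating an encrypted OSD with its block.db on a separate device; the device paths and the 20 GiB DB size are placeholders:

----
# /dev/sdb and /dev/nvme0n1 are placeholder device names
pveceph osd create /dev/sdb --db_dev /dev/nvme0n1 --db_size 20 --encrypted 1
----
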
*pveceph osd destroy* `<osdid>` `[OPTIONS]`

Destroy OSD

`<osdid>`: `<integer>` ::

OSD ID

`--cleanup` `<boolean>` ('default =' `0`)::

If set, we remove partition table entries.

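For example, a sketch of removing an OSD and cleaning up its partition table entries; the OSD ID is a placeholder:

----
# '2' is a placeholder OSD ID
pveceph osd destroy 2 --cleanup 1
----
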
*pveceph pool create* `<name>` `[OPTIONS]`

Create POOL

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--add_storages` `<boolean>` ::

Configure VM and CT storage using the new pool.

`--application` `<cephfs | rbd | rgw>` ('default =' `rbd`)::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of replicas per object

`--pg_autoscale_mode` `<off | on | warn>` ('default =' `warn`)::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::

Number of placement groups.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Number of replicas per object

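For example, a sketch of creating a replicated RBD pool and configuring it as VM and CT storage; the pool name and the PG count are placeholder values:

----
# 'vm-pool' and 128 PGs are placeholder values
pveceph pool create vm-pool --size 3 --min_size 2 --pg_num 128 --add_storages 1
----
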
*pveceph pool destroy* `<name>` `[OPTIONS]`

Destroy pool

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--force` `<boolean>` ('default =' `0`)::

If true, destroys the pool even if it is in use.

`--remove_storages` `<boolean>` ('default =' `0`)::

Remove all pveceph-managed storages configured for this pool.

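For example, a sketch of removing a pool together with its pveceph-managed storage definitions; the pool name is a placeholder:

----
# 'vm-pool' is a placeholder pool name
pveceph pool destroy vm-pool --remove_storages 1
----
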
*pveceph pool ls* `[FORMAT_OPTIONS]`

List all pools.

*pveceph pool set* `<name>` `[OPTIONS]`

Change POOL settings

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--application` `<cephfs | rbd | rgw>` ::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--min_size` `<integer> (1 - 7)` ::

Minimum number of replicas per object

`--pg_autoscale_mode` `<off | on | warn>` ::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (8 - 32768)` ::

Number of placement groups.

`--size` `<integer> (1 - 7)` ::

Number of replicas per object

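For example, a sketch of enabling the PG autoscaler on an existing pool; the pool name is a placeholder:

----
# 'vm-pool' is a placeholder pool name
pveceph pool set vm-pool --pg_autoscale_mode on
----
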
*pveceph purge* `[OPTIONS]`

Destroy ceph related data and configuration files.

`--crash` `<boolean>` ::

Additionally purge Ceph crash logs, /var/lib/ceph/crash.

`--logs` `<boolean>` ::

Additionally purge Ceph logs, /var/log/ceph.

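For example, a sketch of purging Ceph from a node including its logs and crash data; note that this destroys the node's Ceph configuration:

----
# also remove /var/log/ceph and /var/lib/ceph/crash
pveceph purge --logs 1 --crash 1
----
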
*pveceph start* `[OPTIONS]`

Start ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.

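For example, a sketch of starting a single monitor instance instead of the whole `ceph.target`; the instance ID after the dot is a placeholder:

----
# 'mon.pve1' uses a placeholder instance ID
pveceph start --service mon.pve1
----
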
*pveceph status*

Get ceph status.

*pveceph stop* `[OPTIONS]`

Stop ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.