*pveceph* `<COMMAND> [ARGS] [OPTIONS]`

*pveceph createmgr*

An alias for 'pveceph mgr create'.

*pveceph createmon*

An alias for 'pveceph mon create'.

*pveceph createosd*

An alias for 'pveceph osd create'.

*pveceph createpool*

An alias for 'pveceph pool create'.

*pveceph destroymgr*

An alias for 'pveceph mgr destroy'.

*pveceph destroymon*

An alias for 'pveceph mon destroy'.

*pveceph destroyosd*

An alias for 'pveceph osd destroy'.

*pveceph destroypool*

An alias for 'pveceph pool destroy'.

*pveceph fs create* `[OPTIONS]`

Create a Ceph filesystem

`--add-storage` `<boolean>` ('default =' `0`)::

Configure the created CephFS as storage for this cluster.

`--name` `<string>` ('default =' `cephfs`)::

The ceph filesystem name.

`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::

Number of placement groups for the backing data pool. The metadata pool will use a quarter of this.
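
For example, a minimal invocation using only the options documented above (illustrative, not from the source) might be:

----
# Create a CephFS named "cephfs" with 128 data-pool PGs and add it as
# cluster storage (assumes monitors and at least one MDS already exist).
pveceph fs create --name cephfs --pg_num 128 --add-storage 1
----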

*pveceph help* `[OPTIONS]`

Get help about specified command.

`--extra-args` `<array>` ::

Shows help for a specific command

`--verbose` `<boolean>` ::

Verbose output format.

*pveceph init* `[OPTIONS]`

Create initial ceph default configuration and setup symlinks.

`--cluster-network` `<string>` ::

Declare a separate cluster network, OSDs will route heartbeat, object replication and recovery traffic over it
+
NOTE: Requires option(s): `network`

`--disable_cephx` `<boolean>` ('default =' `0`)::

Disable cephx authentication.
+
WARNING: cephx is a security feature protecting against man-in-the-middle attacks. Only consider disabling cephx if your network is private!

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of available replicas per object to allow I/O

`--network` `<string>` ::

Use specific network for all ceph related traffic

`--pg_bits` `<integer> (6 - 14)` ('default =' `6`)::

Placement group bits, used to specify the default number of placement groups.
+
NOTE: 'osd pool default pg num' does not work for default pools.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Targeted number of replicas per object
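
For example, an illustrative initialization that uses a dedicated Ceph network and a separate cluster network (the subnets are placeholders, not from the source):

----
# Write the initial ceph.conf, with OSD replication/heartbeat traffic on
# its own network.
pveceph init --network 10.10.10.0/24 --cluster-network 10.10.20.0/24
----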

*pveceph install* `[OPTIONS]`

Install ceph related packages.

`--allow-experimental` `<boolean>` ('default =' `0`)::

Allow experimental versions. Use with care!

`--version` `<luminous | nautilus | octopus>` ('default =' `nautilus`)::

Ceph version to install.
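
For example (illustrative; run on every node that should provide Ceph services):

----
# Install the Nautilus packages, which is also the default version.
pveceph install --version nautilus
----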

*pveceph lspools*

An alias for 'pveceph pool ls'.

*pveceph mds create* `[OPTIONS]`

Create Ceph Metadata Server (MDS)

`--hotstandby` `<boolean>` ('default =' `0`)::

Determines whether a ceph-mds daemon should poll and replay the log of an active MDS. Faster switch on MDS failure, but needs more idle resources.

`--name` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ('default =' `nodename`)::

The ID for the mds, when omitted the same as the nodename
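
For example (illustrative; the ID is a placeholder, without `--name` the nodename is used):

----
# Create an MDS with an explicit ID and enable hot-standby log replay.
pveceph mds create --name mds1 --hotstandby 1
----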

*pveceph mds destroy* `<name>`

Destroy Ceph Metadata Server

`<name>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The name (ID) of the mds

*pveceph mgr create* `[OPTIONS]`

Create Ceph Manager

`--id` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the manager, when omitted the same as the nodename
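
For example (illustrative; the ID is a placeholder):

----
# Create a manager daemon on this node with an explicit ID.
pveceph mgr create --id mgr1
----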

*pveceph mgr destroy* `<id>`

Destroy Ceph Manager.

`<id>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID of the manager

*pveceph mon create* `[OPTIONS]`

Create Ceph Monitor and Manager

`--mon-address` `<string>` ::

Overwrites autodetected monitor IP address. Must be in the public network of ceph.

`--monid` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the monitor, when omitted the same as the nodename
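
For example (illustrative; the address is a placeholder in ceph's public network):

----
# Create a monitor, overriding the autodetected monitor address.
pveceph mon create --mon-address 10.10.10.11
----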

*pveceph mon destroy* `<monid>`

Destroy Ceph Monitor and Manager.

`<monid>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

Monitor ID

*pveceph osd create* `<dev>` `[OPTIONS]`

Create OSD

`<dev>`: `<string>` ::

Block device name.

`--db_dev` `<string>` ::

Block device name for block.db.

`--db_size` `<number> (1 - N)` ('default =' `bluestore_block_db_size or 10% of OSD size`)::

Size in GiB for block.db.
+
NOTE: Requires option(s): `db_dev`

`--encrypted` `<boolean>` ('default =' `0`)::

Enables encryption of the OSD.

`--wal_dev` `<string>` ::

Block device name for block.wal.

`--wal_size` `<number> (0.5 - N)` ('default =' `bluestore_block_wal_size or 1% of OSD size`)::

Size in GiB for block.wal.
+
NOTE: Requires option(s): `wal_dev`
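
For example (illustrative; the device paths and DB size are placeholders, adjust them to your hardware):

----
# Create an encrypted OSD on /dev/sdb and put its block.db on a faster device.
pveceph osd create /dev/sdb --encrypted 1 --db_dev /dev/nvme0n1 --db_size 60
----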

*pveceph osd destroy* `<osdid>` `[OPTIONS]`

Destroy OSD

`<osdid>`: `<integer>` ::

OSD ID

`--cleanup` `<boolean>` ('default =' `0`)::

If set, we remove partition table entries.
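
For example (illustrative; the OSD should already be stopped and out of the cluster):

----
# Destroy OSD 3 and clean up the partition table entries of its disk.
pveceph osd destroy 3 --cleanup 1
----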

*pveceph pool create* `<name>` `[OPTIONS]`

Create POOL

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--add_storages` `<boolean>` ::

Configure VM and CT storage using the new pool.

`--application` `<cephfs | rbd | rgw>` ::

The application of the pool, 'rbd' by default.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of replicas per object

`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::

Number of placement groups.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Number of replicas per object
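
For example (illustrative; the pool name and PG count are placeholders):

----
# Create a 3/2 replicated RBD pool with 128 PGs and add it as VM/CT storage.
pveceph pool create vmpool --size 3 --min_size 2 --pg_num 128 --add_storages 1
----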

*pveceph pool destroy* `<name>` `[OPTIONS]`

Destroy pool

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--force` `<boolean>` ('default =' `0`)::

If true, destroys pool even if in use

`--remove_storages` `<boolean>` ('default =' `0`)::

Remove all pveceph-managed storages configured for this pool
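
For example (illustrative; `vmpool` is a placeholder pool name):

----
# Destroy the pool and remove any pveceph-managed storage entries using it.
pveceph pool destroy vmpool --remove_storages 1
----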

*pveceph pool ls* `[FORMAT_OPTIONS]`

List all pools.

*pveceph purge* `[OPTIONS]`

Destroy ceph related data and configuration files.

`--crash` `<boolean>` ::

Additionally purge Ceph crash logs, /var/lib/ceph/crash.

`--logs` `<boolean>` ::

Additionally purge Ceph logs, /var/log/ceph.
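
For example (illustrative; this removes Ceph data and configuration from the local node):

----
# Purge Ceph from this node, including log files and crash reports.
pveceph purge --logs 1 --crash 1
----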

*pveceph start* `[OPTIONS]`

Start ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.
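
For example (illustrative; `osd.2` is a placeholder service name):

----
# Start a single OSD service; without --service, ceph.target is started.
pveceph start --service osd.2
----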

*pveceph status*

Get ceph status.

*pveceph stop* `[OPTIONS]`

Stop ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.
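
For example (illustrative; `mon.node1` is a placeholder service name):

----
# Stop a single monitor service; without --service, ceph.target is stopped.
pveceph stop --service mon.node1
----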