*pveceph* `<COMMAND> [ARGS] [OPTIONS]`

*pveceph createmgr*

An alias for 'pveceph mgr create'.

*pveceph createmon*

An alias for 'pveceph mon create'.

*pveceph createosd*

An alias for 'pveceph osd create'.

*pveceph createpool*

An alias for 'pveceph pool create'.

*pveceph destroymgr*

An alias for 'pveceph mgr destroy'.

*pveceph destroymon*

An alias for 'pveceph mon destroy'.

*pveceph destroyosd*

An alias for 'pveceph osd destroy'.

*pveceph destroypool*

An alias for 'pveceph pool destroy'.

*pveceph fs create* `[OPTIONS]`

Create a Ceph filesystem

`--add-storage` `<boolean>` ('default =' `0`)::

Configure the created CephFS as storage for this cluster.

`--name` `<string>` ('default =' `cephfs`)::

The ceph filesystem name.

`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::

Number of placement groups for the backing data pool. The metadata pool will use a quarter of this.
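
For example, a CephFS using the documented defaults could be created and added as a storage like this (illustrative invocation):

----
pveceph fs create --name cephfs --pg_num 128 --add-storage 1
----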

*pveceph help* `[OPTIONS]`

Get help about the specified command.

`--extra-args` `<array>` ::

Shows help for a specific command.

`--verbose` `<boolean>` ::

Verbose output format.

*pveceph init* `[OPTIONS]`

Create initial ceph default configuration and setup symlinks.

`--cluster-network` `<string>` ::

Declare a separate cluster network. OSDs will route heartbeat, object replication and recovery traffic over it.
+
NOTE: Requires option(s): `network`

`--disable_cephx` `<boolean>` ('default =' `0`)::

Disable cephx authentication.
+
WARNING: cephx is a security feature protecting against man-in-the-middle attacks. Only consider disabling cephx if your network is private!

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of available replicas per object to allow I/O

`--network` `<string>` ::

Use a specific network for all ceph related traffic

`--pg_bits` `<integer> (6 - 14)` ('default =' `6`)::

Placement group bits, used to specify the default number of placement groups.
+
NOTE: 'osd pool default pg num' does not work for default pools.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Targeted number of replicas per object
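
For example, Ceph could be initialized with separate public and cluster networks (the subnets below are placeholders for your own networks):

----
pveceph init --network 10.10.10.0/24 --cluster-network 10.10.20.0/24
----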

*pveceph install* `[OPTIONS]`

Install ceph related packages.

`--allow-experimental` `<boolean>` ('default =' `0`)::

Allow experimental versions. Use with care!

`--test-repository` `<boolean>` ('default =' `0`)::

Use the test, not the main repository. Use with care!

`--version` `<octopus | pacific>` ('default =' `pacific`)::

Ceph version to install.
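
For example, to install the default Pacific release from the main repository:

----
pveceph install --version pacific
----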

*pveceph lspools*

An alias for 'pveceph pool ls'.

*pveceph mds create* `[OPTIONS]`

Create Ceph Metadata Server (MDS)

`--hotstandby` `<boolean>` ('default =' `0`)::

Determines whether a ceph-mds daemon should poll and replay the log of an active MDS. Faster switch on MDS failure, but needs more idle resources.

`--name` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ('default =' `nodename`)::

The ID for the mds, when omitted the same as the nodename
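
For example, an MDS with hot-standby log replay could be created on the current node (its ID defaults to the nodename):

----
pveceph mds create --hotstandby 1
----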

*pveceph mds destroy* `<name>`

Destroy Ceph Metadata Server

`<name>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The name (ID) of the mds

*pveceph mgr create* `[OPTIONS]`

Create Ceph Manager

`--id` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the manager, when omitted the same as the nodename

*pveceph mgr destroy* `<id>`

Destroy Ceph Manager.

`<id>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID of the manager

*pveceph mon create* `[OPTIONS]`

Create Ceph Monitor and Manager

`--mon-address` `<string>` ::

Overwrites autodetected monitor IP address(es). Must be in the public network(s) of Ceph.

`--monid` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the monitor, when omitted the same as the nodename
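
For example, a monitor with an explicitly chosen address could be created like this (the IP below is a placeholder inside the Ceph public network):

----
pveceph mon create --mon-address 10.10.10.11
----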

*pveceph mon destroy* `<monid>`

Destroy Ceph Monitor and Manager.

`<monid>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

Monitor ID

*pveceph osd create* `<dev>` `[OPTIONS]`

Create OSD

`<dev>`: `<string>` ::

Block device name.

`--crush-device-class` `<string>` ::

Set the device class of the OSD in crush.

`--db_dev` `<string>` ::

Block device name for block.db.

`--db_dev_size` `<number> (1 - N)` ('default =' `bluestore_block_db_size or 10% of OSD size`)::

Size in GiB for block.db.
+
NOTE: Requires option(s): `db_dev`

`--encrypted` `<boolean>` ('default =' `0`)::

Enables encryption of the OSD.

`--wal_dev` `<string>` ::

Block device name for block.wal.

`--wal_dev_size` `<number> (0.5 - N)` ('default =' `bluestore_block_wal_size or 1% of OSD size`)::

Size in GiB for block.wal.
+
NOTE: Requires option(s): `wal_dev`
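
For example, an encrypted OSD with its block.db on a separate device might be created like this (device paths and the 60 GiB DB size are placeholders):

----
pveceph osd create /dev/sdf --encrypted 1 --db_dev /dev/nvme0n1 --db_dev_size 60
----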

*pveceph osd destroy* `<osdid>` `[OPTIONS]`

Destroy OSD

`<osdid>`: `<integer>` ::

OSD ID

`--cleanup` `<boolean>` ('default =' `0`)::

If set, we remove partition table entries.
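
For example, to destroy OSD 7 and also remove its partition table entries (the OSD ID is a placeholder):

----
pveceph osd destroy 7 --cleanup 1
----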

*pveceph pool create* `<name>` `[OPTIONS]`

Create Ceph pool

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--add_storages` `<boolean>` ::

Configure VM and CT storage using the new pool.

`--application` `<cephfs | rbd | rgw>` ('default =' `rbd`)::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of replicas per object

`--pg_autoscale_mode` `<off | on | warn>` ('default =' `warn`)::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (1 - 32768)` ('default =' `128`)::

Number of placement groups.

`--pg_num_min` `<integer> (-N - 32768)` ::

Minimal number of placement groups.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Number of replicas per object

`--target_size` `^(\d+(\.\d+)?)([KMGT])?$` ::

The estimated target size of the pool for the PG autoscaler.

`--target_size_ratio` `<number>` ::

The estimated target ratio of the pool for the PG autoscaler.
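
For example, a replicated RBD pool with autoscaling and matching VM/CT storages might be created like this (the pool name is a placeholder):

----
pveceph pool create vm-pool --size 3 --min_size 2 --pg_autoscale_mode on --add_storages 1
----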

*pveceph pool destroy* `<name>` `[OPTIONS]`

Destroy pool

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--force` `<boolean>` ('default =' `0`)::

If true, destroys the pool even if it is in use

`--remove_storages` `<boolean>` ('default =' `0`)::

Remove all pveceph-managed storages configured for this pool
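
For example, the same pool could be removed together with its pveceph-managed storage entries (the pool name is a placeholder):

----
pveceph pool destroy vm-pool --remove_storages 1
----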

*pveceph pool get* `<name>` `[OPTIONS]` `[FORMAT_OPTIONS]`

List pool settings.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--verbose` `<boolean>` ('default =' `0`)::

If enabled, will display additional data (e.g. statistics).

*pveceph pool ls* `[FORMAT_OPTIONS]`

List all pools.

*pveceph pool set* `<name>` `[OPTIONS]`

Change Ceph pool settings

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--application` `<cephfs | rbd | rgw>` ::

The application of the pool.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--min_size` `<integer> (1 - 7)` ::

Minimum number of replicas per object

`--pg_autoscale_mode` `<off | on | warn>` ::

The automatic PG scaling mode of the pool.

`--pg_num` `<integer> (1 - 32768)` ::

Number of placement groups.

`--pg_num_min` `<integer> (-N - 32768)` ::

Minimal number of placement groups.

`--size` `<integer> (1 - 7)` ::

Number of replicas per object

`--target_size` `^(\d+(\.\d+)?)([KMGT])?$` ::

The estimated target size of the pool for the PG autoscaler.

`--target_size_ratio` `<number>` ::

The estimated target ratio of the pool for the PG autoscaler.
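
For example, autoscaling could be enabled for a pool expected to hold roughly 20% of the cluster's data (pool name and ratio are placeholders):

----
pveceph pool set vm-pool --pg_autoscale_mode on --target_size_ratio 0.2
----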

*pveceph purge* `[OPTIONS]`

Destroy ceph related data and configuration files.

`--crash` `<boolean>` ::

Additionally purge Ceph crash logs, /var/lib/ceph/crash.

`--logs` `<boolean>` ::

Additionally purge Ceph logs, /var/log/ceph.

*pveceph start* `[OPTIONS]`

Start ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.

*pveceph status*

Get Ceph Status.

*pveceph stop* `[OPTIONS]`

Stop ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.
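
For example, a single OSD service can be stopped instead of the whole ceph.target (the OSD ID is a placeholder):

----
pveceph stop --service osd.7
----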