*pveceph* `<COMMAND> [ARGS] [OPTIONS]`

*pveceph createmgr*

An alias for 'pveceph mgr create'.

*pveceph createmon*

An alias for 'pveceph mon create'.

*pveceph createosd*

An alias for 'pveceph osd create'.

*pveceph createpool*

An alias for 'pveceph pool create'.

*pveceph destroymgr*

An alias for 'pveceph mgr destroy'.

*pveceph destroymon*

An alias for 'pveceph mon destroy'.

*pveceph destroyosd*

An alias for 'pveceph osd destroy'.

*pveceph destroypool*

An alias for 'pveceph pool destroy'.

*pveceph fs create* `[OPTIONS]`

Create a Ceph filesystem (CephFS).

`--add-storage` `<boolean>` ('default =' `0`)::

Configure the created CephFS as storage for this cluster.

`--name` `<string>` ('default =' `cephfs`)::

The Ceph filesystem name.

`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::

Number of placement groups for the backing data pool. The metadata pool will use a quarter of this.

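A minimal usage sketch; the filesystem name and PG count are illustrative, and `--add-storage` also registers the CephFS as a storage entry for this cluster:

----
# create a CephFS called "cephfs" with 128 data-pool placement groups
# and add it to the cluster's storage configuration
pveceph fs create --name cephfs --pg_num 128 --add-storage 1
----
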
*pveceph help* `[OPTIONS]`

Get help about the specified command.

`--extra-args` `<array>` ::

Shows help for a specific command.

`--verbose` `<boolean>` ::

Verbose output format.

*pveceph init* `[OPTIONS]`

Create the initial Ceph default configuration and set up symlinks.

`--cluster-network` `<string>` ::

Declare a separate cluster network. OSDs will route heartbeat, object replication and recovery traffic over it.
+
NOTE: Requires option(s): `network`

`--disable_cephx` `<boolean>` ('default =' `0`)::

Disable cephx authentication.
+
WARNING: cephx is a security feature protecting against man-in-the-middle attacks. Only consider disabling cephx if your network is private!

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of available replicas per object to allow I/O.

`--network` `<string>` ::

Use a specific network for all Ceph related traffic.

`--pg_bits` `<integer> (6 - 14)` ('default =' `6`)::

Placement group bits, used to specify the default number of placement groups.
+
NOTE: 'osd pool default pg num' does not work for default pools.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Targeted number of replicas per object.

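A minimal usage sketch; the CIDRs are illustrative and assume a dedicated network for OSD replication and heartbeat traffic:

----
# create the initial Ceph configuration, using 10.10.10.0/24 for public
# traffic and 10.10.20.0/24 for OSD replication/heartbeat traffic
pveceph init --network 10.10.10.0/24 --cluster-network 10.10.20.0/24
----
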
*pveceph install* `[OPTIONS]`

Install Ceph related packages.

`--allow-experimental` `<boolean>` ('default =' `0`)::

Allow experimental versions. Use with care!

`--version` `<luminous | nautilus | octopus>` ('default =' `nautilus`)::

Ceph version to install.

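For example, to install the default Nautilus release explicitly:

----
# install the Ceph Nautilus packages on this node
pveceph install --version nautilus
----
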
*pveceph lspools*

An alias for 'pveceph pool ls'.

*pveceph mds create* `[OPTIONS]`

Create a Ceph Metadata Server (MDS).

`--hotstandby` `<boolean>` ('default =' `0`)::

Determines whether a ceph-mds daemon should poll and replay the log of an active MDS. Allows a faster switch on MDS failure, but needs more idle resources.

`--name` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ('default =' `nodename`)::

The ID for the MDS; when omitted, the same as the nodename.

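A minimal usage sketch; the MDS ID is illustrative:

----
# create an MDS with an explicit ID that replays the active MDS log,
# allowing a faster takeover on failure
pveceph mds create --name mds1 --hotstandby 1
----
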
*pveceph mds destroy* `<name>`

Destroy a Ceph Metadata Server.

`<name>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The name (ID) of the MDS.

*pveceph mgr create* `[OPTIONS]`

Create a Ceph Manager.

`--id` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the manager; when omitted, the same as the nodename.

*pveceph mgr destroy* `<id>`

Destroy a Ceph Manager.

`<id>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID of the manager.

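A minimal usage sketch for both commands; the manager ID is illustrative:

----
# create a manager with an explicit ID, then remove it again
pveceph mgr create --id mgr1
pveceph mgr destroy mgr1
----
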
*pveceph mon create* `[OPTIONS]`

Create a Ceph Monitor and Manager.

`--mon-address` `<string>` ::

Overwrites the autodetected monitor IP address. Must be in the public network of Ceph.

`--monid` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

The ID for the monitor; when omitted, the same as the nodename.

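A minimal usage sketch; the address is an illustrative IP inside the Ceph public network:

----
# create a monitor on this node, overriding the autodetected address
pveceph mon create --mon-address 10.10.10.11
----
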
*pveceph mon destroy* `<monid>`

Destroy a Ceph Monitor and Manager.

`<monid>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::

Monitor ID.

*pveceph osd create* `<dev>` `[OPTIONS]`

Create an OSD.

`<dev>`: `<string>` ::

Block device name.

`--db_dev` `<string>` ::

Block device name for block.db.

`--db_size` `<number> (1 - N)` ('default =' `bluestore_block_db_size or 10% of OSD size`)::

Size in GiB for block.db.
+
NOTE: Requires option(s): `db_dev`

`--encrypted` `<boolean>` ('default =' `0`)::

Enables encryption of the OSD.

`--wal_dev` `<string>` ::

Block device name for block.wal.

`--wal_size` `<number> (0.5 - N)` ('default =' `bluestore_block_wal_size or 1% of OSD size`)::

Size in GiB for block.wal.
+
NOTE: Requires option(s): `wal_dev`

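A minimal usage sketch; the device paths and DB size are illustrative and assume a data disk plus a separate, typically faster, device for the block.db:

----
# create an encrypted OSD on /dev/sdb, placing a 50 GiB block.db
# on a separate device
pveceph osd create /dev/sdb --db_dev /dev/nvme0n1 --db_size 50 --encrypted 1
----
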
*pveceph osd destroy* `<osdid>` `[OPTIONS]`

Destroy an OSD.

`<osdid>`: `<integer>` ::

OSD ID.

`--cleanup` `<boolean>` ('default =' `0`)::

If set, we remove partition table entries.

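A minimal usage sketch; the OSD ID is illustrative:

----
# destroy OSD 3 and also remove its partition table entries
pveceph osd destroy 3 --cleanup 1
----
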
*pveceph pool create* `<name>` `[OPTIONS]`

Create a Ceph pool.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--add_storages` `<boolean>` ::

Configure VM and CT storage using the new pool.

`--application` `<cephfs | rbd | rgw>` ::

The application of the pool, 'rbd' by default.

`--crush_rule` `<string>` ::

The rule to use for mapping object placement in the cluster.

`--min_size` `<integer> (1 - 7)` ('default =' `2`)::

Minimum number of replicas per object.

`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::

Number of placement groups.

`--size` `<integer> (1 - 7)` ('default =' `3`)::

Number of replicas per object.

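A minimal usage sketch; the pool name and PG count are illustrative:

----
# create a 3/2 replicated RBD pool and add it as VM/CT storage
pveceph pool create vmpool --size 3 --min_size 2 --pg_num 128 \
    --application rbd --add_storages 1
----
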
*pveceph pool destroy* `<name>` `[OPTIONS]`

Destroy a pool.

`<name>`: `<string>` ::

The name of the pool. It must be unique.

`--force` `<boolean>` ('default =' `0`)::

If true, destroys the pool even if it is in use.

`--remove_storages` `<boolean>` ('default =' `0`)::

Remove all pveceph-managed storages configured for this pool.

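A minimal usage sketch; the pool name is illustrative:

----
# destroy the pool and drop any pveceph-managed storage entries for it
pveceph pool destroy vmpool --remove_storages 1
----
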
*pveceph pool ls* `[FORMAT_OPTIONS]`

List all pools.

*pveceph purge* `[OPTIONS]`

Destroy Ceph related data and configuration files.

`--crash` `<boolean>` ::

Additionally purge Ceph crash logs, /var/lib/ceph/crash.

`--logs` `<boolean>` ::

Additionally purge Ceph logs, /var/log/ceph.

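A minimal usage sketch; this removes Ceph related data and configuration from the node, including logs and crash reports:

----
# purge Ceph from this node, including logs and crash reports
pveceph purge --logs 1 --crash 1
----
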
*pveceph start* `[OPTIONS]`

Start Ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.

*pveceph status*

Get Ceph status.

*pveceph stop* `[OPTIONS]`

Stop Ceph services.

`--service` `(ceph|mon|mds|osd|mgr)(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)?` ('default =' `ceph.target`)::

Ceph service name.

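A minimal usage sketch for start and stop; the service name is illustrative, and omitting `--service` acts on the default `ceph.target`:

----
# stop a single OSD daemon, then start all Ceph services again
pveceph stop --service osd.3
pveceph start
----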