]> git.proxmox.com Git - pve-docs.git/blame_incremental - pveceph.1-synopsis.adoc
basic network reload documentation
[pve-docs.git] / pveceph.1-synopsis.adoc
... / ...
CommitLineData
1*pveceph* `<COMMAND> [ARGS] [OPTIONS]`
2
3*pveceph createmgr*
4
5An alias for 'pveceph mgr create'.
6
7*pveceph createmon*
8
9An alias for 'pveceph mon create'.
10
11*pveceph createosd*
12
13An alias for 'pveceph osd create'.
14
15*pveceph createpool*
16
17An alias for 'pveceph pool create'.
18
19*pveceph destroymgr*
20
21An alias for 'pveceph mgr destroy'.
22
23*pveceph destroymon*
24
25An alias for 'pveceph mon destroy'.
26
27*pveceph destroyosd*
28
29An alias for 'pveceph osd destroy'.
30
31*pveceph destroypool*
32
33An alias for 'pveceph pool destroy'.
34
35*pveceph fs create* `[OPTIONS]`
36
37Create a Ceph filesystem
38
39`--add-storage` `<boolean>` ('default =' `0`)::
40
41Configure the created CephFS as storage for this cluster.
42
43`--name` `<string>` ('default =' `cephfs`)::
44
45The ceph filesystem name.
46
47`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::
48
49Number of placement groups for the backing data pool. The metadata pool will use a quarter of this.
50
51*pveceph help* `[OPTIONS]`
52
53Get help about specified command.
54
55`--extra-args` `<array>` ::
56
57Shows help for a specific command
58
59`--verbose` `<boolean>` ::
60
61Verbose output format.
62
63*pveceph init* `[OPTIONS]`
64
65Create initial ceph default configuration and setup symlinks.
66
67`--cluster-network` `<string>` ::
68
69Declare a separate cluster network. OSDs will route heartbeat, object replication and recovery traffic over it.
70+
71NOTE: Requires option(s): `network`
72
73`--disable_cephx` `<boolean>` ('default =' `0`)::
74
75Disable cephx authentication.
76+
77WARNING: cephx is a security feature protecting against man-in-the-middle attacks. Only consider disabling cephx if your network is private!
78
79`--min_size` `<integer> (1 - 7)` ('default =' `2`)::
80
81Minimum number of available replicas per object to allow I/O
82
83`--network` `<string>` ::
84
85Use specific network for all ceph related traffic
86
87`--pg_bits` `<integer> (6 - 14)` ('default =' `6`)::
88
89Placement group bits, used to specify the default number of placement groups.
90+
91NOTE: 'osd pool default pg num' does not work for default pools.
92
93`--size` `<integer> (1 - 7)` ('default =' `3`)::
94
95Targeted number of replicas per object
96
97*pveceph install* `[OPTIONS]`
98
99Install ceph related packages.
100
101`--version` `<luminous | nautilus>` ('default =' `nautilus`)::
102
103Ceph version to install.
104
105*pveceph lspools*
106
107An alias for 'pveceph pool ls'.
108
109*pveceph mds create* `[OPTIONS]`
110
111Create Ceph Metadata Server (MDS)
112
113`--hotstandby` `<boolean>` ('default =' `0`)::
114
115Determines whether a ceph-mds daemon should poll and replay the log of an active MDS. Faster switch on MDS failure, but needs more idle resources.
116
117`--name` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ('default =' `nodename`)::
118
119The ID for the mds, when omitted the same as the nodename
120
121*pveceph mds destroy* `<name>`
122
123Destroy Ceph Metadata Server
124
125`<name>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::
126
127The name (ID) of the mds
128
129*pveceph mgr create* `[OPTIONS]`
130
131Create Ceph Manager
132
133`--id` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::
134
135The ID for the manager, when omitted the same as the nodename
136
137*pveceph mgr destroy* `<id>`
138
139Destroy Ceph Manager.
140
141`<id>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::
142
143The ID of the manager
144
145*pveceph mon create* `[OPTIONS]`
146
147Create Ceph Monitor and Manager
148
149`--mon-address` `<string>` ::
150
151Overwrites autodetected monitor IP address. Must be in the public network of ceph.
152
153`--monid` `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::
154
155The ID for the monitor, when omitted the same as the nodename
156
157*pveceph mon destroy* `<monid>`
158
159Destroy Ceph Monitor and Manager.
160
161`<monid>`: `[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?` ::
162
163Monitor ID
164
165*pveceph osd create* `<dev>` `[OPTIONS]`
166
167Create OSD
168
169`<dev>`: `<string>` ::
170
171Block device name.
172
173`--db_dev` `<string>` ::
174
175Block device name for block.db.
176
177`--db_size` `<number> (1 - N)` ('default =' `bluestore_block_db_size or 10% of OSD size`)::
178
179Size in GiB for block.db.
180+
181NOTE: Requires option(s): `db_dev`
182
183`--encrypted` `<boolean>` ('default =' `0`)::
184
185Enables encryption of the OSD.
186
187`--wal_dev` `<string>` ::
188
189Block device name for block.wal.
190
191`--wal_size` `<number> (0.5 - N)` ('default =' `bluestore_block_wal_size or 1% of OSD size`)::
192
193Size in GiB for block.wal.
194+
195NOTE: Requires option(s): `wal_dev`
196
197*pveceph osd destroy* `<osdid>` `[OPTIONS]`
198
199Destroy OSD
200
201`<osdid>`: `<integer>` ::
202
203OSD ID
204
205`--cleanup` `<boolean>` ('default =' `0`)::
206
207If set, we remove partition table entries.
208
209*pveceph pool create* `<name>` `[OPTIONS]`
210
211Create POOL
212
213`<name>`: `<string>` ::
214
215The name of the pool. It must be unique.
216
217`--add_storages` `<boolean>` ::
218
219Configure VM and CT storage using the new pool.
220
221`--application` `<cephfs | rbd | rgw>` ::
222
223The application of the pool, 'rbd' by default.
224
225`--crush_rule` `<string>` ::
226
227The rule to use for mapping object placement in the cluster.
228
229`--min_size` `<integer> (1 - 7)` ('default =' `2`)::
230
231Minimum number of replicas per object
232
233`--pg_num` `<integer> (8 - 32768)` ('default =' `128`)::
234
235Number of placement groups.
236
237`--size` `<integer> (1 - 7)` ('default =' `3`)::
238
239Number of replicas per object
240
241*pveceph pool destroy* `<name>` `[OPTIONS]`
242
243Destroy pool
244
245`<name>`: `<string>` ::
246
247The name of the pool. It must be unique.
248
249`--force` `<boolean>` ('default =' `0`)::
250
251If true, destroys pool even if in use
252
253`--remove_storages` `<boolean>` ('default =' `0`)::
254
255Remove all pveceph-managed storages configured for this pool
256
257*pveceph pool ls*
258
259List all pools.
260
261*pveceph purge*
262
263Destroy ceph related data and configuration files.
264
265*pveceph start* `[<service>]`
266
267Start ceph services.
268
269`<service>`: `(ceph|mon|mds|osd|mgr)\.[A-Za-z0-9\-]{1,32}` ('default =' `ceph.target`)::
270
271Ceph service name.
272
273*pveceph status*
274
275Get ceph status.
276
277*pveceph stop* `[<service>]`
278
279Stop ceph services.
280
281`<service>`: `(ceph|mon|mds|osd|mgr)\.[A-Za-z0-9\-]{1,32}` ('default =' `ceph.target`)::
282
283Ceph service name.
284
285