##
# Sample ceph ceph.conf file.
##
# This file defines cluster membership, the various locations
# that Ceph stores data, and any other runtime options.

# If a 'host' is defined for a daemon, the init.d start/stop script will
# verify that it matches the hostname (or else ignore it). If it is
# not defined, it is assumed that the daemon is intended to start on
# the current host (e.g., in a setup with a startup.conf on each
# node).

## Metavariables
# $cluster ; Expands to the Ceph Storage Cluster name. Useful
#          ; when running multiple Ceph Storage Clusters
#          ; on the same hardware.
#          ; Example: /etc/ceph/$cluster.keyring
#          ; (Default: ceph)
#
# $type    ; Expands to one of mds, osd, or mon, depending on
#          ; the type of the daemon at hand.
#          ; Example: /var/lib/ceph/$type
#
# $id      ; Expands to the daemon identifier. For osd.0, this
#          ; would be 0; for mds.a, it would be a.
#          ; Example: /var/lib/ceph/$type/$cluster-$id
#
# $host    ; Expands to the host name of the daemon at hand.
#
# $name    ; Expands to $type.$id.
#          ; Example: /var/run/ceph/$cluster-$name.asok

[global]
### http://docs.ceph.com/en/latest/rados/configuration/general-config-ref/

    ;fsid = {UUID} # use `uuidgen` to generate your own UUID
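    # An example of a filled-in fsid (the value below is illustrative
    # only; always generate your own with `uuidgen`):
    ;fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993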
    ;public network = 192.168.0.0/24
    ;cluster network = 192.168.0.0/24

    # Each running Ceph daemon has a running process identifier (PID) file.
    # The PID file is generated upon start-up.
    # Type: String (optional)
    # (Default: N/A). The default path is /var/run/$cluster/$name.pid.
    pid file = /var/run/ceph/$name.pid

    # If set, when the Ceph Storage Cluster starts, Ceph sets the max open fds
    # at the OS level (i.e., the max # of file descriptors).
    # It helps prevent Ceph OSD Daemons from running out of file descriptors.
    # Type: 64-bit Integer (optional)
    # (Default: 0)
    ;max open files = 131072


### http://docs.ceph.com/en/latest/rados/operations/
### http://docs.ceph.com/en/latest/rados/configuration/auth-config-ref/

    # If enabled, the Ceph Storage Cluster daemons (i.e., ceph-mon, ceph-osd,
    # and ceph-mds) must authenticate with each other.
    # Type: String (optional); Valid settings are "cephx" or "none".
    # (Default: cephx)
    auth cluster required = cephx

    # If enabled, the Ceph Storage Cluster daemons require Ceph Clients to
    # authenticate with the Ceph Storage Cluster in order to access Ceph
    # services.
    # Type: String (optional); Valid settings are "cephx" or "none".
    # (Default: cephx)
    auth service required = cephx

    # If enabled, the Ceph Client requires the Ceph Storage Cluster to
    # authenticate with the Ceph Client.
    # Type: String (optional); Valid settings are "cephx" or "none".
    # (Default: cephx)
    auth client required = cephx

    # If set to true, Ceph requires signatures on all message traffic between
    # the Ceph Client and the Ceph Storage Cluster, and between daemons
    # comprising the Ceph Storage Cluster.
    # Type: Boolean (optional)
    # (Default: false)
    ;cephx require signatures = true

    # The kernel RBD client does not support authentication yet:
    cephx cluster require signatures = true
    cephx service require signatures = false

    # The path to the keyring file.
    # Type: String (optional)
    # Default: /etc/ceph/$cluster.$name.keyring,/etc/ceph/$cluster.keyring,
    #          /etc/ceph/keyring,/etc/ceph/keyring.bin
    ;keyring = /etc/ceph/$cluster.$name.keyring


### http://docs.ceph.com/en/latest/rados/configuration/pool-pg-config-ref/


    ## Replication level, number of data copies.
    # Type: 32-bit Integer
    # (Default: 3)
    ;osd pool default size = 3

    ## Replication level in degraded state; must be less than the
    ## 'osd pool default size' value.
    # Sets the minimum number of written replicas for objects in the
    # pool in order to acknowledge a write operation to the client. If
    # the minimum is not met, Ceph will not acknowledge the write to the
    # client. This setting ensures a minimum number of replicas when
    # operating in degraded mode.
    # Type: 32-bit Integer
    # (Default: 0), which means no particular minimum. If 0, the minimum
    # is size - (size / 2).
    ;osd pool default min size = 2
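    # A worked example of the fallback formula above, assuming integer
    # division: with size = 3 and min size left at 0, the effective
    # minimum is 3 - (3 / 2) = 3 - 1 = 2.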

    ## Ensure you have a realistic number of placement groups. We recommend
    ## approximately 100 per OSD. E.g., total number of OSDs multiplied by 100
    ## divided by the number of replicas (i.e., osd pool default size). So for
    ## 10 OSDs and osd pool default size = 3, we'd recommend approximately
    ## (100 * 10) / 3 = 333
    ## always round to the nearest power of 2
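    ## e.g., continuing the example above: 333 rounds to 256, since
    ## 2^8 = 256 is nearer to 333 than 2^9 = 512.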

    # Description: The default number of placement groups for a pool. The
    #              default value is the same as pg_num with mkpool.
    # Type: 32-bit Integer
    # (Default: 8)
    ;osd pool default pg num = 128

    # Description: The default number of placement groups for placement for a
    #              pool. The default value is the same as pgp_num with mkpool.
    #              PG and PGP should be equal (for now).
    # Type: 32-bit Integer
    # (Default: 8)
    ;osd pool default pgp num = 128

    # The default CRUSH rule to use when creating a pool.
    # Type: 32-bit Integer
    # (Default: 0)
    ;osd pool default crush rule = 0

    # The bucket type to use for chooseleaf in a CRUSH rule.
    # Uses ordinal rank rather than name.
    # Type: 32-bit Integer
    # (Default: 1) Typically a host containing one or more Ceph OSD Daemons.
    ;osd crush chooseleaf type = 1


### http://docs.ceph.com/en/latest/rados/troubleshooting/log-and-debug/

    # The location of the logging file for your cluster.
    # Type: String
    # Required: No
    # Default: /var/log/ceph/$cluster-$name.log
    ;log file = /var/log/ceph/$cluster-$name.log

    # Determines if logging messages should appear in syslog.
    # Type: Boolean
    # Required: No
    # (Default: false)
    ;log to syslog = true


### http://docs.ceph.com/en/latest/rados/configuration/ms-ref/

    # Enable if you want your daemons to bind to IPv6 addresses instead of
    # IPv4 ones. (Not required if you specify a daemon or cluster IP.)
    # Type: Boolean
    # (Default: false)
    ;ms bind ipv6 = true

##################
## Monitors
## You need at least one. You need at least three if you want to
## tolerate any node failures, since a majority must stay up to keep
## quorum (e.g., 2 of 3). Always create an odd number.
[mon]
### http://docs.ceph.com/en/latest/rados/configuration/mon-config-ref/
### http://docs.ceph.com/en/latest/rados/configuration/mon-osd-interaction/

    # The IDs of initial monitors in a cluster during startup.
    # If specified, Ceph requires an odd number of monitors to form an
    # initial quorum (e.g., 3).
    # Type: String
    # (Default: None)
    ;mon initial members = mycephhost

    ;mon host = cephhost01,cephhost02
    ;mon addr = 192.168.0.101,192.168.0.102

    # The monitor's data location
    # Default: /var/lib/ceph/mon/$cluster-$id
    ;mon data = /var/lib/ceph/mon/$name

    # The clock drift in seconds allowed between monitors.
    # Type: Float
    # (Default: .050)
    ;mon clock drift allowed = .15

    # Exponential backoff for clock drift warnings
    # Type: Float
    # (Default: 5)
    ;mon clock drift warn backoff = 30 # Tell the monitor to back off from this warning for 30 seconds

    # The percentage of disk space used before an OSD is considered full.
    # Type: Float
    # (Default: .95)
    ;mon osd full ratio = .95

    # The percentage of disk space used before an OSD is considered nearfull.
    # Type: Float
    # (Default: .85)
    ;mon osd nearfull ratio = .85

    # The number of seconds Ceph waits before marking a Ceph OSD
    # Daemon "down" and "out" if it doesn't respond.
    # Type: 32-bit Integer
    # (Default: 600)
    ;mon osd down out interval = 600

    # The grace period in seconds before declaring unresponsive Ceph OSD
    # Daemons "down".
    # Type: 32-bit Integer
    # (Default: 900)
    ;mon osd report timeout = 300

### http://docs.ceph.com/en/latest/rados/troubleshooting/log-and-debug/

    # logging, for debugging monitor crashes, in order of
    # their likelihood of being helpful :)
    ;debug ms = 1
    ;debug mon = 20
    ;debug paxos = 20
    ;debug auth = 20


;[mon.alpha]
;    host = alpha
;    mon addr = 192.168.0.10:6789

;[mon.beta]
;    host = beta
;    mon addr = 192.168.0.11:6789

;[mon.gamma]
;    host = gamma
;    mon addr = 192.168.0.12:6789


##################
## Metadata servers
# You must deploy at least one metadata server to use CephFS. There is
# experimental support for running multiple metadata servers. Do not run
# multiple metadata servers in production.
[mds]
### http://docs.ceph.com/en/latest/cephfs/mds-config-ref/

    # where the mds keeps its secret encryption keys
    ;keyring = /var/lib/ceph/mds/$name/keyring

    # Determines whether a 'ceph-mds' daemon should poll and
    # replay the log of an active MDS (hot standby).
    # Type: Boolean
    # (Default: false)
    ;mds standby replay = true

    ; mds logging to debug issues.
    ;debug ms = 1
    ;debug mds = 20
    ;debug journaler = 20

    # The memory limit the MDS should enforce for its cache.
    # (Default: 1G)
    ;mds cache memory limit = 2G

;[mds.alpha]
;    host = alpha

;[mds.beta]
;    host = beta

##################
## osd
# You need at least one. Two or more if you want data to be replicated.
# Define as many as you like.
[osd]
### http://docs.ceph.com/en/latest/rados/configuration/osd-config-ref/

    # The path to the OSD's data.
    # You must create the directory when deploying Ceph.
    # You should mount a drive for OSD data at this mount point.
    # We do not recommend changing the default.
    # Type: String
    # Default: /var/lib/ceph/osd/$cluster-$id
    ;osd data = /var/lib/ceph/osd/$name

    ## You can change the number of recovery operations to speed up recovery
    ## or slow it down if your machines can't handle it

    # The number of active recovery requests per OSD at one time.
    # More requests will accelerate recovery, but the requests
    # place an increased load on the cluster.
    # Type: 32-bit Integer
    # (Default: 5)
    ;osd recovery max active = 3

    # The maximum number of backfills allowed to or from a single OSD.
    # Type: 64-bit Integer
    # (Default: 10)
    ;osd max backfills = 5

    # The maximum number of simultaneous scrub operations for a Ceph OSD Daemon.
    # Type: 32-bit Int
    # (Default: 1)
    ;osd max scrubs = 2

    # You may add settings for ceph-deploy so that it will create and mount
    # the correct type of file system. Remove the comment `#` character for
    # the following settings and replace the values in braces
    # with appropriate values, or leave the following settings commented
    # out to accept the default values.

    #osd mkfs type = {fs-type}
    #osd mkfs options {fs-type} = {mkfs options} # default for xfs is "-f"
    #osd mount options {fs-type} = {mount options} # default mount option is "rw, noatime"
    ;osd mkfs type = btrfs
    ;osd mount options btrfs = noatime,nodiratime

    ## Ideally, make this a separate disk or partition. A few
    ## hundred MB should be enough; more if you have fast or many
    ## disks. You can use a file under the osd data dir if need be
    ## (e.g. /data/$name/journal), but it will be slower than a
    ## separate disk or partition.
    # The path to the OSD's journal. This may be a path to a file or a block
    # device (such as a partition of an SSD). If it is a file, you must
    # create the directory to contain it.
    # We recommend using a drive separate from the osd data drive.
    # Type: String
    # Default: /var/lib/ceph/osd/$cluster-$id/journal
    ;osd journal = /var/lib/ceph/osd/$name/journal

    # Check log files for corruption. Can be computationally expensive.
    # Type: Boolean
    # (Default: false)
    ;osd check for log corruption = true

### http://docs.ceph.com/en/latest/rados/configuration/journal-ref/

    # The size of the journal in megabytes. If this is 0,
    # and the journal is a block device, the entire block device is used.
    # Since v0.54, this is ignored if the journal is a block device,
    # and the entire block device is used.
    # Type: 32-bit Integer
    # (Default: 5120)
    # Recommended: Begin with 1GB. Should be at least twice the product
    # of the expected speed multiplied by "filestore max sync interval".
    ;osd journal size = 2048 ; journal size, in megabytes
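    # A worked example of that recommendation, with illustrative figures:
    # at an expected 100 MB/s write speed and the default 5 s sync
    # interval, 2 * (100 MB/s * 5 s) = 1000 MB, so ~1 GB suffices.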

    ## If you want to run the journal on a tmpfs, disable DirectIO
    # Enables direct i/o to the journal.
    # Requires "journal block align" set to "true".
    # Type: Boolean
    # Required: Yes when using aio.
    # (Default: true)
    ;journal dio = false

    # osd logging to debug osd issues, in order of likelihood of being helpful
    ;debug ms = 1
    ;debug osd = 20
    ;debug filestore = 20
    ;debug journal = 20

### http://docs.ceph.com/en/latest/rados/configuration/filestore-config-ref/

    # The maximum interval in seconds for synchronizing the filestore.
    # Type: Double (optional)
    # (Default: 5)
    ;filestore max sync interval = 5

    # Enable snapshots for a btrfs filestore.
    # Type: Boolean
    # Required: No. Only used for btrfs.
    # (Default: true)
    ;filestore btrfs snap = false

    # Enables the filestore flusher.
    # Type: Boolean
    # Required: No
    # (Default: false)
    ;filestore flusher = true

    # Defines the maximum number of in-progress operations the file store
    # accepts before blocking on queuing new operations.
    # Type: Integer
    # Required: No. Minimal impact on performance.
    # (Default: 500)
    ;filestore queue max ops = 500

    ## Filestore and OSD settings can be tweaked to achieve better performance

### http://docs.ceph.com/en/latest/rados/configuration/filestore-config-ref/#misc

    # Min number of files in a subdir before merging into the parent.
    # NOTE: A negative value means subdir merging is disabled.
    # Type: Integer
    # Required: No
    # Default: -10
    ;filestore merge threshold = -10

    # filestore_split_multiple * abs(filestore_merge_threshold) * 16 is the
    # maximum number of files in a subdirectory before splitting into child
    # directories.
    # Type: Integer
    # Required: No
    # Default: 2
    ;filestore split multiple = 2
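    # With the defaults above: 2 * abs(-10) * 16 = 320 files in a
    # subdirectory before it splits into child directories.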

    # The number of filesystem operation threads that execute in parallel.
    # Type: Integer
    # Required: No
    # Default: 2
    ;filestore op threads = 4

    ## CRUSH

    # By default, OSDs update their details (location, weight and root) on the
    # CRUSH map during startup.
    # Type: Boolean
    # Required: No
    # (Default: true)
    ;osd crush update on start = false

;[osd.0]
;    host = delta

;[osd.1]
;    host = epsilon

;[osd.2]
;    host = zeta

;[osd.3]
;    host = eta


##################
## client settings
[client]

### http://docs.ceph.com/en/latest/rbd/rbd-config-ref/

    # Enable caching for RADOS Block Device (RBD).
    # Type: Boolean
    # Required: No
    # (Default: true)
    rbd cache = true

    # The RBD cache size in bytes.
    # Type: 64-bit Integer
    # Required: No
    # (Default: 32 MiB)
    ;rbd cache size = 33554432

    # The dirty limit in bytes at which the cache triggers write-back.
    # If 0, uses write-through caching.
    # Type: 64-bit Integer
    # Required: No
    # Constraint: Must be less than rbd cache size.
    # (Default: 24 MiB)
    ;rbd cache max dirty = 25165824

    # The dirty target before the cache begins writing data to the data storage.
    # Does not block writes to the cache.
    # Type: 64-bit Integer
    # Required: No
    # Constraint: Must be less than rbd cache max dirty.
    # (Default: 16 MiB)
    ;rbd cache target dirty = 16777216
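    # Sanity check on the sample values above (byte arithmetic):
    # 33554432 B = 32 MiB > 25165824 B = 24 MiB > 16777216 B = 16 MiB,
    # which satisfies cache size > max dirty > target dirty.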

    # The number of seconds dirty data is in the cache before writeback starts.
    # Type: Float
    # Required: No
    # (Default: 1.0)
    ;rbd cache max dirty age = 1.0

    # Start out in write-through mode, and switch to write-back after the
    # first flush request is received. Enabling this is a conservative but
    # safe setting in case VMs running on rbd are too old to send flushes,
    # like the virtio driver in Linux before 2.6.32.
    # Type: Boolean
    # Required: No
    # (Default: true)
    ;rbd cache writethrough until flush = true

    # The Ceph admin socket allows you to query a daemon via a socket
    # interface. From a client perspective this can be a virtual machine
    # using librbd.
    # Type: String
    # Required: No
    ;admin socket = /var/run/ceph/$cluster-$type.$id.$pid.$cctid.asok
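    # A minimal sketch of querying a daemon over its admin socket; the
    # socket path below is illustrative, match it to your actual .asok:
    #   ceph --admin-daemon /var/run/ceph/ceph-osd.0.asok help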


##################
## radosgw client settings
[client.radosgw.gateway]

### http://docs.ceph.com/en/latest/radosgw/config-ref/

    # Sets the location of the data files for Ceph Object Gateway.
    # You must create the directory when deploying Ceph.
    # We do not recommend changing the default.
    # Type: String
    # Default: /var/lib/ceph/radosgw/$cluster-$id
    ;rgw data = /var/lib/ceph/radosgw/$name

    # Client's hostname
    ;host = ceph-radosgw

    # where the radosgw keeps its secret encryption keys
    ;keyring = /etc/ceph/ceph.client.radosgw.keyring

    # FastCgiExternalServer uses this socket.
    # If you do not specify a socket path, Ceph Object Gateway will not run as
    # an external server. The path you specify here must be the same as the
    # path specified in the rgw.conf file.
    # Type: String
    # Default: None
    ;rgw socket path = /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock

    # The location of the logging file for your radosgw.
    # Type: String
    # Required: No
    # Default: /var/log/ceph/$cluster-$name.log
    ;log file = /var/log/ceph/client.radosgw.gateway.log

    # Enable 100-continue if it is operational.
    # Type: Boolean
    # Default: true
    ;rgw print continue = false

    # The DNS name of the served domain.
    # Type: String
    # Default: None
    ;rgw dns name = radosgw.ceph.internal