# ceph/qa/tasks/ceph.conf.template (ceph quincy 17.2.4)
[global]
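    # run daemons in place, track each one by pid file, and authenticate with cephx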
    chdir = ""
    pid file = /var/run/ceph/$cluster-$name.pid
    auth supported = cephx

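    # keep xattrs in omap so tests also pass on filesystems with small xattr limits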
    filestore xattr use omap = true

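    # tolerate up to 1s of monitor clock drift before complaining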
    mon clock drift allowed = 1.000

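    # chooseleaf type 0 (osd) lets replicas share a host, so single-node clusters work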
    osd crush chooseleaf type = 0
    auth debug = true

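    # assert on stale or buggy messenger traffic instead of tolerating it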
    ms die on old message = true
    ms die on bug = true

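    # lift the PG-per-OSD limit and the object-skew warning that small test pools would trip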
    mon max pg per osd = 10000    # >= luminous
    mon pg warn max object skew = 0

    # disable pg_autoscaler by default for new pools
    osd_pool_default_pg_autoscale_mode = off

    osd pool default size = 2

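    # allow the tuning the suite needs, and silence warnings it triggers on purpose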
    mon osd allow primary affinity = true
    mon osd allow pg remap = true
    mon warn on legacy crush tunables = false
    mon warn on crush straw calc version zero = false
    mon warn on no sortbitwise = false
    mon warn on osd down out interval zero = false
    mon warn on too few osds = false
    mon_warn_on_pool_pg_num_not_power_of_two = false
    mon_warn_on_pool_no_redundancy = false
    mon_allow_pool_size_one = true

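    # k=2 m=1 with an osd failure domain fits even a three-OSD test cluster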
    osd pool default erasure code profile = "plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd"

    osd default data pool replay window = 5

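    # QA jobs create and destroy pools constantly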
    mon allow pool delete = true

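    # verbose cluster log, assert on unclean shutdown, keep health detail out of the clog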
    mon cluster log file level = debug
    debug asserts on shutdown = true
    mon health detail to clog = false

[osd]
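    # a tiny 100 MB journal keeps test disks small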
    osd journal size = 100

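    # scrub whenever load is below 5.0, and at least every 10 minutes regardless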
    osd scrub load threshold = 5.0
    osd scrub max interval = 600

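    # recover in small 1 MB chunks to exercise the recovery path more often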
    osd recover clone overlap = true
    osd recovery max chunk = 1048576

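    # extra OSD sanity checks that are too costly for production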
    osd debug shutdown = true
    osd debug op order = true
    osd debug verify stray on activate = true

    osd open classes on start = true
    osd debug pg log writeout = true

    osd deep scrub update digest min age = 30

    osd map max advance = 10

    journal zero on create = true

    filestore ondisk finisher threads = 3
    filestore apply finisher threads = 3

    bdev debug aio = true
    osd debug misdirected ops = true

[mgr]
    debug ms = 1
    debug mgr = 20
    debug mon = 20
    debug auth = 20
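    # thresholds lowered so reweight-by-utilization can act on tiny test pools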
    mon reweight min pgs per osd = 4
    mon reweight min bytes per osd = 10
    mgr/telemetry/nag = false

[mon]
    debug ms = 1
    debug mon = 20
    debug paxos = 20
    debug auth = 20
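    # test nodes run close to full; only warn below 5% free mon disk space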
    mon data avail warn = 5
    mon mgr mkfs grace = 240
    mon reweight min pgs per osd = 4
    mon osd reporter subtree level = osd
    mon osd prime pg temp = true
    mon reweight min bytes per osd = 10

    # rotate auth tickets quickly to exercise renewal paths
    auth mon ticket ttl = 660        # 11m
    auth service ticket ttl = 240    # 4m

    # don't complain about insecure global_id in the test suite
    mon_warn_on_insecure_global_id_reclaim = false
    mon_warn_on_insecure_global_id_reclaim_allowed = false

    # the default grace of 1m isn't quite enough
    mon_down_mkfs_grace = 2m

    mon_warn_on_filestore_osds = false

[client]
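    # enable RGW caching and the ops/usage logs that the rgw suites exercise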
    rgw cache enabled = true
    rgw enable ops log = true
    rgw enable usage log = true
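    # $pid in the paths keeps concurrent client processes from clobbering each other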
    log file = /var/log/ceph/$cluster-$name.$pid.log
    admin socket = /var/run/ceph/$cluster-$name.$pid.asok