import logging
import time

import ceph_manager
from teuthology.orchestra import run
from teuthology import misc as teuthology
from util.rados import rados

log = logging.getLogger(__name__)


def task(ctx, config):
    """
    Test handling of lost objects on an ec pool.

    A pretty rigid cluster is brought up and tested by this task.
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'lost_unfound task only accepts a dict for configuration'
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()

    manager = ceph_manager.CephManager(
        mon,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
        )

    manager.wait_for_clean()
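
    # build the erasure-code profile for the test pool; the task config can
    # override the defaults via the 'erasure_code_profile' key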
    profile = config.get('erasure_code_profile', {
        'k': '2',
        'm': '2',
        'ruleset-failure-domain': 'osd'
    })
    profile_name = profile.get('name', 'lost_unfound')
    manager.create_erasure_code_profile(profile_name, profile)
    pool = manager.create_pool_with_unique_name(
        erasure_code_profile_name=profile_name,
        min_size=2)

    # something that is always there, readable and never empty
    dummyfile = '/etc/group'

    # kludge to make sure they get a map
    rados(ctx, mon, ['-p', pool, 'put', 'dummy', dummyfile])

    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
    manager.wait_for_recovery()
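
    # seed the pool: the 'existing_*' objects persist, while the 'existed_*'
    # objects are written and then removed so their deletions land in the
    # pg log as well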
    for f in range(1, 10):
        rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile])
        rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile])
        rados(ctx, mon, ['-p', pool, 'rm', 'existed_%d' % f])

    # delay recovery, and make the pg log very long (to prevent backfill)
    manager.raw_cluster_cmd(
        'tell', 'osd.1',
        'injectargs',
        '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
        )

    manager.kill_osd(0)
    manager.mark_down_osd(0)
    manager.kill_osd(3)
    manager.mark_down_osd(3)
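
    # with osd.0 and osd.3 down, write new objects and rewrite the old ones;
    # these writes only reach the shards that are still up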
    for f in range(1, 10):
        rados(ctx, mon, ['-p', pool, 'put', 'new_%d' % f, dummyfile])
        rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile])
        rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile])

    # take out osd.1 and a necessary shard of those objects.
    manager.kill_osd(1)
    manager.mark_down_osd(1)
    manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
    manager.revive_osd(0)
    manager.wait_till_osd_is_up(0)
    manager.revive_osd(3)
    manager.wait_till_osd_is_up(3)
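
    # flush pg stats from the surviving osds so the monitor's view is
    # current before and after peering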
    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
    manager.wait_till_active()
    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')

    # verify that there are unfound objects
    unfound = manager.get_num_unfound_objects()
    log.info("there are %d unfound objects" % unfound)
    assert unfound
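
    # optionally run a rados bench in the background while the pgs are
    # repaired, to exercise the pool under client load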
    testdir = teuthology.get_testdir(ctx)
    procs = []
    if config.get('parallel_bench', True):
        procs.append(mon.run(
            args=[
                "/bin/sh", "-c",
                " ".join(['adjust-ulimits',
                          'ceph-coverage',
                          '{tdir}/archive/coverage',
                          'rados',
                          '--no-log-to-stderr',
                          '--name', 'client.admin',
                          '-b', str(4 << 10),
                          '-p', pool,
                          '-t', '20',
                          'bench', '240', 'write',
                          ]).format(tdir=testdir),
            ],
            logger=log.getChild('radosbench.{id}'.format(id='client.admin')),
            stdin=run.PIPE,
            wait=False
        ))
    time.sleep(10)

    # mark the unfound objects lost
    pgs = manager.get_pg_stats()
    for pg in pgs:
        if pg['stat_sum']['num_objects_unfound'] > 0:
            # verify that we can list them directly from the osd
            log.info('listing missing/lost in %s state %s', pg['pgid'],
                     pg['state'])
            m = manager.list_pg_missing(pg['pgid'])
            assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']

            log.info("deleting unfound in %s", pg['pgid'])
            manager.raw_cluster_cmd('pg', pg['pgid'],
                                    'mark_unfound_lost', 'delete')
        else:
            log.info("no unfound in %s", pg['pgid'])
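
    # kick the recovery work queues so recovery starts right away instead of
    # waiting out the injected osd-recovery-delay-start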
    manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
    manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
    manager.raw_cluster_cmd('tell', 'osd.3', 'debug', 'kick_recovery_wq', '5')
    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
    manager.wait_for_recovery()

    if not config.get('parallel_bench', True):
        time.sleep(20)

    # verify result
    for f in range(1, 10):
        err = rados(ctx, mon, ['-p', pool, 'get', 'new_%d' % f, '-'])
        assert err
        err = rados(ctx, mon, ['-p', pool, 'get', 'existed_%d' % f, '-'])
        assert err
        err = rados(ctx, mon, ['-p', pool, 'get', 'existing_%d' % f, '-'])
        assert err

    # see if osd.1 can cope
    manager.revive_osd(1)
    manager.wait_till_osd_is_up(1)
    manager.wait_for_clean()
    run.wait(procs)