# ceph/qa/tasks/lost_unfound.py
"""
Lost_unfound
"""
import logging
import time
import ceph_manager
from teuthology import misc as teuthology
from teuthology.orchestra import run
from util.rados import rados

log = logging.getLogger(__name__)

def task(ctx, config):
    """
    Test handling of lost objects.

    A pretty rigid cluster is brought up and tested by this task
    """
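    # the gist of the test (assembled by the steps below): with three
    # OSDs, write objects while osd.0 is down, then mark osd.1 -- by then
    # the only OSD holding those writes -- as lost, leaving them "unfound"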
    POOL = 'unfound_pool'
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'lost_unfound task only accepts a dict for configuration'
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()

    manager = ceph_manager.CephManager(
        mon,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
        )

    while len(manager.get_osd_status()['up']) < 3:
        time.sleep(10)

    manager.wait_for_clean()

    manager.create_pool(POOL)

    # something that is always there
    dummyfile = '/etc/fstab'

    # take an osd out until the very end
    manager.kill_osd(2)
    manager.mark_down_osd(2)
    manager.mark_out_osd(2)
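    # with osd.2 down and out, everything written below lands on osd.0
    # and osd.1 only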

    # kludge to make sure they get a map
    rados(ctx, mon, ['-p', POOL, 'put', 'dummy', dummyfile])

    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
    manager.wait_for_recovery()

    # create old objects
    for f in range(1, 10):
        rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile])
        rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile])
        rados(ctx, mon, ['-p', POOL, 'rm', 'existed_%d' % f])
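    # baseline: existing_* objects are fully replicated; existed_* were
    # created and deleted, so only their pg log entries remain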

    # delay recovery, and make the pg log very long (to prevent backfill)
    manager.raw_cluster_cmd(
            'tell', 'osd.1',
            'injectargs',
            '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
            )
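    # the huge pg log keeps recovery log-based: the primary tracks the
    # exact set of missing objects, and backfill, which copies the whole
    # pg wholesale, never kicks in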

    manager.kill_osd(0)
    manager.mark_down_osd(0)

    for f in range(1, 10):
        rados(ctx, mon, ['-p', POOL, 'put', 'new_%d' % f, dummyfile])
        rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile])
        rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile])
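    # osd.1 now holds the only copy of new_*, the re-created existed_*,
    # and the newest version of existing_*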

    # bring osd.0 back up, let it peer, but don't replicate the new
    # objects...
    log.info('osd.0 command_args is %s' % 'foo')
    log.info(ctx.daemons.get_daemon('osd', 0).command_args)
    ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([
            '--osd-recovery-delay-start', '1000'
            ])
    manager.revive_osd(0)
    manager.mark_in_osd(0)
    manager.wait_till_osd_is_up(0)

    manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.wait_till_active()

    # take out osd.1 and the only copy of those objects.
    manager.kill_osd(1)
    manager.mark_down_osd(1)
    manager.mark_out_osd(1)
    manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
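    # 'osd lost' declares osd.1's data permanently gone; peering may then
    # mark objects whose only copy lived there as unfound instead of
    # blocking I/O while waiting for osd.1 to come back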

    # bring up osd.2 so that things would otherwise, in theory, recover fully
    manager.revive_osd(2)
    manager.mark_in_osd(2)
    manager.wait_till_osd_is_up(2)

    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
    manager.wait_till_active()
    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')

    # verify that there are unfound objects
    unfound = manager.get_num_unfound_objects()
    log.info("there are %d unfound objects" % unfound)
    assert unfound

    testdir = teuthology.get_testdir(ctx)
    procs = []
    if config.get('parallel_bench', True):
        procs.append(mon.run(
            args=[
                "/bin/sh", "-c",
                " ".join(['adjust-ulimits',
                          'ceph-coverage',
                          '{tdir}/archive/coverage',
                          'rados',
                          '--no-log-to-stderr',
                          '--name', 'client.admin',
                          '-b', str(4 << 10),
                          '-p', POOL,
                          '-t', '20',
                          'bench', '240', 'write',
                          ]).format(tdir=testdir),
            ],
            logger=log.getChild('radosbench.{id}'.format(id='client.admin')),
            stdin=run.PIPE,
            wait=False))
    time.sleep(10)
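    # when parallel_bench is enabled, the radosbench above keeps client
    # writes flowing while the unfound objects are handled below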

    # mark stuff lost
    pgs = manager.get_pg_stats()
    for pg in pgs:
        if pg['stat_sum']['num_objects_unfound'] > 0:
            primary = 'osd.%d' % pg['acting'][0]

            # verify that I can list them directly from the osd
            log.info('listing missing/lost in %s state %s', pg['pgid'],
                     pg['state'])
            m = manager.list_pg_missing(pg['pgid'])
            assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
            num_unfound = 0
            for o in m['objects']:
                if len(o['locations']) == 0:
                    num_unfound += 1
            assert m['num_unfound'] == num_unfound
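            # an object is "unfound" when it is missing and has no known
            # location on any surviving OSD; the per-object tally must
            # match the pg's summary counter
            # 'mark_unfound_lost revert' (below) rolls each unfound object
            # back to the last version held by the surviving OSDs, and
            # forgets objects that did not exist before the lost writes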
            log.info("reverting unfound in %s on %s", pg['pgid'], primary)
            manager.raw_cluster_cmd('pg', pg['pgid'],
                                    'mark_unfound_lost', 'revert')
        else:
            log.info("no unfound in %s", pg['pgid'])
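    # kick_recovery_wq resets osd-recovery-delay-start (to 5s here) and
    # prods the recovery work queue, so the reverted pgs recover now
    # instead of waiting out the 1000s delay injected above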
    manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
    manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
    manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
    manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
    manager.wait_for_recovery()
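    # expected post-revert state, checked below: new_* and the re-created
    # existed_* reverted to nonexistence (reads fail), while existing_*
    # reverted to their pre-outage contents (reads succeed)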

    # verify result
    for f in range(1, 10):
        err = rados(ctx, mon, ['-p', POOL, 'get', 'new_%d' % f, '-'])
        assert err
        err = rados(ctx, mon, ['-p', POOL, 'get', 'existed_%d' % f, '-'])
        assert err
        err = rados(ctx, mon, ['-p', POOL, 'get', 'existing_%d' % f, '-'])
        assert not err

    # see if osd.1 can cope
    manager.revive_osd(1)
    manager.mark_in_osd(1)
    manager.wait_till_osd_is_up(1)
    manager.wait_for_clean()