# ceph/qa/tasks/resolve_stuck_peering.py
"""
Resolve stuck peering
"""
import logging
import time

from teuthology import misc as teuthology
from tasks.util.rados import rados

log = logging.getLogger(__name__)

def task(ctx, config):
    """
    Test handling resolve stuck peering

    requires 3 osds on a single test node
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'Resolve stuck peering only accepts a dict for config'

    manager = ctx.managers['ceph']

    # wait for all 3 osds to come up before starting
    while len(manager.get_osd_status()['up']) < 3:
        time.sleep(10)

    manager.wait_for_clean()
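    # (wait_for_clean blocks until every pg reports active+clean, so the
    # scenario below starts from a healthy cluster)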

    dummyfile = '/etc/fstab'
    dummyfile1 = '/etc/resolv.conf'

    # create a pool with a single pg
    pool = 'foo'
    log.info('creating pool foo')
    manager.raw_cluster_cmd('osd', 'pool', 'create', '%s' % pool, '1')

    # set min_size of the pool to 1
    # so that we can continue with I/O
    manager.set_pool_property(pool, "min_size", 1)

    osds = [0, 1, 2]

    primary = manager.get_pg_primary('foo', 0)
    log.info("primary osd is %d", primary)

    others = list(osds)
    others.remove(primary)

    log.info('writing initial objects')
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.keys()

    # create a batch of objects
    for i in range(100):
        rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])

    manager.wait_for_clean()
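    # every osd should now hold the existing_* objects; anything written
    # after this point will diverge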

    # kill other osds except primary
    log.info('killing other osds except primary')
    for i in others:
        manager.kill_osd(i)
        manager.mark_down_osd(i)

    # write objects that only the surviving primary will hold
    for i in range(100):
        rados(ctx, mon, ['-p', 'foo', 'put', 'new_%d' % i, dummyfile1])

    # now kill the primary as well
    manager.kill_osd(primary)
    manager.mark_down_osd(primary)

    # revive the two non-primary osds; without the dead primary's newer
    # objects the pg cannot finish peering
    for i in others:
        manager.revive_osd(i)

    # make sure that pg is down
    # Assuming pg number for single pg pool will start from 0
    pgnum = 0
    pgstr = manager.get_pgid(pool, pgnum)
    stats = manager.get_single_pg_stats(pgstr)
    print(stats['state'])

    timeout = 60
    start = time.time()

    while 'down' not in stats['state']:
        assert time.time() - start < timeout, \
            'failed to reach down state before timeout expired'
        stats = manager.get_single_pg_stats(pgstr)

    # mark the dead primary as lost so the pg can peer without it
    manager.raw_cluster_cmd('osd', 'lost', '%d' % primary,
                            '--yes-i-really-mean-it')

    # expect the pg status to be active+undersized+degraded
    # pg should recover and become active+clean within timeout
    stats = manager.get_single_pg_stats(pgstr)
    print(stats['state'])

    timeout = 60
    start = time.time()

    while manager.get_num_down():
        assert time.time() - start < timeout, \
            'failed to recover before timeout expired'

    manager.revive_osd(primary)