]>
Commit | Line | Data |
---|---|---|
f67539c2 TL |
1 | import contextlib |
2 | import ipaddress | |
3 | import logging | |
4 | import re | |
5 | ||
b3b6e05e | 6 | from teuthology import misc as teuthology |
f67539c2 TL |
7 | from teuthology.config import config as teuth_config |
8 | ||
9 | log = logging.getLogger(__name__) | |
10 | ||
11 | ||
def subst_vip(ctx, cmd):
    """
    Expand VIP placeholders in *cmd* and return the resulting string.

    Recognized markers:
      {{VIPn}}        -> the n'th allocated virtual IP (ctx.vip['vips'][n])
      {{VIPPREFIXLEN}} -> the virtual subnet's prefix length
      {{VIPSUBNET}}    -> the virtual subnet's network address

    A {{VIPn}} referencing a VIP we did not allocate is left in place and
    logged as a warning.
    """
    for token, digits in re.findall(r'({{VIP(\d+)}})', cmd):
        n = int(digits)
        if n < len(ctx.vip["vips"]):
            cmd = cmd.replace(token, str(ctx.vip["vips"][n]))
        else:
            log.warning(f'no VIP{n} (we have {len(ctx.vip["vips"])})')

    if '{{VIPPREFIXLEN}}' in cmd:
        cmd = cmd.replace('{{VIPPREFIXLEN}}', str(ctx.vip["vnet"].prefixlen))

    if '{{VIPSUBNET}}' in cmd:
        cmd = cmd.replace('{{VIPSUBNET}}', str(ctx.vip["vnet"].network_address))

    return cmd
28 | ||
29 | ||
def echo(ctx, config):
    """
    Debug helper: log the VIP-substituted config string, once per remote.
    """
    for _remote in ctx.cluster.remotes.keys():
        log.info(subst_vip(ctx, config))
36 | ||
37 | ||
b3b6e05e TL |
def exec(ctx, config):
    """
    This is similar to the standard 'exec' task, but does the VIP substitutions.

    config maps role (or 'all-roles' / 'all-hosts') to a list of shell
    commands; each command is run under sudo bash -ex on the matching remote
    after $TESTDIR and {{VIP...}} markers are substituted.
    """
    assert isinstance(config, dict), "task exec got invalid config"

    testdir = teuthology.get_testdir(ctx)

    # Expand the 'all-roles' / 'all-hosts' shorthands into a per-role dict.
    if 'all-roles' in config and len(config) == 1:
        a = config['all-roles']
        roles = teuthology.all_roles(ctx.cluster)
        config = dict((id_, a) for id_ in roles if not id_.startswith('host.'))
    elif 'all-hosts' in config and len(config) == 1:
        a = config['all-hosts']
        roles = teuthology.all_roles(ctx.cluster)
        config = dict((id_, a) for id_ in roles if id_.startswith('host.'))

    for role, ls in config.items():
        (remote,) = ctx.cluster.only(role).remotes.keys()
        log.info('Running commands on role %s host %s', role, remote.name)
        for c in ls:
            # BUG FIX: str.replace returns a new string; the original
            # discarded the result, so '$TESTDIR' was never expanded.
            c = c.replace('$TESTDIR', testdir)
            remote.run(
                args=[
                    'sudo',
                    'TESTDIR={tdir}'.format(tdir=testdir),
                    'bash',
                    '-ex',
                    '-c',
                    subst_vip(ctx, c)],
                )
69 | ||
70 | ||
f67539c2 TL |
def map_vips(mip, count):
    """
    Map machine IP *mip* into its virtual subnet and pick *count* virtual IPs.

    Scans the 'vip' mappings in teuthology.yaml for a machine_subnet that
    contains mip, then takes the host at mip's offset from each of the first
    *count* machine-subnet-sized slices of the virtual_subnet.

    Returns (virtual_network, [ip, ...]) on success, or None if no mapping
    matches (or a mapping is misconfigured).
    """
    for mapping in teuth_config.get('vip', []):
        mnet = ipaddress.ip_network(mapping['machine_subnet'])
        vnet = ipaddress.ip_network(mapping['virtual_subnet'])
        # the virtual subnet must be strictly larger than the machine subnet
        if vnet.prefixlen >= mnet.prefixlen:
            log.error(f"virtual_subnet {vnet} prefix >= machine_subnet {mnet} prefix")
            return None
        if mip not in mnet:
            continue
        pos = list(mnet.hosts()).index(mip)
        log.info(f"{mip} in {mnet}, pos {pos}")
        picked = []
        remaining = count
        for sub in vnet.subnets(new_prefix=mnet.prefixlen):
            picked.append(list(sub.hosts())[pos])
            remaining -= 1
            if remaining == 0:
                break
        return vnet, picked
    return None
89 | ||
90 | ||
@contextlib.contextmanager
def task(ctx, config):
    """
    Set up a virtual network and allocate virtual IP(s) for each machine.

    The strategy here is to set up a private virtual subnet that is larger than
    the subnet the machine(s) exist in, and allocate virtual IPs from that pool.

    - The teuthology.yaml must include a section like::

        vip:
          - machine_subnet: 172.21.0.0/20
            virtual_subnet: 10.0.0.0/16

      At least one item's machine_subnet should map the subnet the test machine's
      primary IP lives in (the one DNS resolves to). The virtual_subnet must have a
      shorter prefix (i.e., larger than the machine_subnet). If there are multiple
      machine_subnets, they cannot map into the same virtual_subnet.

    - Each machine gets an IP in the virtual_subnet statically configured by the vip
      task. This lets all test machines reach each other and (most importantly) any
      virtual IPs.

    - 1 or more virtual IPs are then mapped for the task. These IPs are chosen based
      on one of the remotes. This uses a lot of network space but it avoids any
      conflicts between tests.

    To use a virtual IP, the {{VIP0}}, {{VIP1}}, etc. substitutions can be used.

    {{VIPSUBNET}} is the virtual_subnet address (10.0.0.0 in the example).

    {{VIPPREFIXLEN}} is the virtual_subnet prefix (16 in the example).

    These substitutions work for vip.echo, and (at the time of writing) cephadm.apply
    and cephadm.shell.
    """
    if config is None:
        config = {}
    count = config.get('count', 1)

    ctx.vip_static = {}
    ctx.vip = {}

    log.info("Allocating static IPs for each host...")
    for remote in ctx.cluster.remotes.keys():
        ip = remote.ssh.get_transport().getpeername()[0]
        log.info(f'peername {ip}')
        mip = ipaddress.ip_address(ip)
        # request count + 1 addresses: one static per-host IP plus the VIPs
        mapped = map_vips(mip, count + 1)
        if mapped is None:
            # BUG FIX: map_vips returns None when no machine_subnet matches;
            # the bare tuple unpack used to die with an opaque TypeError.
            raise RuntimeError(
                f'no virtual subnet mapping found for {ip}; '
                'check the "vip" section of teuthology.yaml')
        vnet, vips = mapped
        static = vips.pop(0)
        log.info(f"{remote.hostname} static {static}, vnet {vnet}")

        if not ctx.vip:
            # do this only once (use the first remote we see), since we only need 1
            # set of virtual IPs, regardless of how many remotes we have.
            # BUG FIX: this log call was missing its f-prefix and printed the
            # literal placeholder text; use lazy %-formatting instead.
            log.info("VIPs are %s", list(map(str, vips)))
            ctx.vip = {
                'vnet': vnet,
                'vips': vips,
            }
        else:
            # all remotes must be in the same virtual network...
            assert vnet == ctx.vip['vnet']

        # pick the interface carrying the route whose source address is our
        # primary IP -- that is where the static virtual IP must be added
        p = re.compile(r'^(\S+) dev (\S+) (.*)scope link (.*)src (\S+)')
        iface = None
        for line in remote.sh(['sudo', 'ip', 'route', 'ls']).splitlines():
            m = p.findall(line)
            if not m:
                continue
            route_iface = m[0][1]
            route_ip = m[0][4]
            if route_ip == ip:
                iface = route_iface
                break

        if not iface:
            # best-effort: skip this host rather than failing the whole task
            log.error(f"Unable to find {remote.hostname} interface for {ip}")
            continue

        # statically configure this host's address in the virtual subnet
        log.info(f"Configuring {static} on {remote.hostname} iface {iface}...")
        remote.sh(['sudo',
                   'ip', 'addr', 'add',
                   str(static) + '/' + str(vnet.prefixlen),
                   'dev', iface])

        ctx.vip_static[remote] = {
            "iface": iface,
            "static": static,
        }

    try:
        yield

    finally:
        # tear down the static addresses, plus any VIPs the test may have
        # left configured on the same interface
        for remote, m in ctx.vip_static.items():
            log.info(f"Removing {m['static']} (and any VIPs) on {remote.hostname} iface {m['iface']}...")
            remote.sh(['sudo',
                       'ip', 'addr', 'del',
                       str(m['static']) + '/' + str(ctx.vip['vnet'].prefixlen),
                       'dev', m['iface']])

            for vip in ctx.vip['vips']:
                # the VIP may or may not still be present; ignore failures
                remote.sh(
                    [
                        'sudo',
                        'ip', 'addr', 'del',
                        str(vip) + '/' + str(ctx.vip['vnet'].prefixlen),
                        'dev', m['iface']
                    ],
                    check_status=False,
                )
205 |