# ceph/qa/tasks/vip.py — teuthology task for virtual-IP setup
# (source imported from Ceph Pacific 16.2.2)
1 import contextlib
2 import ipaddress
3 import logging
4 import re
5
6 from teuthology.config import config as teuth_config
7
8 log = logging.getLogger(__name__)
9
10
def subst_vip(ctx, cmd):
    """Return *cmd* with virtual-IP template placeholders expanded.

    Supported placeholders:
      - ``{{VIPn}}``        -> the n-th allocated virtual IP (0-based)
      - ``{{VIPPREFIXLEN}}`` -> the virtual subnet's prefix length
      - ``{{VIPSUBNET}}``   -> the virtual subnet's network address

    A reference to a VIP index we did not allocate is left in place and
    logged as a warning.
    """
    vip_token = re.compile(r'({{VIP(\d+)}})')
    for placeholder, index in vip_token.findall(cmd):
        idx = int(index)
        if idx >= len(ctx.vip["vips"]):
            log.warning(f'no VIP{idx} (we have {len(ctx.vip["vips"])})')
            continue
        cmd = cmd.replace(placeholder, str(ctx.vip["vips"][idx]))

    if '{{VIPPREFIXLEN}}' in cmd:
        cmd = cmd.replace('{{VIPPREFIXLEN}}', str(ctx.vip["vnet"].prefixlen))

    if '{{VIPSUBNET}}' in cmd:
        cmd = cmd.replace('{{VIPSUBNET}}', str(ctx.vip["vnet"].network_address))

    return cmd
27
28
def echo(ctx, config):
    """Log the template-substituted config string once per remote.

    This is mostly for debugging; the substitution itself does not depend
    on the remote, so the same line is logged for each host.
    """
    for _remote in ctx.cluster.remotes.keys():
        log.info(subst_vip(ctx, config))
35
36
def map_vips(mip, count):
    """Map machine IP *mip* to a virtual subnet and *count* addresses in it.

    Scans the ``vip`` section of teuthology.yaml for a ``machine_subnet``
    containing *mip*, then picks the address at *mip*'s host position from
    each machine_subnet-sized slice of the ``virtual_subnet``.

    Returns a ``(virtual_subnet, [addresses])`` tuple, or ``None`` when no
    mapping matches (or when a mapping is misconfigured, which is logged).
    """
    for entry in teuth_config.get('vip', []):
        machine_net = ipaddress.ip_network(entry['machine_subnet'])
        virtual_net = ipaddress.ip_network(entry['virtual_subnet'])
        if virtual_net.prefixlen >= machine_net.prefixlen:
            # the virtual net must be strictly larger than the machine net
            log.error(f"virtual_subnet {virtual_net} prefix >= machine_subnet {machine_net} prefix")
            return None
        if mip not in machine_net:
            continue
        # position of this machine within its subnet selects the same slot
        # in each slice of the virtual subnet
        pos = list(machine_net.hosts()).index(mip)
        log.info(f"{mip} in {machine_net}, pos {pos}")
        picked = []
        remaining = count
        for chunk in virtual_net.subnets(new_prefix=machine_net.prefixlen):
            picked.append(list(chunk.hosts())[pos])
            remaining -= 1
            if remaining == 0:
                break
        return virtual_net, picked
    return None
55
56
@contextlib.contextmanager
def task(ctx, config):
    """
    Set up a virtual network and allocate virtual IP(s) for each machine.

    The strategy here is to set up a private virtual subnet that is larger than
    the subnet the machine(s) exist in, and allocate virtual IPs from that pool.

    - The teuthology.yaml must include a section like::

        vip:
          - machine_subnet: 172.21.0.0/20
            virtual_subnet: 10.0.0.0/16

    At least one item's machine_subnet should match the subnet the test machine's
    primary IP lives in (the one DNS resolves to). The virtual_subnet must have a
    shorter prefix (i.e., be larger than the machine_subnet). If there are multiple
    machine_subnets, they cannot map into the same virtual_subnet.

    - Each machine gets an IP in the virtual_subnet statically configured by the vip
      task. This lets all test machines reach each other and (most importantly) any
      virtual IPs.

    - 1 or more virtual IPs are then mapped for the task. These IPs are chosen based
      on one of the remotes. This uses a lot of network space but it avoids any
      conflicts between tests.

    To use a virtual IP, the {{VIP0}}, {{VIP1}}, etc. substitutions can be used.

    {{VIPSUBNET}} is the virtual_subnet address (10.0.0.0 in the example).

    {{VIPPREFIXLEN}} is the virtual_subnet prefix (16 in the example).

    These substitutions work for vip.echo, and (at the time of writing) cephadm.apply
    and cephadm.shell.
    """
    if config is None:
        config = {}
    count = config.get('count', 1)  # number of virtual IPs to allocate

    ctx.vip_static = {}  # remote -> {'iface': ..., 'static': ...}
    ctx.vip = {}         # {'vnet': ip_network, 'vips': [ip_address, ...]}

    log.info("Allocating static IPs for each host...")
    for remote in ctx.cluster.remotes.keys():
        ip = remote.ssh.get_transport().getpeername()[0]
        log.info(f'peername {ip}')
        mip = ipaddress.ip_address(ip)
        # request count+1 addresses: one static per-host address plus the VIPs
        mapping = map_vips(mip, count + 1)
        if mapping is None:
            # map_vips already logged the details; raise a clear error rather
            # than letting the tuple unpack fail with an opaque TypeError
            raise RuntimeError(
                f'unable to map {ip} into a virtual subnet; check the vip '
                'section of teuthology.yaml'
            )
        vnet, vips = mapping
        static = vips.pop(0)
        log.info(f"{remote.hostname} static {static}, vnet {vnet}")

        if not ctx.vip:
            # do this only once (use the first remote we see), since we only need 1
            # set of virtual IPs, regardless of how many remotes we have.
            # NOTE: this log call previously lacked the f-string prefix and
            # logged the literal template text instead of the addresses.
            log.info(f"VIPs are {[str(v) for v in vips]}")
            ctx.vip = {
                'vnet': vnet,
                'vips': vips,
            }
        else:
            # all remotes must be in the same virtual network...
            assert vnet == ctx.vip['vnet']

        # pick the interface carrying the primary IP by scanning the routing
        # table for the 'scope link ... src <ip>' entry
        p = re.compile(r'^(\S+) dev (\S+) (.*)scope link (.*)src (\S+)')
        iface = None
        for line in remote.sh(['sudo', 'ip', 'route', 'ls']).splitlines():
            m = p.findall(line)
            if not m:
                continue
            route_iface = m[0][1]
            route_ip = m[0][4]
            if route_ip == ip:
                iface = route_iface
                break

        if not iface:
            log.error(f"Unable to find {remote.hostname} interface for {ip}")
            continue

        # statically configure the per-host address on the discovered interface
        log.info(f"Configuring {static} on {remote.hostname} iface {iface}...")
        remote.sh(['sudo',
                   'ip', 'addr', 'add',
                   str(static) + '/' + str(vnet.prefixlen),
                   'dev', iface])

        ctx.vip_static[remote] = {
            "iface": iface,
            "static": static,
        }

    try:
        yield

    finally:
        # tear down the static addresses (and any VIPs a test may have left
        # behind) so the machines come back clean
        for remote, m in ctx.vip_static.items():
            log.info(f"Removing {m['static']} (and any VIPs) on {remote.hostname} iface {m['iface']}...")
            remote.sh(['sudo',
                       'ip', 'addr', 'del',
                       str(m['static']) + '/' + str(ctx.vip['vnet'].prefixlen),
                       'dev', m['iface']])

            for vip in ctx.vip['vips']:
                # a VIP may or may not be present on this host: best-effort delete
                remote.sh(
                    [
                        'sudo',
                        'ip', 'addr', 'del',
                        str(vip) + '/' + str(ctx.vip['vnet'].prefixlen),
                        'dev', m['iface']
                    ],
                    check_status=False,
                )
171