# ceph/qa/tasks/vip.py — teuthology task that allocates virtual IPs (VIPs)
# for test machines (recovered from a git.proxmox.com gitweb scrape).
# Teuthology's global configuration object; map_vips() reads the 'vip'
# section (machine_subnet -> virtual_subnet mappings) from it.
from teuthology.config import config as teuth_config

# Module-level logger, per the standard logging convention.
log = logging.getLogger(__name__)
def subst_vip(ctx, cmd):
    """
    Substitute virtual-IP placeholders in a command string.

    Replaces ``{{VIPn}}`` with the n-th allocated VIP, ``{{VIPPREFIXLEN}}``
    with the virtual subnet's prefix length, and ``{{VIPSUBNET}}`` with the
    virtual subnet's network address, all taken from ``ctx.vip``.

    :param ctx: test context; ``ctx.vip`` holds ``"vips"`` (list of
                addresses) and ``"vnet"`` (an ``ipaddress`` network).
    :param cmd: command string possibly containing placeholders.
    :returns: the command with all recognized placeholders substituted.
    """
    p = re.compile(r'({{VIP(\d+)}})')
    for m in p.findall(cmd):
        # m[0] is the whole token (e.g. '{{VIP0}}'), m[1] the index digits.
        n = int(m[1])  # NOTE(review): reconstructed — 'n' is used below but its binding was lost in the scrape
        if n >= len(ctx.vip["vips"]):
            # Out-of-range placeholder: warn and leave the token in place.
            log.warning(f'no VIP{n} (we have {len(ctx.vip["vips"])})')
        else:
            cmd = cmd.replace(m[0], str(ctx.vip["vips"][n]))

    if '{{VIPPREFIXLEN}}' in cmd:
        cmd = cmd.replace('{{VIPPREFIXLEN}}', str(ctx.vip["vnet"].prefixlen))

    if '{{VIPSUBNET}}' in cmd:
        cmd = cmd.replace('{{VIPSUBNET}}', str(ctx.vip["vnet"].network_address))

    # NOTE(review): reconstructed — without returning, all substitutions
    # would be silently discarded by every caller.
    return cmd
def echo(ctx, config):
    """
    Log the config string with VIP placeholders substituted.

    This is mostly for debugging.

    :param ctx: test context (provides ``ctx.cluster.remotes`` and
                ``ctx.vip`` used by :func:`subst_vip`).
    :param config: the string to substitute and log.
    """
    # NOTE(review): the loop variable is unused — the same substituted
    # string is logged once per remote, matching the original behavior.
    for remote in ctx.cluster.remotes.keys():
        log.info(subst_vip(ctx, config))
def map_vips(mip, count):
    """
    Map a machine IP to a virtual network and a set of virtual IPs.

    For each ``vip`` mapping in teuthology.yaml, find the machine_subnet
    containing *mip*, compute the machine's position within that subnet,
    and pick the host at the same position from each machine_subnet-sized
    slice of the (larger) virtual_subnet. This guarantees distinct VIPs
    per machine without any central allocator.

    :param mip: the machine's primary IP (an ``ipaddress`` address).
    :param count: number of virtual IPs to return.
    :returns: ``(virtual_network, [vip, ...])`` on success.
    """
    for mapping in teuth_config.get('vip', []):
        mnet = ipaddress.ip_network(mapping['machine_subnet'])
        vnet = ipaddress.ip_network(mapping['virtual_subnet'])
        if vnet.prefixlen >= mnet.prefixlen:
            # The virtual subnet must be strictly larger than the machine
            # subnet, or there is at most one slice and nothing to map.
            log.error(f"virtual_subnet {vnet} prefix >= machine_subnet {mnet} prefix")
            # NOTE(review): reconstructed — the scrape lost the line after
            # the error; a misconfigured mapping cannot be used. Confirm
            # against upstream whether this returned or raised.
            return None
        if mip in mnet:  # NOTE(review): reconstructed guard — 'pos' only makes sense for the subnet containing mip
            pos = list(mnet.hosts()).index(mip)
            log.info(f"{mip} in {mnet}, pos {pos}")
            r = []  # NOTE(review): reconstructed — 'r +=' below requires an initial list
            # Take the host at our position from each mnet-sized slice of vnet.
            for sub in vnet.subnets(new_prefix=mnet.prefixlen):
                r += [list(sub.hosts())[pos]]
            # NOTE(review): reconstructed — trim to the requested count;
            # the task() caller unpacks 'vnet, vips'. TODO confirm upstream.
            return vnet, r[0:count]
    return None
57 @contextlib.contextmanager
58 def task(ctx
, config
):
60 Set up a virtual network and allocate virtual IP(s) for each machine.
62 The strategy here is to set up a private virtual subnet that is larger than
63 the subnet the machine(s) exist in, and allocate virtual IPs from that pool.
65 - The teuthology.yaml must include a section like::
68 - machine_subnet: 172.21.0.0/20
69 virtual_subnet: 10.0.0.0/16
71 At least one item's machine_subnet should map the subnet the test machine's
72 primary IP lives in (the one DNS resolves to). The virtual_subnet must have a
73 shorter prefix (i.e., larger than the machine_subnet). If there are multiple
74 machine_subnets, they cannot map into the same virtual_subnet.
76 - Each machine gets an IP in the virtual_subset statically configured by the vip
77 task. This lets all test machines reach each other and (most importantly) any
80 - 1 or more virtual IPs are then mapped for the task. These IPs are chosen based
81 on one of the remotes. This uses a lot of network space but it avoids any
82 conflicts between tests.
84 To use a virtual IP, the {{VIP0}}, {{VIP1}}, etc. substitutions can be used.
86 {{VIPSUBNET}} is the virtual_subnet address (10.0.0.0 in the example).
88 {{VIPPREFIXLEN}} is the virtual_subnet prefix (16 in the example.
90 These substitutions work for vip.echo, and (at the time of writing) cephadm.apply
95 count
= config
.get('count', 1)
100 log
.info("Allocating static IPs for each host...")
101 for remote
in ctx
.cluster
.remotes
.keys():
102 ip
= remote
.ssh
.get_transport().getpeername()[0]
103 log
.info(f
'peername {ip}')
104 mip
= ipaddress
.ip_address(ip
)
105 vnet
, vips
= map_vips(mip
, count
+ 1)
107 log
.info(f
"{remote.hostname} static {static}, vnet {vnet}")
110 # do this only once (use the first remote we see), since we only need 1
111 # set of virtual IPs, regardless of how many remotes we have.
112 log
.info("VIPs are {map(str, vips)}")
118 # all remotes must be in the same virtual network...
119 assert vnet
== ctx
.vip
['vnet']
122 p
= re
.compile(r
'^(\S+) dev (\S+) (.*)scope link (.*)src (\S+)')
124 for line
in remote
.sh(['sudo', 'ip','route','ls']).splitlines():
128 route_iface
= m
[0][1]
135 log
.error(f
"Unable to find {remote.hostname} interface for {ip}")
139 log
.info(f
"Configuring {static} on {remote.hostname} iface {iface}...")
142 str(static
) + '/' + str(vnet
.prefixlen
),
145 ctx
.vip_static
[remote
] = {
154 for remote
, m
in ctx
.vip_static
.items():
155 log
.info(f
"Removing {m['static']} (and any VIPs) on {remote.hostname} iface {m['iface']}...")
158 str(m
['static']) + '/' + str(ctx
.vip
['vnet'].prefixlen
),
161 for vip
in ctx
.vip
['vips']:
166 str(vip
) + '/' + str(ctx
.vip
['vnet'].prefixlen
),