import pytest
import os


@pytest.fixture()
def node(host, request):
7 """ This fixture represents a single node in the ceph cluster. Using the
8 host.ansible fixture provided by testinfra it can access all the ansible
9 variables provided to it by the specific test scenario being ran.
10
11 You must include this fixture on any tests that operate on specific type
12 of node because it contains the logic to manage which tests a node
13 should run.
14 """
    ansible_vars = host.ansible.get_variables()
    # tox/jenkins/the user will pass in this environment variable. We need to
    # do it this way because testinfra does not collect and provide ansible
    # config passed in via --extra-vars
    ceph_dev_branch = os.environ.get("CEPH_DEV_BRANCH", "master")
    group_names = ansible_vars["group_names"]
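    # The port counts below are per OSD; nautilus+ OSDs are expected to
    # listen on four ports (presumably msgr v1 + v2 on the public and cluster
    # networks), while pre-nautilus releases like mimic and luminous only
    # expose two.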
    num_osd_ports = 4
    if 'mimic' in ceph_dev_branch or 'luminous' in ceph_dev_branch:
        num_osd_ports = 2

    # capture the initial/default state
    test_is_applicable = False
    for marker in request.node.iter_markers():
        if marker.name in group_names or marker.name == 'all':
            test_is_applicable = True
            break
    # Check if any markers on the test method exist in the node's group_names.
    # If they do not, this test is not valid for the node being tested.
    if not test_is_applicable:
        reason = "%s: Not a valid test for node type: %s" % (
            request.function, group_names)
        pytest.skip(reason)

    osd_ids = []
    osds = []
    cluster_address = ""
    # I can assume eth1 because I know all the vagrant
    # boxes we test with use that interface
    address = host.interface("eth1").addresses[0]
    subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
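    # e.g. a public_network of "192.168.3.0/24" yields the subnet "192.168.3"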
    num_mons = len(ansible_vars["groups"]["mons"])
    num_osds = len(ansible_vars.get("devices", []))
    if not num_osds:
        num_osds = len(ansible_vars.get("lvm_volumes", []))
    osds_per_device = ansible_vars.get("osds_per_device", 1)
    num_osds = num_osds * osds_per_device
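    # e.g. 2 devices with osds_per_device of 2 means 4 OSDs are expected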

    # If the number of devices doesn't map to the number of OSDs, allow tests
    # to define that custom number, defaulting to the ``num_osds`` computed
    # above
    num_osds = ansible_vars.get('num_osds', num_osds)
    cluster_name = ansible_vars.get("cluster", "ceph")
    conf_path = "/etc/ceph/{}.conf".format(cluster_name)
57 if "osds" in group_names:
58 # I can assume eth2 because I know all the vagrant
59 # boxes we test with use that interface. OSDs are the only
60 # nodes that have this interface.
61 cluster_address = host.interface("eth2").addresses[0]
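        # OSD data dirs under /var/lib/ceph/osd/ are named <cluster>-<id>
        # (e.g. ceph-0), so the sed below strips everything up to and
        # including the dash, leaving just the OSD id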
        cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
        if cmd.rc == 0:
            osd_ids = cmd.stdout.rstrip("\n").split("\n")
            osds = osd_ids

    data = dict(
        address=address,
        subnet=subnet,
        vars=ansible_vars,
        osd_ids=osd_ids,
        num_mons=num_mons,
        num_osds=num_osds,
        num_osd_ports=num_osd_ports,
        cluster_name=cluster_name,
        conf_path=conf_path,
        cluster_address=cluster_address,
        osds=osds,
    )
    return data


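# A minimal sketch (not part of the upstream file) of how a scenario test
# consumes the ``node`` fixture above: the ``mons`` marker restricts it to
# hosts in the ``mons`` ansible group via the skip logic in the fixture, and
# the asserted path comes from the fixture's ``conf_path`` key. The test
# name and assertion are illustrative only.
@pytest.mark.mons
def test_example_conf_exists(node, host):
    assert host.file(node["conf_path"]).exists

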
def pytest_collection_modifyitems(session, config, items):
    for item in items:
        test_path = item.location[0]
        if "mon" in test_path:
            item.add_marker(pytest.mark.mons)
        elif "osd" in test_path:
            item.add_marker(pytest.mark.osds)
        elif "mds" in test_path:
            item.add_marker(pytest.mark.mdss)
        elif "mgr" in test_path:
            item.add_marker(pytest.mark.mgrs)
        elif "rbd-mirror" in test_path:
            item.add_marker(pytest.mark.rbdmirrors)
        elif "rgw" in test_path:
            item.add_marker(pytest.mark.rgws)
        elif "nfs" in test_path:
            item.add_marker(pytest.mark.nfss)
        elif "iscsi" in test_path:
            item.add_marker(pytest.mark.iscsigws)
        else:
            item.add_marker(pytest.mark.all)
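

# For reference (an assumption, not shown in this file): the marks applied
# above would normally be registered so newer pytest versions don't emit
# unknown-mark warnings, e.g. in a pytest.ini or tox.ini next to the tests:
#
#   [pytest]
#   markers =
#       mons: tests for monitor nodes
#       osds: tests for OSD nodes
#       all: tests that run on every node type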