# Source: ceph.git — ceph/qa/tasks/mgr/test_dashboard.py
# (mirrored at git.proxmox.com, blob b30175f4f6f44583f5cc745ebf00b40fe4fd3315)
import logging
import ssl

import requests
from requests.adapters import HTTPAdapter

from .mgr_test_case import MgrTestCase

# Module-level logger, named after this module per the usual convention.
log = logging.getLogger(__name__)
11
class TestDashboard(MgrTestCase):
    """Smoke tests for the mgr 'dashboard' module.

    Covers standby redirect/error behaviour after a mgr failover, basic
    URL availability on the active daemon, and TLS protocol enforcement.
    """

    # Three mgr daemons: after a forced failover there is still an active
    # mgr plus at least one standby whose web server we can probe.
    MGRS_REQUIRED = 3

    def setUp(self):
        super(TestDashboard, self).setUp()

        self._assign_ports("dashboard", "ssl_server_port")
        self._load_module("dashboard")
        self.mgr_cluster.mon_manager.raw_cluster_cmd("dashboard",
                                                     "create-self-signed-cert")

    def tearDown(self):
        # Restore the defaults that test_standby_disable_redirect changes,
        # so subsequent tests see the stock standby behaviour again.
        # NOTE(review): does not call super().tearDown() — confirm
        # MgrTestCase has no teardown logic that needs to run.
        self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr",
                                                     "mgr/dashboard/standby_behaviour",
                                                     "redirect")
        self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr",
                                                     "mgr/dashboard/standby_error_status_code",
                                                     "500")

    def wait_until_webserver_available(self, url):
        """Block until a GET against *url* stops raising a connection
        error (i.e. the dashboard web server is accepting connections),
        failing the test after 30 seconds.
        """
        def _check_connection():
            try:
                # The certificate is self-signed (see setUp), so skip
                # verification; redirects are irrelevant here.
                requests.get(url, allow_redirects=False, verify=False)
                return True
            except requests.ConnectionError:
                pass
            return False
        self.wait_until_true(_check_connection, timeout=30)

    def _force_failover(self):
        """Fail the active mgr and wait for it to return as a standby
        with its web server answering again.

        Returns a tuple ``(original_uri, failed_over_uri)`` holding the
        dashboard URI before and after the failover; also asserts that
        the two URIs differ.
        """
        original_active_id = self.mgr_cluster.get_active_id()
        original_uri = self._get_uri("dashboard")
        log.info("Originally running manager '{}' at {}".format(
            original_active_id, original_uri))

        # Force a failover and wait until the previously active manager
        # is listed as standby.
        self.mgr_cluster.mgr_fail(original_active_id)
        self.wait_until_true(
            lambda: original_active_id in self.mgr_cluster.get_standby_ids(),
            timeout=30)

        failed_active_id = self.mgr_cluster.get_active_id()
        failed_over_uri = self._get_uri("dashboard")
        log.info("After failover running manager '{}' at {}".format(
            failed_active_id, failed_over_uri))

        # The dashboard must now be served by a different daemon.
        self.assertNotEqual(original_uri, failed_over_uri)

        # Wait until web server of the standby node is settled.
        self.wait_until_webserver_available(original_uri)

        return original_uri, failed_over_uri

    def test_standby(self):
        """A standby mgr must 303-redirect every URL to the active one."""
        original_uri, failed_over_uri = self._force_failover()

        # The original active daemon should have come back up as a standby
        # and be doing redirects to the new active daemon.
        r = requests.get(original_uri, allow_redirects=False, verify=False)
        self.assertEqual(r.status_code, 303)
        self.assertEqual(r.headers['Location'], failed_over_uri)

        # Ensure that every URL redirects to the active daemon.
        r = requests.get("{}/runtime.js".format(original_uri.strip('/')),
                         allow_redirects=False,
                         verify=False)
        self.assertEqual(r.status_code, 303)
        self.assertEqual(r.headers['Location'], failed_over_uri)

    def test_standby_disable_redirect(self):
        """With standby_behaviour=error the standby must answer with the
        configured error status code instead of redirecting.
        """
        self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr",
                                                     "mgr/dashboard/standby_behaviour",
                                                     "error")

        original_uri, _ = self._force_failover()

        # Redirection should be disabled now, instead a 500 must be returned.
        r = requests.get(original_uri, allow_redirects=False, verify=False)
        self.assertEqual(r.status_code, 500)

        self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr",
                                                     "mgr/dashboard/standby_error_status_code",
                                                     "503")

        # The customized HTTP status code (503) must be returned.
        r = requests.get(original_uri, allow_redirects=False, verify=False)
        self.assertEqual(r.status_code, 503)

    def test_urls(self):
        """Each listed URL on the active dashboard must answer 200."""
        base_uri = self._get_uri("dashboard")

        # This is a very simple smoke test to check that the dashboard can
        # give us a 200 response to requests. We're not testing that
        # the content is correct or even renders!
        urls = [
            "/",
        ]

        failures = []

        for url in urls:
            r = requests.get(base_uri + url, allow_redirects=False,
                             verify=False)
            if 300 <= r.status_code < 400:
                log.error("Unexpected redirect to: {0} (from {1})".format(
                    r.headers['Location'], base_uri))
            if r.status_code != 200:
                failures.append(url)

            log.info("{0}: {1} ({2} bytes)".format(
                url, r.status_code, len(r.content)
            ))

        self.assertListEqual(failures, [])

    def test_tls(self):
        """Obsolete TLS versions (1.0, 1.1) must be rejected by the
        dashboard; an auto-negotiated handshake must succeed with 200.
        """
        class CustomHTTPAdapter(HTTPAdapter):
            # Pins the ssl_version used by the adapter's connection pool
            # so that individual protocol versions can be probed.
            def __init__(self, ssl_version):
                self.ssl_version = ssl_version
                super().__init__()

            def init_poolmanager(self, *args, **kwargs):
                kwargs['ssl_version'] = self.ssl_version
                return super().init_poolmanager(*args, **kwargs)

        uri = self._get_uri("dashboard")

        # TLSv1
        with self.assertRaises(requests.exceptions.SSLError):
            session = requests.Session()
            session.mount(uri, CustomHTTPAdapter(ssl.PROTOCOL_TLSv1))
            session.get(uri, allow_redirects=False, verify=False)

        # TLSv1.1
        with self.assertRaises(requests.exceptions.SSLError):
            session = requests.Session()
            session.mount(uri, CustomHTTPAdapter(ssl.PROTOCOL_TLSv1_1))
            session.get(uri, allow_redirects=False, verify=False)

        # PROTOCOL_TLS lets client and server negotiate the protocol
        # version; this handshake must succeed.
        session = requests.Session()
        session.mount(uri, CustomHTTPAdapter(ssl.PROTOCOL_TLS))
        r = session.get(uri, allow_redirects=False, verify=False)
        self.assertEqual(r.status_code, 200)