]> git.proxmox.com Git - ceph.git/blame - ceph/src/rgw/rgw_civetweb_frontend.cc
import ceph 15.2.10
[ceph.git] / ceph / src / rgw / rgw_civetweb_frontend.cc
CommitLineData
7c673cae 1// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
9f95a23c 2// vim: ts=8 sw=2 smarttab ft=cpp
7c673cae
FG
3
#include <memory>
#include <set>
#include <string>

#include <boost/utility/string_ref.hpp>

#include "rgw_frontend.h"
#include "rgw_client_io_filters.h"
#include "rgw_dmclock_sync_scheduler.h"
7c673cae
FG
12
13#define dout_subsys ceph_subsys_rgw
14
11fdf7f2
TL
15namespace dmc = rgw::dmclock;
16
17RGWCivetWebFrontend::RGWCivetWebFrontend(RGWProcessEnv& env,
18 RGWFrontendConfig *conf,
19 dmc::SchedulerCtx& sched_ctx)
20 : conf(conf),
21 ctx(nullptr),
22 env(env)
23{
24
25 auto sched_t = dmc::get_scheduler_t(cct());
26 switch(sched_t){
27 case dmc::scheduler_t::none: [[fallthrough]];
28 case dmc::scheduler_t::throttler:
29 break;
30 case dmc::scheduler_t::dmclock:
31 // TODO: keep track of server ready state and use that here civetweb
32 // internally tracks in the ctx the threads used and free, while it is
33 // expected with the current implementation that the threads waiting on the
34 // queue would still show up in the "used" queue, it might be a useful thing
35 // to make decisions on in the future. Also while reconfiguring we should
36 // probably set this to false
37 auto server_ready_f = []() -> bool { return true; };
38
39 scheduler.reset(new dmc::SyncScheduler(cct(),
40 std::ref(sched_ctx.get_dmc_client_counters()),
41 *sched_ctx.get_dmc_client_config(),
42 server_ready_f,
43 std::ref(dmc::SyncScheduler::handle_request_cb),
44 dmc::AtLimit::Reject));
45 }
46
47}
48
7c673cae
FG
49static int civetweb_callback(struct mg_connection* conn)
50{
51 const struct mg_request_info* const req_info = mg_get_request_info(conn);
52 return static_cast<RGWCivetWebFrontend *>(req_info->user_data)->process(conn);
53}
54
55int RGWCivetWebFrontend::process(struct mg_connection* const conn)
56{
57 /* Hold a read lock over access to env.store for reconfiguration. */
9f95a23c 58 std::shared_lock lock{env.mutex};
7c673cae
FG
59
60 RGWCivetWeb cw_client(conn);
61 auto real_client_io = rgw::io::add_reordering(
181888fb 62 rgw::io::add_buffering(dout_context,
7c673cae
FG
63 rgw::io::add_chunking(
64 rgw::io::add_conlen_controlling(
65 &cw_client))));
181888fb 66 RGWRestfulIO client_io(dout_context, &real_client_io);
7c673cae 67
9f95a23c 68 RGWRequest req(env.store->getRados()->get_new_req_id());
94b18763 69 int http_ret = 0;
11fdf7f2 70 //assert (scheduler != nullptr);
7c673cae 71 int ret = process_request(env.store, env.rest, &req, env.uri_prefix,
11fdf7f2
TL
72 *env.auth_registry, &client_io, env.olog,
73 null_yield, scheduler.get() ,&http_ret);
7c673cae
FG
74 if (ret < 0) {
75 /* We don't really care about return code. */
76 dout(20) << "process_request() returned " << ret << dendl;
77 }
78
94b18763
FG
79 if (http_ret <= 0) {
80 /* Mark as processed. */
81 return 1;
82 }
83
84 return http_ret;
7c673cae
FG
85}
86
87int RGWCivetWebFrontend::run()
88{
89 auto& conf_map = conf->get_config_map();
7c673cae
FG
90
91 set_conf_default(conf_map, "num_threads",
11fdf7f2 92 std::to_string(g_conf()->rgw_thread_pool_size));
7c673cae
FG
93 set_conf_default(conf_map, "decode_url", "no");
94 set_conf_default(conf_map, "enable_keep_alive", "yes");
95 set_conf_default(conf_map, "validate_http_method", "no");
96 set_conf_default(conf_map, "canonicalize_url_path", "no");
31f18b77 97 set_conf_default(conf_map, "enable_auth_domain_check", "no");
11fdf7f2 98 set_conf_default(conf_map, "allow_unicode_in_urls", "yes");
28e407b8
AA
99
100 std::string listening_ports;
101 // support multiple port= entries
102 auto range = conf_map.equal_range("port");
103 for (auto p = range.first; p != range.second; ++p) {
104 std::string port_str = p->second;
105 // support port= entries with multiple values
106 std::replace(port_str.begin(), port_str.end(), '+', ',');
107 if (!listening_ports.empty()) {
108 listening_ports.append(1, ',');
109 }
110 listening_ports.append(port_str);
111 }
112 if (listening_ports.empty()) {
113 listening_ports = "80";
114 }
115 conf_map.emplace("listening_ports", std::move(listening_ports));
7c673cae
FG
116
117 /* Set run_as_user. This will cause civetweb to invoke setuid() and setgid()
118 * based on pw_uid and pw_gid obtained from pw_name. */
119 std::string uid_string = g_ceph_context->get_set_uid_string();
120 if (! uid_string.empty()) {
28e407b8 121 conf_map.emplace("run_as_user", std::move(uid_string));
7c673cae
FG
122 }
123
124 /* Prepare options for CivetWeb. */
125 const std::set<boost::string_ref> rgw_opts = { "port", "prefix" };
126
127 std::vector<const char*> options;
128
129 for (const auto& pair : conf_map) {
130 if (! rgw_opts.count(pair.first)) {
131 /* CivetWeb doesn't understand configurables of the glue layer between
132 * it and RadosGW. We need to strip them out. Otherwise CivetWeb would
133 * signalise an error. */
134 options.push_back(pair.first.c_str());
135 options.push_back(pair.second.c_str());
136
137 dout(20) << "civetweb config: " << pair.first
138 << ": " << pair.second << dendl;
139 }
140 }
141
142 options.push_back(nullptr);
143 /* Initialize the CivetWeb right now. */
144 struct mg_callbacks cb;
92f5a8d4 145 // FIPS zeroization audit 20191115: this memset is not security related.
7c673cae
FG
146 memset((void *)&cb, 0, sizeof(cb));
147 cb.begin_request = civetweb_callback;
148 cb.log_message = rgw_civetweb_log_callback;
149 cb.log_access = rgw_civetweb_log_access_callback;
150 ctx = mg_start(&cb, this, options.data());
151
152 return ! ctx ? -EIO : 0;
153} /* RGWCivetWebFrontend::run */