find_package(CppCheck)
find_package(IWYU)
-set(VERSION 15.2.10)
+set(VERSION 15.2.11)
# Contributor: John Coyle <dx9err@gmail.com>
# Maintainer: John Coyle <dx9err@gmail.com>
pkgname=ceph
-pkgver=15.2.10
+pkgver=15.2.11
pkgrel=0
pkgdesc="Ceph is a distributed object store and file system"
pkgusers="ceph"
xmlstarlet
yasm
"
-source="ceph-15.2.10.tar.bz2"
+source="ceph-15.2.11.tar.bz2"
subpackages="
$pkgname-base
$pkgname-common
_udevrulesdir=/etc/udev/rules.d
_python_sitelib=/usr/lib/python2.7/site-packages
-builddir=$srcdir/ceph-15.2.10
+builddir=$srcdir/ceph-15.2.11
build() {
export CEPH_BUILD_VIRTUALENV=$builddir
# main package definition
#################################################################################
Name: ceph
-Version: 15.2.10
+Version: 15.2.11
Release: 0%{?dist}
%if 0%{?fedora} || 0%{?rhel}
Epoch: 2
Group: System/Filesystems
%endif
URL: http://ceph.com/
-Source0: %{?_remote_tarball_prefix}ceph-15.2.10.tar.bz2
+Source0: %{?_remote_tarball_prefix}ceph-15.2.11.tar.bz2
%if 0%{?suse_version}
# _insert_obs_source_lines_here
ExclusiveArch: x86_64 aarch64 ppc64le s390x
# common
#################################################################################
%prep
-%autosetup -p1 -n ceph-15.2.10
+%autosetup -p1 -n ceph-15.2.11
%build
# LTO can be enabled as soon as the following GCC bug is fixed:
-ceph (15.2.10-1bionic) bionic; urgency=medium
+ceph (15.2.11-1bionic) bionic; urgency=medium
- -- Jenkins Build Slave User <jenkins-build@confusa10.front.sepia.ceph.com> Wed, 17 Mar 2021 13:15:33 -0400
+ -- Jenkins Build Slave User <jenkins-build@braggi11.front.sepia.ceph.com> Mon, 19 Apr 2021 13:59:34 +0000
+
+ceph (15.2.11-1) stable; urgency=medium
+
+ * New upstream release
+
+ -- Ceph Release Team <ceph-maintainers@ceph.com> Mon, 19 Apr 2021 13:47:28 +0000
ceph (15.2.10-1) stable; urgency=medium
ceph config set global mon_data_size_warn <size>
+AUTH_INSECURE_GLOBAL_ID_RECLAIM
+_______________________________
+
+One or more clients or daemons are connected to the cluster that are
+not securely reclaiming their global_id (a unique number identifying
+each entity in the cluster) when reconnecting to a monitor. The
+client is being permitted to connect anyway because the
+``auth_allow_insecure_global_id_reclaim`` option is set to ``true`` (which may
+be necessary until all ceph clients have been upgraded), and the
+``auth_expose_insecure_global_id_reclaim`` option is set to ``true`` (which
+allows monitors to detect clients with insecure reclaim early by forcing them to
+reconnect right after they first authenticate).
+
+You can identify which client(s) are using unpatched ceph client code with::
+
+ ceph health detail
+
+Clients' global_id reclaim behavior can also be seen in the
+``global_id_status`` field in the dump of clients connected to an
+individual monitor (``reclaim_insecure`` means the client is
+unpatched and is contributing to this health alert)::
+
+ ceph tell mon.\* sessions
+
+We strongly recommend that all clients in the system are upgraded to a
+newer version of Ceph that correctly reclaims global_id values. Once
+all clients have been updated, you can stop allowing insecure reconnections
+with::
+
+ ceph config set mon auth_allow_insecure_global_id_reclaim false
+
+If it is impractical to upgrade all clients immediately, you can silence
+this warning temporarily with::
+
+ ceph health mute AUTH_INSECURE_GLOBAL_ID_RECLAIM 1w # 1 week
+
+Although we do NOT recommend doing so, you can also disable this warning indefinitely
+with::
+
+ ceph config set mon mon_warn_on_insecure_global_id_reclaim false
+
+AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED
+_______________________________________
+
+Ceph is currently configured to allow clients to reconnect to monitors using
+an insecure process to reclaim their previous global_id because the setting
+``auth_allow_insecure_global_id_reclaim`` is set to ``true``. It may be necessary to
+leave this setting enabled while existing Ceph clients are upgraded to newer
+versions of Ceph that correctly and securely reclaim their global_id.
+
+If the ``AUTH_INSECURE_GLOBAL_ID_RECLAIM`` health alert has not also been raised and
+the ``auth_expose_insecure_global_id_reclaim`` setting has not been disabled (it is
+on by default), then there are currently no clients connected that need to be
+upgraded, and it is safe to disallow insecure global_id reclaim with::
+
+ ceph config set mon auth_allow_insecure_global_id_reclaim false
+
+If there are still clients that need to be upgraded, then this alert can be
+silenced temporarily with::
+
+ ceph health mute AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED 1w # 1 week
+
+Although we do NOT recommend doing so, you can also disable this warning indefinitely
+with::
+
+ ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false
+
Manager
-------
--mon-allow-pool-delete \
--osd-pool-default-pg-autoscale-mode off \
--mon-osd-backfillfull-ratio .99 \
+ --mon-warn-on-insecure-global-id-reclaim-allowed=false \
"$@" || return 1
cat > $dir/ceph.conf <<EOF
- cephadm.shell:
env: [sha1]
mon.a:
+ - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
+ - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
- ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
tasks:
- cephadm:
+ conf:
+ mon:
+ auth allow insecure global id reclaim: true
bluestore_warn_on_legacy_statfs: false
bluestore warn on no per pool omap: false
mon pg warn min per osd: 0
+ mon:
+ mon_warn_on_insecure_global_id_reclaim: false
+ mon_warn_on_insecure_global_id_reclaim_allowed: false
log-ignorelist:
- evicting unresponsive client
- exec:
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
+ # rotate auth tickets quickly to exercise renewal paths
+ auth mon ticket ttl = 660 # 11m
+ auth service ticket ttl = 240 # 4m
+
+ # don't complain about insecure global_id in the test suite
+ mon_warn_on_insecure_global_id_reclaim = false
+ mon_warn_on_insecure_global_id_reclaim_allowed = false
+
[client]
rgw cache enabled = true
rgw enable ops log = true
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
+# rotate auth tickets quickly to exercise renewal paths
+auth mon ticket ttl = 660 # 11m
+auth service ticket ttl = 240 # 4m
+
+# don't complain about global id reclaim
+mon_warn_on_insecure_global_id_reclaim = false
+mon_warn_on_insecure_global_id_reclaim_allowed = false
+
[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
-27917a557cca91e4da407489bbaa64ad4352cc02
-15.2.10
+e3523634d9c2227df9af89a4eac33d16738c49cb
+15.2.11
{}
virtual ~AuthClientHandler() {}
+ virtual AuthClientHandler* clone() const = 0;
+
void init(const EntityName& n) { name = n; }
void set_want_keys(__u32 keys) {
#include "krb/KrbServiceHandler.hpp"
#endif
#include "none/AuthNoneServiceHandler.h"
+#include "common/dout.h"
#define dout_subsys ceph_subsys_auth
+std::ostream& operator<<(std::ostream& os,
+ global_id_status_t global_id_status)
+{
+ switch (global_id_status) {
+ case global_id_status_t::NONE:
+ return os << "none";
+ case global_id_status_t::NEW_PENDING:
+ return os << "new_pending";
+ case global_id_status_t::NEW_OK:
+ return os << "new_ok";
+ case global_id_status_t::NEW_NOT_EXPOSED:
+ return os << "new_not_exposed";
+ case global_id_status_t::RECLAIM_PENDING:
+ return os << "reclaim_pending";
+ case global_id_status_t::RECLAIM_OK:
+ return os << "reclaim_ok";
+ case global_id_status_t::RECLAIM_INSECURE:
+ return os << "reclaim_insecure";
+ default:
+ ceph_abort();
+ }
+}
+
+int AuthServiceHandler::start_session(const EntityName& entity_name,
+ uint64_t global_id,
+ bool is_new_global_id,
+ ceph::buffer::list *result,
+ AuthCapsInfo *caps)
+{
+ ceph_assert(!this->entity_name.get_type() && !this->global_id &&
+ global_id_status == global_id_status_t::NONE);
+
+ ldout(cct, 10) << __func__ << " entity_name=" << entity_name
+ << " global_id=" << global_id << " is_new_global_id="
+ << is_new_global_id << dendl;
+ this->entity_name = entity_name;
+ this->global_id = global_id;
+
+ return do_start_session(is_new_global_id, result, caps);
+}
+
AuthServiceHandler *get_auth_service_handler(int type, CephContext *cct, KeyServer *ks)
{
switch (type) {
class CryptoKey;
struct AuthCapsInfo;
+enum class global_id_status_t {
+ NONE,
+ // fresh client (global_id == 0); waiting for CephXAuthenticate
+ NEW_PENDING,
+ // connected client; new enough to correctly reclaim global_id
+ NEW_OK,
+ // connected client; unknown whether it can reclaim global_id correctly
+ NEW_NOT_EXPOSED,
+ // reconnecting client (global_id != 0); waiting for CephXAuthenticate
+ RECLAIM_PENDING,
+ // reconnected client; correctly reclaimed global_id
+ RECLAIM_OK,
+ // reconnected client; did not properly prove prior global_id ownership
+ RECLAIM_INSECURE
+};
+
+std::ostream& operator<<(std::ostream& os,
+ global_id_status_t global_id_status);
+
struct AuthServiceHandler {
protected:
CephContext *cct;
-public:
EntityName entity_name;
- uint64_t global_id;
+ uint64_t global_id = 0;
+ global_id_status_t global_id_status = global_id_status_t::NONE;
- explicit AuthServiceHandler(CephContext *cct_) : cct(cct_), global_id(0) {}
+public:
+ explicit AuthServiceHandler(CephContext *cct_) : cct(cct_) {}
virtual ~AuthServiceHandler() { }
- virtual int start_session(const EntityName& name,
- size_t connection_secret_required_length,
- ceph::buffer::list *result,
- AuthCapsInfo *caps,
- CryptoKey *session_key,
- std::string *connection_secret) = 0;
+ int start_session(const EntityName& entity_name,
+ uint64_t global_id,
+ bool is_new_global_id,
+ ceph::buffer::list *result,
+ AuthCapsInfo *caps);
virtual int handle_request(ceph::buffer::list::const_iterator& indata,
size_t connection_secret_required_length,
ceph::buffer::list *result,
- uint64_t *global_id,
AuthCapsInfo *caps,
CryptoKey *session_key,
std::string *connection_secret) = 0;
- EntityName& get_entity_name() { return entity_name; }
+ const EntityName& get_entity_name() { return entity_name; }
+ uint64_t get_global_id() { return global_id; }
+ global_id_status_t get_global_id_status() { return global_id_status; }
+
+private:
+ virtual int do_start_session(bool is_new_global_id,
+ ceph::buffer::list *result,
+ AuthCapsInfo *caps) = 0;
};
extern AuthServiceHandler *get_auth_service_handler(int type, CephContext *cct, KeyServer *ks);
reset();
}
+ CephxClientHandler* clone() const override {
+ return new CephxClientHandler(*this);
+ }
+
void reset() override;
void prepare_build_request() override;
int build_request(bufferlist& bl) const override;
#define dout_prefix *_dout << "cephx keyserverdata: "
bool KeyServerData::get_service_secret(CephContext *cct, uint32_t service_id,
- ExpiringCryptoKey& secret, uint64_t& secret_id) const
+ CryptoKey& secret, uint64_t& secret_id,
+ double& ttl) const
{
map<uint32_t, RotatingSecrets>::const_iterator iter =
rotating_secrets.find(service_id);
if (secrets.secrets.size() > 1)
++riter;
- if (riter->second.expiration < ceph_clock_now())
+ utime_t now = ceph_clock_now();
+ if (riter->second.expiration < now)
++riter; // "current" key has expired, use "next" key instead
secret_id = riter->first;
- secret = riter->second;
- ldout(cct, 30) << "get_service_secret service " << ceph_entity_type_name(service_id)
- << " id " << secret_id << " " << secret << dendl;
- return true;
-}
-
-bool KeyServerData::get_service_secret(CephContext *cct, uint32_t service_id,
- CryptoKey& secret, uint64_t& secret_id) const
-{
- ExpiringCryptoKey e;
-
- if (!get_service_secret(cct, service_id, e, secret_id))
- return false;
+ secret = riter->second.key;
- secret = e.key;
+ // ttl may have just been increased by the user
+ // cap it by expiration of "next" key to prevent handing out a ticket
+ // with a bogus, possibly way into the future, validity
+ ttl = service_id == CEPH_ENTITY_TYPE_AUTH ?
+ cct->_conf->auth_mon_ticket_ttl : cct->_conf->auth_service_ticket_ttl;
+ ttl = min(ttl, static_cast<double>(
+ secrets.secrets.rbegin()->second.expiration - now));
+
+ ldout(cct, 30) << __func__ << " service "
+ << ceph_entity_type_name(service_id) << " secret_id "
+ << secret_id << " " << riter->second << " ttl " << ttl
+ << dendl;
return true;
}
return data.get_caps(cct, name, type, caps_info);
}
-bool KeyServer::get_service_secret(uint32_t service_id,
- CryptoKey& secret, uint64_t& secret_id) const
+bool KeyServer::get_service_secret(uint32_t service_id, CryptoKey& secret,
+ uint64_t& secret_id, double& ttl) const
{
std::scoped_lock l{lock};
- return data.get_service_secret(cct, service_id, secret, secret_id);
+ return data.get_service_secret(cct, service_id, secret, secret_id, ttl);
}
bool KeyServer::get_service_secret(uint32_t service_id,
int KeyServer::_build_session_auth_info(uint32_t service_id,
const AuthTicket& parent_ticket,
- CephXSessionAuthInfo& info)
+ CephXSessionAuthInfo& info,
+ double ttl)
{
info.service_id = service_id;
info.ticket = parent_ticket;
- info.ticket.init_timestamps(ceph_clock_now(),
- cct->_conf->auth_service_ticket_ttl);
+ info.ticket.init_timestamps(ceph_clock_now(), ttl);
+ info.validity.set_from_double(ttl);
generate_secret(info.session_key);
const AuthTicket& parent_ticket,
CephXSessionAuthInfo& info)
{
- if (!get_service_secret(service_id, info.service_secret, info.secret_id)) {
+ double ttl;
+ if (!get_service_secret(service_id, info.service_secret, info.secret_id,
+ ttl)) {
return -EACCES;
}
std::scoped_lock l{lock};
-
- return _build_session_auth_info(service_id, parent_ticket, info);
+ return _build_session_auth_info(service_id, parent_ticket, info, ttl);
}
int KeyServer::build_session_auth_info(uint32_t service_id,
const AuthTicket& parent_ticket,
- CephXSessionAuthInfo& info,
- CryptoKey& service_secret,
- uint64_t secret_id)
+ const CryptoKey& service_secret,
+ uint64_t secret_id,
+ CephXSessionAuthInfo& info)
{
info.service_secret = service_secret;
info.secret_id = secret_id;
std::scoped_lock l{lock};
- return _build_session_auth_info(service_id, parent_ticket, info);
+ return _build_session_auth_info(service_id, parent_ticket, info,
+ cct->_conf->auth_service_ticket_ttl);
}
}
bool get_service_secret(CephContext *cct, uint32_t service_id,
- ExpiringCryptoKey& secret, uint64_t& secret_id) const;
- bool get_service_secret(CephContext *cct, uint32_t service_id,
- CryptoKey& secret, uint64_t& secret_id) const;
+ CryptoKey& secret, uint64_t& secret_id,
+ double& ttl) const;
bool get_service_secret(CephContext *cct, uint32_t service_id,
uint64_t secret_id, CryptoKey& secret) const;
bool get_auth(const EntityName& name, EntityAuth& auth) const;
void _dump_rotating_secrets();
int _build_session_auth_info(uint32_t service_id,
const AuthTicket& parent_ticket,
- CephXSessionAuthInfo& info);
+ CephXSessionAuthInfo& info,
+ double ttl);
bool _get_service_caps(const EntityName& name, uint32_t service_id,
AuthCapsInfo& caps) const;
public:
CephXSessionAuthInfo& info);
int build_session_auth_info(uint32_t service_id,
const AuthTicket& parent_ticket,
- CephXSessionAuthInfo& info,
- CryptoKey& service_secret,
- uint64_t secret_id);
+ const CryptoKey& service_secret,
+ uint64_t secret_id,
+ CephXSessionAuthInfo& info);
/* get current secret for specific service type */
- bool get_service_secret(uint32_t service_id, CryptoKey& service_key,
- uint64_t& secret_id) const;
+ bool get_service_secret(uint32_t service_id, CryptoKey& secret,
+ uint64_t& secret_id, double& ttl) const;
bool get_service_secret(uint32_t service_id, uint64_t secret_id,
CryptoKey& secret) const override;
<< " need " << need << dendl;
}
-bool cephx_decode_ticket(CephContext *cct, KeyStore *keys, uint32_t service_id,
- CephXTicketBlob& ticket_blob, CephXServiceTicketInfo& ticket_info)
+bool cephx_decode_ticket(CephContext *cct, KeyStore *keys,
+ uint32_t service_id,
+ const CephXTicketBlob& ticket_blob,
+ CephXServiceTicketInfo& ticket_info)
{
uint64_t secret_id = ticket_blob.secret_id;
CryptoKey service_secret;
CephXTicketBlob old_ticket;
uint32_t other_keys = 0; // replaces CephXServiceTicketRequest
+ bool old_ticket_may_be_omitted;
+
void encode(bufferlist& bl) const {
using ceph::encode;
- __u8 struct_v = 2;
+ __u8 struct_v = 3;
encode(struct_v, bl);
encode(client_challenge, bl);
encode(key, bl);
if (struct_v >= 2) {
decode(other_keys, bl);
}
+
+ // v2 and v3 encodings are the same, but:
+ // - some clients that send v1 or v2 don't populate old_ticket
+ // on reconnects (but do on renewals)
+ // - any client that sends v3 or later is expected to populate
+ // old_ticket both on reconnects and renewals
+ old_ticket_may_be_omitted = struct_v < 3;
}
};
WRITE_CLASS_ENCODER(CephXAuthenticate)
* Decode an extract ticket
*/
bool cephx_decode_ticket(CephContext *cct, KeyStore *keys,
- uint32_t service_id, CephXTicketBlob& ticket_blob,
+ uint32_t service_id,
+ const CephXTicketBlob& ticket_blob,
CephXServiceTicketInfo& ticket_info);
/*
#undef dout_prefix
#define dout_prefix *_dout << "cephx server " << entity_name << ": "
-int CephxServiceHandler::start_session(
- const EntityName& name,
- size_t connection_secret_required_length,
+int CephxServiceHandler::do_start_session(
+ bool is_new_global_id,
bufferlist *result_bl,
- AuthCapsInfo *caps,
- CryptoKey *session_key,
- std::string *connection_secret)
+ AuthCapsInfo *caps)
{
- entity_name = name;
+ global_id_status = is_new_global_id ? global_id_status_t::NEW_PENDING :
+ global_id_status_t::RECLAIM_PENDING;
uint64_t min = 1; // always non-zero
uint64_t max = std::numeric_limits<uint64_t>::max();
return 0;
}
+int CephxServiceHandler::verify_old_ticket(
+ const CephXAuthenticate& req,
+ CephXServiceTicketInfo& old_ticket_info,
+ bool& should_enc_ticket)
+{
+ ldout(cct, 20) << " checking old_ticket: secret_id="
+ << req.old_ticket.secret_id
+ << " len=" << req.old_ticket.blob.length()
+ << ", old_ticket_may_be_omitted="
+ << req.old_ticket_may_be_omitted << dendl;
+ ceph_assert(global_id_status != global_id_status_t::NONE);
+ if (global_id_status == global_id_status_t::NEW_PENDING) {
+ // old ticket is not needed
+ if (req.old_ticket.blob.length()) {
+ ldout(cct, 0) << " superfluous ticket presented" << dendl;
+ return -EINVAL;
+ }
+ if (req.old_ticket_may_be_omitted) {
+ ldout(cct, 10) << " new global_id " << global_id
+ << " (unexposed legacy client)" << dendl;
+ global_id_status = global_id_status_t::NEW_NOT_EXPOSED;
+ } else {
+ ldout(cct, 10) << " new global_id " << global_id << dendl;
+ global_id_status = global_id_status_t::NEW_OK;
+ }
+ return 0;
+ }
+
+ if (!req.old_ticket.blob.length()) {
+ // old ticket is needed but not presented
+ if (cct->_conf->auth_allow_insecure_global_id_reclaim &&
+ req.old_ticket_may_be_omitted) {
+ ldout(cct, 10) << " allowing reclaim of global_id " << global_id
+ << " with no ticket presented (legacy client, auth_allow_insecure_global_id_reclaim=true)"
+ << dendl;
+ global_id_status = global_id_status_t::RECLAIM_INSECURE;
+ return 0;
+ }
+ ldout(cct, 0) << " attempt to reclaim global_id " << global_id
+ << " without presenting ticket" << dendl;
+ return -EACCES;
+ }
+
+ if (!cephx_decode_ticket(cct, key_server, CEPH_ENTITY_TYPE_AUTH,
+ req.old_ticket, old_ticket_info)) {
+ if (cct->_conf->auth_allow_insecure_global_id_reclaim &&
+ req.old_ticket_may_be_omitted) {
+ ldout(cct, 10) << " allowing reclaim of global_id " << global_id
+ << " using bad ticket (legacy client, auth_allow_insecure_global_id_reclaim=true)"
+ << dendl;
+ global_id_status = global_id_status_t::RECLAIM_INSECURE;
+ return 0;
+ }
+ ldout(cct, 0) << " attempt to reclaim global_id " << global_id
+ << " using bad ticket" << dendl;
+ return -EACCES;
+ }
+ ldout(cct, 20) << " decoded old_ticket: global_id="
+ << old_ticket_info.ticket.global_id << dendl;
+ if (global_id != old_ticket_info.ticket.global_id) {
+ if (cct->_conf->auth_allow_insecure_global_id_reclaim &&
+ req.old_ticket_may_be_omitted) {
+ ldout(cct, 10) << " allowing reclaim of global_id " << global_id
+ << " using mismatching ticket (legacy client, auth_allow_insecure_global_id_reclaim=true)"
+ << dendl;
+ global_id_status = global_id_status_t::RECLAIM_INSECURE;
+ return 0;
+ }
+ ldout(cct, 0) << " attempt to reclaim global_id " << global_id
+ << " using mismatching ticket" << dendl;
+ return -EACCES;
+ }
+ ldout(cct, 10) << " allowing reclaim of global_id " << global_id
+ << " (valid ticket presented, will encrypt new ticket)"
+ << dendl;
+ global_id_status = global_id_status_t::RECLAIM_OK;
+ should_enc_ticket = true;
+ return 0;
+}
+
int CephxServiceHandler::handle_request(
bufferlist::const_iterator& indata,
size_t connection_secret_required_len,
bufferlist *result_bl,
- uint64_t *global_id,
AuthCapsInfo *caps,
CryptoKey *psession_key,
std::string *pconnection_secret)
ret = -EACCES;
break;
}
+
CephXServiceTicketInfo old_ticket_info;
+ ret = verify_old_ticket(req, old_ticket_info, should_enc_ticket);
+ if (ret) {
+ ldout(cct, 0) << " could not verify old ticket" << dendl;
+ break;
+ }
- if (cephx_decode_ticket(cct, key_server, CEPH_ENTITY_TYPE_AUTH,
- req.old_ticket, old_ticket_info)) {
- *global_id = old_ticket_info.ticket.global_id;
- ldout(cct, 10) << "decoded old_ticket with global_id=" << *global_id
- << dendl;
- should_enc_ticket = true;
+ double ttl;
+ if (!key_server->get_service_secret(CEPH_ENTITY_TYPE_AUTH,
+ info.service_secret, info.secret_id,
+ ttl)) {
+ ldout(cct, 0) << " could not get service secret for auth subsystem" << dendl;
+ ret = -EIO;
+ break;
}
- ldout(cct,10) << __func__ << " auth ticket global_id " << *global_id
- << dendl;
- info.ticket.init_timestamps(ceph_clock_now(),
- cct->_conf->auth_mon_ticket_ttl);
+ info.service_id = CEPH_ENTITY_TYPE_AUTH;
info.ticket.name = entity_name;
- info.ticket.global_id = *global_id;
- info.validity += cct->_conf->auth_mon_ticket_ttl;
+ info.ticket.global_id = global_id;
+ info.ticket.init_timestamps(ceph_clock_now(), ttl);
+ info.validity.set_from_double(ttl);
key_server->generate_secret(session_key);
if (psession_key) {
*psession_key = session_key;
}
- info.service_id = CEPH_ENTITY_TYPE_AUTH;
- if (!key_server->get_service_secret(CEPH_ENTITY_TYPE_AUTH, info.service_secret, info.secret_id)) {
- ldout(cct, 0) << " could not get service secret for auth subsystem" << dendl;
- ret = -EIO;
- break;
- }
vector<CephXSessionAuthInfo> info_vec;
info_vec.push_back(info);
}
}
encode(cbl, *result_bl);
- // provite all of the other tickets at the same time
+ // provide requested service tickets at the same time
vector<CephXSessionAuthInfo> info_vec;
for (uint32_t service_id = 1; service_id <= req.other_keys;
service_id <<= 1) {
- if (req.other_keys & service_id) {
+ // skip CEPH_ENTITY_TYPE_AUTH: auth ticket is already encoded
+ // (possibly encrypted with the old session key)
+ if ((req.other_keys & service_id) &&
+ service_id != CEPH_ENTITY_TYPE_AUTH) {
ldout(cct, 10) << " adding key for service "
<< ceph_entity_type_name(service_id) << dendl;
CephXSessionAuthInfo svc_info;
service_id,
info.ticket,
svc_info);
- svc_info.validity += cct->_conf->auth_service_ticket_ttl;
info_vec.push_back(svc_info);
}
}
int service_err = 0;
for (uint32_t service_id = 1; service_id <= ticket_req.keys;
service_id <<= 1) {
- if (ticket_req.keys & service_id) {
+ // skip CEPH_ENTITY_TYPE_AUTH: auth ticket must be obtained with
+ // CEPHX_GET_AUTH_SESSION_KEY
+ if ((ticket_req.keys & service_id) &&
+ service_id != CEPH_ENTITY_TYPE_AUTH) {
ldout(cct, 10) << " adding key for service "
<< ceph_entity_type_name(service_id) << dendl;
CephXSessionAuthInfo info;
service_err = r;
continue;
}
- info.validity += cct->_conf->auth_service_ticket_ttl;
info_vec.push_back(info);
++found_services;
}
#include "auth/Auth.h"
class KeyServer;
+struct CephXAuthenticate;
+struct CephXServiceTicketInfo;
class CephxServiceHandler : public AuthServiceHandler {
KeyServer *key_server;
: AuthServiceHandler(cct_), key_server(ks), server_challenge(0) {}
~CephxServiceHandler() override {}
- int start_session(const EntityName& name,
- size_t connection_secret_required_length,
- bufferlist *result_bl,
- AuthCapsInfo *caps,
- CryptoKey *session_key,
- std::string *connection_secret) override;
int handle_request(
bufferlist::const_iterator& indata,
size_t connection_secret_required_length,
bufferlist *result_bl,
- uint64_t *global_id,
AuthCapsInfo *caps,
CryptoKey *session_key,
std::string *connection_secret) override;
- void build_cephx_response_header(int request_type, int status, bufferlist& bl);
+private:
+ int do_start_session(bool is_new_global_id,
+ bufferlist *result_bl,
+ AuthCapsInfo *caps) override;
+
+ int verify_old_ticket(const CephXAuthenticate& req,
+ CephXServiceTicketInfo& old_ticket_info,
+ bool& should_enc_ticket);
+ void build_cephx_response_header(int request_type, int status,
+ bufferlist& bl);
};
#endif
reset();
}
~KrbClientHandler() override;
-
+
+ KrbClientHandler* clone() const override {
+ return new KrbClientHandler(*this);
+ }
+
int get_protocol() const override { return CEPH_AUTH_GSS; }
void reset() override {
m_gss_client_name = GSS_C_NO_NAME;
bufferlist::const_iterator& indata,
size_t connection_secret_required_length,
bufferlist *buff_list,
- uint64_t *global_id,
AuthCapsInfo *caps,
CryptoKey *session_key,
std::string *connection_secret)
return result;
}
-int KrbServiceHandler::start_session(
- const EntityName& name,
- size_t connection_secret_required_length,
+int KrbServiceHandler::do_start_session(
+ bool is_new_global_id,
bufferlist *buff_list,
- AuthCapsInfo *caps,
- CryptoKey *session_key,
- std::string *connection_secret)
+ AuthCapsInfo *caps)
{
gss_buffer_desc gss_buffer_in = {0, nullptr};
gss_OID gss_object_id = GSS_C_NT_HOSTBASED_SERVICE;
gss_buffer_in.length = gss_service_name.length();
gss_buffer_in.value = (const_cast<char*>(gss_service_name.c_str()));
- entity_name = name;
gss_major_status = gss_import_name(&gss_minor_status,
&gss_buffer_in,
int handle_request(bufferlist::const_iterator& indata,
size_t connection_secret_required_length,
bufferlist *buff_list,
- uint64_t *global_id,
AuthCapsInfo *caps,
CryptoKey *session_key,
std::string *connection_secret) override;
- int start_session(const EntityName& name,
- size_t connection_secret_required_length,
- bufferlist *buff_list,
- AuthCapsInfo *caps,
- CryptoKey *session_key,
- std::string *connection_secret) override;
-
private:
+ int do_start_session(bool is_new_global_id,
+ bufferlist *buff_list,
+ AuthCapsInfo *caps) override;
+
gss_buffer_desc m_gss_buffer_out;
gss_cred_id_t m_gss_credentials;
gss_ctx_id_t m_gss_sec_ctx;
AuthNoneClientHandler(CephContext *cct_)
: AuthClientHandler(cct_) {}
+ AuthNoneClientHandler* clone() const override {
+ return new AuthNoneClientHandler(*this);
+ }
+
void reset() override { }
void prepare_build_request() override {}
: AuthServiceHandler(cct_) {}
~AuthNoneServiceHandler() override {}
- int start_session(const EntityName& name,
- size_t connection_secret_required_length,
- bufferlist *result_bl,
- AuthCapsInfo *caps,
- CryptoKey *session_key,
- std::string *connection_secret) override {
- entity_name = name;
- caps->allow_all = true;
- return 1;
- }
int handle_request(bufferlist::const_iterator& indata,
size_t connection_secret_required_length,
bufferlist *result_bl,
- uint64_t *global_id,
AuthCapsInfo *caps,
CryptoKey *session_key,
std::string *connection_secret) override {
return 0;
}
- void build_cephx_response_header(int request_type, int status,
- bufferlist& bl) {
+
+private:
+ int do_start_session(bool is_new_global_id,
+ bufferlist *result_bl,
+ AuthCapsInfo *caps) override {
+ caps->allow_all = true;
+ return 1;
}
};
if not cp.has_section('global'):
cp.add_section('global')
cp.set('global', 'fsid', fsid);
- cp.set('global', 'mon host', addr_arg)
+ cp.set('global', 'mon_host', addr_arg)
cp.set('global', 'container_image', args.image)
+ if not cp.has_section('mon'):
+ cp.add_section('mon')
+ if (
+ not cp.has_option('mon', 'auth_allow_insecure_global_id_reclaim')
+ and not cp.has_option('mon', 'auth allow insecure global id reclaim')
+ ):
+ cp.set('mon', 'auth_allow_insecure_global_id_reclaim', 'false')
cpf = StringIO()
cp.write(cpf)
config = cpf.getvalue()
OPTION(cephx_sign_messages, OPT_BOOL) // Default to signing session messages if supported
OPTION(auth_mon_ticket_ttl, OPT_DOUBLE)
OPTION(auth_service_ticket_ttl, OPT_DOUBLE)
+OPTION(auth_allow_insecure_global_id_reclaim, OPT_BOOL)
+OPTION(auth_expose_insecure_global_id_reclaim, OPT_BOOL)
OPTION(auth_debug, OPT_BOOL) // if true, assert when weird things happen
OPTION(mon_client_hunt_parallel, OPT_U32) // how many mons to try to connect to in parallel during hunt
OPTION(mon_client_hunt_interval, OPT_DOUBLE) // try new mon every N seconds until we connect
.add_service("mon")
.set_description("time before OSDs who do not report to the mons are marked down (seconds)"),
+ Option("mon_warn_on_insecure_global_id_reclaim", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+ .set_default(true)
+ .add_service("mon")
+ .set_description("issue AUTH_INSECURE_GLOBAL_ID_RECLAIM health warning if any connected clients are insecurely reclaiming global_id")
+ .add_see_also("mon_warn_on_insecure_global_id_reclaim_allowed")
+ .add_see_also("auth_allow_insecure_global_id_reclaim")
+ .add_see_also("auth_expose_insecure_global_id_reclaim"),
+
+ Option("mon_warn_on_insecure_global_id_reclaim_allowed", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+ .set_default(true)
+ .add_service("mon")
+ .set_description("issue AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED health warning if insecure global_id reclaim is allowed")
+ .add_see_also("mon_warn_on_insecure_global_id_reclaim")
+ .add_see_also("auth_allow_insecure_global_id_reclaim")
+ .add_see_also("auth_expose_insecure_global_id_reclaim"),
+
Option("mon_warn_on_msgr2_not_enabled", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
.set_default(true)
.add_service("mon")
.set_description(""),
Option("auth_mon_ticket_ttl", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
- .set_default(12_hr)
+ .set_default(72_hr)
.set_description(""),
Option("auth_service_ticket_ttl", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
.set_default(1_hr)
.set_description(""),
+ Option("auth_allow_insecure_global_id_reclaim", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+ .set_default(true)
+ .set_description("Allow reclaiming global_id without presenting a valid ticket proving previous possession of that global_id")
+ .set_long_description("Allowing unauthorized global_id (re)use poses a security risk. Unfortunately, older clients may omit their ticket on reconnects and therefore rely on this being allowed for preserving their global_id for the lifetime of the client instance. Setting this value to false would immediately prevent new connections from those clients (assuming auth_expose_insecure_global_id_reclaim set to true) and eventually break existing sessions as well (regardless of auth_expose_insecure_global_id_reclaim setting).")
+ .add_see_also("mon_warn_on_insecure_global_id_reclaim")
+ .add_see_also("mon_warn_on_insecure_global_id_reclaim_allowed")
+ .add_see_also("auth_expose_insecure_global_id_reclaim"),
+
+ Option("auth_expose_insecure_global_id_reclaim", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+ .set_default(true)
+ .set_description("Force older clients that may omit their ticket on reconnects to reconnect as part of establishing a session")
+ .set_long_description("In permissive mode (auth_allow_insecure_global_id_reclaim set to true), this helps with identifying clients that are not patched. In enforcing mode (auth_allow_insecure_global_id_reclaim set to false), this is a fail-fast mechanism: don't establish a session that will almost inevitably be broken later.")
+ .add_see_also("mon_warn_on_insecure_global_id_reclaim")
+ .add_see_also("mon_warn_on_insecure_global_id_reclaim_allowed")
+ .add_see_also("auth_allow_insecure_global_id_reclaim"),
+
Option("auth_debug", Option::TYPE_BOOL, Option::LEVEL_DEV)
.set_default(false)
.set_description(""),
bool start = false;
bool finished = false;
EntityName entity_name;
+ bool is_new_global_id = false;
// set up handler?
if (m->protocol == 0 && !s->auth_handler) {
ceph_assert(!paxos_writable);
return false;
}
+ is_new_global_id = true;
}
try {
if (start) {
// new session
ret = s->auth_handler->start_session(entity_name,
- 0, // no connection_secret needed
+ s->con->peer_global_id,
+ is_new_global_id,
&response_bl,
- &s->con->peer_caps_info,
- nullptr, nullptr);
+ &s->con->peer_caps_info);
} else {
// request
ret = s->auth_handler->handle_request(
indata,
0, // no connection_secret needed
&response_bl,
- &s->con->peer_global_id,
&s->con->peer_caps_info,
nullptr, nullptr);
}
{
dout(20) << __func__ << dendl;
bool changed = false;
+ const auto max = g_conf().get_val<uint64_t>("mon_health_max_detail");
// snapshot of usage
DataStats stats;
}
}
+ // AUTH_INSECURE_GLOBAL_ID_RECLAIM
+ if (g_conf().get_val<bool>("mon_warn_on_insecure_global_id_reclaim") &&
+ g_conf().get_val<bool>("auth_allow_insecure_global_id_reclaim")) {
+ // Warn if there are any clients that are insecurely renewing their global_id
+ std::lock_guard l(mon->session_map_lock);
+ list<std::string> detail;
+ for (auto p = mon->session_map.sessions.begin();
+ p != mon->session_map.sessions.end();
+ ++p) {
+ if ((*p)->global_id_status == global_id_status_t::RECLAIM_INSECURE) {
+ ostringstream ds;
+ ds << (*p)->entity_name << " at " << (*p)->addrs
+ << " is using insecure global_id reclaim";
+ detail.push_back(ds.str());
+ if (detail.size() >= max) {
+ detail.push_back("...");
+ break;
+ }
+ }
+ }
+ if (!detail.empty()) {
+ ostringstream ss;
+ ss << "client%plurals% %isorare% using insecure global_id reclaim";
+ auto& d = next.add("AUTH_INSECURE_GLOBAL_ID_RECLAIM", HEALTH_WARN, ss.str(),
+ detail.size());
+ d.detail.swap(detail);
+ }
+ }
+ // AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED
+ if (g_conf().get_val<bool>("mon_warn_on_insecure_global_id_reclaim_allowed") &&
+ g_conf().get_val<bool>("auth_allow_insecure_global_id_reclaim")) {
+ ostringstream ss, ds;
+ ss << "mon%plurals% %isorare% allowing insecure global_id reclaim";
+ auto& d = next.add("AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED", HEALTH_WARN, ss.str(), 1);
+ ds << "mon." << mon->name << " has auth_allow_insecure_global_id_reclaim set to true";
+ d.detail.push_back(ds.str());
+ }
+
auto p = quorum_checks.find(mon->rank);
if (p == quorum_checks.end()) {
if (next.empty()) {
}
while ((!got_config || monmap.get_epoch() == 0) && r == 0) {
ldout(cct,20) << __func__ << " waiting for monmap|config" << dendl;
- map_cond.wait_for(l, ceph::make_timespan(
- cct->_conf->mon_client_hunt_interval));
+ auto status = map_cond.wait_for(l, ceph::make_timespan(
+ cct->_conf->mon_client_hunt_interval));
+ if (status == std::cv_status::timeout) {
+ r = -ETIMEDOUT;
+ }
}
if (got_config) {
ldout(cct,10) << __func__ << " success" << dendl;
active_con.reset();
pending_cons.clear();
+
auth.reset();
+ global_id = 0;
+ authenticate_err = 0;
+ authenticated = false;
monc_lock.unlock();
_start_hunting();
if (rank >= 0) {
- _add_conn(rank, global_id);
+ _add_conn(rank);
} else {
- _add_conns(global_id);
+ _add_conns();
}
// throw out old queued messages
}
}
-MonConnection& MonClient::_add_conn(unsigned rank, uint64_t global_id)
+MonConnection& MonClient::_add_conn(unsigned rank)
{
auto peer = monmap.get_addrs(rank);
auto conn = messenger->connect_to_mon(peer);
MonConnection mc(cct, conn, global_id, &auth_registry);
+ if (auth) {
+ mc.get_auth().reset(auth->clone());
+ }
auto inserted = pending_cons.insert(std::make_pair(peer, std::move(mc)));
ldout(cct, 10) << "picked mon." << monmap.get_name(rank)
<< " con " << conn
return inserted.first->second;
}
-void MonClient::_add_conns(uint64_t global_id)
+void MonClient::_add_conns()
{
// collect the next batch of candidates who are listed right next to the ones
// already tried
n = ranks.size();
}
for (unsigned i = 0; i < n; i++) {
- _add_conn(ranks[i], global_id);
+ _add_conn(ranks[i]);
tried.insert(ranks[i]);
}
}
_resend_mon_commands();
send_log(true);
if (active_con) {
- std::swap(auth, active_con->get_auth());
+ auth = std::move(active_con->get_auth());
if (global_id && global_id != active_con->get_global_id()) {
lderr(cct) << __func__ << " global_id changed from " << global_id
<< " to " << active_con->get_global_id() << dendl;
return -EACCES;
}
- if (auth) {
- auth.reset();
- }
int r = _init_auth(*method, entity_name, want_keys, keyring, true);
ceph_assert(r == 0);
uint32_t want_keys,
RotatingKeyRing* keyring)
{
- if (auth && (int)m->protocol == auth->get_protocol()) {
- // good, negotiation completed
- auth->reset();
- return 0;
- }
-
int r = _init_auth(m->protocol, entity_name, want_keys, keyring, false);
if (r == -ENOTSUP) {
if (m->result == -ENOTSUP) {
RotatingKeyRing* keyring,
bool msgr2)
{
- ldout(cct,10) << __func__ << " method " << method << dendl;
- auth.reset(
- AuthClientHandler::create(cct, method, keyring));
+ ldout(cct, 10) << __func__ << " method " << method << dendl;
+ if (auth && auth->get_protocol() == (int)method) {
+ ldout(cct, 10) << __func__ << " already have auth, reseting" << dendl;
+ auth->reset();
+ return 0;
+ }
+
+ ldout(cct, 10) << __func__ << " creating new auth" << dendl;
+ auth.reset(AuthClientHandler::create(cct, method, keyring));
if (!auth) {
ldout(cct, 10) << " no handler for protocol " << method << dendl;
return -ENOTSUP;
void _finish_hunting(int auth_err);
void _finish_auth(int auth_err);
void _reopen_session(int rank = -1);
- MonConnection& _add_conn(unsigned rank, uint64_t global_id);
+ MonConnection& _add_conn(unsigned rank);
+ void _add_conns();
void _un_backoff();
- void _add_conns(uint64_t global_id);
void _send_mon_message(MessageRef m);
std::map<entity_addrvec_t, MonConnection>::iterator _find_pending_con(
if (s->auth_handler) {
s->entity_name = s->auth_handler->get_entity_name();
+ s->global_id = s->auth_handler->get_global_id();
+ s->global_id_status = s->auth_handler->get_global_id_status();
}
- dout(20) << " entity " << s->entity_name
- << " caps " << s->caps.get_str() << dendl;
+ dout(20) << " entity_name " << s->entity_name
+ << " global_id " << s->global_id
+ << " (" << s->global_id_status
+ << ") caps " << s->caps.get_str() << dendl;
if ((is_synchronizing() ||
(!s->authenticated && !exited_quorum.is_zero())) &&
return;
}
+ // global_id_status == NONE: all sessions for auth_none and krb,
+ // mon <-> mon sessions (including proxied sessions) for cephx
+ ceph_assert(s->global_id_status == global_id_status_t::NONE ||
+ s->global_id_status == global_id_status_t::NEW_OK ||
+ s->global_id_status == global_id_status_t::NEW_NOT_EXPOSED ||
+ s->global_id_status == global_id_status_t::RECLAIM_OK ||
+ s->global_id_status == global_id_status_t::RECLAIM_INSECURE);
+
+ // let mon_getmap through for "ping" (which doesn't reconnect)
+ // and "tell" (which reconnects but doesn't attempt to preserve
+ // its global_id and stays in NEW_NOT_EXPOSED, retrying until
+ // ->send_attempts reaches 0)
+ if (cct->_conf->auth_expose_insecure_global_id_reclaim &&
+ s->global_id_status == global_id_status_t::NEW_NOT_EXPOSED &&
+ op->get_req()->get_type() != CEPH_MSG_MON_GET_MAP) {
+ dout(5) << __func__ << " " << op->get_req()->get_source_inst()
+ << " may omit old_ticket on reconnects, discarding "
+ << *op->get_req() << " and forcing reconnect" << dendl;
+ ceph_assert(s->con && !s->proxy_con);
+ s->con->mark_down();
+ {
+ std::lock_guard l(session_map_lock);
+ remove_session(s);
+ }
+ op->mark_zap();
+ return;
+ }
+
switch (op->get_req()->get_type()) {
case CEPH_MSG_MON_GET_MAP:
handle_mon_get_map(op);
}
ret = key_server.build_session_auth_info(
- service_id, auth_ticket_info.ticket, info, secret, (uint64_t)-1);
+ service_id, auth_ticket_info.ticket, secret, (uint64_t)-1, info);
if (ret < 0) {
dout(0) << __func__ << " failed to build mon session_auth_info "
<< cpp_strerror(ret) << dendl;
// are supported by the client if we require it. for msgr2 that
// is not necessary.
+ bool is_new_global_id = false;
if (!con->peer_global_id) {
con->peer_global_id = authmon()->_assign_global_id();
if (!con->peer_global_id) {
dout(1) << __func__ << " failed to assign global_id" << dendl;
return -EBUSY;
}
- dout(10) << __func__ << " assigned global_id " << con->peer_global_id
- << dendl;
+ is_new_global_id = true;
}
// set up partial session
r = s->auth_handler->start_session(
entity_name,
- auth_meta->get_connection_secret_length(),
+ con->peer_global_id,
+ is_new_global_id,
reply,
- &con->peer_caps_info,
- &auth_meta->session_key,
- &auth_meta->connection_secret);
+ &con->peer_caps_info);
} else {
priv = con->get_priv();
if (!priv) {
p,
auth_meta->get_connection_secret_length(),
reply,
- &con->peer_global_id,
&con->peer_caps_info,
&auth_meta->session_key,
&auth_meta->connection_secret);
AuthServiceHandler *auth_handler = nullptr;
EntityName entity_name;
+ uint64_t global_id = 0;
+ global_id_status_t global_id_status = global_id_status_t::NONE;
ConnectionRef proxy_con;
uint64_t proxy_tid = 0;
f->dump_bool("open", !closed);
f->dump_object("caps", caps);
f->dump_bool("authenticated", authenticated);
+ f->dump_unsigned("global_id", global_id);
+ f->dump_stream("global_id_status") << global_id_status;
f->dump_unsigned("osd_epoch", osd_epoch);
f->dump_string("remote_host", remote_host);
}