git.proxmox.com Git - ceph.git/commitdiff
import ceph 15.2.11
author Thomas Lamprecht <t.lamprecht@proxmox.com>
Tue, 20 Apr 2021 10:33:04 +0000 (12:33 +0200)
committer Thomas Lamprecht <t.lamprecht@proxmox.com>
Tue, 20 Apr 2021 10:33:04 +0000 (12:33 +0200)
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
36 files changed:
ceph/CMakeLists.txt
ceph/alpine/APKBUILD
ceph/ceph.spec
ceph/changelog.upstream
ceph/doc/rados/operations/health-checks.rst
ceph/qa/standalone/ceph-helpers.sh
ceph/qa/suites/rados/cephadm/upgrade/3-start-upgrade.yaml
ceph/qa/suites/rados/thrash-old-clients/ceph.yaml
ceph/qa/suites/upgrade/nautilus-x/stress-split/1-ceph-install/nautilus.yaml
ceph/qa/tasks/ceph.conf.template
ceph/qa/tasks/cephadm.conf
ceph/src/.git_version
ceph/src/auth/AuthClientHandler.h
ceph/src/auth/AuthServiceHandler.cc
ceph/src/auth/AuthServiceHandler.h
ceph/src/auth/cephx/CephxClientHandler.h
ceph/src/auth/cephx/CephxKeyServer.cc
ceph/src/auth/cephx/CephxKeyServer.h
ceph/src/auth/cephx/CephxProtocol.cc
ceph/src/auth/cephx/CephxProtocol.h
ceph/src/auth/cephx/CephxServiceHandler.cc
ceph/src/auth/cephx/CephxServiceHandler.h
ceph/src/auth/krb/KrbClientHandler.hpp
ceph/src/auth/krb/KrbServiceHandler.cpp
ceph/src/auth/krb/KrbServiceHandler.hpp
ceph/src/auth/none/AuthNoneClientHandler.h
ceph/src/auth/none/AuthNoneServiceHandler.h
ceph/src/cephadm/cephadm
ceph/src/common/legacy_config_opts.h
ceph/src/common/options.cc
ceph/src/mon/AuthMonitor.cc
ceph/src/mon/HealthMonitor.cc
ceph/src/mon/MonClient.cc
ceph/src/mon/MonClient.h
ceph/src/mon/Monitor.cc
ceph/src/mon/Session.h

index e94f5da1523e60133e221bc20e00021976749f2f..3784f3361bc4dcb15219829fa2e4dabea2e1207a 100644 (file)
@@ -668,4 +668,4 @@ add_custom_target(tags DEPENDS ctags)
 
 find_package(CppCheck)
 find_package(IWYU)
-set(VERSION 15.2.10)
+set(VERSION 15.2.11)
index f2b431e7177ac90a14a12e3e5f0e9216591381ac..d019225cdacdd2077a9ad5803d83b4911a7d4fa1 100644 (file)
@@ -1,7 +1,7 @@
 # Contributor: John Coyle <dx9err@gmail.com>
 # Maintainer: John Coyle <dx9err@gmail.com>
 pkgname=ceph
-pkgver=15.2.10
+pkgver=15.2.11
 pkgrel=0
 pkgdesc="Ceph is a distributed object store and file system"
 pkgusers="ceph"
@@ -63,7 +63,7 @@ makedepends="
        xmlstarlet
        yasm
 "
-source="ceph-15.2.10.tar.bz2"
+source="ceph-15.2.11.tar.bz2"
 subpackages="
        $pkgname-base
        $pkgname-common
@@ -116,7 +116,7 @@ _sysconfdir=/etc
 _udevrulesdir=/etc/udev/rules.d
 _python_sitelib=/usr/lib/python2.7/site-packages
 
-builddir=$srcdir/ceph-15.2.10
+builddir=$srcdir/ceph-15.2.11
 
 build() {
        export CEPH_BUILD_VIRTUALENV=$builddir
index 09bfda181c361135a0936ae75e2740c06ac0f7b4..d6a735dd7f04ac7451ae0ac3eac583b4c3140614 100644 (file)
@@ -98,7 +98,7 @@
 # main package definition
 #################################################################################
 Name:          ceph
-Version:       15.2.10
+Version:       15.2.11
 Release:       0%{?dist}
 %if 0%{?fedora} || 0%{?rhel}
 Epoch:         2
@@ -114,7 +114,7 @@ License:    LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-
 Group:         System/Filesystems
 %endif
 URL:           http://ceph.com/
-Source0:       %{?_remote_tarball_prefix}ceph-15.2.10.tar.bz2
+Source0:       %{?_remote_tarball_prefix}ceph-15.2.11.tar.bz2
 %if 0%{?suse_version}
 # _insert_obs_source_lines_here
 ExclusiveArch:  x86_64 aarch64 ppc64le s390x
@@ -1140,7 +1140,7 @@ This package provides Ceph’s default alerts for Prometheus.
 # common
 #################################################################################
 %prep
-%autosetup -p1 -n ceph-15.2.10
+%autosetup -p1 -n ceph-15.2.11
 
 %build
 # LTO can be enabled as soon as the following GCC bug is fixed:
index 9287b9a85a297dae06647c2edb1ab1c6967c3c3d..0953cf4435670403ff012d5ed7ac750698ccabca 100644 (file)
@@ -1,7 +1,13 @@
-ceph (15.2.10-1bionic) bionic; urgency=medium
+ceph (15.2.11-1bionic) bionic; urgency=medium
 
 
- -- Jenkins Build Slave User <jenkins-build@confusa10.front.sepia.ceph.com>  Wed, 17 Mar 2021 13:15:33 -0400
+ -- Jenkins Build Slave User <jenkins-build@braggi11.front.sepia.ceph.com>  Mon, 19 Apr 2021 13:59:34 +0000
+
+ceph (15.2.11-1) stable; urgency=medium
+
+  * New upstream release
+
+ -- Ceph Release Team <ceph-maintainers@ceph.com>  Mon, 19 Apr 2021 13:47:28 +0000
 
 ceph (15.2.10-1) stable; urgency=medium
 
index 32169dab333d5b52c23f5bc39cdf0af13b194be4..87f4bd6c927c1e184fe2696b38ae687124f7d57a 100644 (file)
@@ -122,6 +122,73 @@ The warning threshold may be adjusted with::
 
   ceph config set global mon_data_size_warn <size>
 
+AUTH_INSECURE_GLOBAL_ID_RECLAIM
+_______________________________
+
+One or more clients or daemons are connected to the cluster that are
+not securely reclaiming their global_id (a unique number identifying
+each entity in the cluster) when reconnecting to a monitor.  The
+client is being permitted to connect anyway because the
+``auth_allow_insecure_global_id_reclaim`` option is set to ``true`` (which may
+be necessary until all Ceph clients have been upgraded), and the
+``auth_expose_insecure_global_id_reclaim`` option is set to ``true`` (which
+allows monitors to detect clients with insecure reclaim early by forcing them to
+reconnect right after they first authenticate).
+
+You can identify which client(s) are using unpatched Ceph client code with::
+
+  ceph health detail
+
+Clients' global_id reclaim behavior can also be seen in the
+``global_id_status`` field in the dump of clients connected to an
+individual monitor (``reclaim_insecure`` means the client is
+unpatched and is contributing to this health alert)::
+
+  ceph tell mon.\* sessions
+
+We strongly recommend that all clients in the system are upgraded to a
+newer version of Ceph that correctly reclaims global_id values.  Once
+all clients have been updated, you can stop allowing insecure reconnections
+with::
+
+  ceph config set mon auth_allow_insecure_global_id_reclaim false
+
+If it is impractical to upgrade all clients immediately, you can silence
+this warning temporarily with::
+
+  ceph health mute AUTH_INSECURE_GLOBAL_ID_RECLAIM 1w   # 1 week
+
+Although we do NOT recommend doing so, you can also disable this warning indefinitely
+with::
+
+  ceph config set mon mon_warn_on_insecure_global_id_reclaim false
+
+AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED
+_______________________________________
+
+Ceph is currently configured to allow clients to reconnect to monitors using
+an insecure process to reclaim their previous global_id because the setting
+``auth_allow_insecure_global_id_reclaim`` is set to ``true``.  It may be necessary to
+leave this setting enabled while existing Ceph clients are upgraded to newer
+versions of Ceph that correctly and securely reclaim their global_id.
+
+If the ``AUTH_INSECURE_GLOBAL_ID_RECLAIM`` health alert has not also been raised and
+the ``auth_expose_insecure_global_id_reclaim`` setting has not been disabled (it is
+on by default), then there are currently no clients connected that need to be
+upgraded, and it is safe to disallow insecure global_id reclaim with::
+
+  ceph config set mon auth_allow_insecure_global_id_reclaim false
+
+If there are still clients that need to be upgraded, then this alert can be
+silenced temporarily with::
+
+  ceph health mute AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED 1w   # 1 week
+
+Although we do NOT recommend doing so, you can also disable this warning indefinitely
+with::
+
+  ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false
+
 
 Manager
 -------
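
As a concrete follow-up to the documentation added above: a minimal sketch of filtering the monitor session dump for unpatched clients. It assumes ``jq`` is installed and that ``mon.a`` is one of your monitors; the ``global_id_status`` field it selects on is the one this release adds to the session dump (see the ``MonSession::dump()`` change further below).

  # illustrative only: list sessions still using insecure global_id reclaim
  ceph tell mon.a sessions | \
    jq '.[] | select(.global_id_status == "reclaim_insecure")'

Once no such sessions remain on any monitor, it should be safe to run ``ceph config set mon auth_allow_insecure_global_id_reclaim false`` as described above.
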
index 20336d06fdb28f5492d65dc4e43d9b19ea5f269a..0a34a353ac7f28291e1ae945187cd5d75964f7bc 100755 (executable)
@@ -480,6 +480,7 @@ function run_mon() {
        --mon-allow-pool-delete \
        --osd-pool-default-pg-autoscale-mode off \
        --mon-osd-backfillfull-ratio .99 \
+       --mon-warn-on-insecure-global-id-reclaim-allowed=false \
         "$@" || return 1
 
     cat > $dir/ceph.conf <<EOF
index f13b2e07ce7137c50f09074c165b16143dbeab65..8f8c56a08434da9b4bbea7e81f9f3bf3ae5cc37d 100644 (file)
@@ -2,4 +2,6 @@ tasks:
 - cephadm.shell:
     env: [sha1]
     mon.a:
+      - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
+      - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
       - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
index 42f9495a851667af64c11da289253bb70bac7e12..ebcd7c14250642248ca536be5d51efbf8a156cae 100644 (file)
@@ -1,2 +1,5 @@
 tasks:
 - cephadm:
+    conf:
+      mon:
+        auth allow insecure global id reclaim: true
index 62469c09008e8433dbb1b7d63bc8be180d9d0278..f6d34bda0802d0110c2df7444977e827c0964fb4 100644 (file)
@@ -14,6 +14,9 @@ tasks:
         bluestore_warn_on_legacy_statfs: false
         bluestore warn on no per pool omap: false
         mon pg warn min per osd: 0
+      mon:
+        mon_warn_on_insecure_global_id_reclaim: false
+        mon_warn_on_insecure_global_id_reclaim_allowed: false
     log-ignorelist:
       - evicting unresponsive client
 - exec:
index 8619817cf75bd0ee975371c78390d4cc801869e5..f1a102dc8ee37bd7e420f4f884bf56088088f463 100644 (file)
        mon osd prime pg temp = true
        mon reweight min bytes per osd = 10
 
+       # rotate auth tickets quickly to exercise renewal paths
+       auth mon ticket ttl = 660      # 11m
+       auth service ticket ttl = 240  # 4m
+
+       # don't complain about insecure global_id in the test suite
+       mon_warn_on_insecure_global_id_reclaim = false
+       mon_warn_on_insecure_global_id_reclaim_allowed = false
+
 [client]
        rgw cache enabled = true
        rgw enable ops log = true
index a6cbb28c0eb0a7ffbf70fc34511942f92b8184a3..4a7945ac31d2a264f20ee36e8137b5bb57831073 100644 (file)
@@ -73,6 +73,14 @@ mon osd reporter subtree level = osd
 mon osd prime pg temp = true
 mon reweight min bytes per osd = 10
 
+# rotate auth tickets quickly to exercise renewal paths
+auth mon ticket ttl = 660      # 11m
+auth service ticket ttl = 240  # 4m
+
+# don't complain about global id reclaim
+mon_warn_on_insecure_global_id_reclaim = false
+mon_warn_on_insecure_global_id_reclaim_allowed = false
+
 [client.rgw]
 rgw cache enabled = true
 rgw enable ops log = true
index 0ca509a9f2c2876ce866ff13ab4b7eb894785c46..3bba791e21539d978e115dbdfa931f7339dbbc06 100644 (file)
@@ -1,2 +1,2 @@
-27917a557cca91e4da407489bbaa64ad4352cc02
-15.2.10
+e3523634d9c2227df9af89a4eac33d16738c49cb
+15.2.11
index 3e2f73db049b47ca7a3817129a6cffb742d5e6ad..c8989b1a0d9e613936c86fa6801276884733eb8b 100644 (file)
@@ -37,6 +37,8 @@ public:
   {}
   virtual ~AuthClientHandler() {}
 
+  virtual AuthClientHandler* clone() const = 0;
+
   void init(const EntityName& n) { name = n; }
   
   void set_want_keys(__u32 keys) {
index 51c5c75da7bf775466d2ffe9ab98b12aabedc023..2d1297ee2509064f19e98b4e71f2258b5dde00f9 100644 (file)
 #include "krb/KrbServiceHandler.hpp"
 #endif
 #include "none/AuthNoneServiceHandler.h"
+#include "common/dout.h"
 
 #define dout_subsys ceph_subsys_auth
 
 
+std::ostream& operator<<(std::ostream& os,
+                        global_id_status_t global_id_status)
+{
+  switch (global_id_status) {
+  case global_id_status_t::NONE:
+    return os << "none";
+  case global_id_status_t::NEW_PENDING:
+    return os << "new_pending";
+  case global_id_status_t::NEW_OK:
+    return os << "new_ok";
+  case global_id_status_t::NEW_NOT_EXPOSED:
+    return os << "new_not_exposed";
+  case global_id_status_t::RECLAIM_PENDING:
+    return os << "reclaim_pending";
+  case global_id_status_t::RECLAIM_OK:
+    return os << "reclaim_ok";
+  case global_id_status_t::RECLAIM_INSECURE:
+    return os << "reclaim_insecure";
+  default:
+    ceph_abort();
+  }
+}
+
+int AuthServiceHandler::start_session(const EntityName& entity_name,
+                                     uint64_t global_id,
+                                     bool is_new_global_id,
+                                     ceph::buffer::list *result,
+                                     AuthCapsInfo *caps)
+{
+  ceph_assert(!this->entity_name.get_type() && !this->global_id &&
+             global_id_status == global_id_status_t::NONE);
+
+  ldout(cct, 10) << __func__ << " entity_name=" << entity_name
+                << " global_id=" << global_id << " is_new_global_id="
+                << is_new_global_id << dendl;
+  this->entity_name = entity_name;
+  this->global_id = global_id;
+
+  return do_start_session(is_new_global_id, result, caps);
+}
+
 AuthServiceHandler *get_auth_service_handler(int type, CephContext *cct, KeyServer *ks)
 {
   switch (type) {
index ac33eaa2425946eda980342eb811cf3a4b7e8c65..4b3dcccbe9dd26bd379d0b8f8ce1eeaeae63a198 100644 (file)
@@ -25,32 +25,57 @@ class KeyServer;
 class CryptoKey;
 struct AuthCapsInfo;
 
+enum class global_id_status_t {
+  NONE,
+  // fresh client (global_id == 0); waiting for CephXAuthenticate
+  NEW_PENDING,
+  // connected client; new enough to correctly reclaim global_id
+  NEW_OK,
+  // connected client; unknown whether it can reclaim global_id correctly
+  NEW_NOT_EXPOSED,
+  // reconnecting client (global_id != 0); waiting for CephXAuthenticate
+  RECLAIM_PENDING,
+  // reconnected client; correctly reclaimed global_id
+  RECLAIM_OK,
+  // reconnected client; did not properly prove prior global_id ownership
+  RECLAIM_INSECURE
+};
+
+std::ostream& operator<<(std::ostream& os,
+                        global_id_status_t global_id_status);
+
 struct AuthServiceHandler {
 protected:
   CephContext *cct;
-public:
   EntityName entity_name;
-  uint64_t global_id;
+  uint64_t global_id = 0;
+  global_id_status_t global_id_status = global_id_status_t::NONE;
 
-  explicit AuthServiceHandler(CephContext *cct_) : cct(cct_), global_id(0) {}
+public:
+  explicit AuthServiceHandler(CephContext *cct_) : cct(cct_) {}
 
   virtual ~AuthServiceHandler() { }
 
-  virtual int start_session(const EntityName& name,
-                           size_t connection_secret_required_length,
-                           ceph::buffer::list *result,
-                           AuthCapsInfo *caps,
-                           CryptoKey *session_key,
-                           std::string *connection_secret) = 0;
+  int start_session(const EntityName& entity_name,
+                   uint64_t global_id,
+                   bool is_new_global_id,
+                   ceph::buffer::list *result,
+                   AuthCapsInfo *caps);
   virtual int handle_request(ceph::buffer::list::const_iterator& indata,
                             size_t connection_secret_required_length,
                             ceph::buffer::list *result,
-                            uint64_t *global_id,
                             AuthCapsInfo *caps,
                             CryptoKey *session_key,
                             std::string *connection_secret) = 0;
 
-  EntityName& get_entity_name() { return entity_name; }
+  const EntityName& get_entity_name() { return entity_name; }
+  uint64_t get_global_id() { return global_id; }
+  global_id_status_t get_global_id_status() { return global_id_status; }
+
+private:
+  virtual int do_start_session(bool is_new_global_id,
+                              ceph::buffer::list *result,
+                              AuthCapsInfo *caps) = 0;
 };
 
 extern AuthServiceHandler *get_auth_service_handler(int type, CephContext *cct, KeyServer *ks);
index 6b225e4aca30d5d15531bb5afdf64fd7c04c2d0a..0b4d7c8627d50fd86ebf0d79b6a2b597745341b9 100644 (file)
@@ -48,6 +48,10 @@ public:
     reset();
   }
 
+  CephxClientHandler* clone() const override {
+    return new CephxClientHandler(*this);
+  }
+
   void reset() override;
   void prepare_build_request() override;
   int build_request(bufferlist& bl) const override;
index d6ba3fea15a1ca19f87db664efa0b0cbbf3b51ff..a59bac4a14eb4f45a859957eb360ef777641a89e 100644 (file)
@@ -22,7 +22,8 @@
 #define dout_prefix *_dout << "cephx keyserverdata: "
 
 bool KeyServerData::get_service_secret(CephContext *cct, uint32_t service_id,
-                           ExpiringCryptoKey& secret, uint64_t& secret_id) const
+                                      CryptoKey& secret, uint64_t& secret_id,
+                                      double& ttl) const
 {
   map<uint32_t, RotatingSecrets>::const_iterator iter =
        rotating_secrets.find(service_id);
@@ -39,25 +40,25 @@ bool KeyServerData::get_service_secret(CephContext *cct, uint32_t service_id,
   if (secrets.secrets.size() > 1)
     ++riter;
 
-  if (riter->second.expiration < ceph_clock_now())
+  utime_t now = ceph_clock_now();
+  if (riter->second.expiration < now)
     ++riter;   // "current" key has expired, use "next" key instead
 
   secret_id = riter->first;
-  secret = riter->second;
-  ldout(cct, 30) << "get_service_secret service " << ceph_entity_type_name(service_id)
-          << " id " << secret_id << " " << secret << dendl;
-  return true;
-}
-
-bool KeyServerData::get_service_secret(CephContext *cct, uint32_t service_id,
-                               CryptoKey& secret, uint64_t& secret_id) const
-{
-  ExpiringCryptoKey e;
-
-  if (!get_service_secret(cct, service_id, e, secret_id))
-    return false;
+  secret = riter->second.key;
 
-  secret = e.key;
+  // ttl may have just been increased by the user
+  // cap it by expiration of "next" key to prevent handing out a ticket
+  // with a bogus, possibly way into the future, validity
+  ttl = service_id == CEPH_ENTITY_TYPE_AUTH ?
+      cct->_conf->auth_mon_ticket_ttl : cct->_conf->auth_service_ticket_ttl;
+  ttl = min(ttl, static_cast<double>(
+                    secrets.secrets.rbegin()->second.expiration - now));
+
+  ldout(cct, 30) << __func__ << " service "
+                << ceph_entity_type_name(service_id) << " secret_id "
+                << secret_id << " " << riter->second << " ttl " << ttl
+                << dendl;
   return true;
 }
 
@@ -233,12 +234,12 @@ bool KeyServer::get_caps(const EntityName& name, const string& type,
   return data.get_caps(cct, name, type, caps_info);
 }
 
-bool KeyServer::get_service_secret(uint32_t service_id,
-               CryptoKey& secret, uint64_t& secret_id) const
+bool KeyServer::get_service_secret(uint32_t service_id, CryptoKey& secret,
+                                  uint64_t& secret_id, double& ttl) const
 {
   std::scoped_lock l{lock};
 
-  return data.get_service_secret(cct, service_id, secret, secret_id);
+  return data.get_service_secret(cct, service_id, secret, secret_id, ttl);
 }
 
 bool KeyServer::get_service_secret(uint32_t service_id,
@@ -412,12 +413,13 @@ bool KeyServer::get_service_caps(const EntityName& name, uint32_t service_id,
 
 int KeyServer::_build_session_auth_info(uint32_t service_id,
                                        const AuthTicket& parent_ticket,
-                                       CephXSessionAuthInfo& info)
+                                       CephXSessionAuthInfo& info,
+                                       double ttl)
 {
   info.service_id = service_id;
   info.ticket = parent_ticket;
-  info.ticket.init_timestamps(ceph_clock_now(),
-                             cct->_conf->auth_service_ticket_ttl);
+  info.ticket.init_timestamps(ceph_clock_now(), ttl);
+  info.validity.set_from_double(ttl);
 
   generate_secret(info.session_key);
 
@@ -435,25 +437,27 @@ int KeyServer::build_session_auth_info(uint32_t service_id,
                                       const AuthTicket& parent_ticket,
                                       CephXSessionAuthInfo& info)
 {
-  if (!get_service_secret(service_id, info.service_secret, info.secret_id)) {
+  double ttl;
+  if (!get_service_secret(service_id, info.service_secret, info.secret_id,
+                         ttl)) {
     return -EACCES;
   }
 
   std::scoped_lock l{lock};
-
-  return _build_session_auth_info(service_id, parent_ticket, info);
+  return _build_session_auth_info(service_id, parent_ticket, info, ttl);
 }
 
 int KeyServer::build_session_auth_info(uint32_t service_id,
                                       const AuthTicket& parent_ticket,
-                                      CephXSessionAuthInfo& info,
-                                      CryptoKey& service_secret,
-                                      uint64_t secret_id)
+                                      const CryptoKey& service_secret,
+                                      uint64_t secret_id,
+                                      CephXSessionAuthInfo& info)
 {
   info.service_secret = service_secret;
   info.secret_id = secret_id;
 
   std::scoped_lock l{lock};
-  return _build_session_auth_info(service_id, parent_ticket, info);
+  return _build_session_auth_info(service_id, parent_ticket, info,
+                                 cct->_conf->auth_service_ticket_ttl);
 }
 
index 59cd6932567e6be4cf3826c9e44e79b299a5794d..2449a6a1ad9b476be2549775684be53c4aaa85f8 100644 (file)
@@ -95,9 +95,8 @@ struct KeyServerData {
   }
 
   bool get_service_secret(CephContext *cct, uint32_t service_id,
-                         ExpiringCryptoKey& secret, uint64_t& secret_id) const;
-  bool get_service_secret(CephContext *cct, uint32_t service_id,
-                         CryptoKey& secret, uint64_t& secret_id) const;
+                         CryptoKey& secret, uint64_t& secret_id,
+                         double& ttl) const;
   bool get_service_secret(CephContext *cct, uint32_t service_id,
                          uint64_t secret_id, CryptoKey& secret) const;
   bool get_auth(const EntityName& name, EntityAuth& auth) const;
@@ -202,7 +201,8 @@ class KeyServer : public KeyStore {
   void _dump_rotating_secrets();
   int _build_session_auth_info(uint32_t service_id, 
                               const AuthTicket& parent_ticket,
-                              CephXSessionAuthInfo& info);
+                              CephXSessionAuthInfo& info,
+                              double ttl);
   bool _get_service_caps(const EntityName& name, uint32_t service_id,
        AuthCapsInfo& caps) const;
 public:
@@ -221,13 +221,13 @@ public:
                              CephXSessionAuthInfo& info);
   int build_session_auth_info(uint32_t service_id,
                              const AuthTicket& parent_ticket,
-                             CephXSessionAuthInfo& info,
-                             CryptoKey& service_secret,
-                             uint64_t secret_id);
+                             const CryptoKey& service_secret,
+                             uint64_t secret_id,
+                             CephXSessionAuthInfo& info);
 
   /* get current secret for specific service type */
-  bool get_service_secret(uint32_t service_id, CryptoKey& service_key, 
-                         uint64_t& secret_id) const;
+  bool get_service_secret(uint32_t service_id, CryptoKey& secret,
+                         uint64_t& secret_id, double& ttl) const;
   bool get_service_secret(uint32_t service_id, uint64_t secret_id,
                          CryptoKey& secret) const override;
 
index ef95b66aa1c2e19285823178906ee8eba64dfa33..0a0a5b7f492e088ce80b43fc6ffcab39a734239b 100644 (file)
@@ -369,8 +369,10 @@ void CephXTicketManager::validate_tickets(uint32_t mask, uint32_t& have, uint32_
                 << " need " << need << dendl;
 }
 
-bool cephx_decode_ticket(CephContext *cct, KeyStore *keys, uint32_t service_id,
-             CephXTicketBlob& ticket_blob, CephXServiceTicketInfo& ticket_info)
+bool cephx_decode_ticket(CephContext *cct, KeyStore *keys,
+                        uint32_t service_id,
+                        const CephXTicketBlob& ticket_blob,
+                        CephXServiceTicketInfo& ticket_info)
 {
   uint64_t secret_id = ticket_blob.secret_id;
   CryptoKey service_secret;
index 6c2d53a0951c59668dd45150cb97ad11ef39a007..f9a8e9c1a7bb96c91c0008ee15e28f37b81cbff7 100644 (file)
@@ -123,9 +123,11 @@ struct CephXAuthenticate {
   CephXTicketBlob old_ticket;
   uint32_t other_keys = 0;  // replaces CephXServiceTicketRequest
 
+  bool old_ticket_may_be_omitted;
+
   void encode(bufferlist& bl) const {
     using ceph::encode;
-    __u8 struct_v = 2;
+    __u8 struct_v = 3;
     encode(struct_v, bl);
     encode(client_challenge, bl);
     encode(key, bl);
@@ -142,6 +144,13 @@ struct CephXAuthenticate {
     if (struct_v >= 2) {
       decode(other_keys, bl);
     }
+
+    // v2 and v3 encodings are the same, but:
+    // - some clients that send v1 or v2 don't populate old_ticket
+    //   on reconnects (but do on renewals)
+    // - any client that sends v3 or later is expected to populate
+    //   old_ticket both on reconnects and renewals
+    old_ticket_may_be_omitted = struct_v < 3;
   }
 };
 WRITE_CLASS_ENCODER(CephXAuthenticate)
@@ -415,7 +424,8 @@ WRITE_CLASS_ENCODER(CephXAuthorize)
  * Decode an extract ticket
  */
 bool cephx_decode_ticket(CephContext *cct, KeyStore *keys,
-                        uint32_t service_id, CephXTicketBlob& ticket_blob,
+                        uint32_t service_id,
+                        const CephXTicketBlob& ticket_blob,
                         CephXServiceTicketInfo& ticket_info);
 
 /*
index 867621a861a82bfe35ec4d40bd9765ec7f9e5923..4f9fcba9f8bfd95ade293fe994939471d653a401 100644 (file)
 #undef dout_prefix
 #define dout_prefix *_dout << "cephx server " << entity_name << ": "
 
-int CephxServiceHandler::start_session(
-  const EntityName& name,
-  size_t connection_secret_required_length,
+int CephxServiceHandler::do_start_session(
+  bool is_new_global_id,
   bufferlist *result_bl,
-  AuthCapsInfo *caps,
-  CryptoKey *session_key,
-  std::string *connection_secret)
+  AuthCapsInfo *caps)
 {
-  entity_name = name;
+  global_id_status = is_new_global_id ? global_id_status_t::NEW_PENDING :
+                                       global_id_status_t::RECLAIM_PENDING;
 
   uint64_t min = 1; // always non-zero
   uint64_t max = std::numeric_limits<uint64_t>::max();
@@ -49,11 +47,90 @@ int CephxServiceHandler::start_session(
   return 0;
 }
 
+int CephxServiceHandler::verify_old_ticket(
+  const CephXAuthenticate& req,
+  CephXServiceTicketInfo& old_ticket_info,
+  bool& should_enc_ticket)
+{
+  ldout(cct, 20) << " checking old_ticket: secret_id="
+                << req.old_ticket.secret_id
+                << " len=" << req.old_ticket.blob.length()
+                << ", old_ticket_may_be_omitted="
+                << req.old_ticket_may_be_omitted << dendl;
+  ceph_assert(global_id_status != global_id_status_t::NONE);
+  if (global_id_status == global_id_status_t::NEW_PENDING) {
+    // old ticket is not needed
+    if (req.old_ticket.blob.length()) {
+      ldout(cct, 0) << " superfluous ticket presented" << dendl;
+      return -EINVAL;
+    }
+    if (req.old_ticket_may_be_omitted) {
+      ldout(cct, 10) << " new global_id " << global_id
+                    << " (unexposed legacy client)" << dendl;
+      global_id_status = global_id_status_t::NEW_NOT_EXPOSED;
+    } else {
+      ldout(cct, 10) << " new global_id " << global_id << dendl;
+      global_id_status = global_id_status_t::NEW_OK;
+    }
+    return 0;
+  }
+
+  if (!req.old_ticket.blob.length()) {
+    // old ticket is needed but not presented
+    if (cct->_conf->auth_allow_insecure_global_id_reclaim &&
+       req.old_ticket_may_be_omitted) {
+      ldout(cct, 10) << " allowing reclaim of global_id " << global_id
+                    << " with no ticket presented (legacy client, auth_allow_insecure_global_id_reclaim=true)"
+                    << dendl;
+      global_id_status = global_id_status_t::RECLAIM_INSECURE;
+      return 0;
+    }
+    ldout(cct, 0) << " attempt to reclaim global_id " << global_id
+                 << " without presenting ticket" << dendl;
+    return -EACCES;
+  }
+
+  if (!cephx_decode_ticket(cct, key_server, CEPH_ENTITY_TYPE_AUTH,
+                          req.old_ticket, old_ticket_info)) {
+    if (cct->_conf->auth_allow_insecure_global_id_reclaim &&
+       req.old_ticket_may_be_omitted) {
+      ldout(cct, 10) << " allowing reclaim of global_id " << global_id
+                    << " using bad ticket (legacy client, auth_allow_insecure_global_id_reclaim=true)"
+                    << dendl;
+      global_id_status = global_id_status_t::RECLAIM_INSECURE;
+      return 0;
+    }
+    ldout(cct, 0) << " attempt to reclaim global_id " << global_id
+                 << " using bad ticket" << dendl;
+    return -EACCES;
+  }
+  ldout(cct, 20) << " decoded old_ticket: global_id="
+                << old_ticket_info.ticket.global_id << dendl;
+  if (global_id != old_ticket_info.ticket.global_id) {
+    if (cct->_conf->auth_allow_insecure_global_id_reclaim &&
+       req.old_ticket_may_be_omitted) {
+      ldout(cct, 10) << " allowing reclaim of global_id " << global_id
+                    << " using mismatching ticket (legacy client, auth_allow_insecure_global_id_reclaim=true)"
+                    << dendl;
+      global_id_status = global_id_status_t::RECLAIM_INSECURE;
+      return 0;
+    }
+    ldout(cct, 0) << " attempt to reclaim global_id " << global_id
+                 << " using mismatching ticket" << dendl;
+    return -EACCES;
+  }
+  ldout(cct, 10) << " allowing reclaim of global_id " << global_id
+                << " (valid ticket presented, will encrypt new ticket)"
+                << dendl;
+  global_id_status = global_id_status_t::RECLAIM_OK;
+  should_enc_ticket = true;
+  return 0;
+}
+
 int CephxServiceHandler::handle_request(
   bufferlist::const_iterator& indata,
   size_t connection_secret_required_len,
   bufferlist *result_bl,
-  uint64_t *global_id,
   AuthCapsInfo *caps,
   CryptoKey *psession_key,
   std::string *pconnection_secret)
@@ -125,23 +202,28 @@ int CephxServiceHandler::handle_request(
        ret = -EACCES;
        break;
       }
+
       CephXServiceTicketInfo old_ticket_info;
+      ret = verify_old_ticket(req, old_ticket_info, should_enc_ticket);
+      if (ret) {
+       ldout(cct, 0) << " could not verify old ticket" << dendl;
+       break;
+      }
 
-      if (cephx_decode_ticket(cct, key_server, CEPH_ENTITY_TYPE_AUTH,
-                             req.old_ticket, old_ticket_info)) {
-        *global_id = old_ticket_info.ticket.global_id;
-        ldout(cct, 10) << "decoded old_ticket with global_id=" << *global_id
-                      << dendl;
-        should_enc_ticket = true;
+      double ttl;
+      if (!key_server->get_service_secret(CEPH_ENTITY_TYPE_AUTH,
+                                         info.service_secret, info.secret_id,
+                                         ttl)) {
+        ldout(cct, 0) << " could not get service secret for auth subsystem" << dendl;
+        ret = -EIO;
+        break;
       }
 
-      ldout(cct,10) << __func__ << " auth ticket global_id " << *global_id
-                   << dendl;
-      info.ticket.init_timestamps(ceph_clock_now(),
-                                 cct->_conf->auth_mon_ticket_ttl);
+      info.service_id = CEPH_ENTITY_TYPE_AUTH;
       info.ticket.name = entity_name;
-      info.ticket.global_id = *global_id;
-      info.validity += cct->_conf->auth_mon_ticket_ttl;
+      info.ticket.global_id = global_id;
+      info.ticket.init_timestamps(ceph_clock_now(), ttl);
+      info.validity.set_from_double(ttl);
 
       key_server->generate_secret(session_key);
 
@@ -149,12 +231,6 @@ int CephxServiceHandler::handle_request(
       if (psession_key) {
        *psession_key = session_key;
       }
-      info.service_id = CEPH_ENTITY_TYPE_AUTH;
-      if (!key_server->get_service_secret(CEPH_ENTITY_TYPE_AUTH, info.service_secret, info.secret_id)) {
-        ldout(cct, 0) << " could not get service secret for auth subsystem" << dendl;
-        ret = -EIO;
-        break;
-      }
 
       vector<CephXSessionAuthInfo> info_vec;
       info_vec.push_back(info);
@@ -200,11 +276,14 @@ int CephxServiceHandler::handle_request(
            }
          }
          encode(cbl, *result_bl);
-         // provite all of the other tickets at the same time
+         // provide requested service tickets at the same time
          vector<CephXSessionAuthInfo> info_vec;
          for (uint32_t service_id = 1; service_id <= req.other_keys;
               service_id <<= 1) {
-           if (req.other_keys & service_id) {
+           // skip CEPH_ENTITY_TYPE_AUTH: auth ticket is already encoded
+           // (possibly encrypted with the old session key)
+           if ((req.other_keys & service_id) &&
+               service_id != CEPH_ENTITY_TYPE_AUTH) {
              ldout(cct, 10) << " adding key for service "
                             << ceph_entity_type_name(service_id) << dendl;
              CephXSessionAuthInfo svc_info;
@@ -212,7 +291,6 @@ int CephxServiceHandler::handle_request(
                service_id,
                info.ticket,
                svc_info);
-             svc_info.validity += cct->_conf->auth_service_ticket_ttl;
              info_vec.push_back(svc_info);
            }
          }
@@ -264,7 +342,10 @@ int CephxServiceHandler::handle_request(
       int service_err = 0;
       for (uint32_t service_id = 1; service_id <= ticket_req.keys;
           service_id <<= 1) {
-        if (ticket_req.keys & service_id) {
+        // skip CEPH_ENTITY_TYPE_AUTH: auth ticket must be obtained with
+        // CEPHX_GET_AUTH_SESSION_KEY
+        if ((ticket_req.keys & service_id) &&
+            service_id != CEPH_ENTITY_TYPE_AUTH) {
          ldout(cct, 10) << " adding key for service "
                         << ceph_entity_type_name(service_id) << dendl;
           CephXSessionAuthInfo info;
@@ -279,7 +360,6 @@ int CephxServiceHandler::handle_request(
            service_err = r;
            continue;
          }
-          info.validity += cct->_conf->auth_service_ticket_ttl;
           info_vec.push_back(info);
          ++found_services;
         }
index cb598fc870a97c70a7cc8c05b718cca8bb4b967a..28d24f1ecad375a8895cf028552e2e3d8df2d234 100644 (file)
@@ -19,6 +19,8 @@
 #include "auth/Auth.h"
 
 class KeyServer;
+struct CephXAuthenticate;
+struct CephXServiceTicketInfo;
 
 class CephxServiceHandler  : public AuthServiceHandler {
   KeyServer *key_server;
@@ -29,22 +31,24 @@ public:
     : AuthServiceHandler(cct_), key_server(ks), server_challenge(0) {}
   ~CephxServiceHandler() override {}
   
-  int start_session(const EntityName& name,
-                   size_t connection_secret_required_length,
-                   bufferlist *result_bl,
-                   AuthCapsInfo *caps,
-                   CryptoKey *session_key,
-                   std::string *connection_secret) override;
   int handle_request(
     bufferlist::const_iterator& indata,
     size_t connection_secret_required_length,
     bufferlist *result_bl,
-    uint64_t *global_id,
     AuthCapsInfo *caps,
     CryptoKey *session_key,
     std::string *connection_secret) override;
 
-  void build_cephx_response_header(int request_type, int status, bufferlist& bl);
+private:
+  int do_start_session(bool is_new_global_id,
+                      bufferlist *result_bl,
+                      AuthCapsInfo *caps) override;
+
+  int verify_old_ticket(const CephXAuthenticate& req,
+                       CephXServiceTicketInfo& old_ticket_info,
+                       bool& should_enc_ticket);
+  void build_cephx_response_header(int request_type, int status,
+                                  bufferlist& bl);
 };
 
 #endif
index 9ab26a6905d8e0e6c4d20b074d187af63196c4e5..58e5311167a48511356c665d19f573147d9e5ac4 100644 (file)
@@ -39,7 +39,11 @@ class KrbClientHandler : public AuthClientHandler {
       reset();
     }
     ~KrbClientHandler() override;
-    
+
+    KrbClientHandler* clone() const override {
+      return new KrbClientHandler(*this);
+    }
+
     int get_protocol() const override { return CEPH_AUTH_GSS; }
     void reset() override {
       m_gss_client_name = GSS_C_NO_NAME; 
index 3bd679d0ab5f489caea3fdbb60f59915e5f3880f..c2ca3bbf28ccd04daebc4348fc00d5164c25a43f 100644 (file)
@@ -30,7 +30,6 @@ int KrbServiceHandler::handle_request(
   bufferlist::const_iterator& indata,
   size_t connection_secret_required_length,
   bufferlist *buff_list,
-  uint64_t *global_id,
   AuthCapsInfo *caps,
   CryptoKey *session_key,
   std::string *connection_secret)
@@ -152,13 +151,10 @@ int KrbServiceHandler::handle_request(
   return result;
 }
 
-int KrbServiceHandler::start_session(
-  const EntityName& name,
-  size_t connection_secret_required_length,
+int KrbServiceHandler::do_start_session(
+  bool is_new_global_id,
   bufferlist *buff_list,
-  AuthCapsInfo *caps,
-  CryptoKey *session_key,
-  std::string *connection_secret)
+  AuthCapsInfo *caps)
 {
   gss_buffer_desc gss_buffer_in = {0, nullptr};
   gss_OID gss_object_id = GSS_C_NT_HOSTBASED_SERVICE;
@@ -170,7 +166,6 @@ int KrbServiceHandler::start_session(
 
   gss_buffer_in.length = gss_service_name.length();
   gss_buffer_in.value  = (const_cast<char*>(gss_service_name.c_str()));
-  entity_name = name;
 
   gss_major_status = gss_import_name(&gss_minor_status, 
                                      &gss_buffer_in, 
index 672efc54638bd5096266a6f39dc512af7ee4ab88..ee91baa5532f49e5bdaefefca6957564fa83a947 100644 (file)
@@ -40,19 +40,15 @@ class KrbServiceHandler : public AuthServiceHandler {
     int handle_request(bufferlist::const_iterator& indata,
                       size_t connection_secret_required_length,
                       bufferlist *buff_list,
-                       uint64_t *global_id,
                        AuthCapsInfo *caps,
                       CryptoKey *session_key,
                       std::string *connection_secret) override;
 
-    int start_session(const EntityName& name,
-                     size_t connection_secret_required_length,
-                     bufferlist *buff_list,
-                      AuthCapsInfo *caps,
-                     CryptoKey *session_key,
-                     std::string *connection_secret) override;
-
   private:
+    int do_start_session(bool is_new_global_id,
+                        bufferlist *buff_list,
+                        AuthCapsInfo *caps) override;
+
     gss_buffer_desc m_gss_buffer_out;
     gss_cred_id_t m_gss_credentials; 
     gss_ctx_id_t m_gss_sec_ctx; 
index eb3ef8f552171460d7b54358e8ffa8d332ea765f..4cc4c8a0c933db68c21be3b4a9a395d03046b41a 100644 (file)
@@ -26,6 +26,10 @@ public:
   AuthNoneClientHandler(CephContext *cct_)
     : AuthClientHandler(cct_) {}
 
+  AuthNoneClientHandler* clone() const override {
+    return new AuthNoneClientHandler(*this);
+  }
+
   void reset() override { }
 
   void prepare_build_request() override {}
index d8cc13ae5983ea2dc738104a5112ddc09e2c617d..9a8a38d2260be56a19b7fe5129a64e7996ca9ea0 100644 (file)
@@ -25,27 +25,21 @@ public:
     : AuthServiceHandler(cct_) {}
   ~AuthNoneServiceHandler() override {}
   
-  int start_session(const EntityName& name,
-                   size_t connection_secret_required_length,
-                   bufferlist *result_bl,
-                   AuthCapsInfo *caps,
-                   CryptoKey *session_key,
-                   std::string *connection_secret) override {
-    entity_name = name;
-    caps->allow_all = true;
-    return 1;
-  }
   int handle_request(bufferlist::const_iterator& indata,
                     size_t connection_secret_required_length,
                     bufferlist *result_bl,
-                    uint64_t *global_id,
                     AuthCapsInfo *caps,
                     CryptoKey *session_key,
                     std::string *connection_secret) override {
     return 0;
   }
-  void build_cephx_response_header(int request_type, int status,
-                                  bufferlist& bl) {
+
+private:
+  int do_start_session(bool is_new_global_id,
+                      bufferlist *result_bl,
+                      AuthCapsInfo *caps) override {
+    caps->allow_all = true;
+    return 1;
   }
 };
 
index 1b4163b82e2a16df23a2cdabd658e918c6d77c97..638cd1fd862e633196a21a8c8bf3323b9b6e7e84 100755 (executable)
@@ -2897,8 +2897,15 @@ def command_bootstrap():
     if not cp.has_section('global'):
         cp.add_section('global')
     cp.set('global', 'fsid', fsid);
-    cp.set('global', 'mon host', addr_arg)
+    cp.set('global', 'mon_host', addr_arg)
     cp.set('global', 'container_image', args.image)
+    if not cp.has_section('mon'):
+        cp.add_section('mon')
+    if (
+            not cp.has_option('mon', 'auth_allow_insecure_global_id_reclaim')
+            and not cp.has_option('mon', 'auth allow insecure global id reclaim')
+    ):
+        cp.set('mon', 'auth_allow_insecure_global_id_reclaim', 'false')
     cpf = StringIO()
     cp.write(cpf)
     config = cpf.getvalue()
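
The bootstrap change above means new clusters deployed with ``cephadm bootstrap`` start out with insecure global_id reclaim disabled on the monitors, unless the operator has already set that option in the provided config. As a rough, illustrative sketch (the fsid, address, and image below are placeholders, and the exact ``mon_host`` formatting depends on the address passed to bootstrap), the generated minimal conf would look something like:

  [global]
  fsid = 00000000-0000-0000-0000-000000000000
  mon_host = 10.0.0.1
  container_image = docker.io/ceph/ceph:v15

  [mon]
  auth_allow_insecure_global_id_reclaim = false
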
index 6aa45b7e4cb118e4de7efd8f03ed16fca90ca618..2fd7451ba0ca0c710d25ff2c938a390da40b1eab 100644 (file)
@@ -330,6 +330,8 @@ OPTION(cephx_service_require_version, OPT_INT)
 OPTION(cephx_sign_messages, OPT_BOOL)  // Default to signing session messages if supported
 OPTION(auth_mon_ticket_ttl, OPT_DOUBLE)
 OPTION(auth_service_ticket_ttl, OPT_DOUBLE)
+OPTION(auth_allow_insecure_global_id_reclaim, OPT_BOOL)
+OPTION(auth_expose_insecure_global_id_reclaim, OPT_BOOL)
 OPTION(auth_debug, OPT_BOOL)          // if true, assert when weird things happen
 OPTION(mon_client_hunt_parallel, OPT_U32)   // how many mons to try to connect to in parallel during hunt
 OPTION(mon_client_hunt_interval, OPT_DOUBLE)   // try new mon every N seconds until we connect
index f74aa788a73b0a2b9b36995b116671f183344a18..4ce2689c234e4a04cdfbfc6c517b39afbdbab5dd 100644 (file)
@@ -1699,6 +1699,22 @@ std::vector<Option> get_global_options() {
     .add_service("mon")
     .set_description("time before OSDs who do not report to the mons are marked down (seconds)"),
 
+    Option("mon_warn_on_insecure_global_id_reclaim", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+    .set_default(true)
+    .add_service("mon")
+    .set_description("issue AUTH_INSECURE_GLOBAL_ID_RECLAIM health warning if any connected clients are insecurely reclaiming global_id")
+    .add_see_also("mon_warn_on_insecure_global_id_reclaim_allowed")
+    .add_see_also("auth_allow_insecure_global_id_reclaim")
+    .add_see_also("auth_expose_insecure_global_id_reclaim"),
+
+    Option("mon_warn_on_insecure_global_id_reclaim_allowed", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+    .set_default(true)
+    .add_service("mon")
+    .set_description("issue AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED health warning if insecure global_id reclaim is allowed")
+    .add_see_also("mon_warn_on_insecure_global_id_reclaim")
+    .add_see_also("auth_allow_insecure_global_id_reclaim")
+    .add_see_also("auth_expose_insecure_global_id_reclaim"),
+
     Option("mon_warn_on_msgr2_not_enabled", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
     .set_default(true)
     .add_service("mon")
@@ -2257,13 +2273,29 @@ std::vector<Option> get_global_options() {
     .set_description(""),
 
     Option("auth_mon_ticket_ttl", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
-    .set_default(12_hr)
+    .set_default(72_hr)
     .set_description(""),
 
     Option("auth_service_ticket_ttl", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
     .set_default(1_hr)
     .set_description(""),
 
+    Option("auth_allow_insecure_global_id_reclaim", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+    .set_default(true)
+    .set_description("Allow reclaiming global_id without presenting a valid ticket proving previous possession of that global_id")
+    .set_long_description("Allowing unauthorized global_id (re)use poses a security risk.  Unfortunately, older clients may omit their ticket on reconnects and therefore rely on this being allowed for preserving their global_id for the lifetime of the client instance.  Setting this value to false would immediately prevent new connections from those clients (assuming auth_expose_insecure_global_id_reclaim set to true) and eventually break existing sessions as well (regardless of auth_expose_insecure_global_id_reclaim setting).")
+    .add_see_also("mon_warn_on_insecure_global_id_reclaim")
+    .add_see_also("mon_warn_on_insecure_global_id_reclaim_allowed")
+    .add_see_also("auth_expose_insecure_global_id_reclaim"),
+
+    Option("auth_expose_insecure_global_id_reclaim", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+    .set_default(true)
+    .set_description("Force older clients that may omit their ticket on reconnects to reconnect as part of establishing a session")
+    .set_long_description("In permissive mode (auth_allow_insecure_global_id_reclaim set to true), this helps with identifying clients that are not patched.  In enforcing mode (auth_allow_insecure_global_id_reclaim set to false), this is a fail-fast mechanism: don't establish a session that will almost inevitably be broken later.")
+    .add_see_also("mon_warn_on_insecure_global_id_reclaim")
+    .add_see_also("mon_warn_on_insecure_global_id_reclaim_allowed")
+    .add_see_also("auth_allow_insecure_global_id_reclaim"),
+
     Option("auth_debug", Option::TYPE_BOOL, Option::LEVEL_DEV)
     .set_default(false)
     .set_description(""),
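
The new and changed options above can be inspected on a running cluster before deciding whether to tighten them; a short, illustrative check using the ``ceph config`` CLI:

  # current effective values on the monitors
  ceph config get mon auth_allow_insecure_global_id_reclaim
  ceph config get mon auth_expose_insecure_global_id_reclaim
  ceph config get mon mon_warn_on_insecure_global_id_reclaim
  ceph config get mon mon_warn_on_insecure_global_id_reclaim_allowed

Note that the same hunk also raises the default ``auth_mon_ticket_ttl`` from 12 to 72 hours.
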
index a2ed644cdb23bbf05dfa45f3e8e83f6bf6ced8b4..f189918bec1cc048503d33b3e855e9ca15909c75 100644 (file)
@@ -588,6 +588,7 @@ bool AuthMonitor::prep_auth(MonOpRequestRef op, bool paxos_writable)
   bool start = false;
   bool finished = false;
   EntityName entity_name;
+  bool is_new_global_id = false;
 
   // set up handler?
   if (m->protocol == 0 && !s->auth_handler) {
@@ -707,23 +708,23 @@ bool AuthMonitor::prep_auth(MonOpRequestRef op, bool paxos_writable)
       ceph_assert(!paxos_writable);
       return false;
     }
+    is_new_global_id = true;
   }
 
   try {
     if (start) {
       // new session
       ret = s->auth_handler->start_session(entity_name,
-                                          0, // no connection_secret needed
+                                          s->con->peer_global_id,
+                                          is_new_global_id,
                                           &response_bl,
-                                          &s->con->peer_caps_info,
-                                          nullptr, nullptr);
+                                          &s->con->peer_caps_info);
     } else {
       // request
       ret = s->auth_handler->handle_request(
        indata,
        0, // no connection_secret needed
        &response_bl,
-       &s->con->peer_global_id,
        &s->con->peer_caps_info,
        nullptr, nullptr);
     }
index 26ffad6e32e656105e9e657adebf4bfe26ddca3a..8ad6d798b4afe7b81165d41d0e7ade39c8e1e450 100644 (file)
@@ -540,6 +540,7 @@ bool HealthMonitor::check_member_health()
 {
   dout(20) << __func__ << dendl;
   bool changed = false;
+  const auto max = g_conf().get_val<uint64_t>("mon_health_max_detail");
 
   // snapshot of usage
   DataStats stats;
@@ -609,6 +610,44 @@ bool HealthMonitor::check_member_health()
     }
   }
 
+  // AUTH_INSECURE_GLOBAL_ID_RECLAIM
+  if (g_conf().get_val<bool>("mon_warn_on_insecure_global_id_reclaim") &&
+      g_conf().get_val<bool>("auth_allow_insecure_global_id_reclaim")) {
+    // Warn if there are any clients that are insecurely renewing their global_id
+    std::lock_guard l(mon->session_map_lock);
+    list<std::string> detail;
+    for (auto p = mon->session_map.sessions.begin();
+        p != mon->session_map.sessions.end();
+        ++p) {
+      if ((*p)->global_id_status == global_id_status_t::RECLAIM_INSECURE) {
+       ostringstream ds;
+       ds << (*p)->entity_name << " at " << (*p)->addrs
+          << " is using insecure global_id reclaim";
+       detail.push_back(ds.str());
+       if (detail.size() >= max) {
+         detail.push_back("...");
+         break;
+       }
+      }
+    }
+    if (!detail.empty()) {
+      ostringstream ss;
+      ss << "client%plurals% %isorare% using insecure global_id reclaim";
+      auto& d = next.add("AUTH_INSECURE_GLOBAL_ID_RECLAIM", HEALTH_WARN, ss.str(),
+                        detail.size());
+      d.detail.swap(detail);
+    }
+  }
+  // AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED
+  if (g_conf().get_val<bool>("mon_warn_on_insecure_global_id_reclaim_allowed") &&
+      g_conf().get_val<bool>("auth_allow_insecure_global_id_reclaim")) {
+    ostringstream ss, ds;
+    ss << "mon%plurals% %isorare% allowing insecure global_id reclaim";
+    auto& d = next.add("AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED", HEALTH_WARN, ss.str(), 1);
+    ds << "mon." << mon->name << " has auth_allow_insecure_global_id_reclaim set to true";
+    d.detail.push_back(ds.str());
+  }
+
   auto p = quorum_checks.find(mon->rank);
   if (p == quorum_checks.end()) {
     if (next.empty()) {
index 25c58b0b1cefcc92f9c11d723a0ba4a8761ea418..207ff8ff8fc87157d5e93ce70939a659cb0f6180 100644 (file)
@@ -162,8 +162,11 @@ int MonClient::get_monmap_and_config()
       }
       while ((!got_config || monmap.get_epoch() == 0) && r == 0) {
        ldout(cct,20) << __func__ << " waiting for monmap|config" << dendl;
-       map_cond.wait_for(l, ceph::make_timespan(
-          cct->_conf->mon_client_hunt_interval));
+       auto status = map_cond.wait_for(l, ceph::make_timespan(
+           cct->_conf->mon_client_hunt_interval));
+       if (status == std::cv_status::timeout) {
+         r = -ETIMEDOUT;
+       }
       }
       if (got_config) {
        ldout(cct,10) << __func__ << " success" << dendl;
@@ -508,7 +511,11 @@ void MonClient::shutdown()
 
   active_con.reset();
   pending_cons.clear();
+
   auth.reset();
+  global_id = 0;
+  authenticate_err = 0;
+  authenticated = false;
 
   monc_lock.unlock();
 
@@ -688,9 +695,9 @@ void MonClient::_reopen_session(int rank)
   _start_hunting();
 
   if (rank >= 0) {
-    _add_conn(rank, global_id);
+    _add_conn(rank);
   } else {
-    _add_conns(global_id);
+    _add_conns();
   }
 
   // throw out old queued messages
@@ -712,11 +719,14 @@ void MonClient::_reopen_session(int rank)
   }
 }
 
-MonConnection& MonClient::_add_conn(unsigned rank, uint64_t global_id)
+MonConnection& MonClient::_add_conn(unsigned rank)
 {
   auto peer = monmap.get_addrs(rank);
   auto conn = messenger->connect_to_mon(peer);
   MonConnection mc(cct, conn, global_id, &auth_registry);
+  if (auth) {
+    mc.get_auth().reset(auth->clone());
+  }
   auto inserted = pending_cons.insert(std::make_pair(peer, std::move(mc)));
   ldout(cct, 10) << "picked mon." << monmap.get_name(rank)
                  << " con " << conn
@@ -725,7 +735,7 @@ MonConnection& MonClient::_add_conn(unsigned rank, uint64_t global_id)
   return inserted.first->second;
 }
 
-void MonClient::_add_conns(uint64_t global_id)
+void MonClient::_add_conns()
 {
   // collect the next batch of candidates who are listed right next to the ones
   // already tried
@@ -778,7 +788,7 @@ void MonClient::_add_conns(uint64_t global_id)
     n = ranks.size();
   }
   for (unsigned i = 0; i < n; i++) {
-    _add_conn(ranks[i], global_id);
+    _add_conn(ranks[i]);
     tried.insert(ranks[i]);
   }
 }
@@ -878,7 +888,7 @@ void MonClient::_finish_hunting(int auth_err)
     _resend_mon_commands();
     send_log(true);
     if (active_con) {
-      std::swap(auth, active_con->get_auth());
+      auth = std::move(active_con->get_auth());
       if (global_id && global_id != active_con->get_global_id()) {
        lderr(cct) << __func__ << " global_id changed from " << global_id
                   << " to " << active_con->get_global_id() << dendl;
@@ -1796,9 +1806,6 @@ int MonConnection::get_auth_request(
     return -EACCES;
   }
 
-  if (auth) {
-    auth.reset();
-  }
   int r = _init_auth(*method, entity_name, want_keys, keyring, true);
   ceph_assert(r == 0);
 
@@ -1919,12 +1926,6 @@ int MonConnection::_negotiate(MAuthReply *m,
                              uint32_t want_keys,
                              RotatingKeyRing* keyring)
 {
-  if (auth && (int)m->protocol == auth->get_protocol()) {
-    // good, negotiation completed
-    auth->reset();
-    return 0;
-  }
-
   int r = _init_auth(m->protocol, entity_name, want_keys, keyring, false);
   if (r == -ENOTSUP) {
     if (m->result == -ENOTSUP) {
@@ -1943,9 +1944,15 @@ int MonConnection::_init_auth(
   RotatingKeyRing* keyring,
   bool msgr2)
 {
-  ldout(cct,10) << __func__ << " method " << method << dendl;
-  auth.reset(
-    AuthClientHandler::create(cct, method, keyring));
+  ldout(cct, 10) << __func__ << " method " << method << dendl;
+  if (auth && auth->get_protocol() == (int)method) {
+    ldout(cct, 10) << __func__ << " already have auth, reseting" << dendl;
+    auth->reset();
+    return 0;
+  }
+
+  ldout(cct, 10) << __func__ << " creating new auth" << dendl;
+  auth.reset(AuthClientHandler::create(cct, method, keyring));
   if (!auth) {
     ldout(cct, 10) << " no handler for protocol " << method << dendl;
     return -ENOTSUP;
index b04db11f1350b2b90ed7512c8a60458450106241..cd5394f5e285ce84bd06ebabd5cf18ba9a670ee6 100644 (file)
@@ -314,9 +314,9 @@ private:
   void _finish_hunting(int auth_err);
   void _finish_auth(int auth_err);
   void _reopen_session(int rank = -1);
-  MonConnection& _add_conn(unsigned rank, uint64_t global_id);
+  MonConnection& _add_conn(unsigned rank);
+  void _add_conns();
   void _un_backoff();
-  void _add_conns(uint64_t global_id);
   void _send_mon_message(MessageRef m);
 
   std::map<entity_addrvec_t, MonConnection>::iterator _find_pending_con(
index f1e4c257db47fef5dbfe2250b37e7191210b6e09..615e1251b7f50a92858439f0a3fba318d759372d 100644 (file)
@@ -4340,9 +4340,13 @@ void Monitor::_ms_dispatch(Message *m)
 
   if (s->auth_handler) {
     s->entity_name = s->auth_handler->get_entity_name();
+    s->global_id = s->auth_handler->get_global_id();
+    s->global_id_status = s->auth_handler->get_global_id_status();
   }
-  dout(20) << " entity " << s->entity_name
-          << " caps " << s->caps.get_str() << dendl;
+  dout(20) << " entity_name " << s->entity_name
+          << " global_id " << s->global_id
+          << " (" << s->global_id_status
+          << ") caps " << s->caps.get_str() << dendl;
 
   if ((is_synchronizing() ||
        (!s->authenticated && !exited_quorum.is_zero())) &&
@@ -4393,6 +4397,34 @@ void Monitor::dispatch_op(MonOpRequestRef op)
     return;
   }
 
+  // global_id_status == NONE: all sessions for auth_none and krb,
+  // mon <-> mon sessions (including proxied sessions) for cephx
+  ceph_assert(s->global_id_status == global_id_status_t::NONE ||
+              s->global_id_status == global_id_status_t::NEW_OK ||
+              s->global_id_status == global_id_status_t::NEW_NOT_EXPOSED ||
+              s->global_id_status == global_id_status_t::RECLAIM_OK ||
+              s->global_id_status == global_id_status_t::RECLAIM_INSECURE);
+
+  // let mon_getmap through for "ping" (which doesn't reconnect)
+  // and "tell" (which reconnects but doesn't attempt to preserve
+  // its global_id and stays in NEW_NOT_EXPOSED, retrying until
+  // ->send_attempts reaches 0)
+  if (cct->_conf->auth_expose_insecure_global_id_reclaim &&
+      s->global_id_status == global_id_status_t::NEW_NOT_EXPOSED &&
+      op->get_req()->get_type() != CEPH_MSG_MON_GET_MAP) {
+    dout(5) << __func__ << " " << op->get_req()->get_source_inst()
+            << " may omit old_ticket on reconnects, discarding "
+            << *op->get_req() << " and forcing reconnect" << dendl;
+    ceph_assert(s->con && !s->proxy_con);
+    s->con->mark_down();
+    {
+      std::lock_guard l(session_map_lock);
+      remove_session(s);
+    }
+    op->mark_zap();
+    return;
+  }
+
   switch (op->get_req()->get_type()) {
     case CEPH_MSG_MON_GET_MAP:
       handle_mon_get_map(op);
@@ -6078,7 +6110,7 @@ bool Monitor::get_authorizer(int service_id, AuthAuthorizer **authorizer)
     }
 
     ret = key_server.build_session_auth_info(
-      service_id, auth_ticket_info.ticket, info, secret, (uint64_t)-1);
+      service_id, auth_ticket_info.ticket, secret, (uint64_t)-1, info);
     if (ret < 0) {
       dout(0) << __func__ << " failed to build mon session_auth_info "
              << cpp_strerror(ret) << dendl;
@@ -6252,14 +6284,14 @@ int Monitor::handle_auth_request(
     // are supported by the client if we require it.  for msgr2 that
     // is not necessary.
 
+    bool is_new_global_id = false;
     if (!con->peer_global_id) {
       con->peer_global_id = authmon()->_assign_global_id();
       if (!con->peer_global_id) {
        dout(1) << __func__ << " failed to assign global_id" << dendl;
        return -EBUSY;
       }
-      dout(10) << __func__ << "  assigned global_id " << con->peer_global_id
-              << dendl;
+      is_new_global_id = true;
     }
 
     // set up partial session
@@ -6269,11 +6301,10 @@ int Monitor::handle_auth_request(
 
     r = s->auth_handler->start_session(
       entity_name,
-      auth_meta->get_connection_secret_length(),
+      con->peer_global_id,
+      is_new_global_id,
       reply,
-      &con->peer_caps_info,
-      &auth_meta->session_key,
-      &auth_meta->connection_secret);
+      &con->peer_caps_info);
   } else {
     priv = con->get_priv();
     if (!priv) {
@@ -6286,7 +6317,6 @@ int Monitor::handle_auth_request(
       p,
       auth_meta->get_connection_secret_length(),
       reply,
-      &con->peer_global_id,
       &con->peer_caps_info,
       &auth_meta->session_key,
       &auth_meta->connection_secret);
index 140867a7ea95d9e47efaa8f3e9b090f4bb566597..bc4dfaba6ff1ec60624c2e51431b4050a21dceda 100644 (file)
@@ -64,6 +64,8 @@ struct MonSession : public RefCountedObject {
 
   AuthServiceHandler *auth_handler = nullptr;
   EntityName entity_name;
+  uint64_t global_id = 0;
+  global_id_status_t global_id_status = global_id_status_t::NONE;
 
   ConnectionRef proxy_con;
   uint64_t proxy_tid = 0;
@@ -123,6 +125,8 @@ struct MonSession : public RefCountedObject {
     f->dump_bool("open", !closed);
     f->dump_object("caps", caps);
     f->dump_bool("authenticated", authenticated);
+    f->dump_unsigned("global_id", global_id);
+    f->dump_stream("global_id_status") << global_id_status;
     f->dump_unsigned("osd_epoch", osd_epoch);
     f->dump_string("remote_host", remote_host);
   }