From: Thomas Lamprecht Date: Mon, 29 Aug 2022 07:09:58 +0000 (+0200) Subject: check in ceph 17.2.3 sources X-Git-Url: https://git.proxmox.com/?a=commitdiff_plain;ds=sidebyside;h=0948533fc3b372aaa92e1cd3da22f2258220e199;p=ceph.git check in ceph 17.2.3 sources we don't plan to build those as the changes are rather irrelevant for Proxmox VE users Signed-off-by: Thomas Lamprecht --- diff --git a/ceph/CMakeLists.txt b/ceph/CMakeLists.txt index 39bec5896..28133cc65 100644 --- a/ceph/CMakeLists.txt +++ b/ceph/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required(VERSION 3.16) project(ceph - VERSION 17.2.1 + VERSION 17.2.3 LANGUAGES CXX C ASM) cmake_policy(SET CMP0028 NEW) diff --git a/ceph/ceph.spec b/ceph/ceph.spec index 14a54db8e..4c5e78556 100644 --- a/ceph/ceph.spec +++ b/ceph/ceph.spec @@ -156,7 +156,7 @@ # main package definition ################################################################################# Name: ceph -Version: 17.2.1 +Version: 17.2.3 Release: 0%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 @@ -172,7 +172,7 @@ License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD- Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-17.2.1.tar.bz2 +Source0: %{?_remote_tarball_prefix}ceph-17.2.3.tar.bz2 %if 0%{?suse_version} # _insert_obs_source_lines_here ExclusiveArch: x86_64 aarch64 ppc64le s390x @@ -1239,7 +1239,7 @@ This package provides Ceph default alerts for Prometheus. # common ################################################################################# %prep -%autosetup -p1 -n ceph-17.2.1 +%autosetup -p1 -n ceph-17.2.3 %build # Disable lto on systems that do not support symver attribute diff --git a/ceph/changelog.upstream b/ceph/changelog.upstream index b048b8e4f..c23038092 100644 --- a/ceph/changelog.upstream +++ b/ceph/changelog.upstream @@ -1,3 +1,15 @@ +ceph (17.2.3-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Thu, 28 Jul 2022 21:52:09 +0000 + +ceph (17.2.2-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Thu, 21 Jul 2022 17:29:31 +0000 + ceph (17.2.1-1) stable; urgency=medium * New upstream release diff --git a/ceph/qa/tasks/cephfs/test_volumes.py b/ceph/qa/tasks/cephfs/test_volumes.py index 2ae4674db..9b5649171 100644 --- a/ceph/qa/tasks/cephfs/test_volumes.py +++ b/ceph/qa/tasks/cephfs/test_volumes.py @@ -6330,3 +6330,143 @@ class TestMisc(TestVolumesHelper): # verify trash dir is clean self._wait_for_trash_empty() + + def test_malicious_metafile_on_legacy_to_v1_upgrade(self): + """ + Validate handcrafted .meta file on legacy subvol root doesn't break the system + on legacy subvol upgrade to v1 + poor man's upgrade test -- theme continues... + """ + subvol1, subvol2 = self._generate_random_subvolume_name(2) + group = self._generate_random_group_name() + + # emulate a old-fashioned subvolume in the default group + createpath1 = os.path.join(".", "volumes", "_nogroup", subvol1) + self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True) + + # add required xattrs to subvolume + default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") + self.mount_a.setfattr(createpath1, 'ceph.dir.layout.pool', default_pool, sudo=True) + + # create v2 subvolume + self._fs_cmd("subvolume", "create", self.volname, subvol2) + + # Create malicious .meta file in legacy subvolume root. 
Copy v2 subvolume + # .meta into legacy subvol1's root + subvol2_metapath = os.path.join(".", "volumes", "_nogroup", subvol2, ".meta") + self.mount_a.run_shell(["cp", subvol2_metapath, createpath1], sudo=True) + + # Upgrade legacy subvol1 to v1 + subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvol1) + self.assertNotEqual(subvolpath1, None) + subvolpath1 = subvolpath1.rstrip() + + # the subvolume path returned should not be of subvol2 from handcrafted + # .meta file + self.assertEqual(createpath1[1:], subvolpath1) + + # ensure metadata file is in legacy location, with required version v1 + self._assert_meta_location_and_version(self.volname, subvol1, version=1, legacy=True) + + # Authorize alice authID read-write access to subvol1. Verify it authorizes subvol1 path and not subvol2 + # path whose '.meta' file is copied to subvol1 root + authid1 = "alice" + self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1) + + # Validate that the mds path added is of subvol1 and not of subvol2 + out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty")) + self.assertEqual("client.alice", out[0]["entity"]) + self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"]) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvol1) + self._fs_cmd("subvolume", "rm", self.volname, subvol2) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_binary_metafile_on_legacy_to_v1_upgrade(self): + """ + Validate binary .meta file on legacy subvol root doesn't break the system + on legacy subvol upgrade to v1 + poor man's upgrade test -- theme continues... + """ + subvol = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # emulate a old-fashioned subvolume -- in a custom group + createpath = os.path.join(".", "volumes", group, subvol) + self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True) + + # add required xattrs to subvolume + default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") + self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True) + + # Create unparseable binary .meta file on legacy subvol's root + meta_contents = os.urandom(4096) + meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta") + self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True) + + # Upgrade legacy subvol to v1 + subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group) + self.assertNotEqual(subvolpath, None) + subvolpath = subvolpath.rstrip() + + # The legacy subvolume path should be returned for subvol. + # Should ignore unparseable binary .meta file in subvol's root + self.assertEqual(createpath[1:], subvolpath) + + # ensure metadata file is in legacy location, with required version v1 + self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvol, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_unparseable_metafile_on_legacy_to_v1_upgrade(self): + """ + Validate unparseable text .meta file on legacy subvol root doesn't break the system + on legacy subvol upgrade to v1 + poor man's upgrade test -- theme continues... 
+ """ + subvol = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # emulate a old-fashioned subvolume -- in a custom group + createpath = os.path.join(".", "volumes", group, subvol) + self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True) + + # add required xattrs to subvolume + default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") + self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True) + + # Create unparseable text .meta file on legacy subvol's root + meta_contents = "unparseable config\nfile ...\nunparseable config\nfile ...\n" + meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta") + self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True) + + # Upgrade legacy subvol to v1 + subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group) + self.assertNotEqual(subvolpath, None) + subvolpath = subvolpath.rstrip() + + # The legacy subvolume path should be returned for subvol. + # Should ignore unparseable binary .meta file in subvol's root + self.assertEqual(createpath[1:], subvolpath) + + # ensure metadata file is in legacy location, with required version v1 + self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvol, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) diff --git a/ceph/src/.git_version b/ceph/src/.git_version index b7c2846fb..493a13236 100644 --- a/ceph/src/.git_version +++ b/ceph/src/.git_version @@ -1,2 +1,2 @@ -ec95624474b1871a821a912b8c3af68f8f8e7aa1 -17.2.1 +dff484dfc9e19a9819f375586300b3b79d80034d +17.2.3 diff --git a/ceph/src/libcephsqlite.cc b/ceph/src/libcephsqlite.cc index 9e7a06291..eafadcd89 100644 --- a/ceph/src/libcephsqlite.cc +++ b/ceph/src/libcephsqlite.cc @@ -381,8 +381,8 @@ static int FileSize(sqlite3_file *file, sqlite_int64 *osize) static bool parsepath(std::string_view path, struct cephsqlite_fileloc* fileloc) { - static const std::regex re1{"^/*(\\*[[:digit:]]+):([[:alnum:]-_.]*)/([[:alnum:]-._]+)$"}; - static const std::regex re2{"^/*([[:alnum:]-_.]+):([[:alnum:]-_.]*)/([[:alnum:]-._]+)$"}; + static const std::regex re1{"^/*(\\*[[:digit:]]+):([[:alnum:]\\-_.]*)/([[:alnum:]\\-._]+)$"}; + static const std::regex re2{"^/*([[:alnum:]\\-_.]+):([[:alnum:]\\-_.]*)/([[:alnum:]\\-._]+)$"}; std::cmatch cm; if (!std::regex_match(path.data(), cm, re1)) { diff --git a/ceph/src/pybind/mgr/volumes/fs/operations/versions/metadata_manager.py b/ceph/src/pybind/mgr/volumes/fs/operations/versions/metadata_manager.py index e6bcb718d..665eec6b8 100644 --- a/ceph/src/pybind/mgr/volumes/fs/operations/versions/metadata_manager.py +++ b/ceph/src/pybind/mgr/volumes/fs/operations/versions/metadata_manager.py @@ -45,16 +45,17 @@ class MetadataManager(object): def refresh(self): fd = None conf_data = StringIO() + log.debug("opening config {0}".format(self.config_path)) try: - log.debug("opening config {0}".format(self.config_path)) fd = self.fs.open(self.config_path, os.O_RDONLY) while True: data = self.fs.read(fd, -1, MetadataManager.MAX_IO_BYTES) if not len(data): break conf_data.write(data.decode('utf-8')) - conf_data.seek(0) - self.config.readfp(conf_data) + except UnicodeDecodeError: + raise MetadataMgrException(-errno.EINVAL, + "failed to decode, erroneous metadata config '{0}'".format(self.config_path)) 
except cephfs.ObjectNotFound: raise MetadataMgrException(-errno.ENOENT, "metadata config '{0}' not found".format(self.config_path)) except cephfs.Error as e: @@ -63,6 +64,16 @@ class MetadataManager(object): if fd is not None: self.fs.close(fd) + conf_data.seek(0) + try: + if sys.version_info >= (3, 2): + self.config.read_file(conf_data) + else: + self.config.readfp(conf_data) + except configparser.Error: + raise MetadataMgrException(-errno.EINVAL, "failed to parse, erroneous metadata config " + "'{0}'".format(self.config_path)) + def flush(self): # cull empty sections for section in list(self.config.sections()): diff --git a/ceph/src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py b/ceph/src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py index f7aa2ec81..bb4605cb1 100644 --- a/ceph/src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py +++ b/ceph/src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py @@ -5,6 +5,7 @@ import errno import logging from hashlib import md5 from typing import Dict, Union +from pathlib import Path import cephfs @@ -15,6 +16,7 @@ from ..trash import create_trashcan, open_trashcan from ...fs_util import get_ancestor_xattr from ...exception import MetadataMgrException, VolumeException from .auth_metadata import AuthMetadataManager +from .subvolume_attrs import SubvolumeStates log = logging.getLogger(__name__) @@ -115,7 +117,7 @@ class SubvolumeBase(object): @property def state(self): """ Subvolume state, one of SubvolumeStates """ - raise NotImplementedError + return SubvolumeStates.from_value(self.metadata_mgr.get_global_option(MetadataManager.GLOBAL_META_KEY_STATE)) @property def subvol_type(self): @@ -128,6 +130,15 @@ class SubvolumeBase(object): raise NotImplementedError def load_config(self): + try: + self.fs.stat(self.legacy_config_path) + self.legacy_mode = True + except cephfs.Error as e: + pass + + log.debug("loading config " + "'{0}' [mode: {1}]".format(self.subvolname, "legacy" + if self.legacy_mode else "new")) if self.legacy_mode: self.metadata_mgr = MetadataManager(self.fs, self.legacy_config_path, @@ -318,8 +329,16 @@ class SubvolumeBase(object): self.fs.stat(self.base_path) self.metadata_mgr.refresh() log.debug("loaded subvolume '{0}'".format(self.subvolname)) + subvolpath = self.metadata_mgr.get_global_option(MetadataManager.GLOBAL_META_KEY_PATH) + # subvolume with retained snapshots has empty path, don't mistake it for + # fabricated metadata. 
+ if (not self.legacy_mode and self.state != SubvolumeStates.STATE_RETAINED and + self.base_path.decode('utf-8') != str(Path(subvolpath).parent)): + raise MetadataMgrException(-errno.ENOENT, 'fabricated .meta') except MetadataMgrException as me: - if me.errno == -errno.ENOENT and not self.legacy_mode: + if me.errno in (-errno.ENOENT, -errno.EINVAL) and not self.legacy_mode: + log.warn("subvolume '{0}', {1}, " + "assuming legacy_mode".format(self.subvolname, me.error_str)) self.legacy_mode = True self.load_config() self.discover() diff --git a/ceph/src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py b/ceph/src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py index 8c98b3736..107f1a468 100644 --- a/ceph/src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py +++ b/ceph/src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py @@ -694,7 +694,7 @@ class SubvolumeV1(SubvolumeBase, SubvolumeTemplate): @property def state(self): - return SubvolumeStates.from_value(self.metadata_mgr.get_global_option(MetadataManager.GLOBAL_META_KEY_STATE)) + return super(SubvolumeV1, self).state @state.setter def state(self, val): diff --git a/ceph/src/rgw/rgw_rest_s3.cc b/ceph/src/rgw/rgw_rest_s3.cc index 1cb855d09..ff2af574f 100644 --- a/ceph/src/rgw/rgw_rest_s3.cc +++ b/ceph/src/rgw/rgw_rest_s3.cc @@ -4983,9 +4983,14 @@ int RGWHandler_REST_S3Website::retarget(RGWOp* op, RGWOp** new_op, optional_yiel if (!(s->prot_flags & RGW_REST_WEBSITE)) return 0; + if (rgw::sal::Bucket::empty(s->bucket.get())) { + // TODO-FUTURE: if the bucket does not exist, maybe expose it here? + return -ERR_NO_SUCH_BUCKET; + } + if (!s->bucket->get_info().has_website) { - // TODO-FUTURE: if the bucket has no WebsiteConfig, expose it here - return -ERR_NO_SUCH_WEBSITE_CONFIGURATION; + // TODO-FUTURE: if the bucket has no WebsiteConfig, expose it here + return -ERR_NO_SUCH_WEBSITE_CONFIGURATION; } rgw_obj_key new_obj;
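
Two of the hunks above are worth unpacking. The metadata_manager.py change is what the new "malicious/binary/unparseable .meta" tests in test_volumes.py exercise: a UnicodeDecodeError while reading the file and a configparser error while parsing it are both mapped to MetadataMgrException(-EINVAL), so a damaged .meta on a legacy subvolume root no longer breaks the legacy-to-v1 upgrade path. A minimal, self-contained sketch of that pattern (plain Python, not the actual mgr/volumes module; MetadataError and parse_meta are illustrative names):

import configparser
import errno
from io import StringIO

class MetadataError(Exception):
    """Stand-in for the mgr module's MetadataMgrException."""
    def __init__(self, error_code, message):
        self.errno = error_code
        super().__init__(message)

def parse_meta(raw_bytes, path=".meta"):
    """Decode and parse a .meta blob; map any decode/parse failure to -EINVAL."""
    try:
        text = raw_bytes.decode("utf-8")
    except UnicodeDecodeError:
        raise MetadataError(-errno.EINVAL,
                            "failed to decode, erroneous metadata config '{0}'".format(path))
    config = configparser.ConfigParser()
    try:
        # read_file() superseded readfp() in Python 3.2, hence the version check upstream
        config.read_file(StringIO(text))
    except configparser.Error:
        raise MetadataError(-errno.EINVAL,
                            "failed to parse, erroneous metadata config '{0}'".format(path))
    return config

# a well-formed .meta parses ...
parse_meta(b"[GLOBAL]\nversion = 1\n")
# ... while binary junk or free-form text raises MetadataError carrying -errno.EINVAL
for bad in (b"\xff\xfe\x00junk", b"unparseable config\nfile ...\n"):
    try:
        parse_meta(bad)
    except MetadataError as e:
        assert e.errno == -errno.EINVAL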
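
The subvolume_base.py change is the second half of the same defense: besides falling back to legacy mode on -EINVAL, discover() now cross-checks the path stored in the .meta file against the directory the file actually lives in, so a .meta copied from another subvolume (the "malicious metafile" test case) is rejected as fabricated rather than redirecting getpath or authorize to the wrong tree. A hedged sketch of just that comparison, using illustrative names rather than the real SubvolumeBase attributes: the path recorded for a v1/v2 subvolume points at its data directory, whose parent must be the subvolume's own base path, while subvolumes holding only retained snapshots legitimately record an empty path.

from pathlib import Path

def meta_path_matches(base_path: bytes, recorded_path: str, retained: bool) -> bool:
    """True if the path recorded in .meta is consistent with where .meta lives."""
    if retained:
        # retained-snapshot subvolumes store an empty path; that's not fabrication
        return True
    return base_path.decode("utf-8") == str(Path(recorded_path).parent)

# subvol1 carrying a .meta copied from subvol2 fails the check:
assert meta_path_matches(b"/volumes/_nogroup/subvol2",
                         "/volumes/_nogroup/subvol2/<uuid>", retained=False)
assert not meta_path_matches(b"/volumes/_nogroup/subvol1",
                             "/volumes/_nogroup/subvol2/<uuid>", retained=False)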