From ec96510ddc0f5509c27d2a7243f50f2f5667d23d Mon Sep 17 00:00:00 2001 From: =?utf8?q?Fabian=20Gr=C3=BCnbichler?= Date: Mon, 9 Aug 2021 14:41:22 +0200 Subject: [PATCH] import ceph 15.2.14 MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Signed-off-by: Fabian Grünbichler --- ceph/CMakeLists.txt | 7 +- ceph/PendingReleaseNotes | 16 + ceph/alpine/APKBUILD | 6 +- ceph/ceph.spec | 65 +- ceph/ceph.spec.in | 59 +- ceph/changelog.upstream | 10 +- ceph/debian/control | 1 - ceph/doc/man/8/ceph-volume.rst | 67 +- ceph/doc/mgr/dashboard.rst | 1 + ceph/doc/mgr/dashboard_plugins/motd.inc.rst | 30 + ceph/doc/rados/configuration/msgr2.rst | 2 +- .../configuration/network-config-ref.rst | 30 +- ceph/doc/rados/operations/balancer.rst | 4 +- ceph/doc/radosgw/frontends.rst | 32 + ceph/examples/boto3/delete_notification.py | 8 +- ceph/examples/boto3/get_notification.py | 6 +- ceph/examples/boto3/list_unordered.py | 4 +- ceph/examples/boto3/notification_filters.py | 6 +- ceph/examples/boto3/service-2.sdk-extras.json | 14 +- ceph/examples/boto3/topic_with_endpoint.py | 4 +- ceph/make-dist | 16 +- .../grafana/dashboards/host-details.json | 38 +- .../grafana/dashboards/hosts-overview.json | 4 +- ceph/qa/distros/all/centos_8.0.yaml | 4 - ceph/qa/distros/all/centos_8.3.yaml | 6 + ceph/qa/distros/all/centos_8.yaml | 2 +- ceph/qa/distros/all/rhel_8.0.yaml | 4 - ceph/qa/distros/all/rhel_8.3.yaml | 6 + ceph/qa/distros/all/rhel_8.yaml | 2 +- ceph/qa/distros/all/ubuntu_20.04.yaml | 2 + .../podman/centos_8.2_kubic_stable.yaml | 19 + .../distros/podman/rhel_8.3_kubic_stable.yaml | 19 + .../ubuntu_18.04_kubic_stable.yaml} | 4 +- .../podman/ubuntu_20.04_kubic_stable.yaml | 14 + .../podman/ubuntu_20.04_kubic_testing.yaml | 14 + .../supported-all-distro/rhel_8.2.yaml | 1 - .../distros/supported-all-distro/rhel_8.yaml | 1 + .../supported-all-distro/ubuntu_latest.yaml | 2 +- .../supported-random-distro$/centos_8.yaml | 1 + .../centos_latest.yaml | 1 - .../supported-random-distro$/rhel_8.yaml | 1 + .../supported-random-distro$/rhel_latest.yaml | 1 - .../ubuntu_18.04.yaml | 1 + .../ubuntu_latest.yaml | 2 +- ceph/qa/distros/supported/centos_latest.yaml | 2 +- ceph/qa/distros/supported/rhel_latest.yaml | 2 +- ceph/qa/distros/supported/ubuntu_latest.yaml | 2 +- .../distro => perf-basic}/ubuntu_18.04.yaml | 0 .../rados/cephadm/smoke/distro/centos_7.yaml | 1 - .../cephadm/smoke/distro/centos_8.0.yaml | 1 - .../smoke/distro/centos_8.2_kubic_stable.yaml | 1 + .../cephadm/smoke/distro/centos_latest.yaml | 1 - .../rados/cephadm/smoke/distro/rhel_7.yaml | 1 - .../rados/cephadm/smoke/distro/rhel_8.0.yaml | 1 - .../smoke/distro/rhel_8.3_kubic_stable.yaml | 1 + .../cephadm/smoke/distro/rhel_latest.yaml | 1 - .../cephadm/smoke/distro/ubuntu_20.04.yaml | 1 + .../cephadm/smoke/distro/ubuntu_latest.yaml | 1 - .../1-start-distro/1-start-centos_8.yaml | 4 +- .../1-start-distro/1-start-ubuntu_20.04.yaml | 4 +- .../suites/rados/cephadm/with-work/0-distro | 1 + .../cephadm/with-work/distro/centos_8.0.yaml | 1 - .../with-work/distro/centos_latest.yaml | 1 - .../cephadm/with-work/distro/rhel_8.0.yaml | 1 - .../cephadm/with-work/distro/rhel_latest.yaml | 1 - .../with-work/distro/ubuntu_18.04_podman.yaml | 1 - .../distro => workunits/0-distro}/.qa | 0 .../0-distro/centos_8.2_kubic_stable.yaml | 1 + .../0-distro/ubuntu_20.04_kubic_stable.yaml | 1 + .../0-distro/ubuntu_20.04_kubic_testing.yaml | 1 + .../workunits/distro/centos_latest.yaml | 1 - .../workunits/distro/ubuntu_18.04_podman.yaml | 1 - 
.../rados/dashboard/tasks/dashboard.yaml | 1 + .../rados/mgr/tasks/module_selftest.yaml | 1 + .../distro => perf}/ubuntu_18.04.yaml | 0 ceph/qa/suites/rados/perf/ubuntu_latest.yaml | 1 - .../suites/rados/thrash/3-scrub-overrides/$ | 0 .../distro => thrash/3-scrub-overrides}/.qa | 0 .../thrash/3-scrub-overrides/default.yaml | 0 .../max-simultaneous-scrubs-2.yaml | 5 + .../max-simultaneous-scrubs-3.yaml | 5 + .../msgr-failures/osd-dispatch-delay.yaml | 7 + .../3-workload/rbd_notification_tests.yaml | 8 +- .../mimic-x-singleton/ubuntu_18.04.yaml | 1 + .../mimic-x-singleton/ubuntu_latest.yaml | 1 - .../mimic-x/parallel/ubuntu_18.04.yaml | 1 + .../mimic-x/parallel/ubuntu_latest.yaml | 1 - .../ubuntu_18.04.yaml | 1 + .../ubuntu_latest.yaml | 1 - .../mimic-x/stress-split/ubuntu_18.04.yaml | 1 + .../mimic-x/stress-split/ubuntu_latest.yaml | 1 - .../nautilus-x-singleton/ubuntu_18.04.yaml | 1 + .../nautilus-x-singleton/ubuntu_latest.yaml | 1 - .../nautilus-x/parallel/ubuntu_18.04.yaml | 1 + .../nautilus-x/parallel/ubuntu_latest.yaml | 1 - .../ubuntu_18.04.yaml | 1 + .../ubuntu_latest.yaml | 1 - .../nautilus-x/stress-split/ubuntu_18.04.yaml | 1 + .../stress-split/ubuntu_latest.yaml | 1 - ceph/qa/tasks/barbican.py | 8 +- ceph/qa/tasks/cephadm.py | 4 +- ceph/qa/tasks/cephfs/filesystem.py | 11 +- ceph/qa/tasks/cephfs/test_admin.py | 75 + ceph/qa/tasks/mgr/dashboard/test_motd.py | 37 + ceph/qa/tasks/mgr/test_module_selftest.py | 8 + ceph/qa/tasks/userdata_setup.yaml | 2 + ceph/qa/workunits/cephadm/test_repos.sh | 2 +- .../rados/test_envlibrados_for_rocksdb.sh | 2 +- ceph/qa/workunits/rbd/cli_generic.sh | 216 ++- ceph/qa/workunits/rbd/qemu-iotests.sh | 9 +- ceph/qa/workunits/rgw/test_rgw_orphan_list.sh | 20 +- ceph/src/.git_version | 4 +- ceph/src/ceph-volume/ceph_volume/api/lvm.py | 12 + .../ceph_volume/devices/lvm/main.py | 4 + .../ceph_volume/devices/lvm/migrate.py | 674 ++++++++ .../tests/devices/lvm/test_migrate.py | 1504 +++++++++++++++++ .../tests/functional/batch/tox.ini | 12 +- .../ceph_volume/tests/functional/lvm/tox.ini | 12 +- .../tests/functional/playbooks/deploy.yml | 8 +- .../tests/functional/simple/tox.ini | 10 +- .../ceph-volume/ceph_volume/util/system.py | 3 +- ceph/src/ceph.in | 7 +- ceph/src/ceph_mon.cc | 8 + ceph/src/cephadm/cephadm | 69 +- .../src/cephadm/samples/custom_container.json | 4 +- ceph/src/cephadm/tests/test_cephadm.py | 147 +- ceph/src/client/Client.cc | 109 +- ceph/src/client/Client.h | 4 +- ceph/src/client/Inode.h | 2 +- ceph/src/cls/rgw/cls_rgw.cc | 86 +- ceph/src/common/config_proxy.h | 16 +- ceph/src/common/ipaddr.cc | 118 +- ceph/src/common/options.cc | 14 + ceph/src/common/pick_address.cc | 307 ++-- ceph/src/common/pick_address.h | 14 + ceph/src/crush/crush.h | 2 +- ceph/src/include/ceph_fs.h | 12 +- ceph/src/include/cephfs/libcephfs.h | 4 + ceph/src/include/config-h.in.cmake | 3 + ceph/src/include/ipaddr.h | 15 +- ceph/src/libcephfs.cc | 8 + ceph/src/librbd/ImageCtx.cc | 8 +- ceph/src/librbd/api/Trash.cc | 64 +- ceph/src/mds/CInode.h | 2 +- ceph/src/mds/DamageTable.cc | 7 +- ceph/src/mds/FSMap.cc | 5 +- ceph/src/mds/FSMap.h | 14 +- ceph/src/mds/MDCache.cc | 18 +- ceph/src/mds/MDLog.cc | 22 +- ceph/src/mds/MDSDaemon.cc | 1 + ceph/src/mds/Mutation.h | 1 + ceph/src/mds/Server.cc | 44 +- ceph/src/mds/SessionMap.cc | 3 +- ceph/src/mds/SnapRealm.cc | 2 +- ceph/src/mds/SnapServer.h | 2 +- ceph/src/mds/mdstypes.h | 13 +- ceph/src/mgr/DaemonServer.cc | 18 +- ceph/src/mgr/PyModuleRegistry.cc | 50 +- ceph/src/mon/MDSMonitor.cc | 44 +- ceph/src/mon/MDSMonitor.h | 7 + 
ceph/src/mon/MonClient.cc | 38 +- ceph/src/mon/MonClient.h | 4 +- ceph/src/mon/OSDMonitor.cc | 5 +- ceph/src/mon/PaxosService.cc | 18 +- ceph/src/os/bluestore/AvlAllocator.cc | 43 +- ceph/src/os/bluestore/BlueFS.cc | 125 +- ceph/src/os/bluestore/BlueFS.h | 42 +- ceph/src/os/bluestore/BlueRocksEnv.cc | 58 +- ceph/src/os/bluestore/BlueRocksEnv.h | 8 - ceph/src/os/bluestore/BlueStore.cc | 42 +- ceph/src/os/bluestore/BlueStore.h | 42 +- ceph/src/os/bluestore/HybridAllocator.cc | 2 + ceph/src/os/bluestore/StupidAllocator.cc | 4 + ceph/src/os/bluestore/bluefs_types.h | 8 +- ceph/src/os/bluestore/bluestore_tool.cc | 38 +- ceph/src/osd/OSD.cc | 22 +- ceph/src/osd/OSD.h | 5 +- ceph/src/osd/PG.cc | 12 +- ceph/src/osd/PeeringState.cc | 24 +- ceph/src/osd/PrimaryLogPG.cc | 3 +- ceph/src/osd/osd_types.cc | 4 +- ceph/src/perfglue/cpu_profiler.cc | 4 +- ceph/src/pybind/ceph_volume_client.py | 23 +- .../services/prometheus/prometheus.yml.j2 | 1 - ceph/src/pybind/mgr/dashboard/HACKING.rst | 34 + .../dashboard/ci/cephadm/bootstrap-cluster.sh | 15 + .../mgr/dashboard/ci/cephadm/ceph_cluster.yml | 39 + .../ci/cephadm/run-cephadm-e2e-tests.sh | 41 + .../mgr/dashboard/ci/cephadm/start-cluster.sh | 79 + .../mgr/dashboard/frontend/cypress.json | 3 +- .../cluster/configuration.e2e-spec.ts | 4 +- .../cypress/integration/cluster/hosts.po.ts | 92 +- .../integration/cluster/osds.e2e-spec.ts | 2 +- .../workflow/01-hosts.e2e-spec.ts | 53 + .../cypress/integration/page-helper.po.ts | 90 +- .../cypress/integration/rgw/buckets.po.ts | 5 +- .../integration/ui/dashboard.e2e-spec.ts | 2 +- .../dist/en-US/1.ecdc99fc68ced4743e9f.js | 1 - .../dist/en-US/1.f2cb9c77bf340e0ea797.js | 1 + .../dist/en-US/5.3532f17ccbf4b268177b.js | 1 - .../dist/en-US/5.7e995d52d0c4ff2d857a.js | 1 + ...17ca13aa5.js => 6.dc8bd57d45d56da356c9.js} | 2 +- .../dist/en-US/7.1c25843a719516e3e84b.js | 1 + .../dist/en-US/7.eee5262a4f5915f2d5d4.js | 1 - .../dist/en-US/8.a5d6e73e48ae0cf89ca6.js | 1 - .../dist/en-US/8.d45d4eb6e6f206518908.js | 1 + .../dashboard/frontend/dist/en-US/index.html | 2 +- .../dist/en-US/main.6fdc1558ba9057d093e0.js | 2 - .../dist/en-US/main.8f0b1a650c943458f796.js | 2 + ... 
main.8f0b1a650c943458f796.js.LICENSE.txt} | 0 .../en-US/runtime.47bc076fdcd8bde0c19f.js | 1 - .../en-US/runtime.aa14b459a46eea461c85.js | 1 + .../rbd-details/rbd-details.component.html | 6 +- .../app/ceph/block/rbd-form/rbd-form.model.ts | 3 + .../block/rbd-list/rbd-list.component.html | 22 + .../block/rbd-list/rbd-list.component.scss | 5 + .../block/rbd-list/rbd-list.component.spec.ts | 30 + .../ceph/block/rbd-list/rbd-list.component.ts | 30 +- .../ceph/dashboard/osd-summary.pipe.spec.ts | 43 +- .../app/ceph/dashboard/osd-summary.pipe.ts | 2 +- .../ceph/nfs/nfs-form/nfs-form.component.html | 3 +- .../nfs/nfs-form/nfs-form.component.spec.ts | 11 +- .../ceph/nfs/nfs-form/nfs-form.component.ts | 2 +- .../rgw-bucket-form.component.html | 15 + .../rgw-bucket-form.component.spec.ts | 135 +- .../rgw-bucket-form.component.ts | 128 +- .../rgw-bucket-list.component.spec.ts | 12 +- .../rgw-bucket-list.component.ts | 20 +- .../rgw-user-form.component.spec.ts | 4 +- .../dashboard-help.component.html | 11 +- .../dashboard-help.component.ts | 8 +- .../navigation/navigation.component.html | 8 +- .../navigation/navigation.component.scss | 4 - .../navigation/navigation.component.spec.ts | 3 +- .../navigation/navigation.component.ts | 16 +- .../src/app/shared/api/motd.service.spec.ts | 34 + .../src/app/shared/api/motd.service.ts | 25 + .../app/shared/api/rgw-bucket.service.spec.ts | 2 +- .../src/app/shared/api/rgw-bucket.service.ts | 21 +- .../alert-panel/alert-panel.component.html | 4 +- .../alert-panel/alert-panel.component.ts | 20 +- .../shared/components/components.module.ts | 7 +- .../components/motd/motd.component.html | 8 + .../components/motd/motd.component.scss | 0 .../components/motd/motd.component.spec.ts | 26 + .../shared/components/motd/motd.component.ts | 33 + .../src/app/shared/forms/cd-validators.ts | 19 +- .../src/app/shared/pipes/pipes.module.ts | 10 +- .../shared/pipes/sanitize-html.pipe.spec.ts | 26 + .../app/shared/pipes/sanitize-html.pipe.ts | 13 + .../motd-notification.service.spec.ts | 117 ++ .../services/motd-notification.service.ts | 82 + ceph/src/pybind/mgr/dashboard/module.py | 2 +- ceph/src/pybind/mgr/dashboard/plugins/motd.py | 102 ++ .../mgr/dashboard/services/access_control.py | 25 + ceph/src/pybind/mgr/dashboard/services/rbd.py | 37 +- .../dashboard/tests/test_access_control.py | 109 ++ .../mgr/dashboard/tests/test_rbd_service.py | 99 +- ceph/src/pybind/mgr/selftest/module.py | 20 +- ceph/src/pybind/mgr/telemetry/module.py | 4 +- ceph/src/python-common/ceph/utils.py | 39 + ceph/src/python-common/tox.ini | 2 +- ceph/src/rgw/rgw-orphan-list | 19 +- ceph/src/rgw/rgw_admin.cc | 4 + ceph/src/rgw/rgw_asio_frontend.cc | 52 + ceph/src/rgw/rgw_cache.h | 5 + ceph/src/rgw/rgw_common.cc | 35 + ceph/src/rgw/rgw_common.h | 7 + ceph/src/rgw/rgw_file.cc | 4 +- ceph/src/rgw/rgw_gc.cc | 8 +- ceph/src/rgw/rgw_obj_manifest.cc | 6 + ceph/src/rgw/rgw_op.cc | 102 +- ceph/src/rgw/rgw_op.h | 5 + ceph/src/rgw/rgw_orphan.cc | 35 +- ceph/src/rgw/rgw_pubsub.cc | 11 + ceph/src/rgw/rgw_rest_client.cc | 2 +- ceph/src/rgw/rgw_rest_s3.cc | 23 + ceph/src/rgw/rgw_rest_swift.cc | 6 +- ceph/src/rgw/rgw_sync_module_pubsub.cc | 25 - ceph/src/rgw/rgw_sync_module_pubsub.h | 2 +- ceph/src/rgw/rgw_user.cc | 117 +- ceph/src/rgw/services/svc_notify.cc | 125 +- ceph/src/rgw/services/svc_notify.h | 10 +- ceph/src/rgw/services/svc_sys_obj_cache.cc | 4 +- ceph/src/test/cls_rgw/test_cls_rgw.cc | 11 +- ceph/src/test/common/test_counter.cc | 2 +- ceph/src/test/libcephfs/test.cc | 132 ++ 
ceph/src/test/objectstore/Allocator_test.cc | 58 + .../objectstore/bmap_allocator_replay_test.cc | 15 +- ceph/src/test/objectstore/store_test.cc | 70 + ceph/src/test/objectstore/test_bluefs.cc | 69 + ceph/src/test/rgw/amqp_mock.cc | 9 + ceph/src/test/test_ipaddr.cc | 102 +- ceph/src/tools/cephfs/DataScan.cc | 11 +- ceph/src/tools/rbd/Schedule.cc | 18 +- ceph/src/tools/rbd/Schedule.h | 2 + ceph/src/tools/rbd/Utils.cc | 27 +- ceph/src/tools/rbd/Utils.h | 6 +- ceph/src/tools/rbd/action/Kernel.cc | 156 +- ceph/src/tools/rbd/action/List.cc | 14 +- ceph/src/tools/rbd/action/Migration.cc | 11 +- .../rbd/action/MirrorSnapshotSchedule.cc | 4 + ceph/src/tools/rbd/action/Perf.cc | 2 + ceph/src/tools/rbd/action/Trash.cc | 17 +- .../tools/rbd/action/TrashPurgeSchedule.cc | 4 + .../image_replayer/snapshot/Replayer.h | 2 +- 306 files changed, 6689 insertions(+), 1646 deletions(-) create mode 100644 ceph/doc/mgr/dashboard_plugins/motd.inc.rst create mode 100644 ceph/qa/distros/all/centos_8.3.yaml create mode 100644 ceph/qa/distros/all/rhel_8.3.yaml create mode 100644 ceph/qa/distros/all/ubuntu_20.04.yaml create mode 100644 ceph/qa/distros/podman/centos_8.2_kubic_stable.yaml create mode 100644 ceph/qa/distros/podman/rhel_8.3_kubic_stable.yaml rename ceph/qa/distros/{all/ubuntu_18.04_podman.yaml => podman/ubuntu_18.04_kubic_stable.yaml} (80%) create mode 100644 ceph/qa/distros/podman/ubuntu_20.04_kubic_stable.yaml create mode 100644 ceph/qa/distros/podman/ubuntu_20.04_kubic_testing.yaml delete mode 120000 ceph/qa/distros/supported-all-distro/rhel_8.2.yaml create mode 120000 ceph/qa/distros/supported-all-distro/rhel_8.yaml create mode 120000 ceph/qa/distros/supported-random-distro$/centos_8.yaml delete mode 120000 ceph/qa/distros/supported-random-distro$/centos_latest.yaml create mode 120000 ceph/qa/distros/supported-random-distro$/rhel_8.yaml delete mode 120000 ceph/qa/distros/supported-random-distro$/rhel_latest.yaml create mode 120000 ceph/qa/distros/supported-random-distro$/ubuntu_18.04.yaml rename ceph/qa/suites/{rados/cephadm/smoke/distro => perf-basic}/ubuntu_18.04.yaml (100%) delete mode 120000 ceph/qa/suites/rados/cephadm/smoke/distro/centos_7.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/smoke/distro/centos_8.0.yaml create mode 120000 ceph/qa/suites/rados/cephadm/smoke/distro/centos_8.2_kubic_stable.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/smoke/distro/centos_latest.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/smoke/distro/rhel_7.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/smoke/distro/rhel_8.0.yaml create mode 120000 ceph/qa/suites/rados/cephadm/smoke/distro/rhel_8.3_kubic_stable.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/smoke/distro/rhel_latest.yaml create mode 120000 ceph/qa/suites/rados/cephadm/smoke/distro/ubuntu_20.04.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/smoke/distro/ubuntu_latest.yaml create mode 120000 ceph/qa/suites/rados/cephadm/with-work/0-distro delete mode 120000 ceph/qa/suites/rados/cephadm/with-work/distro/centos_8.0.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/with-work/distro/centos_latest.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/with-work/distro/rhel_8.0.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/with-work/distro/rhel_latest.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/with-work/distro/ubuntu_18.04_podman.yaml rename ceph/qa/suites/rados/cephadm/{with-work/distro => workunits/0-distro}/.qa (100%) create mode 120000 
ceph/qa/suites/rados/cephadm/workunits/0-distro/centos_8.2_kubic_stable.yaml create mode 120000 ceph/qa/suites/rados/cephadm/workunits/0-distro/ubuntu_20.04_kubic_stable.yaml create mode 120000 ceph/qa/suites/rados/cephadm/workunits/0-distro/ubuntu_20.04_kubic_testing.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/workunits/distro/centos_latest.yaml delete mode 120000 ceph/qa/suites/rados/cephadm/workunits/distro/ubuntu_18.04_podman.yaml rename ceph/qa/suites/rados/{cephadm/with-work/distro => perf}/ubuntu_18.04.yaml (100%) delete mode 120000 ceph/qa/suites/rados/perf/ubuntu_latest.yaml create mode 100644 ceph/qa/suites/rados/thrash/3-scrub-overrides/$ rename ceph/qa/suites/rados/{cephadm/workunits/distro => thrash/3-scrub-overrides}/.qa (100%) create mode 100644 ceph/qa/suites/rados/thrash/3-scrub-overrides/default.yaml create mode 100644 ceph/qa/suites/rados/thrash/3-scrub-overrides/max-simultaneous-scrubs-2.yaml create mode 100644 ceph/qa/suites/rados/thrash/3-scrub-overrides/max-simultaneous-scrubs-3.yaml create mode 100644 ceph/qa/suites/rados/thrash/msgr-failures/osd-dispatch-delay.yaml create mode 120000 ceph/qa/suites/upgrade/mimic-x-singleton/ubuntu_18.04.yaml delete mode 120000 ceph/qa/suites/upgrade/mimic-x-singleton/ubuntu_latest.yaml create mode 120000 ceph/qa/suites/upgrade/mimic-x/parallel/ubuntu_18.04.yaml delete mode 120000 ceph/qa/suites/upgrade/mimic-x/parallel/ubuntu_latest.yaml create mode 120000 ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/ubuntu_18.04.yaml delete mode 120000 ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/ubuntu_latest.yaml create mode 120000 ceph/qa/suites/upgrade/mimic-x/stress-split/ubuntu_18.04.yaml delete mode 120000 ceph/qa/suites/upgrade/mimic-x/stress-split/ubuntu_latest.yaml create mode 120000 ceph/qa/suites/upgrade/nautilus-x-singleton/ubuntu_18.04.yaml delete mode 120000 ceph/qa/suites/upgrade/nautilus-x-singleton/ubuntu_latest.yaml create mode 120000 ceph/qa/suites/upgrade/nautilus-x/parallel/ubuntu_18.04.yaml delete mode 120000 ceph/qa/suites/upgrade/nautilus-x/parallel/ubuntu_latest.yaml create mode 120000 ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/ubuntu_18.04.yaml delete mode 120000 ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/ubuntu_latest.yaml create mode 120000 ceph/qa/suites/upgrade/nautilus-x/stress-split/ubuntu_18.04.yaml delete mode 120000 ceph/qa/suites/upgrade/nautilus-x/stress-split/ubuntu_latest.yaml create mode 100644 ceph/qa/tasks/mgr/dashboard/test_motd.py create mode 100644 ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py create mode 100755 ceph/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh create mode 100755 ceph/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml create mode 100755 ceph/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh create mode 100755 ceph/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-hosts.e2e-spec.ts delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/1.ecdc99fc68ced4743e9f.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/1.f2cb9c77bf340e0ea797.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/5.3532f17ccbf4b268177b.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/5.7e995d52d0c4ff2d857a.js rename 
ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/{6.bbb14e8467017ca13aa5.js => 6.dc8bd57d45d56da356c9.js} (50%) create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/7.1c25843a719516e3e84b.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/7.eee5262a4f5915f2d5d4.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/8.a5d6e73e48ae0cf89ca6.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/8.d45d4eb6e6f206518908.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.6fdc1558ba9057d093e0.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.8f0b1a650c943458f796.js rename ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/{main.6fdc1558ba9057d093e0.js.LICENSE.txt => main.8f0b1a650c943458f796.js.LICENSE.txt} (100%) delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.47bc076fdcd8bde0c19f.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.aa14b459a46eea461c85.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/motd.service.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/motd.service.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/motd/motd.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/motd/motd.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/motd/motd.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/motd/motd.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/sanitize-html.pipe.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/sanitize-html.pipe.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/services/motd-notification.service.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/services/motd-notification.service.ts create mode 100644 ceph/src/pybind/mgr/dashboard/plugins/motd.py diff --git a/ceph/CMakeLists.txt b/ceph/CMakeLists.txt index a2d34ee97..3ac7fb27e 100644 --- a/ceph/CMakeLists.txt +++ b/ceph/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.10.2) # remove cmake/modules/FindPython* once 3.12 is required project(ceph - VERSION 15.2.0 + VERSION 15.2.14 LANGUAGES CXX C ASM) foreach(policy @@ -440,6 +440,11 @@ if(WITH_MGR) set(MGR_PYTHON_LIBRARIES ${Python3_LIBRARIES}) set(MGR_PYTHON_VERSION_MAJOR ${Python3_VERSION_MAJOR}) set(MGR_PYTHON_VERSION_MINOR ${Python3_VERSION_MINOR}) + # https://tracker.ceph.com/issues/45147 + if(Python3_VERSION VERSION_GREATER_EQUAL 3.8) + set(MGR_DISABLED_MODULES "diskprediction_local") + message(STATUS "mgr module disabled for ${Python3_VERSION}: ${MGR_DISABLED_MODULES}") + endif() # Boost dependency check deferred to Boost section endif(WITH_MGR) diff --git a/ceph/PendingReleaseNotes b/ceph/PendingReleaseNotes index e4eb04ad8..3cd108a95 100644 --- a/ceph/PendingReleaseNotes +++ b/ceph/PendingReleaseNotes @@ -1,6 +1,22 @@ +15.2.14 +------- + +* RGW: It is possible to specify ssl options and ciphers for beast frontend now. + The default ssl options setting is "no_sslv2:no_sslv3:no_tlsv1:no_tlsv1_1". + If you want to return back the old behavior add 'ssl_options=' (empty) to + ``rgw frontends`` configuration. 
+ 15.2.11 ------- +* `ceph-mgr-modules-core` debian package does not recommend `ceph-mgr-rook` + anymore. As the latter depends on `python3-numpy` which cannot be imported in + different Python sub-interpreters multi-times if the version of + `python3-numpy` is older than 1.19. Since `apt-get` installs the `Recommends` + packages by default, `ceph-mgr-rook` was always installed along with + `ceph-mgr` debian package as an indirect dependency. If your workflow depends + on this behavior, you might want to install `ceph-mgr-rook` separately. + * OSD: the option ``osd_fast_shutdown_notify_mon`` has been introduced to allow the OSD to notify the monitor it is shutting down even if ``osd_fast_shutdown`` is enabled. This helps with the monitor logs on larger clusters, that may get diff --git a/ceph/alpine/APKBUILD b/ceph/alpine/APKBUILD index b1e8f2512..ef43085d6 100644 --- a/ceph/alpine/APKBUILD +++ b/ceph/alpine/APKBUILD @@ -1,7 +1,7 @@ # Contributor: John Coyle # Maintainer: John Coyle pkgname=ceph -pkgver=15.2.13 +pkgver=15.2.14 pkgrel=0 pkgdesc="Ceph is a distributed object store and file system" pkgusers="ceph" @@ -63,7 +63,7 @@ makedepends=" xmlstarlet yasm " -source="ceph-15.2.13.tar.bz2" +source="ceph-15.2.14.tar.bz2" subpackages=" $pkgname-base $pkgname-common @@ -116,7 +116,7 @@ _sysconfdir=/etc _udevrulesdir=/etc/udev/rules.d _python_sitelib=/usr/lib/python2.7/site-packages -builddir=$srcdir/ceph-15.2.13 +builddir=$srcdir/ceph-15.2.14 build() { export CEPH_BUILD_VIRTUALENV=$builddir diff --git a/ceph/ceph.spec b/ceph/ceph.spec index ce1ef531a..a966466df 100644 --- a/ceph/ceph.spec +++ b/ceph/ceph.spec @@ -98,7 +98,7 @@ # main package definition ################################################################################# Name: ceph -Version: 15.2.13 +Version: 15.2.14 Release: 0%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 @@ -114,7 +114,7 @@ License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD- Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-15.2.13.tar.bz2 +Source0: %{?_remote_tarball_prefix}ceph-15.2.14.tar.bz2 %if 0%{?suse_version} # _insert_obs_source_lines_here ExclusiveArch: x86_64 aarch64 ppc64le s390x @@ -1150,7 +1150,7 @@ This package provides Ceph’s default alerts for Prometheus. # common ################################################################################# %prep -%autosetup -p1 -n ceph-15.2.13 +%autosetup -p1 -n ceph-15.2.14 %build # LTO can be enabled as soon as the following GCC bug is fixed: @@ -1452,21 +1452,7 @@ fi %postun base /sbin/ldconfig -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph.target -%endif -if [ $1 -ge 1 ] ; then - # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to - # "yes". In any case: if units are not running, do not touch them. - SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph - if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then - source $SYSCONF_CEPH - fi -fi %pre -n cephadm getent group cephadm >/dev/null || groupadd -r cephadm @@ -1612,13 +1598,7 @@ fi %endif %postun mds -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-mds@\*.service ceph-mds.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-mds@\*.service ceph-mds.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". 
In any case: if units are not running, do not touch them. @@ -1665,13 +1645,7 @@ fi %endif %postun mgr -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-mgr@\*.service ceph-mgr.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-mgr@\*.service ceph-mgr.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. @@ -1818,13 +1792,7 @@ fi %endif %postun mon -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-mon@\*.service ceph-mon.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-mon@\*.service ceph-mon.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. @@ -1876,13 +1844,7 @@ fi %endif %postun -n rbd-mirror -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. @@ -1923,15 +1885,8 @@ fi %endif %postun immutable-object-cache -test -n "$FIRST_ARG" || FIRST_ARG=$1 -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target -%endif -if [ $FIRST_ARG -ge 1 ] ; then +if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph @@ -1986,13 +1941,7 @@ fi %postun radosgw /sbin/ldconfig -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-radosgw@\*.service ceph-radosgw.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-radosgw@\*.service ceph-radosgw.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. @@ -2053,13 +2002,7 @@ fi %endif %postun osd -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. diff --git a/ceph/ceph.spec.in b/ceph/ceph.spec.in index 433e316f9..ccafc4562 100644 --- a/ceph/ceph.spec.in +++ b/ceph/ceph.spec.in @@ -1452,21 +1452,7 @@ fi %postun base /sbin/ldconfig -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph.target -%endif -if [ $1 -ge 1 ] ; then - # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to - # "yes". In any case: if units are not running, do not touch them. 
- SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph - if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then - source $SYSCONF_CEPH - fi -fi %pre -n cephadm getent group cephadm >/dev/null || groupadd -r cephadm @@ -1612,13 +1598,7 @@ fi %endif %postun mds -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-mds@\*.service ceph-mds.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-mds@\*.service ceph-mds.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. @@ -1665,13 +1645,7 @@ fi %endif %postun mgr -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-mgr@\*.service ceph-mgr.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-mgr@\*.service ceph-mgr.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. @@ -1818,13 +1792,7 @@ fi %endif %postun mon -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-mon@\*.service ceph-mon.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-mon@\*.service ceph-mon.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. @@ -1876,13 +1844,7 @@ fi %endif %postun -n rbd-mirror -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. @@ -1923,15 +1885,8 @@ fi %endif %postun immutable-object-cache -test -n "$FIRST_ARG" || FIRST_ARG=$1 -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target -%endif -if [ $FIRST_ARG -ge 1 ] ; then +if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph @@ -1986,13 +1941,7 @@ fi %postun radosgw /sbin/ldconfig -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-radosgw@\*.service ceph-radosgw.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-radosgw@\*.service ceph-radosgw.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. @@ -2053,13 +2002,7 @@ fi %endif %postun osd -%if 0%{?suse_version} -DISABLE_RESTART_ON_UPDATE="yes" -%service_del_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target -%endif -%if 0%{?fedora} || 0%{?rhel} %systemd_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target -%endif if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. 
diff --git a/ceph/changelog.upstream b/ceph/changelog.upstream index 03a97c540..14b0e4b50 100644 --- a/ceph/changelog.upstream +++ b/ceph/changelog.upstream @@ -1,7 +1,13 @@ -ceph (15.2.13-1bionic) bionic; urgency=medium +ceph (15.2.14-1bionic) bionic; urgency=medium - -- Jenkins Build Slave User Wed, 26 May 2021 19:34:51 +0000 + -- Jenkins Build Slave User Thu, 05 Aug 2021 17:22:18 +0000 + +ceph (15.2.14-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Thu, 05 Aug 2021 17:11:52 +0000 ceph (15.2.13-1) stable; urgency=medium diff --git a/ceph/debian/control b/ceph/debian/control index 397e3a09d..ee62a1a6d 100644 --- a/ceph/debian/control +++ b/ceph/debian/control @@ -305,7 +305,6 @@ Depends: ${misc:Depends}, python3-openssl, Replaces: ceph-mgr (<< 15.1.0) Breaks: ceph-mgr (<< 15.1.0) -Recommends: ceph-mgr-rook Description: ceph manager modules which are always enabled Ceph is a massively scalable, open-source, distributed storage system that runs on commodity hardware and delivers object, diff --git a/ceph/doc/man/8/ceph-volume.rst b/ceph/doc/man/8/ceph-volume.rst index b3c70a556..9bd51744e 100644 --- a/ceph/doc/man/8/ceph-volume.rst +++ b/ceph/doc/man/8/ceph-volume.rst @@ -15,7 +15,7 @@ Synopsis | **ceph-volume** **inventory** | **ceph-volume** **lvm** [ *trigger* | *create* | *activate* | *prepare* -| *zap* | *list* | *batch*] +| *zap* | *list* | *batch* | *new-wal* | *new-db* | *migrate* ] | **ceph-volume** **simple** [ *trigger* | *scan* | *activate* ] @@ -241,6 +241,71 @@ Positional arguments: ``/path/to/sda1`` or ``/path/to/sda`` for regular devices. +**new-wal** +Attaches the given logical volume to OSD as a WAL. Logical volume +name format is vg/lv. Fails if OSD has already got attached WAL. + +Usage:: + + ceph-volume lvm new-wal --osd-id OSD_ID --osd-fsid OSD_FSID --target TARGET_LV + +Optional arguments: + +* [-h, --help] show the help message and exit + +Required arguments: + +* --osd-id OSD_ID OSD id to attach new WAL to +* --osd-fsid OSD_FSID OSD fsid to attach new WAL to +* --target TARGET_LV logical volume name to attach as WAL + + +**new-db** +Attaches the given logical volume to OSD as a DB. Logical volume +name format is vg/lv. Fails if OSD has already got attached DB. + +Usage:: + + ceph-volume lvm new-db --osd-id OSD_ID --osd-fsid OSD_FSID --target + +Optional arguments: + +* [-h, --help] show the help message and exit + +Required arguments: + +* --osd-id OSD_ID OSD id to attach new DB to +* --osd-fsid OSD_FSID OSD fsid to attach new DB to +* --target TARGET_LV logical volume name to attach as DB + +**migrate** + +Moves BlueFS data from source volume(s) to the target one, source volumes +(except the main, i.e. data or block one) are removed on success. LVM volumes +are permitted for Target only, both already attached or new one. In the latter +case it is attached to the OSD replacing one of the source devices. Following +replacement rules apply (in the order of precedence, stop on the first match): + + - if source list has DB volume - target device replaces it. + - if source list has WAL volume - target device replace it. + - if source list has slow volume only - operation is not permitted, + requires explicit allocation via new-db/new-wal command. + +Usage:: + + ceph-volume lvm migrate --osd-id OSD_ID --osd-fsid OSD_FSID --target TARGET_LV --from {data|db|wal} [{data|db|wal} ...] 
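The ``migrate`` usage shown above is a plain command-line entry point; below is a minimal Python sketch of driving it, assuming hypothetical OSD id, fsid and vg/lv names (none of these values come from this patch)::

    import subprocess

    def migrate_bluefs(osd_id, osd_fsid, target_lv, sources=("db",)):
        """Move BlueFS data from the given source device types onto target_lv (vg/lv)."""
        cmd = [
            "ceph-volume", "lvm", "migrate",
            "--osd-id", str(osd_id),
            "--osd-fsid", osd_fsid,
            "--target", target_lv,
            "--from", *sources,   # e.g. ("db", "wal"), matching the --from example above
        ]
        # Per the replacement rules above, migrating from a lone "slow" volume is
        # refused and needs an explicit new-db/new-wal first; ceph-volume then
        # exits non-zero and subprocess raises CalledProcessError.
        subprocess.run(cmd, check=True)

    # Placeholder values, for illustration only:
    # migrate_bluefs(0, "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", "cephvg/db_new", ("db", "wal"))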
+ +Optional arguments: + +* [-h, --help] show the help message and exit + +Required arguments: + +* --osd-id OSD_ID OSD id to perform migration at +* --osd-fsid OSD_FSID OSD fsid to perform migration at +* --target TARGET_LV logical volume to move data to +* --from TYPE_LIST list of source device type names, e.g. --from db wal + simple ------ diff --git a/ceph/doc/mgr/dashboard.rst b/ceph/doc/mgr/dashboard.rst index 31cbe585c..ca577741d 100644 --- a/ceph/doc/mgr/dashboard.rst +++ b/ceph/doc/mgr/dashboard.rst @@ -1189,6 +1189,7 @@ and loosely coupled fashion. .. include:: dashboard_plugins/feature_toggles.inc.rst .. include:: dashboard_plugins/debug.inc.rst +.. include:: dashboard_plugins/motd.inc.rst Troubleshooting the Dashboard diff --git a/ceph/doc/mgr/dashboard_plugins/motd.inc.rst b/ceph/doc/mgr/dashboard_plugins/motd.inc.rst new file mode 100644 index 000000000..b8464e1f3 --- /dev/null +++ b/ceph/doc/mgr/dashboard_plugins/motd.inc.rst @@ -0,0 +1,30 @@ +.. _dashboard-motd: + +Message of the day (MOTD) +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Displays a configured `message of the day` at the top of the Ceph Dashboard. + +The importance of a MOTD can be configured by its severity, which is +`info`, `warning` or `danger`. The MOTD can expire after a given time, +this means it will not be displayed in the UI anymore. Use the following +syntax to specify the expiration time: `Ns|m|h|d|w` for seconds, minutes, +hours, days and weeks. If the MOTD should expire after 2 hours, use `2h` +or `5w` for 5 weeks. Use `0` to configure a MOTD that does not expire. + +To configure a MOTD, run the following command:: + + $ ceph dashboard motd set + +To show the configured MOTD:: + + $ ceph dashboard motd get + +To clear the configured MOTD run:: + + $ ceph dashboard motd clear + +A MOTD with a `info` or `warning` severity can be closed by the user. The +`info` MOTD is not displayed anymore until the local storage cookies are +cleared or a new MOTD with a different severity is displayed. A MOTD with +a 'warning' severity will be displayed again in a new session. diff --git a/ceph/doc/rados/configuration/msgr2.rst b/ceph/doc/rados/configuration/msgr2.rst index 293f7c3d3..70494d65f 100644 --- a/ceph/doc/rados/configuration/msgr2.rst +++ b/ceph/doc/rados/configuration/msgr2.rst @@ -88,7 +88,7 @@ Similarly, two options control whether IPv4 and IPv6 addresses are used: * ``ms_bind_ipv6`` [default: false] controls whether a daemon binds to an IPv6 address -.. note: The ability to bind to multiple ports has paved the way for +.. note:: The ability to bind to multiple ports has paved the way for dual-stack IPv4 and IPv6 support. That said, dual-stack support is not yet tested as of Nautilus v14.2.0 and likely needs some additional code changes to work correctly. diff --git a/ceph/doc/rados/configuration/network-config-ref.rst b/ceph/doc/rados/configuration/network-config-ref.rst index bd49a87b3..4ebbf1016 100644 --- a/ceph/doc/rados/configuration/network-config-ref.rst +++ b/ceph/doc/rados/configuration/network-config-ref.rst @@ -201,6 +201,27 @@ following option to the ``[global]`` section of your Ceph configuration file. We prefer that the cluster network is **NOT** reachable from the public network or the Internet for added security. +IPv4/IPv6 Dual Stack Mode +------------------------- + +If you want to run in an IPv4/IPv6 dual stack mode and want to define your public and/or +cluster networks, then you need to specify both your IPv4 and IPv6 networks for each: + +.. code-block:: ini + + [global] + # ... 
elided configuration + public network = {IPv4 public-network/netmask}, {IPv6 public-network/netmask} + +This is so ceph can find a valid IP address for both address families. + +If you want just an IPv4 or an IPv6 stack environment, then make sure you set the `ms bind` +options correctly. + +.. note:: + Binding to IPv4 is enabled by default, so if you just add the option to bind to IPv6 + you'll actually put yourself into dual stack mode. If you want just IPv6, then disable IPv4 and + enable IPv6. See `Bind`_ below. Ceph Daemons ============ @@ -336,11 +357,16 @@ addresses. :Default: ``7300`` :Required: No. +``ms bind ipv4`` + +:Description: Enables Ceph daemons to bind to IPv4 addresses. +:Type: Boolean +:Default: ``true`` +:Required: No ``ms bind ipv6`` -:Description: Enables Ceph daemons to bind to IPv6 addresses. Currently the - messenger *either* uses IPv4 or IPv6, but it cannot do both. +:Description: Enables Ceph daemons to bind to IPv6 addresses. :Type: Boolean :Default: ``false`` :Required: No diff --git a/ceph/doc/rados/operations/balancer.rst b/ceph/doc/rados/operations/balancer.rst index b4bbfd851..9f4f0b3df 100644 --- a/ceph/doc/rados/operations/balancer.rst +++ b/ceph/doc/rados/operations/balancer.rst @@ -42,9 +42,9 @@ healed itself). When the cluster is healthy, the balancer will throttle its changes such that the percentage of PGs that are misplaced (i.e., that need to be moved) is below a threshold of (by default) 5%. The -``max_misplaced`` threshold can be adjusted with:: +``target_max_misplaced_ratio`` threshold can be adjusted with:: - ceph config set mgr mgr/balancer/max_misplaced .07 # 7% + ceph config set mgr target_max_misplaced_ratio .07 # 7% Modes diff --git a/ceph/doc/radosgw/frontends.rst b/ceph/doc/radosgw/frontends.rst index e4a013590..be96e77e8 100644 --- a/ceph/doc/radosgw/frontends.rst +++ b/ceph/doc/radosgw/frontends.rst @@ -64,6 +64,38 @@ Options :Type: String :Default: None +``ssl_options`` + +:Description: Optional colon separated list of ssl context options: + + ``default_workarounds`` Implement various bug workarounds. + + ``no_compression`` Disable compression. + + ``no_sslv2`` Disable SSL v2. + + ``no_sslv3`` Disable SSL v3. + + ``no_tlsv1`` Disable TLS v1. + + ``no_tlsv1_1`` Disable TLS v1.1. + + ``no_tlsv1_2`` Disable TLS v1.2. + + ``single_dh_use`` Always create a new key when using tmp_dh parameters. + +:Type: String +:Default: ``no_sslv2:no_sslv3:no_tlsv1:no_tlsv1_1`` + +``ssl_ciphers`` + +:Description: Optional list of one or more cipher strings separated by colons. + The format of the string is described in openssl's ciphers(1) + manual. 
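As a worked illustration of the ``ssl_options`` and ``ssl_ciphers`` settings documented above, here is a small Python sketch that assembles a beast ``rgw frontends`` value; the port, certificate path and cipher string are illustrative placeholders, not values taken from this patch::

    # Build an "rgw frontends" string using the newly documented ssl settings.
    # ssl_port/ssl_certificate are assumed beast options; adjust to your setup.
    ssl_options = ["no_sslv2", "no_sslv3", "no_tlsv1", "no_tlsv1_1"]  # documented default
    frontend = (
        "beast ssl_port=443 ssl_certificate=/etc/ceph/rgw.pem "
        + "ssl_options=" + ":".join(ssl_options) + " "
        + "ssl_ciphers=ECDHE-ECDSA-AES128-GCM-SHA256"
    )
    print(frontend)
    # The release note above says an empty "ssl_options=" restores the pre-15.2.14
    # (no extra options) behaviour.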
+ +:Type: String +:Default: None + ``tcp_nodelay`` :Description: If set the socket option will disable Nagle's algorithm on diff --git a/ceph/examples/boto3/delete_notification.py b/ceph/examples/boto3/delete_notification.py index 8e4d3d7b7..ca5958e52 100755 --- a/ceph/examples/boto3/delete_notification.py +++ b/ceph/examples/boto3/delete_notification.py @@ -13,7 +13,7 @@ elif len(sys.argv) == 2: bucketname = sys.argv[1] notification_name = "" else: - print 'Usage: ' + sys.argv[0] + ' [notification]' + print('Usage: ' + sys.argv[0] + ' [notification]') sys.exit(1) # endpoint and keys from vstart @@ -30,7 +30,7 @@ client = boto3.client('s3', # deleting all notification configurations on a bucket (without deleting the bucket itself) are extension to AWS S3 API if notification_name == "": - print client.delete_bucket_notification_configuration(Bucket=bucketname) + print(client.delete_bucket_notification_configuration(Bucket=bucketname)) else: - print client.delete_bucket_notification_configuration(Bucket=bucketname, - Notification=notification_name) + print(client.delete_bucket_notification_configuration(Bucket=bucketname, + Notification=notification_name)) diff --git a/ceph/examples/boto3/get_notification.py b/ceph/examples/boto3/get_notification.py index 6e3219895..490c018d4 100755 --- a/ceph/examples/boto3/get_notification.py +++ b/ceph/examples/boto3/get_notification.py @@ -4,7 +4,7 @@ import boto3 import sys if len(sys.argv) != 3: - print 'Usage: ' + sys.argv[0] + ' ' + print('Usage: ' + sys.argv[0] + ' ') sys.exit(1) # bucket name as first argument @@ -24,5 +24,5 @@ client = boto3.client('s3', # getting a specific notification configuration is an extension to AWS S3 API -print client.get_bucket_notification_configuration(Bucket=bucketname, - Notification=notification_name) +print(client.get_bucket_notification_configuration(Bucket=bucketname, + Notification=notification_name)) diff --git a/ceph/examples/boto3/list_unordered.py b/ceph/examples/boto3/list_unordered.py index b2339eaa6..2aa5a8e06 100755 --- a/ceph/examples/boto3/list_unordered.py +++ b/ceph/examples/boto3/list_unordered.py @@ -4,7 +4,7 @@ import boto3 import sys if len(sys.argv) != 2: - print 'Usage: ' + sys.argv[0] + ' ' + print('Usage: ' + sys.argv[0] + ' ') sys.exit(1) # bucket name as first argument @@ -22,4 +22,4 @@ client = boto3.client('s3', # geting an unordered list of objets is an extension to AWS S3 API -print client.list_objects(Bucket=bucketname, AllowUnordered=True) +print(client.list_objects(Bucket=bucketname, AllowUnordered=True)) diff --git a/ceph/examples/boto3/notification_filters.py b/ceph/examples/boto3/notification_filters.py index a45393c74..2687c8b3a 100755 --- a/ceph/examples/boto3/notification_filters.py +++ b/ceph/examples/boto3/notification_filters.py @@ -4,7 +4,7 @@ import boto3 import sys if len(sys.argv) != 4: - print 'Usage: ' + sys.argv[0] + ' ' + print('Usage: ' + sys.argv[0] + ' ') sys.exit(1) # bucket name as first argument @@ -44,5 +44,5 @@ topic_conf_list = [{'Id': notification_id, } }}] -print client.put_bucket_notification_configuration(Bucket=bucketname, - NotificationConfiguration={'TopicConfigurations': topic_conf_list}) +print(client.put_bucket_notification_configuration(Bucket=bucketname, + NotificationConfiguration={'TopicConfigurations': topic_conf_list})) diff --git a/ceph/examples/boto3/service-2.sdk-extras.json b/ceph/examples/boto3/service-2.sdk-extras.json index 658832263..9ee66730e 100644 --- a/ceph/examples/boto3/service-2.sdk-extras.json +++ 
b/ceph/examples/boto3/service-2.sdk-extras.json @@ -191,12 +191,22 @@ "UsageStatsSummary": { "type": "structure", "members": { - "TotalBytes":{"shape":"TotalBytes"}, + "QuotaMaxBytes":{"shape":"QuotaMaxBytes"}, + "QuotaMaxBuckets":{"shape": "QuotaMaxBuckets"}, + "QuotaMaxObjCount":{"shape":"QuotaMaxObjCount"}, + "QuotaMaxBytesPerBucket":{"shape":"QuotaMaxBytesPerBucket"}, + "QuotaMaxObjCountPerBucket":{"shape":"QuotaMaxObjCountPerBucket"}, + "TotalBytes":{"shape":"TotalBytes"}, "TotalBytesRounded":{"shape":"TotalBytesRounded"}, "TotalEntries":{"shape":"TotalEntries"} } }, - "TotalBytesRounded":{"type":"integer"}, + "QuotaMaxBytes":{"type":"integer"}, + "QuotaMaxBuckets":{"type": "integer"}, + "QuotaMaxObjCount":{"type":"integer"}, + "QuotaMaxBytesPerBucket":{"type":"integer"}, + "QuotaMaxObjCountPerBucket":{"type":"integer"}, + "TotalBytesRounded":{"type":"integer"}, "TotalBytes":{"type":"integer"}, "TotalEntries":{"type":"integer"} }, diff --git a/ceph/examples/boto3/topic_with_endpoint.py b/ceph/examples/boto3/topic_with_endpoint.py index b6e626e02..3137cee7d 100755 --- a/ceph/examples/boto3/topic_with_endpoint.py +++ b/ceph/examples/boto3/topic_with_endpoint.py @@ -15,7 +15,7 @@ elif len(sys.argv) == 2: topic_name = sys.argv[1] region_name = "" else: - print 'Usage: ' + sys.argv[0] + ' [region name]' + print('Usage: ' + sys.argv[0] + ' [region name]') sys.exit(1) # endpoint and keys from vstart @@ -38,4 +38,4 @@ client = boto3.client('sns', endpoint_args = 'push-endpoint=amqp://127.0.0.1:5672&amqp-exchange=ex1&amqp-ack-level=broker' attributes = {nvp[0] : nvp[1] for nvp in urlparse.parse_qsl(endpoint_args, keep_blank_values=True)} -print client.create_topic(Name=topic_name, Attributes=attributes) +print(client.create_topic(Name=topic_name, Attributes=attributes)) diff --git a/ceph/make-dist b/ceph/make-dist index e8cc805f7..c24de14ab 100755 --- a/ceph/make-dist +++ b/ceph/make-dist @@ -1,7 +1,21 @@ #!/bin/bash -e +SCRIPTNAME="$(basename "${0}")" +BASEDIR="$(readlink -f "$(dirname "${0}")")" + if [ ! -d .git ]; then - echo "no .git present. run this from the base dir of the git checkout." + echo "$SCRIPTNAME: Full path to the script: $BASEDIR/$SCRIPTNAME" + echo "$SCRIPTNAME: No .git present. Run this from the base dir of the git checkout." + exit 1 +fi + +# Running the script from a directory containing a colon anywhere in the path +# will expose us to the dreaded "[BUG] npm run [command] failed if the directory +# path contains colon" bug https://github.com/npm/cli/issues/633 +# (see https://tracker.ceph.com/issues/39556 for details) +if [[ "$BASEDIR" == *:* ]] ; then + echo "$SCRIPTNAME: Full path to the script: $BASEDIR/$SCRIPTNAME" + echo "$SCRIPTNAME: The path to the script contains a colon. Their presence has been known to break the script." 
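The boto3 example scripts touched above now use Python 3 ``print()`` calls; a condensed sketch of the unordered-listing extension they demonstrate, assuming the ``service-2.sdk-extras.json`` file above is installed so boto3 accepts the extra parameter, and with endpoint, credentials and bucket as placeholders::

    import boto3

    # Placeholder endpoint and credentials, in the style of the vstart-based examples above.
    client = boto3.client(
        "s3",
        endpoint_url="http://127.0.0.1:8000",
        aws_access_key_id="ACCESS_KEY",
        aws_secret_access_key="SECRET_KEY",
    )

    # AllowUnordered is an RGW extension to the AWS S3 API (cf. list_unordered.py above).
    print(client.list_objects(Bucket="mybucket", AllowUnordered=True))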
exit 1 fi diff --git a/ceph/monitoring/grafana/dashboards/host-details.json b/ceph/monitoring/grafana/dashboards/host-details.json index 0300e4f01..237349dd3 100644 --- a/ceph/monitoring/grafana/dashboards/host-details.json +++ b/ceph/monitoring/grafana/dashboards/host-details.json @@ -37,7 +37,7 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1557386759572, + "iteration": 1615564911000, "links": [], "panels": [ { @@ -182,7 +182,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (mode) (\n irate(node_cpu{instance=~\"($ceph_hosts).*\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts).*\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m])\n) / scalar(\n sum(irate(node_cpu{instance=~\"($ceph_hosts).*\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts).*\"}[1m]))\n) * 100", + "expr": "sum by (mode) (\n irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m])\n) / scalar(\n sum(irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]))\n) * 100", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{mode}}", @@ -283,14 +283,14 @@ "steppedLine": false, "targets": [ { - "expr": "(node_memory_MemTotal{instance=~\"[[ceph_hosts]].*\"} or node_memory_MemTotal_bytes{instance=~\"[[ceph_hosts]].*\"})- (\n (node_memory_MemFree{instance=~\"[[ceph_hosts]].*\"} or node_memory_MemFree_bytes{instance=~\"[[ceph_hosts]].*\"}) + \n (node_memory_Cached{instance=~\"[[ceph_hosts]].*\"} or node_memory_Cached_bytes{instance=~\"[[ceph_hosts]].*\"}) + \n (node_memory_Buffers{instance=~\"[[ceph_hosts]].*\"} or node_memory_Buffers_bytes{instance=~\"[[ceph_hosts]].*\"}) +\n (node_memory_Slab{instance=~\"[[ceph_hosts]].*\"} or node_memory_Slab_bytes{instance=~\"[[ceph_hosts]].*\"})\n )\n \n", + "expr": "(node_memory_MemTotal{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"})- (\n (node_memory_MemFree{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) + \n (node_memory_Cached{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) + \n (node_memory_Buffers{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) +\n (node_memory_Slab{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"})\n )\n \n", "format": "time_series", "intervalFactor": 1, "legendFormat": "used", "refId": "D" }, { - "expr": "node_memory_MemFree{instance=~\"[[ceph_hosts]].*\"} or node_memory_MemFree_bytes{instance=~\"[[ceph_hosts]].*\"} ", + "expr": "node_memory_MemFree{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} ", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -298,7 +298,7 @@ "refId": "A" }, { - "expr": "(node_memory_Cached{instance=~\"[[ceph_hosts]].*\"} or node_memory_Cached_bytes{instance=~\"[[ceph_hosts]].*\"}) + \n(node_memory_Buffers{instance=~\"[[ceph_hosts]].*\"} or node_memory_Buffers_bytes{instance=~\"[[ceph_hosts]].*\"}) 
+\n(node_memory_Slab{instance=~\"[[ceph_hosts]].*\"} or node_memory_Slab_bytes{instance=~\"[[ceph_hosts]].*\"}) \n", + "expr": "(node_memory_Cached{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) + \n(node_memory_Buffers{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) +\n(node_memory_Slab{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) \n", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -306,7 +306,7 @@ "refId": "C" }, { - "expr": "node_memory_MemTotal{instance=~\"[[ceph_hosts]].*\"} or node_memory_MemTotal_bytes{instance=~\"[[ceph_hosts]].*\"} ", + "expr": "node_memory_MemTotal{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} ", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -401,7 +401,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (device) (\n irate(node_network_receive_bytes{instance=~\"($ceph_hosts).*\",device!=\"lo\"}[1m]) or \n irate(node_network_receive_bytes_total{instance=~\"($ceph_hosts).*\",device!=\"lo\"}[1m])\n)", + "expr": "sum by (device) (\n irate(node_network_receive_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or \n irate(node_network_receive_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.rx", @@ -410,7 +410,7 @@ "textEditor": true }, { - "expr": "sum by (device) (\n irate(node_network_transmit_bytes{instance=~\"($ceph_hosts).*\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($ceph_hosts).*\",device!=\"lo\"}[1m])\n)", + "expr": "sum by (device) (\n irate(node_network_transmit_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -501,7 +501,7 @@ "steppedLine": false, "targets": [ { - "expr": "irate(node_network_receive_drop{instance=~\"[[ceph_hosts]].*\"}[1m]) or irate(node_network_receive_drop_total{instance=~\"[[ceph_hosts]].*\"}[1m])", + "expr": "irate(node_network_receive_drop{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_drop_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -509,7 +509,7 @@ "refId": "A" }, { - "expr": "irate(node_network_transmit_drop{instance=~\"[[ceph_hosts]].*\"}[1m]) or irate(node_network_transmit_drop_total{instance=~\"[[ceph_hosts]].*\"}[1m])", + "expr": "irate(node_network_transmit_drop{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_drop_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -621,7 +621,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_osd_stat_bytes and on (ceph_daemon) ceph_disk_occupation{instance=~\"($ceph_hosts).*\"})", + "expr": "sum(ceph_osd_stat_bytes and on (ceph_daemon) ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"})", "format": "time_series", "intervalFactor": 2, "refId": "A", @@ -685,7 +685,7 @@ "steppedLine": false, "targets": [ { - "expr": 
"irate(node_network_receive_errs{instance=~\"[[ceph_hosts]].*\"}[1m]) or irate(node_network_receive_errs_total{instance=~\"[[ceph_hosts]].*\"}[1m])", + "expr": "irate(node_network_receive_errs{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_errs_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -693,7 +693,7 @@ "refId": "A" }, { - "expr": "irate(node_network_transmit_errs{instance=~\"[[ceph_hosts]].*\"}[1m]) or irate(node_network_transmit_errs_total{instance=~\"[[ceph_hosts]].*\"}[1m])", + "expr": "irate(node_network_transmit_errs{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_errs_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -798,7 +798,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(\n (\n irate(node_disk_writes_completed{instance=~\"($ceph_hosts).*\"}[5m]) or\n irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts).*\"}[5m])\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device) group_left(ceph_daemon)\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", + "expr": "label_replace(\n (\n irate(node_disk_writes_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or\n irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) writes", @@ -807,7 +807,7 @@ "textEditor": true }, { - "expr": "label_replace(\n (irate(node_disk_reads_completed{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts).*\"}[5m])),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device) group_left(ceph_daemon)\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", + "expr": "label_replace(\n (irate(node_disk_reads_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -899,14 +899,14 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace((irate(node_disk_bytes_written{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts).*\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": 
"label_replace((irate(node_disk_bytes_written{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) write", "refId": "B" }, { - "expr": "label_replace((irate(node_disk_bytes_read{instance=~\"($ceph_hosts).*\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts).*\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace((irate(node_disk_bytes_read{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) read", @@ -992,7 +992,7 @@ "steppedLine": false, "targets": [ { - "expr": "max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ instance=~\"($ceph_hosts).*\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts).*\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts).*\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts).*\"}[5m]), 0.001), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")) * on(instance,device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")) * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -1083,7 +1083,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts).*\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts).*\"}[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", 
\"([^:.]*).*\")", + "expr": "label_replace(((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", "format": "time_series", "hide": false, "intervalFactor": 1, diff --git a/ceph/monitoring/grafana/dashboards/hosts-overview.json b/ceph/monitoring/grafana/dashboards/hosts-overview.json index 804aa51cc..b179d5717 100644 --- a/ceph/monitoring/grafana/dashboards/hosts-overview.json +++ b/ceph/monitoring/grafana/dashboards/hosts-overview.json @@ -131,7 +131,6 @@ "#d44a3a" ], "datasource": "$datasource", - "decimals": 0, "description": "Average CPU busy across all hosts (OSD, RGW, MON etc) within the cluster", "decimals": 2, "format": "percentunit", @@ -215,7 +214,6 @@ "#d44a3a" ], "datasource": "$datasource", - "decimals": 0, "description": "Average Memory Usage across all hosts in the cluster (excludes buffer/cache usage)", "decimals": 2, "format": "percentunit", @@ -433,7 +431,7 @@ "tableColumn": "", "targets": [ { - "expr" : "avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) *\n on(instance, device) label_replace(label_replace(ceph_disk_occupation{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\")\n)", + "expr" : "avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) *\n on(instance, device, ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\")\n)", "format": "time_series", "instant": true, "intervalFactor": 1, diff --git a/ceph/qa/distros/all/centos_8.0.yaml b/ceph/qa/distros/all/centos_8.0.yaml index 4fbb41228..282379e03 100644 --- a/ceph/qa/distros/all/centos_8.0.yaml +++ b/ceph/qa/distros/all/centos_8.0.yaml @@ -1,6 +1,2 @@ os_type: centos os_version: "8.0" -overrides: - selinux: - whitelist: - - scontext=system_u:system_r:logrotate_t:s0 diff --git a/ceph/qa/distros/all/centos_8.3.yaml b/ceph/qa/distros/all/centos_8.3.yaml new file mode 100644 index 000000000..55ab1a48d --- /dev/null +++ b/ceph/qa/distros/all/centos_8.3.yaml @@ -0,0 +1,6 @@ +os_type: centos +os_version: "8.3" +overrides: + selinux: + whitelist: + - scontext=system_u:system_r:logrotate_t:s0 diff --git a/ceph/qa/distros/all/centos_8.yaml b/ceph/qa/distros/all/centos_8.yaml index 8abfcfd12..e9308ad1a 120000 --- a/ceph/qa/distros/all/centos_8.yaml +++ b/ceph/qa/distros/all/centos_8.yaml @@ -1 +1 @@ -centos_8.1.yaml \ No newline at end of file +centos_8.3.yaml \ No newline at end of file diff --git a/ceph/qa/distros/all/rhel_8.0.yaml b/ceph/qa/distros/all/rhel_8.0.yaml index 215c34b7f..3edcc303a 100644 --- a/ceph/qa/distros/all/rhel_8.0.yaml +++ b/ceph/qa/distros/all/rhel_8.0.yaml @@ -1,6 +1,2 @@ os_type: rhel os_version: "8.0" -overrides: - selinux: - whitelist: - - scontext=system_u:system_r:logrotate_t:s0 diff --git a/ceph/qa/distros/all/rhel_8.3.yaml 
b/ceph/qa/distros/all/rhel_8.3.yaml new file mode 100644 index 000000000..c18c07b83 --- /dev/null +++ b/ceph/qa/distros/all/rhel_8.3.yaml @@ -0,0 +1,6 @@ +os_type: rhel +os_version: "8.3" +overrides: + selinux: + whitelist: + - scontext=system_u:system_r:logrotate_t:s0 diff --git a/ceph/qa/distros/all/rhel_8.yaml b/ceph/qa/distros/all/rhel_8.yaml index c58864f04..9e5fa1165 120000 --- a/ceph/qa/distros/all/rhel_8.yaml +++ b/ceph/qa/distros/all/rhel_8.yaml @@ -1 +1 @@ -rhel_8.1.yaml \ No newline at end of file +rhel_8.3.yaml \ No newline at end of file diff --git a/ceph/qa/distros/all/ubuntu_20.04.yaml b/ceph/qa/distros/all/ubuntu_20.04.yaml new file mode 100644 index 000000000..f20398230 --- /dev/null +++ b/ceph/qa/distros/all/ubuntu_20.04.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "20.04" diff --git a/ceph/qa/distros/podman/centos_8.2_kubic_stable.yaml b/ceph/qa/distros/podman/centos_8.2_kubic_stable.yaml new file mode 100644 index 000000000..5ea907f75 --- /dev/null +++ b/ceph/qa/distros/podman/centos_8.2_kubic_stable.yaml @@ -0,0 +1,19 @@ +os_type: centos +os_version: "8.2" +overrides: + selinux: + whitelist: + - scontext=system_u:system_r:logrotate_t:s0 + +tasks: +- exec: + all: + - echo -e "[[registry]]\nlocation = 'docker.io'\n\n[[registry.mirror]]\nlocation='docker-mirror.front.sepia.ceph.com:5000'\n" | sudo tee /etc/containers/registries.conf + - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup + - sudo dnf -y module disable container-tools + - sudo dnf -y install 'dnf-command(copr)' + - sudo dnf -y copr enable rhcontainerbot/container-selinux + - sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8/devel:kubic:libcontainers:stable.repo + - sudo dnf remove -y podman + - sudo dnf -y install podman + - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/distros/podman/rhel_8.3_kubic_stable.yaml b/ceph/qa/distros/podman/rhel_8.3_kubic_stable.yaml new file mode 100644 index 000000000..541cf5180 --- /dev/null +++ b/ceph/qa/distros/podman/rhel_8.3_kubic_stable.yaml @@ -0,0 +1,19 @@ +os_type: rhel +os_version: "8.3" +overrides: + selinux: + whitelist: + - scontext=system_u:system_r:logrotate_t:s0 + +tasks: +- exec: + all: + - echo -e "[[registry]]\nlocation = 'docker.io'\n\n[[registry.mirror]]\nlocation='docker-mirror.front.sepia.ceph.com:5000'\n" | sudo tee /etc/containers/registries.conf + - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup + - sudo dnf -y module disable container-tools + - sudo dnf -y install 'dnf-command(copr)' + - sudo dnf -y copr enable rhcontainerbot/container-selinux + - sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8/devel:kubic:libcontainers:stable.repo + - sudo dnf remove -y podman + - sudo dnf -y install podman + - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/distros/all/ubuntu_18.04_podman.yaml b/ceph/qa/distros/podman/ubuntu_18.04_kubic_stable.yaml similarity index 80% rename from ceph/qa/distros/all/ubuntu_18.04_podman.yaml rename to ceph/qa/distros/podman/ubuntu_18.04_kubic_stable.yaml index 214e964ac..136d5c4f8 100644 --- a/ceph/qa/distros/all/ubuntu_18.04_podman.yaml +++ b/ceph/qa/distros/podman/ubuntu_18.04_kubic_stable.yaml @@ -5,8 +5,10 @@ os_version: "18.04" tasks: - exec: 
all: + - echo -e "[[registry]]\nlocation = 'docker.io'\n\n[[registry.mirror]]\nlocation='docker-mirror.front.sepia.ceph.com:5000'\n" | sudo tee /etc/containers/registries.conf + - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup - curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_18.04/Release.key | sudo apt-key add - - echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_18.04/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list - sudo apt update - sudo apt -y install podman - - echo -e "[[registry]]\nlocation = 'docker.io'\n\n[[registry.mirror]]\nlocation='docker-mirror.front.sepia.ceph.com:5000'\n" | sudo tee /etc/containers/registries.conf + - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/distros/podman/ubuntu_20.04_kubic_stable.yaml b/ceph/qa/distros/podman/ubuntu_20.04_kubic_stable.yaml new file mode 100644 index 000000000..9c42eb2ff --- /dev/null +++ b/ceph/qa/distros/podman/ubuntu_20.04_kubic_stable.yaml @@ -0,0 +1,14 @@ +os_type: ubuntu +os_version: "20.04" + +# feel free to remove this test, if Kubic project is no longer maintained. +tasks: +- exec: + all: + - echo -e "[[registry]]\nlocation = 'docker.io'\n\n[[registry.mirror]]\nlocation='docker-mirror.front.sepia.ceph.com:5000'\n" | sudo tee /etc/containers/registries.conf + - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup + - curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/Release.key | sudo apt-key add - + - echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list + - sudo apt update + - sudo apt -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install podman containernetworking-plugins + - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/distros/podman/ubuntu_20.04_kubic_testing.yaml b/ceph/qa/distros/podman/ubuntu_20.04_kubic_testing.yaml new file mode 100644 index 000000000..75e72d739 --- /dev/null +++ b/ceph/qa/distros/podman/ubuntu_20.04_kubic_testing.yaml @@ -0,0 +1,14 @@ +os_type: ubuntu +os_version: "20.04" + +# feel free to remove this test, if Kubic project is no longer maintained. 
+tasks: +- exec: + all: + - echo -e "[[registry]]\nlocation = 'docker.io'\n\n[[registry.mirror]]\nlocation='docker-mirror.front.sepia.ceph.com:5000'\n" | sudo tee /etc/containers/registries.conf + - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup + - curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/testing/xUbuntu_20.04/Release.key | sudo apt-key add - + - echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/testing/xUbuntu_20.04/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:testing.list + - sudo apt update + - sudo apt -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install podman containernetworking-plugins + - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/distros/supported-all-distro/rhel_8.2.yaml b/ceph/qa/distros/supported-all-distro/rhel_8.2.yaml deleted file mode 120000 index dd95793a8..000000000 --- a/ceph/qa/distros/supported-all-distro/rhel_8.2.yaml +++ /dev/null @@ -1 +0,0 @@ -../all/rhel_8.2.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported-all-distro/rhel_8.yaml b/ceph/qa/distros/supported-all-distro/rhel_8.yaml new file mode 120000 index 000000000..f803f091e --- /dev/null +++ b/ceph/qa/distros/supported-all-distro/rhel_8.yaml @@ -0,0 +1 @@ +../all/rhel_8.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported-all-distro/ubuntu_latest.yaml b/ceph/qa/distros/supported-all-distro/ubuntu_latest.yaml index 64a66d9aa..75d907e3b 120000 --- a/ceph/qa/distros/supported-all-distro/ubuntu_latest.yaml +++ b/ceph/qa/distros/supported-all-distro/ubuntu_latest.yaml @@ -1 +1 @@ -../all/ubuntu_18.04.yaml \ No newline at end of file +../all/ubuntu_20.04.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported-random-distro$/centos_8.yaml b/ceph/qa/distros/supported-random-distro$/centos_8.yaml new file mode 120000 index 000000000..b7e6c9b4e --- /dev/null +++ b/ceph/qa/distros/supported-random-distro$/centos_8.yaml @@ -0,0 +1 @@ +../all/centos_8.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported-random-distro$/centos_latest.yaml b/ceph/qa/distros/supported-random-distro$/centos_latest.yaml deleted file mode 120000 index 591d55ba5..000000000 --- a/ceph/qa/distros/supported-random-distro$/centos_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -../all/centos_8.1.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported-random-distro$/rhel_8.yaml b/ceph/qa/distros/supported-random-distro$/rhel_8.yaml new file mode 120000 index 000000000..f803f091e --- /dev/null +++ b/ceph/qa/distros/supported-random-distro$/rhel_8.yaml @@ -0,0 +1 @@ +../all/rhel_8.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported-random-distro$/rhel_latest.yaml b/ceph/qa/distros/supported-random-distro$/rhel_latest.yaml deleted file mode 120000 index dd95793a8..000000000 --- a/ceph/qa/distros/supported-random-distro$/rhel_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -../all/rhel_8.2.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported-random-distro$/ubuntu_18.04.yaml b/ceph/qa/distros/supported-random-distro$/ubuntu_18.04.yaml new file mode 120000 index 000000000..64a66d9aa --- /dev/null +++ b/ceph/qa/distros/supported-random-distro$/ubuntu_18.04.yaml @@ -0,0 +1 @@ +../all/ubuntu_18.04.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported-random-distro$/ubuntu_latest.yaml 
b/ceph/qa/distros/supported-random-distro$/ubuntu_latest.yaml index 64a66d9aa..75d907e3b 120000 --- a/ceph/qa/distros/supported-random-distro$/ubuntu_latest.yaml +++ b/ceph/qa/distros/supported-random-distro$/ubuntu_latest.yaml @@ -1 +1 @@ -../all/ubuntu_18.04.yaml \ No newline at end of file +../all/ubuntu_20.04.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported/centos_latest.yaml b/ceph/qa/distros/supported/centos_latest.yaml index 591d55ba5..b7e6c9b4e 120000 --- a/ceph/qa/distros/supported/centos_latest.yaml +++ b/ceph/qa/distros/supported/centos_latest.yaml @@ -1 +1 @@ -../all/centos_8.1.yaml \ No newline at end of file +../all/centos_8.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported/rhel_latest.yaml b/ceph/qa/distros/supported/rhel_latest.yaml index dd95793a8..f803f091e 120000 --- a/ceph/qa/distros/supported/rhel_latest.yaml +++ b/ceph/qa/distros/supported/rhel_latest.yaml @@ -1 +1 @@ -../all/rhel_8.2.yaml \ No newline at end of file +../all/rhel_8.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported/ubuntu_latest.yaml b/ceph/qa/distros/supported/ubuntu_latest.yaml index 64a66d9aa..75d907e3b 120000 --- a/ceph/qa/distros/supported/ubuntu_latest.yaml +++ b/ceph/qa/distros/supported/ubuntu_latest.yaml @@ -1 +1 @@ -../all/ubuntu_18.04.yaml \ No newline at end of file +../all/ubuntu_20.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/smoke/distro/ubuntu_18.04.yaml b/ceph/qa/suites/perf-basic/ubuntu_18.04.yaml similarity index 100% rename from ceph/qa/suites/rados/cephadm/smoke/distro/ubuntu_18.04.yaml rename to ceph/qa/suites/perf-basic/ubuntu_18.04.yaml diff --git a/ceph/qa/suites/rados/cephadm/smoke/distro/centos_7.yaml b/ceph/qa/suites/rados/cephadm/smoke/distro/centos_7.yaml deleted file mode 120000 index 5ad8254e1..000000000 --- a/ceph/qa/suites/rados/cephadm/smoke/distro/centos_7.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/all/centos_7.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/smoke/distro/centos_8.0.yaml b/ceph/qa/suites/rados/cephadm/smoke/distro/centos_8.0.yaml deleted file mode 120000 index f07a2bd87..000000000 --- a/ceph/qa/suites/rados/cephadm/smoke/distro/centos_8.0.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/all/centos_8.0.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/smoke/distro/centos_8.2_kubic_stable.yaml b/ceph/qa/suites/rados/cephadm/smoke/distro/centos_8.2_kubic_stable.yaml new file mode 120000 index 000000000..3afeed74d --- /dev/null +++ b/ceph/qa/suites/rados/cephadm/smoke/distro/centos_8.2_kubic_stable.yaml @@ -0,0 +1 @@ +.qa/distros/podman/centos_8.2_kubic_stable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/smoke/distro/centos_latest.yaml b/ceph/qa/suites/rados/cephadm/smoke/distro/centos_latest.yaml deleted file mode 120000 index bd9854e70..000000000 --- a/ceph/qa/suites/rados/cephadm/smoke/distro/centos_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_7.yaml b/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_7.yaml deleted file mode 120000 index b776cd2f7..000000000 --- a/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_7.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/all/rhel_7.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_8.0.yaml b/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_8.0.yaml deleted file mode 120000 
index 7247d53d1..000000000 --- a/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_8.0.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/all/rhel_8.0.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_8.3_kubic_stable.yaml b/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_8.3_kubic_stable.yaml new file mode 120000 index 000000000..20f0f7c55 --- /dev/null +++ b/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_8.3_kubic_stable.yaml @@ -0,0 +1 @@ +.qa/distros/podman/rhel_8.3_kubic_stable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_latest.yaml b/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_latest.yaml deleted file mode 120000 index be3103a33..000000000 --- a/ceph/qa/suites/rados/cephadm/smoke/distro/rhel_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/rhel_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/smoke/distro/ubuntu_20.04.yaml b/ceph/qa/suites/rados/cephadm/smoke/distro/ubuntu_20.04.yaml new file mode 120000 index 000000000..162964882 --- /dev/null +++ b/ceph/qa/suites/rados/cephadm/smoke/distro/ubuntu_20.04.yaml @@ -0,0 +1 @@ +.qa/distros/all/ubuntu_20.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/smoke/distro/ubuntu_latest.yaml b/ceph/qa/suites/rados/cephadm/smoke/distro/ubuntu_latest.yaml deleted file mode 120000 index 3a09f9abb..000000000 --- a/ceph/qa/suites/rados/cephadm/smoke/distro/ubuntu_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/upgrade/1-start-distro/1-start-centos_8.yaml b/ceph/qa/suites/rados/cephadm/upgrade/1-start-distro/1-start-centos_8.yaml index 14c803a9a..5d380ad78 100644 --- a/ceph/qa/suites/rados/cephadm/upgrade/1-start-distro/1-start-centos_8.yaml +++ b/ceph/qa/suites/rados/cephadm/upgrade/1-start-distro/1-start-centos_8.yaml @@ -1,7 +1,7 @@ tasks: - cephadm: - image: docker.io/ceph/ceph:v15.2.5 - cephadm_branch: v15.2.5 + image: docker.io/ceph/ceph:v15.2.9 + cephadm_branch: v15.2.9 cephadm_git_url: https://github.com/ceph/ceph # avoid --cap-add=PTRACE + --privileged for older cephadm versions allow_ptrace: false diff --git a/ceph/qa/suites/rados/cephadm/upgrade/1-start-distro/1-start-ubuntu_20.04.yaml b/ceph/qa/suites/rados/cephadm/upgrade/1-start-distro/1-start-ubuntu_20.04.yaml index 0ad5a32d3..76da6e80a 100644 --- a/ceph/qa/suites/rados/cephadm/upgrade/1-start-distro/1-start-ubuntu_20.04.yaml +++ b/ceph/qa/suites/rados/cephadm/upgrade/1-start-distro/1-start-ubuntu_20.04.yaml @@ -1,7 +1,7 @@ tasks: - cephadm: - image: docker.io/ceph/ceph:v15.2.0 - cephadm_branch: v15.2.0 + image: docker.io/ceph/ceph:v15.2.9 + cephadm_branch: v15.2.9 cephadm_git_url: https://github.com/ceph/ceph # avoid --cap-add=PTRACE + --privileged for older cephadm versions allow_ptrace: false diff --git a/ceph/qa/suites/rados/cephadm/with-work/0-distro b/ceph/qa/suites/rados/cephadm/with-work/0-distro new file mode 120000 index 000000000..92c9153ea --- /dev/null +++ b/ceph/qa/suites/rados/cephadm/with-work/0-distro @@ -0,0 +1 @@ +../smoke/distro/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/with-work/distro/centos_8.0.yaml b/ceph/qa/suites/rados/cephadm/with-work/distro/centos_8.0.yaml deleted file mode 120000 index f07a2bd87..000000000 --- a/ceph/qa/suites/rados/cephadm/with-work/distro/centos_8.0.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/all/centos_8.0.yaml \ No newline at end of file diff 
--git a/ceph/qa/suites/rados/cephadm/with-work/distro/centos_latest.yaml b/ceph/qa/suites/rados/cephadm/with-work/distro/centos_latest.yaml deleted file mode 120000 index bd9854e70..000000000 --- a/ceph/qa/suites/rados/cephadm/with-work/distro/centos_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/with-work/distro/rhel_8.0.yaml b/ceph/qa/suites/rados/cephadm/with-work/distro/rhel_8.0.yaml deleted file mode 120000 index 7247d53d1..000000000 --- a/ceph/qa/suites/rados/cephadm/with-work/distro/rhel_8.0.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/all/rhel_8.0.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/with-work/distro/rhel_latest.yaml b/ceph/qa/suites/rados/cephadm/with-work/distro/rhel_latest.yaml deleted file mode 120000 index be3103a33..000000000 --- a/ceph/qa/suites/rados/cephadm/with-work/distro/rhel_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/rhel_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/with-work/distro/ubuntu_18.04_podman.yaml b/ceph/qa/suites/rados/cephadm/with-work/distro/ubuntu_18.04_podman.yaml deleted file mode 120000 index b948dd434..000000000 --- a/ceph/qa/suites/rados/cephadm/with-work/distro/ubuntu_18.04_podman.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/all/ubuntu_18.04_podman.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/with-work/distro/.qa b/ceph/qa/suites/rados/cephadm/workunits/0-distro/.qa similarity index 100% rename from ceph/qa/suites/rados/cephadm/with-work/distro/.qa rename to ceph/qa/suites/rados/cephadm/workunits/0-distro/.qa diff --git a/ceph/qa/suites/rados/cephadm/workunits/0-distro/centos_8.2_kubic_stable.yaml b/ceph/qa/suites/rados/cephadm/workunits/0-distro/centos_8.2_kubic_stable.yaml new file mode 120000 index 000000000..3afeed74d --- /dev/null +++ b/ceph/qa/suites/rados/cephadm/workunits/0-distro/centos_8.2_kubic_stable.yaml @@ -0,0 +1 @@ +.qa/distros/podman/centos_8.2_kubic_stable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/workunits/0-distro/ubuntu_20.04_kubic_stable.yaml b/ceph/qa/suites/rados/cephadm/workunits/0-distro/ubuntu_20.04_kubic_stable.yaml new file mode 120000 index 000000000..065bb1ab2 --- /dev/null +++ b/ceph/qa/suites/rados/cephadm/workunits/0-distro/ubuntu_20.04_kubic_stable.yaml @@ -0,0 +1 @@ +.qa/distros/podman/ubuntu_20.04_kubic_stable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/workunits/0-distro/ubuntu_20.04_kubic_testing.yaml b/ceph/qa/suites/rados/cephadm/workunits/0-distro/ubuntu_20.04_kubic_testing.yaml new file mode 120000 index 000000000..36a5ad131 --- /dev/null +++ b/ceph/qa/suites/rados/cephadm/workunits/0-distro/ubuntu_20.04_kubic_testing.yaml @@ -0,0 +1 @@ +.qa/distros/podman/ubuntu_20.04_kubic_testing.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/workunits/distro/centos_latest.yaml b/ceph/qa/suites/rados/cephadm/workunits/distro/centos_latest.yaml deleted file mode 120000 index bd9854e70..000000000 --- a/ceph/qa/suites/rados/cephadm/workunits/distro/centos_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/cephadm/workunits/distro/ubuntu_18.04_podman.yaml b/ceph/qa/suites/rados/cephadm/workunits/distro/ubuntu_18.04_podman.yaml deleted file mode 120000 index b948dd434..000000000 --- 
a/ceph/qa/suites/rados/cephadm/workunits/distro/ubuntu_18.04_podman.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/all/ubuntu_18.04_podman.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml b/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml index 317c5de17..0c050d5dd 100644 --- a/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml +++ b/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml @@ -56,3 +56,4 @@ tasks: - tasks.mgr.dashboard.test_summary - tasks.mgr.dashboard.test_telemetry - tasks.mgr.dashboard.test_user + - tasks.mgr.dashboard.test_motd diff --git a/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml b/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml index deab01adb..905e6f783 100644 --- a/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml @@ -23,3 +23,4 @@ tasks: - cephfs_test_runner: modules: - tasks.mgr.test_module_selftest + fail_on_skip: false diff --git a/ceph/qa/suites/rados/cephadm/with-work/distro/ubuntu_18.04.yaml b/ceph/qa/suites/rados/perf/ubuntu_18.04.yaml similarity index 100% rename from ceph/qa/suites/rados/cephadm/with-work/distro/ubuntu_18.04.yaml rename to ceph/qa/suites/rados/perf/ubuntu_18.04.yaml diff --git a/ceph/qa/suites/rados/perf/ubuntu_latest.yaml b/ceph/qa/suites/rados/perf/ubuntu_latest.yaml deleted file mode 120000 index 3a09f9abb..000000000 --- a/ceph/qa/suites/rados/perf/ubuntu_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash/3-scrub-overrides/$ b/ceph/qa/suites/rados/thrash/3-scrub-overrides/$ new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/rados/cephadm/workunits/distro/.qa b/ceph/qa/suites/rados/thrash/3-scrub-overrides/.qa similarity index 100% rename from ceph/qa/suites/rados/cephadm/workunits/distro/.qa rename to ceph/qa/suites/rados/thrash/3-scrub-overrides/.qa diff --git a/ceph/qa/suites/rados/thrash/3-scrub-overrides/default.yaml b/ceph/qa/suites/rados/thrash/3-scrub-overrides/default.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/rados/thrash/3-scrub-overrides/max-simultaneous-scrubs-2.yaml b/ceph/qa/suites/rados/thrash/3-scrub-overrides/max-simultaneous-scrubs-2.yaml new file mode 100644 index 000000000..abf852e98 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/3-scrub-overrides/max-simultaneous-scrubs-2.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + osd: + osd max scrubs: 2 diff --git a/ceph/qa/suites/rados/thrash/3-scrub-overrides/max-simultaneous-scrubs-3.yaml b/ceph/qa/suites/rados/thrash/3-scrub-overrides/max-simultaneous-scrubs-3.yaml new file mode 100644 index 000000000..3b3dfd61f --- /dev/null +++ b/ceph/qa/suites/rados/thrash/3-scrub-overrides/max-simultaneous-scrubs-3.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + osd: + osd max scrubs: 3 diff --git a/ceph/qa/suites/rados/thrash/msgr-failures/osd-dispatch-delay.yaml b/ceph/qa/suites/rados/thrash/msgr-failures/osd-dispatch-delay.yaml new file mode 100644 index 000000000..aff059fb8 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/msgr-failures/osd-dispatch-delay.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + osd debug inject dispatch delay duration: 0.1 + osd debug inject dispatch delay probability: 0.1 + diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/3-workload/rbd_notification_tests.yaml 
b/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/3-workload/rbd_notification_tests.yaml index 212846d4d..45520042d 100644 --- a/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/3-workload/rbd_notification_tests.yaml +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/3-workload/rbd_notification_tests.yaml @@ -8,25 +8,23 @@ tasks: env: RBD_FEATURES: "61" - workunit: - #The line below to change to 'pacific' - branch: master + branch: pacific clients: client.1: - rbd/notify_slave.sh env: RBD_FEATURES: "61" + RBD_DISABLE_UPDATE_FEATURES: "1" - print: "**** done rbd: old librbd -> new librbd" - parallel: - workunit: - #The line below to change to 'pacific' - branch: master + branch: octopus clients: client.0: - rbd/notify_slave.sh env: RBD_FEATURES: "61" - workunit: - #The line below to change to 'pacific' branch: pacific clients: client.1: diff --git a/ceph/qa/suites/upgrade/mimic-x-singleton/ubuntu_18.04.yaml b/ceph/qa/suites/upgrade/mimic-x-singleton/ubuntu_18.04.yaml new file mode 120000 index 000000000..dce171c3c --- /dev/null +++ b/ceph/qa/suites/upgrade/mimic-x-singleton/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$/ubuntu_18.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/mimic-x-singleton/ubuntu_latest.yaml b/ceph/qa/suites/upgrade/mimic-x-singleton/ubuntu_latest.yaml deleted file mode 120000 index 3a09f9abb..000000000 --- a/ceph/qa/suites/upgrade/mimic-x-singleton/ubuntu_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/mimic-x/parallel/ubuntu_18.04.yaml b/ceph/qa/suites/upgrade/mimic-x/parallel/ubuntu_18.04.yaml new file mode 120000 index 000000000..dce171c3c --- /dev/null +++ b/ceph/qa/suites/upgrade/mimic-x/parallel/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$/ubuntu_18.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/mimic-x/parallel/ubuntu_latest.yaml b/ceph/qa/suites/upgrade/mimic-x/parallel/ubuntu_latest.yaml deleted file mode 120000 index 3a09f9abb..000000000 --- a/ceph/qa/suites/upgrade/mimic-x/parallel/ubuntu_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/ubuntu_18.04.yaml b/ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/ubuntu_18.04.yaml new file mode 120000 index 000000000..dce171c3c --- /dev/null +++ b/ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$/ubuntu_18.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/ubuntu_latest.yaml b/ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/ubuntu_latest.yaml deleted file mode 120000 index 3a09f9abb..000000000 --- a/ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/ubuntu_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/mimic-x/stress-split/ubuntu_18.04.yaml b/ceph/qa/suites/upgrade/mimic-x/stress-split/ubuntu_18.04.yaml new file mode 120000 index 000000000..dce171c3c --- /dev/null +++ b/ceph/qa/suites/upgrade/mimic-x/stress-split/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$/ubuntu_18.04.yaml \ No newline at end of file diff --git 
a/ceph/qa/suites/upgrade/mimic-x/stress-split/ubuntu_latest.yaml b/ceph/qa/suites/upgrade/mimic-x/stress-split/ubuntu_latest.yaml deleted file mode 120000 index 3a09f9abb..000000000 --- a/ceph/qa/suites/upgrade/mimic-x/stress-split/ubuntu_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/nautilus-x-singleton/ubuntu_18.04.yaml b/ceph/qa/suites/upgrade/nautilus-x-singleton/ubuntu_18.04.yaml new file mode 120000 index 000000000..dce171c3c --- /dev/null +++ b/ceph/qa/suites/upgrade/nautilus-x-singleton/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$/ubuntu_18.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/nautilus-x-singleton/ubuntu_latest.yaml b/ceph/qa/suites/upgrade/nautilus-x-singleton/ubuntu_latest.yaml deleted file mode 120000 index 3a09f9abb..000000000 --- a/ceph/qa/suites/upgrade/nautilus-x-singleton/ubuntu_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/nautilus-x/parallel/ubuntu_18.04.yaml b/ceph/qa/suites/upgrade/nautilus-x/parallel/ubuntu_18.04.yaml new file mode 120000 index 000000000..dce171c3c --- /dev/null +++ b/ceph/qa/suites/upgrade/nautilus-x/parallel/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$/ubuntu_18.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/nautilus-x/parallel/ubuntu_latest.yaml b/ceph/qa/suites/upgrade/nautilus-x/parallel/ubuntu_latest.yaml deleted file mode 120000 index 3a09f9abb..000000000 --- a/ceph/qa/suites/upgrade/nautilus-x/parallel/ubuntu_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/ubuntu_18.04.yaml b/ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/ubuntu_18.04.yaml new file mode 120000 index 000000000..dce171c3c --- /dev/null +++ b/ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$/ubuntu_18.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/ubuntu_latest.yaml b/ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/ubuntu_latest.yaml deleted file mode 120000 index 3a09f9abb..000000000 --- a/ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/ubuntu_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/nautilus-x/stress-split/ubuntu_18.04.yaml b/ceph/qa/suites/upgrade/nautilus-x/stress-split/ubuntu_18.04.yaml new file mode 120000 index 000000000..dce171c3c --- /dev/null +++ b/ceph/qa/suites/upgrade/nautilus-x/stress-split/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$/ubuntu_18.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/nautilus-x/stress-split/ubuntu_latest.yaml b/ceph/qa/suites/upgrade/nautilus-x/stress-split/ubuntu_latest.yaml deleted file mode 120000 index 3a09f9abb..000000000 --- a/ceph/qa/suites/upgrade/nautilus-x/stress-split/ubuntu_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/tasks/barbican.py b/ceph/qa/tasks/barbican.py index 46b036ef4..03f58c5c7 100644 --- a/ceph/qa/tasks/barbican.py +++ b/ceph/qa/tasks/barbican.py @@ -8,6 +8,8 @@ import 
six from six.moves import http_client from six.moves.urllib.parse import urlparse import json +import time +import math from teuthology import misc as teuthology from teuthology import contextutil @@ -326,12 +328,16 @@ def create_secrets(ctx, config): token_resp.status < 300): raise Exception("Cannot authenticate user "+secret["username"]+" for secret creation") + expire = time.time() + 5400 # now + 90m + (expire_fract,dummy) = math.modf(expire) + expire_format = "%%FT%%T.%06d" % (round(expire_fract*1000000)) + expiration = time.strftime(expire_format, time.gmtime(expire)) token_id = token_resp.getheader('x-subject-token') key1_json = json.dumps( { "name": secret['name'], - "expiration": "2020-12-31T19:14:44.180394", + "expiration": expiration, "algorithm": "aes", "bit_length": 256, "mode": "cbc", diff --git a/ceph/qa/tasks/cephadm.py b/ceph/qa/tasks/cephadm.py index 431392745..51cf7488e 100644 --- a/ceph/qa/tasks/cephadm.py +++ b/ceph/qa/tasks/cephadm.py @@ -583,10 +583,10 @@ def ceph_mgrs(ctx, config): for mgr in [r for r in roles if teuthology.is_type('mgr', cluster_name)(r)]: c_, _, id_ = teuthology.split_role(mgr) - if c_ == cluster_name and id_ == ctx.ceph[cluster_name].first_mgr: - continue log.info('Adding %s on %s' % (mgr, remote.shortname)) nodes.append(remote.shortname + '=' + id_) + if c_ == cluster_name and id_ == ctx.ceph[cluster_name].first_mgr: + continue daemons[mgr] = (remote, id_) if nodes: _shell(ctx, cluster_name, remote, [ diff --git a/ceph/qa/tasks/cephfs/filesystem.py b/ceph/qa/tasks/cephfs/filesystem.py index 7f01b0ff4..0c02e2aa3 100644 --- a/ceph/qa/tasks/cephfs/filesystem.py +++ b/ceph/qa/tasks/cephfs/filesystem.py @@ -63,9 +63,12 @@ class FSStatus(object): """ Operations on a snapshot of the FSMap. """ - def __init__(self, mon_manager): + def __init__(self, mon_manager, epoch=None): self.mon = mon_manager - self.map = json.loads(self.mon.raw_cluster_cmd("fs", "dump", "--format=json")) + cmd = ["fs", "dump", "--format=json"] + if epoch is not None: + cmd.append(str(epoch)) + self.map = json.loads(self.mon.raw_cluster_cmd(*cmd)) def __str__(self): return json.dumps(self.map, indent = 2, sort_keys = True) @@ -333,8 +336,8 @@ class MDSCluster(CephCluster): def newfs(self, name='cephfs', create=True): return Filesystem(self._ctx, name=name, create=create) - def status(self): - return FSStatus(self.mon_manager) + def status(self, epoch=None): + return FSStatus(self.mon_manager, epoch) def delete_all_filesystems(self): """ diff --git a/ceph/qa/tasks/cephfs/test_admin.py b/ceph/qa/tasks/cephfs/test_admin.py index 60198604b..879dc0568 100644 --- a/ceph/qa/tasks/cephfs/test_admin.py +++ b/ceph/qa/tasks/cephfs/test_admin.py @@ -1,4 +1,7 @@ +import errno import json +import logging +import time from teuthology.orchestra.run import CommandFailedError @@ -7,6 +10,7 @@ from tasks.cephfs.fuse_mount import FuseMount from tasks.cephfs.filesystem import FileLayout +log = logging.getLogger(__name__) class TestAdminCommands(CephFSTestCase): """ @@ -169,6 +173,77 @@ class TestAdminCommands(CephFSTestCase): pool_names[i], 'cephfs', keys[i], fs_name) +class TestDump(CephFSTestCase): + CLIENTS_REQUIRED = 0 + MDSS_REQUIRED = 1 + + def test_fs_dump_epoch(self): + """ + That dumping a specific epoch works. + """ + + status1 = self.fs.status() + status2 = self.fs.status(epoch=status1["epoch"]-1) + self.assertEqual(status1["epoch"], status2["epoch"]+1) + + def test_fsmap_trim(self): + """ + That the fsmap is trimmed normally. 
+ """ + + paxos_service_trim_min = 25 + self.config_set('mon', 'paxos_service_trim_min', paxos_service_trim_min) + mon_max_mdsmap_epochs = 20 + self.config_set('mon', 'mon_max_mdsmap_epochs', mon_max_mdsmap_epochs) + + status = self.fs.status() + epoch = status["epoch"] + + # for N mutations + mutations = paxos_service_trim_min + mon_max_mdsmap_epochs + b = False + for i in range(mutations): + self.fs.set_joinable(b) + b = not b + + time.sleep(10) # for tick/compaction + + try: + self.fs.status(epoch=epoch) + except CommandFailedError as e: + self.assertEqual(e.exitstatus, errno.ENOENT, "invalid error code when trying to fetch FSMap that was trimmed") + else: + self.fail("trimming did not occur as expected") + + def test_fsmap_force_trim(self): + """ + That the fsmap is trimmed forcefully. + """ + + status = self.fs.status() + epoch = status["epoch"] + + paxos_service_trim_min = 1 + self.config_set('mon', 'paxos_service_trim_min', paxos_service_trim_min) + mon_mds_force_trim_to = epoch+1 + self.config_set('mon', 'mon_mds_force_trim_to', mon_mds_force_trim_to) + + # force a new fsmap + self.fs.set_joinable(False) + time.sleep(10) # for tick/compaction + + status = self.fs.status() + log.debug(f"new epoch is {status['epoch']}") + self.fs.status(epoch=epoch+1) # epoch+1 is not trimmed, may not == status["epoch"] + + try: + self.fs.status(epoch=epoch) + except CommandFailedError as e: + self.assertEqual(e.exitstatus, errno.ENOENT, "invalid error code when trying to fetch FSMap that was trimmed") + else: + self.fail("trimming did not occur as expected") + + class TestConfigCommands(CephFSTestCase): """ Test that daemons and clients respond to the otherwise rarely-used diff --git a/ceph/qa/tasks/mgr/dashboard/test_motd.py b/ceph/qa/tasks/mgr/dashboard/test_motd.py new file mode 100644 index 000000000..2edbf36ba --- /dev/null +++ b/ceph/qa/tasks/mgr/dashboard/test_motd.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# pylint: disable=too-many-public-methods + +from __future__ import absolute_import + +import time + +from .helper import DashboardTestCase + + +class MotdTest(DashboardTestCase): + @classmethod + def tearDownClass(cls): + cls._ceph_cmd(['dashboard', 'motd', 'clear']) + super(MotdTest, cls).tearDownClass() + + def setUp(self): + super(MotdTest, self).setUp() + self._ceph_cmd(['dashboard', 'motd', 'clear']) + + def test_none(self): + data = self._get('/ui-api/motd') + self.assertStatus(200) + self.assertIsNone(data) + + def test_set(self): + self._ceph_cmd(['dashboard', 'motd', 'set', 'info', '0', 'foo bar baz']) + data = self._get('/ui-api/motd') + self.assertStatus(200) + self.assertIsInstance(data, dict) + + def test_expired(self): + self._ceph_cmd(['dashboard', 'motd', 'set', 'info', '2s', 'foo bar baz']) + time.sleep(5) + data = self._get('/ui-api/motd') + self.assertStatus(200) + self.assertIsNone(data) diff --git a/ceph/qa/tasks/mgr/test_module_selftest.py b/ceph/qa/tasks/mgr/test_module_selftest.py index 63cf39a3b..a982ad3c4 100644 --- a/ceph/qa/tasks/mgr/test_module_selftest.py +++ b/ceph/qa/tasks/mgr/test_module_selftest.py @@ -3,6 +3,7 @@ import time import requests import errno import logging + from teuthology.exceptions import CommandFailedError from tasks.mgr.mgr_test_case import MgrTestCase @@ -49,6 +50,13 @@ class TestModuleSelftest(MgrTestCase): self._selftest_plugin("influx") def test_diskprediction_local(self): + self._load_module("selftest") + python_version = self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "python-version") + if tuple(int(v) for v 
in python_version.split('.')) >= (3, 8): + # https://tracker.ceph.com/issues/45147 + self.skipTest(f'python {python_version} not compatible with ' + 'diskprediction_local') self._selftest_plugin("diskprediction_local") # Not included in qa/packages/packages.yaml diff --git a/ceph/qa/tasks/userdata_setup.yaml b/ceph/qa/tasks/userdata_setup.yaml index 7271925c9..9aa2d0396 100644 --- a/ceph/qa/tasks/userdata_setup.yaml +++ b/ceph/qa/tasks/userdata_setup.yaml @@ -14,6 +14,8 @@ #!/usr/bin/env bash # mount a NFS share for storing logs + sed -i 's/archive.ubuntu.com/old-releases.ubuntu.com/' /etc/apt/sources.list + sed -i 's/security.ubuntu.com/old-releases.ubuntu.com/' /etc/apt/sources.list apt-get update apt-get -y install nfs-common mkdir /mnt/log diff --git a/ceph/qa/workunits/cephadm/test_repos.sh b/ceph/qa/workunits/cephadm/test_repos.sh index 1732ca1df..148972ceb 100755 --- a/ceph/qa/workunits/cephadm/test_repos.sh +++ b/ceph/qa/workunits/cephadm/test_repos.sh @@ -28,7 +28,7 @@ sudo $CEPHADM -v add-repo --dev master test_install_uninstall sudo $CEPHADM -v rm-repo -sudo $CEPHADM -v add-repo --release 15.1.1 +sudo $CEPHADM -v add-repo --release 15.2.7 test_install_uninstall sudo $CEPHADM -v rm-repo diff --git a/ceph/qa/workunits/rados/test_envlibrados_for_rocksdb.sh b/ceph/qa/workunits/rados/test_envlibrados_for_rocksdb.sh index ecbb4098a..7be53727c 100755 --- a/ceph/qa/workunits/rados/test_envlibrados_for_rocksdb.sh +++ b/ceph/qa/workunits/rados/test_envlibrados_for_rocksdb.sh @@ -33,7 +33,7 @@ case $(distro_id) in sudo subscription-manager repos --enable "codeready-builder-for-rhel-8-x86_64-rpms" ;; esac - install git gcc-c++.x86_64 snappy-devel zlib zlib-devel bzip2 bzip2-devel libradospp-devel.x86_64 cmake + install git gcc-c++.x86_64 snappy-devel zlib zlib-devel bzip2 bzip2-devel libradospp-devel.x86_64 cmake libarchive ;; opensuse*|suse|sles) install git gcc-c++ snappy-devel zlib-devel libbz2-devel libradospp-devel diff --git a/ceph/qa/workunits/rbd/cli_generic.sh b/ceph/qa/workunits/rbd/cli_generic.sh index e363109c2..e6a7cb759 100755 --- a/ceph/qa/workunits/rbd/cli_generic.sh +++ b/ceph/qa/workunits/rbd/cli_generic.sh @@ -485,21 +485,219 @@ test_purge() { echo "testing trash purge..." 
remove_images + rbd trash ls | wc -l | grep 0 + rbd trash purge + + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd create $RBD_CREATE_ARGS --size 256 testimg2 + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash ls | wc -l | grep 2 + rbd trash purge + rbd trash ls | wc -l | grep 0 + + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd create $RBD_CREATE_ARGS --size 256 testimg2 + rbd trash mv testimg1 --expires-at "1 hour" + rbd trash mv testimg2 --expires-at "3 hours" + rbd trash ls | wc -l | grep 2 + rbd trash purge + rbd trash ls | wc -l | grep 2 + rbd trash purge --expired-before "now + 2 hours" + rbd trash ls | wc -l | grep 1 + rbd trash ls | grep testimg2 + rbd trash purge --expired-before "now + 4 hours" + rbd trash ls | wc -l | grep 0 + + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd snap create testimg1@snap # pin testimg1 + rbd create $RBD_CREATE_ARGS --size 256 testimg2 + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash mv testimg3 + rbd trash ls | wc -l | grep 3 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 1 + rbd trash ls | grep testimg1 + ID=$(rbd trash ls | awk '{ print $1 }') + rbd snap purge --image-id $ID + rbd trash purge + rbd trash ls | wc -l | grep 0 + + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd create $RBD_CREATE_ARGS --size 256 testimg2 + rbd snap create testimg2@snap # pin testimg2 + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash mv testimg3 + rbd trash ls | wc -l | grep 3 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 1 + rbd trash ls | grep testimg2 + ID=$(rbd trash ls | awk '{ print $1 }') + rbd snap purge --image-id $ID rbd trash purge rbd trash ls | wc -l | grep 0 - rbd create $RBD_CREATE_ARGS foo -s 1 - rbd create $RBD_CREATE_ARGS bar -s 1 + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd create $RBD_CREATE_ARGS --size 256 testimg2 + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd snap create testimg3@snap # pin testimg3 + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash mv testimg3 + rbd trash ls | wc -l | grep 3 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 1 + rbd trash ls | grep testimg3 + ID=$(rbd trash ls | awk '{ print $1 }') + rbd snap purge --image-id $ID + rbd trash purge + rbd trash ls | wc -l | grep 0 - rbd trash mv foo --expires-at "10 sec" - rbd trash mv bar --expires-at "30 sec" + # test purging a clone with a chain of parents + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd snap create testimg1@snap + rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2 + rbd snap rm testimg1@snap + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd snap create testimg2@snap + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4 + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5 + rbd snap rm testimg2@snap + rbd snap create testimg4@snap + rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6 + rbd snap rm testimg4@snap + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash mv testimg3 + rbd trash mv testimg4 + rbd trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 3 + rbd trash ls | grep testimg1 + rbd trash ls | grep testimg2 + rbd trash ls | grep testimg4 + rbd trash mv testimg6 + rbd 
trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 2 + rbd trash ls | grep testimg1 + rbd trash ls | grep testimg2 + rbd trash mv testimg5 + rbd trash ls | wc -l | grep 3 + rbd trash purge + rbd trash ls | wc -l | grep 0 - rbd trash purge --expired-before "now + 10 sec" - rbd trash ls | grep -v foo | wc -l | grep 1 - rbd trash ls | grep bar + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd snap create testimg1@snap + rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2 + rbd snap rm testimg1@snap + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd snap create testimg3@snap # pin testimg3 + rbd snap create testimg2@snap + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4 + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5 + rbd snap rm testimg2@snap + rbd snap create testimg4@snap + rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6 + rbd snap rm testimg4@snap + rbd trash mv testimg1 + rbd trash mv testimg2 + rbd trash mv testimg3 + rbd trash mv testimg4 + rbd trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 4 + rbd trash mv testimg6 + rbd trash ls | wc -l | grep 5 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 3 + rbd trash ls | grep testimg1 + rbd trash ls | grep testimg2 + rbd trash ls | grep testimg3 + rbd trash mv testimg5 + rbd trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 1 + rbd trash ls | grep testimg3 + ID=$(rbd trash ls | awk '{ print $1 }') + rbd snap purge --image-id $ID + rbd trash purge + rbd trash ls | wc -l | grep 0 + + # test purging a clone with a chain of auto-delete parents + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd snap create testimg1@snap + rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2 + rbd snap rm testimg1@snap + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd snap create testimg2@snap + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4 + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5 + rbd snap rm testimg2@snap + rbd snap create testimg4@snap + rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6 + rbd snap rm testimg4@snap + rbd rm --rbd_move_parent_to_trash_on_remove=true testimg1 + rbd rm --rbd_move_parent_to_trash_on_remove=true testimg2 + rbd trash mv testimg3 + rbd rm --rbd_move_parent_to_trash_on_remove=true testimg4 + rbd trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 3 + rbd trash ls | grep testimg1 + rbd trash ls | grep testimg2 + rbd trash ls | grep testimg4 + rbd trash mv testimg6 + rbd trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 2 + rbd trash ls | grep testimg1 + rbd trash ls | grep testimg2 + rbd trash mv testimg5 + rbd trash ls | wc -l | grep 3 + rbd trash purge + rbd trash ls | wc -l | grep 0 - LAST_IMG=$(rbd trash ls | grep bar | awk '{print $1;}') - rbd trash rm $LAST_IMG --force --no-progress | grep -v '.' 
| wc -l | grep 0 + rbd create $RBD_CREATE_ARGS --size 256 testimg1 + rbd snap create testimg1@snap + rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2 + rbd snap rm testimg1@snap + rbd create $RBD_CREATE_ARGS --size 256 testimg3 + rbd snap create testimg3@snap # pin testimg3 + rbd snap create testimg2@snap + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4 + rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5 + rbd snap rm testimg2@snap + rbd snap create testimg4@snap + rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6 + rbd snap rm testimg4@snap + rbd rm --rbd_move_parent_to_trash_on_remove=true testimg1 + rbd rm --rbd_move_parent_to_trash_on_remove=true testimg2 + rbd trash mv testimg3 + rbd rm --rbd_move_parent_to_trash_on_remove=true testimg4 + rbd trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 4 + rbd trash mv testimg6 + rbd trash ls | wc -l | grep 5 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 3 + rbd trash ls | grep testimg1 + rbd trash ls | grep testimg2 + rbd trash ls | grep testimg3 + rbd trash mv testimg5 + rbd trash ls | wc -l | grep 4 + rbd trash purge 2>&1 | grep 'some expired images could not be removed' + rbd trash ls | wc -l | grep 1 + rbd trash ls | grep testimg3 + ID=$(rbd trash ls | awk '{ print $1 }') + rbd snap purge --image-id $ID + rbd trash purge + rbd trash ls | wc -l | grep 0 } test_deep_copy_clone() { diff --git a/ceph/qa/workunits/rbd/qemu-iotests.sh b/ceph/qa/workunits/rbd/qemu-iotests.sh index ddb63dae3..1f13da9fc 100755 --- a/ceph/qa/workunits/rbd/qemu-iotests.sh +++ b/ceph/qa/workunits/rbd/qemu-iotests.sh @@ -5,11 +5,11 @@ # require the admin ceph user, as there's no way to pass the ceph user # to qemu-iotests currently. 
-testlist='001 002 003 004 005 008 009 010 011 021 025 032 033 055' +testlist='001 002 003 004 005 008 009 010 011 021 025 032 033' git clone https://github.com/qemu/qemu.git cd qemu -if lsb_release -da 2>&1 | grep -iq 'bionic'; then +if lsb_release -da 2>&1 | grep -iqE '(bionic|focal)'; then # Bionic requires a matching test harness git checkout v2.11.0 elif lsb_release -da 2>&1 | grep -iqE '(xenial|linux release 8)'; then @@ -34,11 +34,6 @@ then ln -s /usr/bin/qemu-nbd else QEMU='/usr/libexec/qemu-kvm' - - # disable test 055 since qemu-kvm (RHEL/CentOS) doesn't support the - # required QMP commands under EL7 and Python 3 is not supported by - # the test under EL8 - testlist=$(echo ${testlist} | sed "s/ 055//g") fi ln -s $QEMU bin/qemu diff --git a/ceph/qa/workunits/rgw/test_rgw_orphan_list.sh b/ceph/qa/workunits/rgw/test_rgw_orphan_list.sh index 4299078a1..34d550cea 100755 --- a/ceph/qa/workunits/rgw/test_rgw_orphan_list.sh +++ b/ceph/qa/workunits/rgw/test_rgw_orphan_list.sh @@ -1,12 +1,13 @@ #!/usr/bin/env bash -set -ex +# set -x +set -e # if defined, debug messages will be displayed and prepended with the string # debug="DEBUG" -huge_size=2222 # in megabytes -big_size=6 # in megabytes +huge_size=5100 # in megabytes +big_size=7 # in megabytes huge_obj=/tmp/huge_obj.temp.$$ big_obj=/tmp/big_obj.temp.$$ @@ -160,7 +161,6 @@ mys3uploadkill() { exit 1 fi - set -v local_file="$1" remote_bkt="$2" remote_obj="$3" @@ -229,8 +229,16 @@ mys3cmd ls s3://multipart-bkt bkt="incomplete-mp-bkt-1" mys3cmd mb s3://$bkt -mys3uploadkill $huge_obj $bkt incomplete-mp-obj-1 $fifo 20 -mys3uploadkill $huge_obj $bkt incomplete-mp-obj-2 $fifo 100 + +mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c $fifo 20 + +# generate an incomplete multipart with more than 1,000 parts +mys3uploadkill $huge_obj $bkt incomplete-mp-obj-b $fifo 1005 + +# generate more than 1000 incomplet multiparts +for c in $(seq 1005) ;do + mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c-$c $fifo 3 +done #################################### # resharded bucket diff --git a/ceph/src/.git_version b/ceph/src/.git_version index a05bd878e..59bde6058 100644 --- a/ceph/src/.git_version +++ b/ceph/src/.git_version @@ -1,2 +1,2 @@ -c44bc49e7a57a87d84dfff2a077a2058aa2172e2 -15.2.13 +cd3bb7e87a2f62c1b862ff3fd8b1eec13391a5be +15.2.14 diff --git a/ceph/src/ceph-volume/ceph_volume/api/lvm.py b/ceph/src/ceph-volume/ceph_volume/api/lvm.py index 30362f1bd..e4b932b80 100644 --- a/ceph/src/ceph-volume/ceph_volume/api/lvm.py +++ b/ceph/src/ceph-volume/ceph_volume/api/lvm.py @@ -1134,3 +1134,15 @@ def get_device_lvs(device, name_prefix=''): lvs = _output_parser(stdout, LV_FIELDS) return [Volume(**lv) for lv in lvs if lv['lv_name'] and lv['lv_name'].startswith(name_prefix)] + +def get_lv_by_fullname(full_name): + """ + returns LV by the specified LV's full name (formatted as vg_name/lv_name) + """ + try: + vg_name, lv_name = full_name.split('/') + res_lv = get_first_lv(filters={'lv_name': lv_name, + 'vg_name': vg_name}) + except ValueError: + res_lv = None + return res_lv diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/main.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/main.py index 3ef3c1117..39947454d 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/main.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/main.py @@ -9,6 +9,7 @@ from . import trigger from . import listing from . import zap from . import batch +from . 
import migrate class LVM(object): @@ -30,6 +31,9 @@ class LVM(object): 'trigger': trigger.Trigger, 'list': listing.List, 'zap': zap.Zap, + 'migrate': migrate.Migrate, + 'new-wal': migrate.NewWAL, + 'new-db': migrate.NewDB, } def __init__(self, argv): diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py new file mode 100644 index 000000000..3410dd508 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py @@ -0,0 +1,674 @@ +from __future__ import print_function +import argparse +import logging +import os +from textwrap import dedent +from ceph_volume.util import system, disk, merge_dict +from ceph_volume.util.device import Device +from ceph_volume import decorators, terminal, process +from ceph_volume.api import lvm as api +from ceph_volume.systemd import systemctl + + +logger = logging.getLogger(__name__) +mlogger = terminal.MultiLogger(__name__) + +def get_cluster_name(osd_id, osd_fsid): + """ + From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the + system that match those tag values, then return cluster_name for the first + one. + """ + lv_tags = {} + lv_tags['ceph.osd_id'] = osd_id + lv_tags['ceph.osd_fsid'] = osd_fsid + + lvs = api.get_lvs(tags=lv_tags) + if not lvs: + mlogger.error( + 'Unable to find any LV for source OSD: id:{} fsid:{}'.format( + osd_id, osd_fsid) ) + raise SystemExit('Unexpected error, terminating') + return next(iter(lvs)).tags["ceph.cluster_name"] + +def get_osd_path(osd_id, osd_fsid): + return '/var/lib/ceph/osd/{}-{}'.format( + get_cluster_name(osd_id, osd_fsid), osd_id) + +def find_associated_devices(osd_id, osd_fsid): + """ + From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the + system that match those tag values, further detect if any partitions are + part of the OSD, and then return the set of LVs and partitions (if any). + """ + lv_tags = {} + lv_tags['ceph.osd_id'] = osd_id + lv_tags['ceph.osd_fsid'] = osd_fsid + + lvs = api.get_lvs(tags=lv_tags) + if not lvs: + mlogger.error( + 'Unable to find any LV for source OSD: id:{} fsid:{}'.format( + osd_id, osd_fsid) ) + raise SystemExit('Unexpected error, terminating') + + devices = set(ensure_associated_lvs(lvs, lv_tags)) + return [(Device(path), type) for path, type in devices if path] + +def ensure_associated_lvs(lvs, lv_tags): + """ + Go through each LV and ensure if backing devices (journal, wal, block) + are LVs or partitions, so that they can be accurately reported. + """ + # look for many LVs for each backing type, because it is possible to + # receive a filtering for osd.1, and have multiple failed deployments + # leaving many journals with osd.1 - usually, only a single LV will be + # returned + + block_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'block'})) + db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'})) + wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'})) + backing_devices = [(block_lvs, 'block'), (db_lvs, 'db'), + (wal_lvs, 'wal')] + + verified_devices = [] + + for lv in lvs: + # go through each lv and append it, otherwise query `blkid` to find + # a physical device. 
Do this for each type (journal,db,wal) regardless + # if they have been processed in the previous LV, so that bad devices + # with the same ID can be caught + for ceph_lvs, type in backing_devices: + + if ceph_lvs: + verified_devices.extend([(l.lv_path, type) for l in ceph_lvs]) + continue + + # must be a disk partition, by querying blkid by the uuid we are + # ensuring that the device path is always correct + try: + device_uuid = lv.tags['ceph.{}_uuid'.format(type)] + except KeyError: + # Bluestore will not have ceph.journal_uuid, and Filestore + # will not not have ceph.db_uuid + continue + + osd_device = disk.get_device_from_partuuid(device_uuid) + if not osd_device: + # if the osd_device is not found by the partuuid, then it is + # not possible to ensure this device exists anymore, so skip it + continue + verified_devices.append((osd_device, type)) + + return verified_devices + +class VolumeTagTracker(object): + def __init__(self, devices, target_lv): + self.target_lv = target_lv + self.data_device = self.db_device = self.wal_device = None + for device, type in devices: + if type == 'block': + self.data_device = device + elif type == 'db': + self.db_device = device + elif type == 'wal': + self.wal_device = device + if not self.data_device: + mlogger.error('Data device not found') + raise SystemExit( + "Unexpected error, terminating") + if not self.data_device.is_lv: + mlogger.error('Data device isn\'t LVM') + raise SystemExit( + "Unexpected error, terminating") + + self.old_target_tags = self.target_lv.tags.copy() + self.old_data_tags = ( + self.data_device.lv_api.tags.copy() + if self.data_device.is_lv else None) + self.old_db_tags = ( + self.db_device.lv_api.tags.copy() + if self.db_device and self.db_device.is_lv else None) + self.old_wal_tags = ( + self.wal_device.lv_api.tags.copy() + if self.wal_device and self.wal_device.is_lv else None) + + def update_tags_when_lv_create(self, create_type): + tags = {} + if not self.data_device.is_lv: + mlogger.warning( + 'Data device is not LVM, wouldn\'t update LVM tags') + else: + tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid + tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path + self.data_device.lv_api.set_tags(tags) + + tags = self.data_device.lv_api.tags.copy() + tags["ceph.type"] = create_type + self.target_lv.set_tags(tags) + + aux_dev = None + if create_type == "db" and self.wal_device: + aux_dev = self.wal_device + elif create_type == "wal" and self.db_device: + aux_dev = self.db_device + else: + return + if not aux_dev.is_lv: + mlogger.warning( + '{} device is not LVM, wouldn\'t update LVM tags'.format( + create_type.upper())) + else: + tags = {} + tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid + tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path + aux_dev.lv_api.set_tags(tags) + + def remove_lvs(self, source_devices, target_type): + remaining_devices = [self.data_device, self.db_device, self.wal_device] + + outdated_tags = [] + for device, type in source_devices: + if type == "block" or type == target_type: + continue + remaining_devices.remove(device) + if device.is_lv: + outdated_tags.append("ceph.{}_uuid".format(type)) + outdated_tags.append("ceph.{}_device".format(type)) + device.lv_api.clear_tags() + if len(outdated_tags) > 0: + for d in remaining_devices: + if d and d.is_lv: + d.lv_api.clear_tags(outdated_tags) + + def replace_lvs(self, source_devices, target_type): + remaining_devices = [self.data_device] + if self.db_device: + 
remaining_devices.append(self.db_device) + if self.wal_device: + remaining_devices.append(self.wal_device) + + outdated_tags = [] + for device, type in source_devices: + if type == "block": + continue + remaining_devices.remove(device) + if device.is_lv: + outdated_tags.append("ceph.{}_uuid".format(type)) + outdated_tags.append("ceph.{}_device".format(type)) + device.lv_api.clear_tags() + + new_tags = {} + new_tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid + new_tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path + + for d in remaining_devices: + if d and d.is_lv: + if len(outdated_tags) > 0: + d.lv_api.clear_tags(outdated_tags) + d.lv_api.set_tags(new_tags) + + if not self.data_device.is_lv: + mlogger.warning( + 'Data device is not LVM, wouldn\'t properly update target LVM tags') + else: + tags = self.data_device.lv_api.tags.copy() + + tags["ceph.type"] = target_type + tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid + tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path + self.target_lv.set_tags(tags) + + def undo(self): + mlogger.info( + 'Undoing lv tag set') + if self.data_device: + if self.old_data_tags: + self.data_device.lv_api.set_tags(self.old_data_tags) + else: + self.data_device.lv_api.clear_tags() + if self.db_device: + if self.old_db_tags: + self.db_device.lv_api.set_tags(self.old_db_tags) + else: + self.db_device.lv_api.clear_tags() + if self.wal_device: + if self.old_wal_tags: + self.wal_device.lv_api.set_tags(self.old_wal_tags) + else: + self.wal_device.lv_api.clear_tags() + if self.old_target_tags: + self.target_lv.set_tags(self.old_target_tags) + else: + self.target_lv.clear_tags() + +class Migrate(object): + + help = 'Migrate BlueFS data from one LVM device to another' + + def __init__(self, argv): + self.argv = argv + self.osd_id = None + + def get_source_devices(self, devices, target_type=""): + ret = [] + for device, type in devices: + if type == target_type: + continue + if type == 'block': + if 'data' not in self.args.from_: + continue + elif type == 'db': + if 'db' not in self.args.from_: + continue + elif type == 'wal': + if 'wal' not in self.args.from_: + continue + ret.append([device, type]) + if ret == []: + mlogger.error('Source device list is empty') + raise SystemExit( + 'Unable to migrate to : {}'.format(self.args.target)) + return ret + + # ceph-bluestore-tool uses the following replacement rules + # (in the order of precedence, stop on the first match) + # if source list has DB volume - target device replaces it. + # if source list has WAL volume - target device replaces it. + # if source list has slow volume only - operation isn’t permitted, + # requires explicit allocation via new-db/new-wal command. + def get_target_type_by_source(self, devices): + ret = None + for device, type in devices: + if type == 'db': + return 'db' + elif type == 'wal': + ret = 'wal' + return ret + + def get_filename_by_type(self, type): + filename = 'block' + if type == 'db' or type == 'wal': + filename += '.'
+ type + return filename + + def get_source_args(self, osd_path, devices): + ret = [] + for device, type in devices: + ret = ret + ["--devs-source", os.path.join( + osd_path, self.get_filename_by_type(type))] + return ret + + @decorators.needs_root + def migrate_to_new(self, osd_id, osd_fsid, devices, target_lv): + source_devices = self.get_source_devices(devices) + target_type = self.get_target_type_by_source(source_devices) + if not target_type: + mlogger.error( + "Unable to determine new volume type," + " please use new-db or new-wal command before.") + raise SystemExit( + "Unable to migrate to : {}".format(self.args.target)) + + target_path = target_lv.lv_path + + try: + tag_tracker = VolumeTagTracker(devices, target_lv) + # we need to update lvm tags for all the remaining volumes + # and clear for ones which to be removed + + # ceph-bluestore-tool removes source volume(s) other than block one + # and attaches target one after successful migration + tag_tracker.replace_lvs(source_devices, target_type) + + osd_path = get_osd_path(osd_id, osd_fsid) + source_args = self.get_source_args(osd_path, source_devices) + mlogger.info("Migrate to new, Source: {} Target: {}".format( + source_args, target_path)) + stdout, stderr, exit_code = process.call([ + 'ceph-bluestore-tool', + '--path', + osd_path, + '--dev-target', + target_path, + '--command', + 'bluefs-bdev-migrate'] + + source_args) + if exit_code != 0: + mlogger.error( + 'Failed to migrate device, error code:{}'.format(exit_code)) + raise SystemExit( + 'Failed to migrate to : {}'.format(self.args.target)) + else: + system.chown(os.path.join(osd_path, "block.{}".format( + target_type))) + terminal.success('Migration successful.') + except: + tag_tracker.undo() + raise + + return + + @decorators.needs_root + def migrate_to_existing(self, osd_id, osd_fsid, devices, target_lv): + target_type = target_lv.tags["ceph.type"] + if target_type == "wal": + mlogger.error("Migrate to WAL is not supported") + raise SystemExit( + "Unable to migrate to : {}".format(self.args.target)) + target_filename = self.get_filename_by_type(target_type) + if (target_filename == ""): + mlogger.error( + "Target Logical Volume doesn't have proper volume type " + "(ceph.type LVM tag): {}".format(target_type)) + raise SystemExit( + "Unable to migrate to : {}".format(self.args.target)) + + osd_path = get_osd_path(osd_id, osd_fsid) + source_devices = self.get_source_devices(devices, target_type) + target_path = os.path.join(osd_path, target_filename) + tag_tracker = VolumeTagTracker(devices, target_lv) + + try: + # ceph-bluestore-tool removes source volume(s) other than + # block and target ones after successful migration + tag_tracker.remove_lvs(source_devices, target_type) + source_args = self.get_source_args(osd_path, source_devices) + mlogger.info("Migrate to existing, Source: {} Target: {}".format( + source_args, target_path)) + stdout, stderr, exit_code = process.call([ + 'ceph-bluestore-tool', + '--path', + osd_path, + '--dev-target', + target_path, + '--command', + 'bluefs-bdev-migrate'] + + source_args) + if exit_code != 0: + mlogger.error( + 'Failed to migrate device, error code:{}'.format(exit_code)) + raise SystemExit( + 'Failed to migrate to : {}'.format(self.args.target)) + else: + terminal.success('Migration successful.') + except: + tag_tracker.undo() + raise + + return + + @decorators.needs_root + def migrate_osd(self): + if self.args.osd_id: + osd_is_running = systemctl.osd_is_active(self.args.osd_id) + if osd_is_running: + mlogger.error('OSD is running, 
stop it with: ' + 'systemctl stop ceph-osd@{}'.format( + self.args.osd_id)) + raise SystemExit( + 'Unable to migrate devices associated with OSD ID: {}' + .format(self.args.osd_id)) + + target_lv = api.get_lv_by_fullname(self.args.target) + if not target_lv: + mlogger.error( + 'Target path "{}" is not a Logical Volume'.format( + self.args.target)) + raise SystemExit( + 'Unable to migrate to : {}'.format(self.args.target)) + devices = find_associated_devices(self.args.osd_id, self.args.osd_fsid) + if (not target_lv.used_by_ceph): + self.migrate_to_new(self.args.osd_id, self.args.osd_fsid, + devices, + target_lv) + else: + if (target_lv.tags['ceph.osd_id'] != self.args.osd_id or + target_lv.tags['ceph.osd_fsid'] != self.args.osd_fsid): + mlogger.error( + 'Target Logical Volume isn\'t used by the specified OSD: ' + '{} FSID: {}'.format(self.args.osd_id, + self.args.osd_fsid)) + raise SystemExit( + 'Unable to migrate to : {}'.format(self.args.target)) + + self.migrate_to_existing(self.args.osd_id, self.args.osd_fsid, + devices, + target_lv) + + def parse_argv(self): + sub_command_help = dedent(""" + Moves BlueFS data from source volume(s) to the target one; source + volumes (except the main (i.e. data or block) one) are removed on + success. Only LVM volumes are permitted as the target, either already + attached or a new logical one. In the latter case it is attached to the + OSD, replacing one of the source devices. The following replacement + rules apply (in the order of precedence, stop on the first match): + * if source list has DB volume - target device replaces it. + * if source list has WAL volume - target device replaces it. + * if source list has slow volume only - operation is not permitted, + requires explicit allocation via new-db/new-wal command. + + Example calls for supported scenarios: + + Moves BlueFS data from main device to LV already attached as DB: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid --from data --target vgname/db + + Moves BlueFS data from shared main device to LV which will be attached + as a new DB: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid --from data --target vgname/new_db + + Moves BlueFS data from DB device to new LV, DB is replaced: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid --from db --target vgname/new_db + + Moves BlueFS data from main and DB devices to new LV, DB is replaced: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid --from data db --target vgname/new_db + + Moves BlueFS data from main, DB and WAL devices to new LV, WAL is + removed and DB is replaced: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid --from data db wal --target vgname/new_db + + Moves BlueFS data from DB and WAL devices to main device, WAL + and DB are removed: + + ceph-volume lvm migrate --osd-id 1 --osd-fsid --from db wal --target vgname/data + + """) + parser = argparse.ArgumentParser( + prog='ceph-volume lvm migrate', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + '--osd-id', + required=True, + help='Specify the ID of the OSD whose devices are to be migrated', + ) + + parser.add_argument( + '--osd-fsid', + required=True, + help='Specify the FSID of the OSD whose devices are to be migrated', + ) + parser.add_argument( + '--target', + required=True, + help='Specify target Logical Volume (LV) to migrate data to', + ) + parser.add_argument( + '--from', + nargs='*', + dest='from_', + required=True, + choices=['data', 'db', 'wal'], + help='Copy BlueFS data from the given source device(s) (data, db and/or wal)', + ) + + if len(self.argv)
== 0: + print(sub_command_help) + return + self.args = parser.parse_args(self.argv) + + def main(self): + self.parse_argv() + self.migrate_osd() + +class NewVolume(object): + def __init__(self, create_type, argv): + self.create_type = create_type + self.argv = argv + + def make_parser(self, prog, sub_command_help): + parser = argparse.ArgumentParser( + prog=prog, + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + '--osd-id', + required=True, + help='Specify an OSD ID to attach new volume to', + ) + + parser.add_argument( + '--osd-fsid', + required=True, + help='Specify an OSD FSIDto attach new volume to', + ) + parser.add_argument( + '--target', + required=True, + help='Specify target Logical Volume (LV) to attach', + ) + return parser + + @decorators.needs_root + def make_new_volume(self, osd_id, osd_fsid, devices, target_lv): + osd_path = get_osd_path(osd_id, osd_fsid) + mlogger.info( + 'Making new volume at {} for OSD: {} ({})'.format( + target_lv.lv_path, osd_id, osd_path)) + tag_tracker = VolumeTagTracker(devices, target_lv) + + try: + tag_tracker.update_tags_when_lv_create(self.create_type) + + stdout, stderr, exit_code = process.call([ + 'ceph-bluestore-tool', + '--path', + osd_path, + '--dev-target', + target_lv.lv_path, + '--command', + 'bluefs-bdev-new-{}'.format(self.create_type) + ]) + if exit_code != 0: + mlogger.error( + 'failed to attach new volume, error code:{}'.format( + exit_code)) + raise SystemExit( + "Failed to attach new volume: {}".format( + self.args.target)) + else: + system.chown(os.path.join(osd_path, "block.{}".format( + self.create_type))) + terminal.success('New volume attached.') + except: + tag_tracker.undo() + raise + return + + @decorators.needs_root + def new_volume(self): + if self.args.osd_id: + osd_is_running = systemctl.osd_is_active(self.args.osd_id) + if osd_is_running: + mlogger.error('OSD ID is running, stop it with:' + ' systemctl stop ceph-osd@{}'.format(self.args.osd_id)) + raise SystemExit( + 'Unable to attach new volume for OSD: {}'.format( + self.args.osd_id)) + + target_lv = api.get_lv_by_fullname(self.args.target) + if not target_lv: + mlogger.error( + 'Target path {} is not a Logical Volume'.format( + self.args.target)) + raise SystemExit( + 'Unable to attach new volume : {}'.format(self.args.target)) + if target_lv.used_by_ceph: + mlogger.error( + 'Target Logical Volume is already used by ceph: {}'.format( + self.args.target)) + raise SystemExit( + 'Unable to attach new volume : {}'.format(self.args.target)) + else: + devices = find_associated_devices(self.args.osd_id, + self.args.osd_fsid) + self.make_new_volume( + self.args.osd_id, + self.args.osd_fsid, + devices, + target_lv) + +class NewWAL(NewVolume): + + help = 'Allocate new WAL volume for OSD at specified Logical Volume' + + def __init__(self, argv): + super(NewWAL, self).__init__("wal", argv) + + def main(self): + sub_command_help = dedent(""" + Attaches the given logical volume to the given OSD as a WAL volume. + Logical volume format is vg/lv. Fails if OSD has already got attached DB. 
+ + Example: + + Attach vgname/lvname as a WAL volume to OSD 1 + + ceph-volume lvm new-wal --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_wal + """) + parser = self.make_parser('ceph-volume lvm new-wal', sub_command_help) + + if len(self.argv) == 0: + print(sub_command_help) + return + + self.args = parser.parse_args(self.argv) + + self.new_volume() + +class NewDB(NewVolume): + + help = 'Allocate new DB volume for OSD at specified Logical Volume' + + def __init__(self, argv): + super(NewDB, self).__init__("db", argv) + + def main(self): + sub_command_help = dedent(""" + Attaches the given logical volume to the given OSD as a DB volume. + Logical volume format is vg/lv. Fails if OSD has already got attached DB. + + Example: + + Attach vgname/lvname as a DB volume to OSD 1 + + ceph-volume lvm new-db --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_db + """) + + parser = self.make_parser('ceph-volume lvm new-db', sub_command_help) + if len(self.argv) == 0: + print(sub_command_help) + return + self.args = parser.parse_args(self.argv) + + self.new_volume() diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py new file mode 100644 index 000000000..dc429793d --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py @@ -0,0 +1,1504 @@ +import pytest +from mock.mock import patch +from ceph_volume import process +from ceph_volume.api import lvm as api +from ceph_volume.devices.lvm import migrate +from ceph_volume.util.device import Device +from ceph_volume.util import system + +class TestGetClusterName(object): + + mock_volumes = [] + def mock_get_lvs(self, *args, **kwargs): + return self.mock_volumes.pop(0) + + def test_cluster_found(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234,ceph.cluster_name=name_of_the_cluster' + vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='', + lv_path='/dev/VolGroup/lv1', lv_tags=tags) + self.mock_volumes = [] + self.mock_volumes.append([vol]) + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + result = migrate.get_cluster_name(osd_id='0', osd_fsid='1234') + assert "name_of_the_cluster" == result + + def test_cluster_not_found(self, monkeypatch, capsys): + self.mock_volumes = [] + self.mock_volumes.append([]) + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + with pytest.raises(SystemExit) as error: + migrate.get_cluster_name(osd_id='0', osd_fsid='1234') + stdout, stderr = capsys.readouterr() + expected = 'Unexpected error, terminating' + assert expected in str(error.value) + expected = 'Unable to find any LV for source OSD: id:0 fsid:1234' + assert expected in stderr + +class TestFindAssociatedDevices(object): + + mock_volumes = [] + def mock_get_lvs(self, *args, **kwargs): + return self.mock_volumes.pop(0) + + mock_single_volumes = {} + def mock_get_first_lv(self, *args, **kwargs): + p = kwargs['filters']['lv_path'] + return self.mock_single_volumes[p] + + def test_lv_is_matched_id(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234' + vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='', + lv_path='/dev/VolGroup/lv1', lv_tags=tags) + self.mock_volumes = [] + self.mock_volumes.append([vol]) + 
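For context on what the mocks in these tests stand in for: ceph-volume stores its metadata as comma-separated key=value LVM tags (the lv_tags strings above), and get_cluster_name()/get_osd_path() in migrate.py simply read ceph.cluster_name out of the first LV matching the OSD id/fsid filter and build the /var/lib/ceph/osd/<cluster>-<id> path from it. A minimal sketch of that derivation, using a hypothetical parse_lv_tags helper rather than the real Volume class:

    def parse_lv_tags(tag_string):
        # 'ceph.osd_id=0,ceph.type=data,...' -> {'ceph.osd_id': '0', ...}
        return dict(kv.split('=', 1) for kv in tag_string.split(',') if kv)

    def osd_path_from_tags(tag_string, osd_id):
        tags = parse_lv_tags(tag_string)
        cluster = tags['ceph.cluster_name']
        return '/var/lib/ceph/osd/{}-{}'.format(cluster, osd_id)

    # osd_path_from_tags('ceph.osd_id=0,ceph.cluster_name=name_of_the_cluster', '0')
    # evaluates to '/var/lib/ceph/osd/name_of_the_cluster-0'
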
self.mock_volumes.append([vol]) + self.mock_volumes.append([]) + self.mock_volumes.append([]) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': vol} + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234') + assert len(result) == 1 + assert result[0][0].abspath == '/dev/VolGroup/lv1' + assert result[0][0].lvs == [vol] + assert result[0][1] == 'block' + + def test_lv_is_matched_id2(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234' + vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=tags) + tags2 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=wal,ceph.osd_fsid=1234' + vol2 = api.Volume(lv_name='volume2', lv_uuid='z', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=tags2) + self.mock_volumes = [] + self.mock_volumes.append([vol]) + self.mock_volumes.append([vol]) + self.mock_volumes.append([]) + self.mock_volumes.append([vol2]) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': vol, '/dev/VolGroup/lv2': vol2} + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234') + assert len(result) == 2 + for d in result: + if d[1] == 'block': + assert d[0].abspath == '/dev/VolGroup/lv1' + assert d[0].lvs == [vol] + elif d[1] == 'wal': + assert d[0].abspath == '/dev/VolGroup/lv2' + assert d[0].lvs == [vol2] + else: + assert False + + def test_lv_is_matched_id3(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234' + vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=tags) + tags2 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=wal,ceph.osd_fsid=1234' + vol2 = api.Volume(lv_name='volume2', lv_uuid='z', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=tags2) + tags3 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=db,ceph.osd_fsid=1234' + vol3 = api.Volume(lv_name='volume3', lv_uuid='z', vg_name='vg', + lv_path='/dev/VolGroup/lv3', lv_tags=tags3) + + self.mock_volumes = [] + self.mock_volumes.append([vol]) + self.mock_volumes.append([vol]) + self.mock_volumes.append([vol3]) + self.mock_volumes.append([vol2]) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': vol, + '/dev/VolGroup/lv2': vol2, + '/dev/VolGroup/lv3': vol3} + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234') + assert len(result) == 3 + for d in result: + if d[1] == 'block': + assert d[0].abspath == '/dev/VolGroup/lv1' + assert d[0].lvs == [vol] + elif d[1] == 'wal': + assert d[0].abspath == '/dev/VolGroup/lv2' + assert d[0].lvs == [vol2] + elif d[1] == 'db': + assert d[0].abspath == '/dev/VolGroup/lv3' + assert d[0].lvs == [vol3] + else: + assert False + + def test_lv_is_not_matched(self, monkeypatch, capsys): + self.mock_volumes = [None] + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', 
'', 0)) + + with pytest.raises(SystemExit) as error: + migrate.find_associated_devices(osd_id='1', osd_fsid='1234') + stdout, stderr = capsys.readouterr() + expected = 'Unexpected error, terminating' + assert expected in str(error.value) + expected = 'Unable to find any LV for source OSD: id:1 fsid:1234' + assert expected in stderr + +class TestVolumeTagTracker(object): + mock_single_volumes = {} + def mock_get_first_lv(self, *args, **kwargs): + p = kwargs['filters']['lv_path'] + return self.mock_single_volumes[p] + + mock_process_input = [] + def mock_process(self, *args, **kwargs): + self.mock_process_input.append(args[0]); + return ('', '', 0) + + def test_init(self, monkeypatch): + source_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234' + source_db_tags = 'ceph.osd_id=0,journal_uuid=x,ceph.type=db, osd_fsid=1234' + source_wal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal' + target_tags="ceph.a=1,ceph.b=2,c=3,ceph.d=4" # 'c' to be bypassed + devices=[] + + data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags) + wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol} + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + data_device = Device(path = '/dev/VolGroup/lv1') + db_device = Device(path = '/dev/VolGroup/lv2') + wal_device = Device(path = '/dev/VolGroup/lv3') + devices.append([data_device, 'block']) + devices.append([db_device, 'db']) + devices.append([wal_device, 'wal']) + + target = api.Volume(lv_name='target_name', lv_tags=target_tags, + lv_path='/dev/VolGroup/lv_target') + t = migrate.VolumeTagTracker(devices, target); + + assert 3 == len(t.old_target_tags) + + assert data_device == t.data_device + assert 4 == len(t.old_data_tags) + assert 'data' == t.old_data_tags['ceph.type'] + + assert db_device == t.db_device + assert 2 == len(t.old_db_tags) + assert 'db' == t.old_db_tags['ceph.type'] + + assert wal_device == t.wal_device + assert 3 == len(t.old_wal_tags) + assert 'wal' == t.old_wal_tags['ceph.type'] + + def test_update_tags_when_lv_create(self, monkeypatch): + source_tags = \ + 'ceph.osd_id=0,ceph.journal_uuid=x,' \ + 'ceph.type=data,ceph.osd_fsid=1234' + source_db_tags = \ + 'ceph.osd_id=0,journal_uuid=x,ceph.type=db,' \ + 'osd_fsid=1234' + + devices=[] + + data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol} + + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + data_device = Device(path = '/dev/VolGroup/lv1') + db_device = Device(path = '/dev/VolGroup/lv2') + devices.append([data_device, 'block']) + devices.append([db_device, 'db']) + + target = api.Volume(lv_name='target_name', lv_tags='', + lv_uuid='wal_uuid', + lv_path='/dev/VolGroup/lv_target') + 
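To make the assertions in test_update_tags_when_lv_create easier to follow: when a new db/wal volume is attached to an OSD, update_tags_when_lv_create() adds ceph.<type>_uuid and ceph.<type>_device tags (pointing at the target LV) to the data LV and to the other auxiliary LV if present, while the target LV receives a copy of the data LV's tags (including the two new ones) with ceph.type overridden to the new type. A rough, self-contained sketch of that rule, not the real implementation (which operates on Volume/Device objects):

    def tags_for_new_volume(data_tags, create_type, target_uuid, target_path):
        # tags added to the data LV (and to the db/wal counterpart, if any)
        extra = {
            'ceph.{}_uuid'.format(create_type): target_uuid,
            'ceph.{}_device'.format(create_type): target_path,
        }
        # tags the freshly attached target LV ends up carrying
        target_tags = dict(data_tags)
        target_tags.update(extra)
        target_tags['ceph.type'] = create_type
        return extra, target_tags
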
t = migrate.VolumeTagTracker(devices, target); + + self.mock_process_input = [] + t.update_tags_when_lv_create('wal') + + assert 3 == len(self.mock_process_input) + + assert ['lvchange', + '--addtag', 'ceph.wal_uuid=wal_uuid', + '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target', + '/dev/VolGroup/lv1'] == self.mock_process_input[0] + + assert self.mock_process_input[1].sort() == [ + 'lvchange', + '--addtag', 'ceph.osd_id=0', + '--addtag', 'ceph.journal_uuid=x', + '--addtag', 'ceph.type=wal', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.wal_uuid=wal_uuid', + '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target', + '/dev/VolGroup/lv_target'].sort() + + assert ['lvchange', + '--addtag', 'ceph.wal_uuid=wal_uuid', + '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target', + '/dev/VolGroup/lv2'] == self.mock_process_input[2] + + def test_remove_lvs(self, monkeypatch): + source_tags = \ + 'ceph.osd_id=0,ceph.journal_uuid=x,' \ + 'ceph.type=data,ceph.osd_fsid=1234,ceph.wal_uuid=aaaaa' + source_db_tags = \ + 'ceph.osd_id=0,journal_uuid=x,ceph.type=db,' \ + 'osd_fsid=1234,ceph.wal_device=aaaaa' + source_wal_tags = \ + 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \ + 'ceph.osd_id=0,ceph.type=wal' + + devices=[] + + data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags) + wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol} + + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + data_device = Device(path = '/dev/VolGroup/lv1') + db_device = Device(path = '/dev/VolGroup/lv2') + wal_device = Device(path = '/dev/VolGroup/lv3') + devices.append([data_device, 'block']) + devices.append([db_device, 'db']) + devices.append([wal_device, 'wal']) + + target = api.Volume(lv_name='target_name', lv_tags='', + lv_path='/dev/VolGroup/lv_target') + t = migrate.VolumeTagTracker(devices, target); + + device_to_remove = devices.copy() + + self.mock_process_input = [] + t.remove_lvs(device_to_remove, 'db') + + assert 3 == len(self.mock_process_input) + assert ['lvchange', + '--deltag', 'ceph.wal_uuid=uuid', + '--deltag', 'ceph.wal_device=device', + '--deltag', 'ceph.osd_id=0', + '--deltag', 'ceph.type=wal', + '/dev/VolGroup/lv3'] == self.mock_process_input[0] + assert ['lvchange', + '--deltag', 'ceph.wal_uuid=aaaaa', + '/dev/VolGroup/lv1'] == self.mock_process_input[1] + assert ['lvchange', + '--deltag', 'ceph.wal_device=aaaaa', + '/dev/VolGroup/lv2'] == self.mock_process_input[2] + + def test_replace_lvs(self, monkeypatch): + source_tags = \ + 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\ + 'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice' + source_db_tags = \ + 'ceph.osd_id=0,ceph.type=db,ceph.osd_fsid=1234' + source_wal_tags = \ + 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \ + 'ceph.osd_id=0,ceph.type=wal' + + devices=[] + + data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', lv_uuid='dbuuid', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags) + wal_vol = 
api.Volume(lv_name='volume3', lv_uuid='waluuid', vg_name='vg', + lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol} + + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + data_device = Device(path = '/dev/VolGroup/lv1') + db_device = Device(path = '/dev/VolGroup/lv2') + wal_device = Device(path = '/dev/VolGroup/lv3') + devices.append([data_device, 'block']) + devices.append([db_device, 'db']) + devices.append([wal_device, 'wal']) + + target = api.Volume(lv_name='target_name', + lv_uuid='ttt', + lv_tags='ceph.tag_to_remove=aaa', + lv_path='/dev/VolGroup/lv_target') + t = migrate.VolumeTagTracker(devices, target); + + self.mock_process_input = [] + t.replace_lvs(devices, 'db') + + assert 5 == len(self.mock_process_input) + + assert ['lvchange', + '--deltag', 'ceph.osd_id=0', + '--deltag', 'ceph.type=db', + '--deltag', 'ceph.osd_fsid=1234', + '/dev/VolGroup/lv2'] == self.mock_process_input[0] + assert ['lvchange', + '--deltag', 'ceph.wal_uuid=uuid', + '--deltag', 'ceph.wal_device=device', + '--deltag', 'ceph.osd_id=0', + '--deltag', 'ceph.type=wal', + '/dev/VolGroup/lv3'] == self.mock_process_input[1] + assert ['lvchange', + '--deltag', 'ceph.db_device=/dbdevice', + '--deltag', 'ceph.wal_uuid=wal_uuid', + '/dev/VolGroup/lv1'] == self.mock_process_input[2] + + assert ['lvchange', + '--addtag', 'ceph.db_uuid=ttt', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv_target', + '/dev/VolGroup/lv1'] == self.mock_process_input[3] + + assert self.mock_process_input[4].sort() == [ + 'lvchange', + '--addtag', 'ceph.osd_id=0', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.type=db', + '--addtag', 'ceph.db_uuid=ttt', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv_target', + '/dev/VolGroup/lv_target'].sort() + + def test_undo(self, monkeypatch): + source_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234' + source_db_tags = 'ceph.osd_id=0,journal_uuid=x,ceph.type=db, osd_fsid=1234' + source_wal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal' + target_tags="" + devices=[] + + data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags) + wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol} + + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + data_device = Device(path = '/dev/VolGroup/lv1') + db_device = Device(path = '/dev/VolGroup/lv2') + wal_device = Device(path = '/dev/VolGroup/lv3') + devices.append([data_device, 'block']) + devices.append([db_device, 'db']) + devices.append([wal_device, 'wal']) + + target = api.Volume(lv_name='target_name', lv_tags=target_tags, + lv_path='/dev/VolGroup/lv_target') + t = migrate.VolumeTagTracker(devices, target); + + target.tags['ceph.a'] = 'aa'; + target.tags['ceph.b'] = 'bb'; + + data_vol.tags['ceph.journal_uuid'] = 'z'; + + db_vol.tags.pop('ceph.type') + + wal_vol.tags.clear() + + assert 2 == 
len(target.tags) + assert 4 == len(data_vol.tags) + assert 1 == len(db_vol.tags) + + self.mock_process_input = [] + t.undo() + + assert 0 == len(target.tags) + assert 4 == len(data_vol.tags) + assert 'x' == data_vol.tags['ceph.journal_uuid'] + + assert 2 == len(db_vol.tags) + assert 'db' == db_vol.tags['ceph.type'] + + assert 3 == len(wal_vol.tags) + assert 'wal' == wal_vol.tags['ceph.type'] + + assert 6 == len(self.mock_process_input) + assert 'lvchange' in self.mock_process_input[0] + assert '--deltag' in self.mock_process_input[0] + assert 'ceph.journal_uuid=z' in self.mock_process_input[0] + assert '/dev/VolGroup/lv1' in self.mock_process_input[0] + + assert 'lvchange' in self.mock_process_input[1] + assert '--addtag' in self.mock_process_input[1] + assert 'ceph.journal_uuid=x' in self.mock_process_input[1] + assert '/dev/VolGroup/lv1' in self.mock_process_input[1] + + assert 'lvchange' in self.mock_process_input[2] + assert '--deltag' in self.mock_process_input[2] + assert 'ceph.osd_id=0' in self.mock_process_input[2] + assert '/dev/VolGroup/lv2' in self.mock_process_input[2] + + assert 'lvchange' in self.mock_process_input[3] + assert '--addtag' in self.mock_process_input[3] + assert 'ceph.type=db' in self.mock_process_input[3] + assert '/dev/VolGroup/lv2' in self.mock_process_input[3] + + assert 'lvchange' in self.mock_process_input[4] + assert '--addtag' in self.mock_process_input[4] + assert 'ceph.type=wal' in self.mock_process_input[4] + assert '/dev/VolGroup/lv3' in self.mock_process_input[4] + + assert 'lvchange' in self.mock_process_input[5] + assert '--deltag' in self.mock_process_input[5] + assert 'ceph.a=aa' in self.mock_process_input[5] + assert 'ceph.b=bb' in self.mock_process_input[5] + assert '/dev/VolGroup/lv_target' in self.mock_process_input[5] + +class TestNew(object): + + mock_volume = None + def mock_get_lv_by_fullname(self, *args, **kwargs): + return self.mock_volume + + mock_process_input = [] + def mock_process(self, *args, **kwargs): + self.mock_process_input.append(args[0]); + return ('', '', 0) + + mock_single_volumes = {} + def mock_get_first_lv(self, *args, **kwargs): + p = kwargs['filters']['lv_path'] + return self.mock_single_volumes[p] + + mock_volumes = [] + def mock_get_lvs(self, *args, **kwargs): + return self.mock_volumes.pop(0) + + def test_newdb_non_root(self): + with pytest.raises(Exception) as error: + migrate.NewDB(argv=[ + '--osd-id', '1', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--target', 'vgname/new_db']).main() + expected = 'This command needs to be executed with sudo or as root' + assert expected in str(error.value) + + @patch('os.getuid') + def test_newdb_not_target_lvm(self, m_getuid, capsys): + m_getuid.return_value = 0 + with pytest.raises(SystemExit) as error: + migrate.NewDB(argv=[ + '--osd-id', '1', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--target', 'vgname/new_db']).main() + stdout, stderr = capsys.readouterr() + expected = 'Unable to attach new volume : vgname/new_db' + assert expected in str(error.value) + expected = 'Target path vgname/new_db is not a Logical Volume' + assert expected in stderr + + + @patch('os.getuid') + def test_newdb_already_in_use(self, m_getuid, monkeypatch, capsys): + m_getuid.return_value = 0 + + self.mock_volume = api.Volume(lv_name='volume1', + lv_uuid='y', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags='ceph.osd_id=5') # this results in set used_by_ceph + monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname) + + with 
pytest.raises(SystemExit) as error: + migrate.NewDB(argv=[ + '--osd-id', '1', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--target', 'vgname/new_db']).main() + stdout, stderr = capsys.readouterr() + expected = 'Unable to attach new volume : vgname/new_db' + assert expected in str(error.value) + expected = 'Target Logical Volume is already used by ceph: vgname/new_db' + assert expected in stderr + + @patch('os.getuid') + def test_newdb(self, m_getuid, monkeypatch, capsys): + m_getuid.return_value = 0 + + source_tags = \ + 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\ + 'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice' + source_wal_tags = \ + 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \ + 'ceph.osd_id=0,ceph.type=wal' + + data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='waluuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv3': wal_vol} + + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', + vg_name='vg', + lv_path='/dev/VolGroup/target_volume', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + #find_associated_devices will call get_lvs() 4 times + # and it this needs results to be arranged that way + self.mock_volumes = [] + self.mock_volumes.append([data_vol, wal_vol]) + self.mock_volumes.append([data_vol]) + self.mock_volumes.append([]) + self.mock_volumes.append([wal_vol]) + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph_cluster') + monkeypatch.setattr(system, 'chown', lambda path: 0) + + migrate.NewDB(argv=[ + '--osd-id', '1', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--target', 'vgname/new_db']).main() + + n = len(self.mock_process_input) + assert n >= 5 + + assert self.mock_process_input[n - 5] == [ + 'lvchange', + '--deltag', 'ceph.db_device=/dbdevice', + '/dev/VolGroup/lv1'] + assert self.mock_process_input[n - 4] == [ + 'lvchange', + '--addtag', 'ceph.db_uuid=y', + '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume', + '/dev/VolGroup/lv1'] + + assert self.mock_process_input[n - 3].sort() == [ + 'lvchange', + '--addtag', 'ceph.wal_uuid=uuid', + '--addtag', 'ceph.osd_id=0', + '--addtag', 'ceph.type=db', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.db_uuid=y', + '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume', + '/dev/VolGroup/target_volume'].sort() + + assert self.mock_process_input[n - 2] == [ + 'lvchange', + '--addtag', 'ceph.db_uuid=y', + '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume', + '/dev/VolGroup/lv3'] + + assert self.mock_process_input[n - 1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph_cluster-1', + '--dev-target', '/dev/VolGroup/target_volume', + '--command', 'bluefs-bdev-new-db'] + + @patch('os.getuid') + def test_newwal(self, m_getuid, monkeypatch, capsys): + m_getuid.return_value = 0 + + source_tags = \ + 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234' + + data_vol = api.Volume(lv_name='volume1', 
lv_uuid='datauuid', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol} + + monkeypatch.setattr(migrate.api, 'get_first_lv', self.mock_get_first_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', vg_name='vg', + lv_path='/dev/VolGroup/target_volume', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", lambda id: False) + + #find_associated_devices will call get_lvs() 4 times + # and it this needs results to be arranged that way + self.mock_volumes = [] + self.mock_volumes.append([data_vol]) + self.mock_volumes.append([data_vol]) + self.mock_volumes.append([]) + self.mock_volumes.append([]) + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + + monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster') + monkeypatch.setattr(system, 'chown', lambda path: 0) + + migrate.NewWAL(argv=[ + '--osd-id', '2', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--target', 'vgname/new_wal']).main() + + n = len(self.mock_process_input) + assert n >= 3 + + assert self.mock_process_input[n - 3] == [ + 'lvchange', + '--addtag', 'ceph.wal_uuid=y', + '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume', + '/dev/VolGroup/lv1'] + + assert self.mock_process_input[n - 2].sort() == [ + 'lvchange', + '--addtag', 'ceph.osd_id=0', + '--addtag', 'ceph.type=wal', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.wal_uuid=y', + '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume', + '/dev/VolGroup/target_volume'].sort() + + assert self.mock_process_input[n - 1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/cluster-2', + '--dev-target', '/dev/VolGroup/target_volume', + '--command', 'bluefs-bdev-new-wal'] + +class TestMigrate(object): + + mock_volume = None + def mock_get_lv_by_fullname(self, *args, **kwargs): + return self.mock_volume + + mock_process_input = [] + def mock_process(self, *args, **kwargs): + self.mock_process_input.append(args[0]); + return ('', '', 0) + + mock_single_volumes = {} + def mock_get_first_lv(self, *args, **kwargs): + p = kwargs['filters']['lv_path'] + return self.mock_single_volumes[p] + + mock_volumes = [] + def mock_get_lvs(self, *args, **kwargs): + return self.mock_volumes.pop(0) + + def test_get_source_devices(self, monkeypatch): + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234' + source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = api.Volume(lv_name='volume2', lv_uuid='y', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags='ceph.osd_id=5,ceph.osd_type=db') + 
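A reminder of the selection rule these cases exercise: get_source_devices() keeps only the attached volumes whose type was listed in --from ('data' selects the block volume) and always skips a volume whose type equals the migration target's type; an empty result aborts the command. A simplified stand-alone version of that filter, for illustration only:

    def select_sources(devices, from_args, target_type=''):
        # devices is a list of (device, dev_type) pairs, dev_type in {'block', 'db', 'wal'}
        keep = []
        for device, dev_type in devices:
            if dev_type == target_type:
                continue
            name = 'data' if dev_type == 'block' else dev_type
            if name in from_args:
                keep.append((device, dev_type))
        return keep

    # With --from data wal, only the block and wal entries survive,
    # which is exactly what the first assertion below checks.
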
monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--from', 'data', 'wal', + '--target', 'vgname/new_wal']) + m.parse_argv() + res_devices = m.get_source_devices(devices) + + assert 2 == len(res_devices) + assert devices[0] == res_devices[0] + assert devices[2] == res_devices[1] + + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--from', 'db', 'wal', 'data', + '--target', 'vgname/new_wal']) + m.parse_argv() + res_devices = m.get_source_devices(devices) + + assert 3 == len(res_devices) + assert devices[0] == res_devices[0] + assert devices[1] == res_devices[1] + assert devices[2] == res_devices[2] + + + @patch('os.getuid') + def test_migrate_data_db_to_new_db(self, m_getuid, monkeypatch): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2_new', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'data', 'db', 'wal', + '--target', 'vgname/new_wal']) + m.main() + + n = len(self.mock_process_input) + assert n >= 5 + + assert self. mock_process_input[n-5] == [ + 'lvchange', + '--deltag', 'ceph.osd_id=2', + '--deltag', 'ceph.type=db', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '/dev/VolGroup/lv2'] + + assert self. mock_process_input[n-4] == [ + 'lvchange', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '/dev/VolGroup/lv1'] + + assert self. 
mock_process_input[n-3] == [ + 'lvchange', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv1'] + + assert self. mock_process_input[n-2] == [ + 'lvchange', + '--addtag', 'ceph.osd_id=2', + '--addtag', 'ceph.type=db', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.cluster_name=ceph', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv2_new'] + + assert self. mock_process_input[n-1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/dev/VolGroup/lv2_new', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db'] + + @patch('os.getuid') + def test_migrate_data_db_to_new_db_skip_wal(self, m_getuid, monkeypatch): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2_new', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'data', 'db', + '--target', 'vgname/new_wal']) + m.main() + + n = len(self.mock_process_input) + assert n >= 7 + + assert self. mock_process_input[n-7] == [ + 'lvchange', + '--deltag', 'ceph.osd_id=2', + '--deltag', 'ceph.type=db', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '/dev/VolGroup/lv2'] + + assert self. mock_process_input[n-6] == [ + 'lvchange', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '/dev/VolGroup/lv1'] + + assert self. 
mock_process_input[n-5] == [ + 'lvchange', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv1'] + + assert self. mock_process_input[n-4] == [ + 'lvchange', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '/dev/VolGroup/lv3'] + + assert self. mock_process_input[n-3] == [ + 'lvchange', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv3'] + + assert self. mock_process_input[n-2] == [ + 'lvchange', + '--addtag', 'ceph.osd_id=2', + '--addtag', 'ceph.type=db', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.cluster_name=ceph', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv2_new'] + + assert self. mock_process_input[n-1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/dev/VolGroup/lv2_new', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db'] + + @patch('os.getuid') + def test_migrate_data_db_wal_to_new_db(self, m_getuid, monkeypatch): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + source_wal_tags = 'ceph.osd_id=0,ceph.type=wal,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='waluuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2_new', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'data', 'db', 'wal', + '--target', 'vgname/new_wal']) + m.main() + + n = len(self.mock_process_input) + assert n >= 6 + + assert self. 
mock_process_input[n-6] == [ + 'lvchange', + '--deltag', 'ceph.osd_id=2', + '--deltag', 'ceph.type=db', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '/dev/VolGroup/lv2'] + + assert self. mock_process_input[n-5] == [ + 'lvchange', + '--deltag', 'ceph.osd_id=0', + '--deltag', 'ceph.type=wal', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv3'] + + assert self. mock_process_input[n-4] == [ + 'lvchange', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv1'] + + assert self. mock_process_input[n-3] == [ + 'lvchange', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv1'] + + assert self. mock_process_input[n-2] == [ + 'lvchange', + '--addtag', 'ceph.osd_id=2', + '--addtag', 'ceph.type=db', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.cluster_name=ceph', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv2_new'] + + assert self. mock_process_input[n-1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/dev/VolGroup/lv2_new', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal'] + + @patch('os.getuid') + def test_dont_migrate_data_db_wal_to_new_data(self, + m_getuid, + monkeypatch, + capsys): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2_new', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'data', + '--target', 'vgname/new_data']) + + with 
pytest.raises(SystemExit) as error: + m.main() + stdout, stderr = capsys.readouterr() + expected = 'Unable to migrate to : vgname/new_data' + assert expected in str(error.value) + expected = 'Unable to determine new volume type,' + ' please use new-db or new-wal command before.' + assert expected in stderr + + @patch('os.getuid') + def test_dont_migrate_db_to_wal(self, + m_getuid, + monkeypatch, + capsys): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='waluuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = wal_vol + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'db', + '--target', 'vgname/wal']) + + with pytest.raises(SystemExit) as error: + m.main() + stdout, stderr = capsys.readouterr() + expected = 'Unable to migrate to : vgname/wal' + assert expected in str(error.value) + expected = 'Migrate to WAL is not supported' + assert expected in stderr + + @patch('os.getuid') + def test_migrate_data_db_to_db(self, + m_getuid, + monkeypatch, + capsys): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' + source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + 
lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='waluuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = db_vol + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'db', 'data', + '--target', 'vgname/db']) + + m.main() + + n = len(self.mock_process_input) + assert n >= 1 + for s in self.mock_process_input: + print(s) + + assert self. mock_process_input[n-1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block'] + + @patch('os.getuid') + def test_migrate_data_wal_to_db(self, + m_getuid, + monkeypatch, + capsys): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='waluuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol, + } + monkeypatch.setattr(migrate.api, 'get_first_lv', + self.mock_get_first_lv) + + self.mock_volume = db_vol + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 
'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'db', 'data', 'wal', + '--target', 'vgname/db']) + + m.main() + + n = len(self.mock_process_input) + assert n >= 1 + for s in self.mock_process_input: + print(s) + + assert self. mock_process_input[n-4] == [ + 'lvchange', + '--deltag', 'ceph.osd_id=2', + '--deltag', 'ceph.type=wal', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv3'] + assert self. mock_process_input[n-3] == [ + 'lvchange', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv1'] + assert self. mock_process_input[n-2] == [ + 'lvchange', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv2'] + assert self. mock_process_input[n-1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal'] diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini index f7969fe9b..508d1b4c6 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini @@ -12,11 +12,9 @@ whitelist_externals = sleep passenv=* setenv= - ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config - ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions + ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey ANSIBLE_STDOUT_CALLBACK = debug - ANSIBLE_RETRY_FILES_ENABLED = False - ANSIBLE_SSH_RETRIES = 5 VAGRANT_CWD = {changedir} CEPH_VOLUME_DEBUG = 1 DEBIAN_FRONTEND=noninteractive @@ -53,7 +51,7 @@ commands= ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml # test cluster state using testinfra - py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests # reboot all vms - attempt bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} @@ -62,13 +60,13 @@ commands= sleep 30 # retest to ensure cluster came back up correctly after rebooting - py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests # destroy an OSD, zap it's device and recreate it using it's ID ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml # retest to ensure cluster came back up correctly - py.test -n 4 --sudo -v --connection=ansible 
--ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests # test zap OSDs by ID ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini index 2b63875bf..bec30e6d7 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini @@ -12,11 +12,9 @@ whitelist_externals = sleep passenv=* setenv= - ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config - ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions + ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey ANSIBLE_STDOUT_CALLBACK = debug - ANSIBLE_RETRY_FILES_ENABLED = False - ANSIBLE_SSH_RETRIES = 5 VAGRANT_CWD = {changedir} CEPH_VOLUME_DEBUG = 1 DEBIAN_FRONTEND=noninteractive @@ -53,7 +51,7 @@ commands= ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml # test cluster state using testinfra - py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests # reboot all vms - attempt bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} @@ -62,12 +60,12 @@ commands= sleep 30 # retest to ensure cluster came back up correctly after rebooting - py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests # destroy an OSD, zap it's device and recreate it using it's ID ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml # retest to ensure cluster came back up correctly - py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"} diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml index 0c1d13f8f..e5185e3fc 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml @@ -75,8 +75,8 @@ - name: install required packages for fedora > 23 raw: sudo dnf -y install python2-dnf libselinux-python ntp when: - - ansible_distribution == 'Fedora' - - ansible_distribution_major_version|int >= 23 + - ansible_facts['distribution'] == 'Fedora' + - ansible_facts['distribution_major_version']|int >= 23 - name: check if it is 
atomic host stat: @@ -120,7 +120,7 @@ dest: "/usr/lib/python3.6/site-packages" use_ssh_args: true when: - - ansible_os_family == "RedHat" + - ansible_facts['os_family'] == "RedHat" - inventory_hostname in groups.get(osd_group_name, []) - name: rsync ceph-volume to test nodes on ubuntu @@ -129,7 +129,7 @@ dest: "/usr/lib/python2.7/dist-packages" use_ssh_args: true when: - - ansible_os_family == "Debian" + - ansible_facts['os_family'] == "Debian" - inventory_hostname in groups.get(osd_group_name, []) - name: run ceph-config role diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/ceph/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini index c3b7d3648..1fdfe26a8 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini @@ -12,11 +12,9 @@ whitelist_externals = cp passenv=* setenv= - ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config - ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions + ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey ANSIBLE_STDOUT_CALLBACK = debug - ANSIBLE_RETRY_FILES_ENABLED = False - ANSIBLE_SSH_RETRIES = 5 VAGRANT_CWD = {changedir} CEPH_VOLUME_DEBUG = 1 DEBIAN_FRONTEND=noninteractive @@ -43,7 +41,7 @@ commands= ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml # test cluster state testinfra - py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml @@ -55,6 +53,6 @@ commands= sleep 120 # retest to ensure cluster came back up correctly after rebooting - py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"} diff --git a/ceph/src/ceph-volume/ceph_volume/util/system.py b/ceph/src/ceph-volume/ceph_volume/util/system.py index 499862337..d0d6545d3 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/system.py +++ b/ceph/src/ceph-volume/ceph_volume/util/system.py @@ -260,6 +260,7 @@ def get_mounts(devices=False, paths=False, realpath=False): - tmpfs - devtmpfs + - /dev/root If ``devices`` is set to ``True`` the mapping will be a device-to-path(s), if ``paths`` is set to ``True`` then the mapping will be @@ -270,7 +271,7 @@ def get_mounts(devices=False, paths=False, realpath=False): """ devices_mounted = {} paths_mounted = {} - do_not_skip = ['tmpfs', 'devtmpfs'] + do_not_skip = ['tmpfs', 'devtmpfs', '/dev/root'] default_to_devices = devices is False and paths is False with open(PROCDIR + '/mounts', 'rb') as mounts: diff --git a/ceph/src/ceph.in b/ceph/src/ceph.in index 25622d4f2..3a2890929 100755 --- a/ceph/src/ceph.in +++ b/ceph/src/ceph.in @@ -386,8 +386,11 @@ daemonperf {type.id | path} list|ls [stat-pats] [priority] 
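As context for the ceph_volume.util.system.get_mounts() change above (keeping /dev/root entries alongside tmpfs and devtmpfs), the idea reduces to the sketch below. This is a simplified stand-in, not the real helper: the actual function also supports devices=/paths=/realpath= switches and slightly different skip rules.

    # Minimal sketch of the /proc/mounts filtering idea; not the ceph-volume code.
    def get_mounts_sketch(procfile='/proc/mounts'):
        # pseudo-devices that appear many times are normally recorded once,
        # but these stay in the map on every occurrence
        do_not_skip = ['tmpfs', 'devtmpfs', '/dev/root']
        devices_mounted = {}
        with open(procfile) as mounts:
            for line in mounts:
                fields = line.split()
                if len(fields) < 2:
                    continue
                device, path = fields[0], fields[1]
                if device in devices_mounted and device not in do_not_skip:
                    continue  # treat repeated, non-whitelisted devices as noise
                devices_mounted.setdefault(device, []).append(path)
        return devices_mounted

    if __name__ == '__main__':
        for dev, paths in get_mounts_sketch().items():
            print(dev, paths)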
def do_extended_help(parser, args, target, partial): def help_for_sigs(sigs, partial=None): - sys.stdout.write(format_help(parse_json_funcsigs(sigs, 'cli'), - partial=partial)) + try: + sys.stdout.write(format_help(parse_json_funcsigs(sigs, 'cli'), + partial=partial)) + except BrokenPipeError: + pass def help_for_target(target, partial=None): # wait for osdmap because we know this is sent after the mgrmap diff --git a/ceph/src/ceph_mon.cc b/ceph/src/ceph_mon.cc index 3463110d1..4ae05567a 100644 --- a/ceph/src/ceph_mon.cc +++ b/ceph/src/ceph_mon.cc @@ -109,6 +109,14 @@ int obtain_monmap(MonitorDBStore &store, bufferlist &bl) } } + if (store.exists("mon_sync", "temp_newer_monmap")) { + dout(10) << __func__ << " found temp_newer_monmap" << dendl; + int err = store.get("mon_sync", "temp_newer_monmap", bl); + ceph_assert(err == 0); + ceph_assert(bl.length() > 0); + return 0; + } + if (store.exists("mkfs", "monmap")) { dout(10) << __func__ << " found mkfs monmap" << dendl; int err = store.get("mkfs", "monmap", bl); diff --git a/ceph/src/cephadm/cephadm b/ceph/src/cephadm/cephadm index 6eb2c2b5b..9b7348b8b 100755 --- a/ceph/src/cephadm/cephadm +++ b/ceph/src/cephadm/cephadm @@ -50,6 +50,7 @@ import platform import pwd import random import select +import shlex import shutil import socket import string @@ -712,6 +713,9 @@ def get_supported_daemons(): ################################## +class PortOccupiedError(Error): + pass + def attempt_bind(s, address, port): # type: (socket.socket, str, int) -> None @@ -719,12 +723,12 @@ def attempt_bind(s, address, port): s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((address, port)) except (socket.error, OSError) as e: # py2 and py3 - msg = 'Cannot bind to IP %s port %d: %s' % (address, port, e) - logger.warning(msg) if e.errno == errno.EADDRINUSE: - raise OSError(msg) - elif e.errno == errno.EADDRNOTAVAIL: - pass + msg = 'Cannot bind to IP %s port %d: %s' % (address, port, e) + logger.warning(msg) + raise PortOccupiedError(msg) + else: + raise e finally: s.close() @@ -733,16 +737,26 @@ def port_in_use(port_num): # type: (int) -> bool """Detect whether a port is in use on the local machine - IPv4 and IPv6""" logger.info('Verifying port %d ...' % port_num) - try: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - attempt_bind(s, '0.0.0.0', port_num) - - s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) - attempt_bind(s, '::', port_num) - except OSError: - return True - else: + def _port_in_use(af, address): + # type: (socket.AddressFamily, str) -> bool + try: + s = socket.socket(af, socket.SOCK_STREAM) + attempt_bind(s, address, port_num) + except PortOccupiedError: + return True + except OSError as e: + if e.errno in (errno.EAFNOSUPPORT, errno.EADDRNOTAVAIL): + # Ignore EAFNOSUPPORT and EADDRNOTAVAIL as two interfaces are + # being tested here and one might be intentionally be disabled. + # In that case no error should be raised. 
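Condensed into a standalone form, the bind-probing pattern that the cephadm hunks above and below introduce looks roughly as follows; logging, the exact message text and the py2 compatibility details are left out, and the helper names simply mirror the patch.

    # Standalone sketch of the port-probe pattern: EADDRINUSE -> port occupied,
    # EAFNOSUPPORT / EADDRNOTAVAIL from a disabled address family -> "not in use".
    import errno
    import socket

    class PortOccupiedError(Exception):
        pass

    def attempt_bind(sock, address, port):
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind((address, port))
        except OSError as e:
            if e.errno == errno.EADDRINUSE:
                raise PortOccupiedError('cannot bind to %s:%d: %s' % (address, port, e))
            raise
        finally:
            sock.close()

    def port_in_use(port_num):
        def probe(af, address):
            try:
                attempt_bind(socket.socket(af, socket.SOCK_STREAM), address, port_num)
            except PortOccupiedError:
                return True
            except OSError as e:
                # one of the two address families may be disabled on this host
                if e.errno in (errno.EAFNOSUPPORT, errno.EADDRNOTAVAIL):
                    return False
                raise
            return False
        return any(probe(af, addr) for af, addr in
                   ((socket.AF_INET, '0.0.0.0'), (socket.AF_INET6, '::')))

    if __name__ == '__main__':
        print(port_in_use(9100))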
+ return False + else: + raise e return False + return any(_port_in_use(af, address) for af, address in ( + (socket.AF_INET, '0.0.0.0'), + (socket.AF_INET6, '::') + )) def check_ip_port(ip, port): @@ -754,10 +768,7 @@ def check_ip_port(ip, port): ip = unwrap_ipv6(ip) else: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - attempt_bind(s, ip, port) - except OSError as e: - raise Error(e) + attempt_bind(s, ip, port) ################################## @@ -1765,7 +1776,7 @@ def get_daemon_args(fsid, daemon_type, daemon_id): '--setgroup', 'ceph', '--default-log-to-file=false', '--default-log-to-stderr=true', - '--default-log-stderr-prefix="debug "', + '--default-log-stderr-prefix=debug ', ] if daemon_type == 'mon': r += [ @@ -1814,7 +1825,6 @@ def create_daemon_dirs(fsid, daemon_type, daemon_id, uid, gid, if daemon_type in Monitoring.components.keys(): config_json: Dict[str, Any] = get_parm(args.config_json) - required_files = Monitoring.components[daemon_type].get('config-json-files', list()) # Set up directories specific to the monitoring component config_dir = '' @@ -1838,10 +1848,14 @@ def create_daemon_dirs(fsid, daemon_type, daemon_id, uid, gid, makedirs(os.path.join(data_dir_root, config_dir, 'data'), uid, gid, 0o755) # populate the config directory for the component from the config-json - for fname in required_files: - if 'files' in config_json: # type: ignore + if 'files' in config_json: + for fname in config_json['files']: content = dict_get_join(config_json['files'], fname) - with open(os.path.join(data_dir_root, config_dir, fname), 'w') as f: + if os.path.isabs(fname): + fpath = os.path.join(data_dir_root, fname.lstrip(os.path.sep)) + else: + fpath = os.path.join(data_dir_root, config_dir, fname) + with open(fpath, 'w', encoding='utf-8') as f: os.fchown(f.fileno(), uid, gid) os.fchmod(f.fileno(), 0o600) f.write(content) @@ -2230,10 +2244,15 @@ def _write_container_cmd_to_bash(file_obj, container, comment=None, background=F file_obj.write('! '+ ' '.join(container.rm_cmd()) + ' 2> /dev/null\n') # Sometimes, `podman rm` doesn't find the container. Then you'll have to add `--storage` if 'podman' in container_path: - file_obj.write('! '+ ' '.join(container.rm_cmd(storage=True)) + ' 2> /dev/null\n') + file_obj.write( + '! 
' + + ' '.join([shlex.quote(a) for a in container.rm_cmd(storage=True)]) + + ' 2> /dev/null\n') # container run command - file_obj.write(' '.join(container.run_cmd()) + (' &' if background else '') + '\n') + file_obj.write( + ' '.join([shlex.quote(a) for a in container.run_cmd()]) + + (' &' if background else '') + '\n') def deploy_daemon_units(fsid, uid, gid, daemon_type, daemon_id, c, diff --git a/ceph/src/cephadm/samples/custom_container.json b/ceph/src/cephadm/samples/custom_container.json index d6e73c474..194a44d2a 100644 --- a/ceph/src/cephadm/samples/custom_container.json +++ b/ceph/src/cephadm/samples/custom_container.json @@ -2,8 +2,8 @@ "image": "docker.io/prom/alertmanager:v0.20.0", "ports": [9093, 9094], "args": [ - "-p 9093:9093", - "-p 9094:9094" + "-p", "9093:9093", + "-p", "9094:9094" ], "dirs": ["etc/alertmanager"], "files": { diff --git a/ceph/src/cephadm/tests/test_cephadm.py b/ceph/src/cephadm/tests/test_cephadm.py index 2df7d559a..66b3ca7b3 100644 --- a/ceph/src/cephadm/tests/test_cephadm.py +++ b/ceph/src/cephadm/tests/test_cephadm.py @@ -1,9 +1,9 @@ # type: ignore import mock -from mock import patch -import os -import sys +from mock import patch, call import unittest +import errno +import socket import pytest @@ -22,6 +22,97 @@ class TestCephAdm(object): r = cd.get_unit_file('9b9d7609-f4d5-4aba-94c8-effa764d96c9') assert 'Requires=docker.service' not in r + def test_attempt_bind(self): + cd.logger = mock.Mock() + address = None + port = 0 + + def os_error(errno): + _os_error = OSError() + _os_error.errno = errno + return _os_error + + for side_effect, expected_exception in ( + (os_error(errno.EADDRINUSE), cd.PortOccupiedError), + (os_error(errno.EAFNOSUPPORT), OSError), + (os_error(errno.EADDRNOTAVAIL), OSError), + (None, None), + ): + _socket = mock.Mock() + _socket.bind.side_effect = side_effect + try: + cd.attempt_bind(_socket, address, port) + except Exception as e: + assert isinstance(e, expected_exception) + else: + if expected_exception is not None: + assert False, '{} should not be None'.format(expected_exception) + + @mock.patch('cephadm.attempt_bind') + def test_port_in_use(self, attempt_bind): + + assert cd.port_in_use(9100) == False + + attempt_bind.side_effect = cd.PortOccupiedError('msg') + assert cd.port_in_use(9100) == True + + os_error = OSError() + os_error.errno = errno.EADDRNOTAVAIL + attempt_bind.side_effect = os_error + assert cd.port_in_use(9100) == False + + os_error = OSError() + os_error.errno = errno.EAFNOSUPPORT + attempt_bind.side_effect = os_error + assert cd.port_in_use(9100) == False + + @mock.patch('socket.socket') + @mock.patch('cephadm.args') + def test_check_ip_port_success(self, args, _socket): + args.skip_ping_check = False + + for address, address_family in ( + ('0.0.0.0', socket.AF_INET), + ('::', socket.AF_INET6), + ): + try: + cd.check_ip_port(address, 9100) + except: + assert False + else: + assert _socket.call_args == call(address_family, socket.SOCK_STREAM) + + @mock.patch('socket.socket') + @mock.patch('cephadm.args') + def test_check_ip_port_failure(self, args, _socket): + args.skip_ping_check = False + + def os_error(errno): + _os_error = OSError() + _os_error.errno = errno + return _os_error + + for address, address_family in ( + ('0.0.0.0', socket.AF_INET), + ('::', socket.AF_INET6), + ): + for side_effect, expected_exception in ( + (os_error(errno.EADDRINUSE), cd.PortOccupiedError), + (os_error(errno.EADDRNOTAVAIL), OSError), + (os_error(errno.EAFNOSUPPORT), OSError), + (None, None), + ): + mock_socket_obj = 
mock.Mock() + mock_socket_obj.bind.side_effect = side_effect + _socket.return_value = mock_socket_obj + try: + cd.check_ip_port(address, 9100) + except Exception as e: + assert isinstance(e, expected_exception) + else: + assert side_effect is None + + def test_is_not_fsid(self): assert not cd.is_fsid('no-uuid') @@ -416,3 +507,53 @@ class TestMonitoring(object): _call.return_value = '', '{}, version 0.16.1'.format(daemon_type.replace('-', '_')), 0 version = cd.Monitoring.get_version(ctx, 'container_id', daemon_type) assert version == '0.16.1' + + @mock.patch('cephadm.os.fchown') + @mock.patch('cephadm.get_parm') + @mock.patch('cephadm.makedirs') + @mock.patch('cephadm.open') + @mock.patch('cephadm.make_log_dir') + @mock.patch('cephadm.make_data_dir') + @mock.patch('cephadm.args') + def test_create_daemon_dirs_prometheus(self, args, make_data_dir, make_log_dir, _open, makedirs, + get_parm, fchown): + """ + Ensures the required and optional files given in the configuration are + created and mapped correctly inside the container. Tests absolute and + relative file paths given in the configuration. + """ + args.data_dir = '/somedir' + fsid = 'aaf5a720-13fe-4a3b-82b9-2d99b7fd9704' + daemon_type = 'prometheus' + uid, gid = 50, 50 + daemon_id = 'home' + files = { + 'files': { + 'prometheus.yml': 'foo', + '/etc/prometheus/alerting/ceph_alerts.yml': 'bar' + } + } + get_parm.return_value = files + + cd.create_daemon_dirs(fsid, + daemon_type, + daemon_id, + uid, + gid, + config=None, + keyring=None) + + prefix = '{data_dir}/{fsid}/{daemon_type}.{daemon_id}'.format( + data_dir=args.data_dir, + fsid=fsid, + daemon_type=daemon_type, + daemon_id=daemon_id + ) + assert _open.call_args_list == [ + call('{}/etc/prometheus/prometheus.yml'.format(prefix), 'w', + encoding='utf-8'), + call('{}/etc/prometheus/alerting/ceph_alerts.yml'.format(prefix), 'w', + encoding='utf-8'), + ] + assert call().__enter__().write('foo') in _open.mock_calls + assert call().__enter__().write('bar') in _open.mock_calls diff --git a/ceph/src/client/Client.cc b/ceph/src/client/Client.cc index c6d5b0c55..96d7f5f2f 100755 --- a/ceph/src/client/Client.cc +++ b/ceph/src/client/Client.cc @@ -132,6 +132,14 @@ void client_flush_set_callback(void *p, ObjectCacher::ObjectSet *oset) client->flush_set_callback(oset); } +bool Client::is_reserved_vino(vinodeno_t &vino) { + if (MDS_IS_PRIVATE_INO(vino.ino)) { + ldout(cct, -1) << __func__ << " attempt to access reserved inode number " << vino << dendl; + return true; + } + return false; +} + // ------------- @@ -4369,7 +4377,7 @@ void Client::trim_caps(MetaSession *s, uint64_t max) ++q; if (dn->lru_is_expireable()) { if (can_invalidate_dentries && - dn->dir->parent_inode->ino == MDS_INO_ROOT) { + dn->dir->parent_inode->ino == CEPH_INO_ROOT) { // Only issue one of these per DN for inodes in root: handle // others more efficiently by calling for root-child DNs at // the end of this function. 
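The new test_create_daemon_dirs_prometheus test above pins down how the revised create_daemon_dirs() places entries from the config-json 'files' section. Reduced to its core, the mapping it checks is the one below; the directory names and the helper name are illustrative only, not part of cephadm.

    import os

    def target_path(data_dir_root, config_dir, fname):
        # absolute names are re-rooted under the daemon data dir,
        # relative names go under the component's own config dir
        if os.path.isabs(fname):
            return os.path.join(data_dir_root, fname.lstrip(os.path.sep))
        return os.path.join(data_dir_root, config_dir, fname)

    prefix = '/somedir/<fsid>/prometheus.home'   # data_dir/fsid/daemon_type.daemon_id
    print(target_path(prefix, 'etc/prometheus', 'prometheus.yml'))
    # -> /somedir/<fsid>/prometheus.home/etc/prometheus/prometheus.yml
    print(target_path(prefix, 'etc/prometheus', '/etc/prometheus/alerting/ceph_alerts.yml'))
    # -> /somedir/<fsid>/prometheus.home/etc/prometheus/alerting/ceph_alerts.yml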
@@ -4382,10 +4390,10 @@ void Client::trim_caps(MetaSession *s, uint64_t max) all = false; } } - if (in->ll_ref == 1 && in->ino != MDS_INO_ROOT) { + if (in->ll_ref == 1 && in->ino != CEPH_INO_ROOT) { _schedule_ino_release_callback(in.get()); } - if (all && in->ino != MDS_INO_ROOT) { + if (all && in->ino != CEPH_INO_ROOT) { ldout(cct, 20) << __func__ << " counting as trimmed: " << *in << dendl; trimmed++; } @@ -8708,33 +8716,44 @@ int Client::lookup_hash(inodeno_t ino, inodeno_t dirino, const char *name, * the resulting Inode object in one operation, so that caller * can safely assume inode will still be there after return. */ -int Client::_lookup_ino(inodeno_t ino, const UserPerm& perms, Inode **inode) +int Client::_lookup_vino(vinodeno_t vino, const UserPerm& perms, Inode **inode) { - ldout(cct, 8) << __func__ << " enter(" << ino << ")" << dendl; + ldout(cct, 8) << __func__ << " enter(" << vino << ")" << dendl; if (unmounting) return -ENOTCONN; + if (is_reserved_vino(vino)) + return -ESTALE; + MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPINO); - filepath path(ino); + filepath path(vino.ino); req->set_filepath(path); + /* + * The MDS expects either a "real" snapid here or 0. The special value + * carveouts for the snapid are all at the end of the range so we can + * just look for any snapid below this value. + */ + if (vino.snapid < CEPH_NOSNAP) + req->head.args.lookupino.snapid = vino.snapid; + int r = make_request(req, perms, NULL, NULL, rand() % mdsmap->get_num_in_mds()); if (r == 0 && inode != NULL) { - vinodeno_t vino(ino, CEPH_NOSNAP); unordered_map::iterator p = inode_map.find(vino); ceph_assert(p != inode_map.end()); *inode = p->second; _ll_get(*inode); } - ldout(cct, 8) << __func__ << " exit(" << ino << ") = " << r << dendl; + ldout(cct, 8) << __func__ << " exit(" << vino << ") = " << r << dendl; return r; } int Client::lookup_ino(inodeno_t ino, const UserPerm& perms, Inode **inode) { + vinodeno_t vino(ino, CEPH_NOSNAP); std::lock_guard lock(client_lock); - return _lookup_ino(ino, perms, inode); + return _lookup_vino(vino, perms, inode); } /** @@ -10896,58 +10915,61 @@ int Client::ll_lookup(Inode *parent, const char *name, struct stat *attr, return r; } -int Client::ll_lookup_inode( - struct inodeno_t ino, +int Client::ll_lookup_vino( + vinodeno_t vino, const UserPerm& perms, Inode **inode) { ceph_assert(inode != NULL); - std::lock_guard lock(client_lock); - ldout(cct, 3) << "ll_lookup_inode " << ino << dendl; - + if (unmounting) return -ENOTCONN; - // Num1: get inode and *inode - int r = _lookup_ino(ino, perms, inode); - if (r) - return r; + if (is_reserved_vino(vino)) + return -ESTALE; - ceph_assert(*inode != NULL); - - if (!(*inode)->dentries.empty()) { - ldout(cct, 8) << __func__ << " dentry already present" << dendl; + std::lock_guard lock(client_lock); + ldout(cct, 3) << __func__ << vino << dendl; + + // Check the cache first + unordered_map::iterator p = inode_map.find(vino); + if (p != inode_map.end()) { + *inode = p->second; + _ll_get(*inode); return 0; } - if ((*inode)->is_root()) { - ldout(cct, 8) << "ino is root, no parent" << dendl; - return 0; - } + uint64_t snapid = vino.snapid; - // Num2: Request the parent inode, so that we can look up the name - Inode *parent; - r = _lookup_parent(*inode, perms, &parent); - if (r) { - _ll_forget(*inode, 1); + // for snapdir, find the non-snapped dir inode + if (snapid == CEPH_SNAPDIR) + vino.snapid = CEPH_NOSNAP; + + int r = _lookup_vino(vino, perms, inode); + if (r) return r; - } + ceph_assert(*inode != NULL); - 
ceph_assert(parent != NULL); + if (snapid == CEPH_SNAPDIR) { + Inode *tmp = *inode; - // Num3: Finally, get the name (dentry) of the requested inode - r = _lookup_name(*inode, parent, perms); - if (r) { - // Unexpected error - _ll_forget(parent, 1); - _ll_forget(*inode, 1); - return r; + // open the snapdir and put the inode ref + *inode = open_snapdir(tmp); + _ll_forget(tmp, 1); + _ll_get(*inode); } - - _ll_forget(parent, 1); return 0; } +int Client::ll_lookup_inode( + struct inodeno_t ino, + const UserPerm& perms, + Inode **inode) +{ + vinodeno_t vino(ino, CEPH_NOSNAP); + return ll_lookup_vino(vino, perms, inode); +} + int Client::ll_lookupx(Inode *parent, const char *name, Inode **out, struct ceph_statx *stx, unsigned want, unsigned flags, const UserPerm& perms) @@ -11157,6 +11179,9 @@ Inode *Client::ll_get_inode(vinodeno_t vino) if (unmounting) return NULL; + if (is_reserved_vino(vino)) + return NULL; + unordered_map::iterator p = inode_map.find(vino); if (p == inode_map.end()) return NULL; diff --git a/ceph/src/client/Client.h b/ceph/src/client/Client.h index 95cb2fcfb..e4e651554 100644 --- a/ceph/src/client/Client.h +++ b/ceph/src/client/Client.h @@ -480,6 +480,7 @@ public: int ll_lookup(Inode *parent, const char *name, struct stat *attr, Inode **out, const UserPerm& perms); int ll_lookup_inode(struct inodeno_t ino, const UserPerm& perms, Inode **inode); + int ll_lookup_vino(vinodeno_t vino, const UserPerm& perms, Inode **inode); int ll_lookupx(Inode *parent, const char *name, Inode **out, struct ceph_statx *stx, unsigned want, unsigned flags, const UserPerm& perms); @@ -1013,6 +1014,7 @@ private: static const VXattr _common_vxattrs[]; + bool is_reserved_vino(vinodeno_t &vino); void fill_dirent(struct dirent *de, const char *name, int type, uint64_t ino, loff_t next_off); @@ -1183,7 +1185,7 @@ private: int _ll_getattr(Inode *in, int caps, const UserPerm& perms); int _lookup_parent(Inode *in, const UserPerm& perms, Inode **parent=NULL); int _lookup_name(Inode *in, Inode *parent, const UserPerm& perms); - int _lookup_ino(inodeno_t ino, const UserPerm& perms, Inode **inode=NULL); + int _lookup_vino(vinodeno_t ino, const UserPerm& perms, Inode **inode=NULL); bool _ll_forget(Inode *in, uint64_t count); diff --git a/ceph/src/client/Inode.h b/ceph/src/client/Inode.h index 5b932a987..faa2c894d 100644 --- a/ceph/src/client/Inode.h +++ b/ceph/src/client/Inode.h @@ -164,7 +164,7 @@ struct Inode { version_t inline_version; bufferlist inline_data; - bool is_root() const { return ino == MDS_INO_ROOT; } + bool is_root() const { return ino == CEPH_INO_ROOT; } bool is_symlink() const { return (mode & S_IFMT) == S_IFLNK; } bool is_dir() const { return (mode & S_IFMT) == S_IFDIR; } bool is_file() const { return (mode & S_IFMT) == S_IFREG; } diff --git a/ceph/src/cls/rgw/cls_rgw.cc b/ceph/src/cls/rgw/cls_rgw.cc index 84e536af0..84792f740 100644 --- a/ceph/src/cls/rgw/cls_rgw.cc +++ b/ceph/src/cls/rgw/cls_rgw.cc @@ -42,6 +42,9 @@ static std::string bucket_index_prefixes[] = { "", /* special handling for the o /* this must be the last index */ "9999_",}; +static const std::string BI_PREFIX_END = string(1, BI_PREFIX_CHAR) + + bucket_index_prefixes[BI_BUCKET_LAST_INDEX]; + static bool bi_is_objs_index(const string& s) { return ((unsigned char)s[0] != BI_PREFIX_CHAR); } @@ -484,6 +487,7 @@ int rgw_bucket_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out) bool has_delimiter = !op.delimiter.empty(); if (has_delimiter && + start_after_key > op.filter_prefix && 
boost::algorithm::ends_with(start_after_key, op.delimiter)) { // advance past all subdirectory entries if we start after a // subdirectory @@ -2424,18 +2428,14 @@ static int rgw_bi_put_op(cls_method_context_t hctx, bufferlist *in, bufferlist * } static int list_plain_entries(cls_method_context_t hctx, - const string& name, - const string& marker, - uint32_t max, + const string& filter, + const string& start_after_key, + const string& end_key, + uint32_t max, list *entries, - bool *pmore) + bool *end_key_reached, + bool *pmore) { - string filter = name; - string start_after_key = marker; - - string end_key; // stop listing at bi_log_prefix - bi_log_prefix(end_key); - int count = 0; map keys; int ret = cls_cxx_map_get_vals(hctx, start_after_key, filter, max, @@ -2444,13 +2444,12 @@ static int list_plain_entries(cls_method_context_t hctx, return ret; } - map::iterator iter; - for (iter = keys.begin(); iter != keys.end(); ++iter) { - if (iter->first >= end_key) { - /* past the end of plain namespace */ - if (pmore) { - *pmore = false; - } + *end_key_reached = false; + + for (auto iter = keys.begin(); iter != keys.end(); ++iter) { + if (!end_key.empty() && iter->first >= end_key) { + *end_key_reached = true; + *pmore = true; return count; } @@ -2469,13 +2468,12 @@ static int list_plain_entries(cls_method_context_t hctx, return -EIO; } - CLS_LOG(20, "%s(): entry.idx=%s e.key.name=%s", __func__, escape_str(entry.idx).c_str(), escape_str(e.key.name).c_str()); + CLS_LOG(20, "%s(): entry.idx=%s e.key.name=%s", __func__, + escape_str(entry.idx).c_str(), escape_str(e.key.name).c_str()); - if (!name.empty() && e.key.name != name) { + if (!filter.empty() && e.key.name != filter) { /* we are skipping the rest of the entries */ - if (pmore) { - *pmore = false; - } + *pmore = false; return count; } @@ -2484,12 +2482,54 @@ static int list_plain_entries(cls_method_context_t hctx, if (count >= (int)max) { return count; } - start_after_key = entry.idx; } return count; } +static int list_plain_entries(cls_method_context_t hctx, + const string& name, + const string& marker, + uint32_t max, + list *entries, + bool *pmore) { + string start_after_key = marker; + string end_key; + bi_log_prefix(end_key); + int r; + bool end_key_reached; + bool more; + + if (start_after_key < end_key) { + // listing ascii plain namespace + int r = list_plain_entries(hctx, name, start_after_key, end_key, max, + entries, &end_key_reached, &more); + if (r < 0) { + return r; + } + if (r >= (int)max || !end_key_reached || !more) { + if (pmore) { + *pmore = more; + } + return r; + } + start_after_key = BI_PREFIX_END; + max = max - r; + } + + // listing non-ascii plain namespace + r = list_plain_entries(hctx, name, start_after_key, {}, max, entries, + &end_key_reached, &more); + if (r < 0) { + return r; + } + if (pmore) { + *pmore = more; + } + + return r; +} + static int list_instance_entries(cls_method_context_t hctx, const string& name, const string& marker, diff --git a/ceph/src/common/config_proxy.h b/ceph/src/common/config_proxy.h index 7ca5a54af..136e8be73 100644 --- a/ceph/src/common/config_proxy.h +++ b/ceph/src/common/config_proxy.h @@ -13,11 +13,6 @@ // member methods. 
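The list_plain_entries() rework above is easier to follow as a model: plain bucket-index entries whose names start below 0x80 sort before the reserved BI_PREFIX_CHAR namespace, while names starting with high (non-ASCII) bytes sort after the last reserved prefix, so the wrapper now scans two ranges instead of one. The Python below is only a rough model of that control flow over an in-memory key list; the log prefix is a stand-in for bi_log_prefix(), and omap semantics are not reproduced.

    BI_PREFIX_CHAR = '\x80'
    LOG_PREFIX = BI_PREFIX_CHAR + '0_'        # stand-in for bi_log_prefix()
    BI_PREFIX_END = BI_PREFIX_CHAR + '9999_'  # past the last reserved prefix

    def list_plain_entries(keys, marker, max_entries):
        keys = sorted(keys)
        out = []

        def scan(start_after, end_key, want):
            got = []
            for k in keys:
                if k <= start_after:
                    continue
                if end_key is not None and k >= end_key:
                    break
                got.append(k)
                if len(got) >= want:
                    break
            return got

        # pass 1: ASCII plain namespace, stop at the log prefix
        if marker < LOG_PREFIX:
            out += scan(marker, LOG_PREFIX, max_entries)
        # pass 2: non-ASCII plain namespace, resume past the reserved prefixes
        if len(out) < max_entries:
            out += scan(max(marker, BI_PREFIX_END), None, max_entries - len(out))
        return out

    if __name__ == '__main__':
        idx = ['apple', 'zebra', '\x80' + '0_00001.42.2', '\u00e9clair']
        print(list_plain_entries(idx, '', 10))   # reserved-namespace key is skipped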
namespace ceph::common{ class ConfigProxy { - static ConfigValues get_config_values(const ConfigProxy &config_proxy) { - std::lock_guard locker(config_proxy.lock); - return config_proxy.values; - } - /** * The current values of all settings described by the schema */ @@ -115,7 +110,7 @@ public: : config{values, obs_mgr, is_daemon} {} explicit ConfigProxy(const ConfigProxy &config_proxy) - : values(get_config_values(config_proxy)), + : values(config_proxy.get_config_values()), config{values, obs_mgr, config_proxy.config.is_daemon} {} const ConfigValues* operator->() const noexcept { @@ -124,11 +119,16 @@ public: ConfigValues* operator->() noexcept { return &values; } -#ifdef WITH_SEASTAR + ConfigValues get_config_values() const { + std::lock_guard l{lock}; + return values; + } void set_config_values(const ConfigValues& val) { +#ifndef WITH_SEASTAR + std::lock_guard l{lock}; +#endif values = val; } -#endif int get_val(const std::string_view key, char** buf, int len) const { std::lock_guard l{lock}; return config.get_val(values, key, buf, len); diff --git a/ceph/src/common/ipaddr.cc b/ceph/src/common/ipaddr.cc index 0abf7f20c..bd11cbfc1 100644 --- a/ceph/src/common/ipaddr.cc +++ b/ceph/src/common/ipaddr.cc @@ -3,7 +3,6 @@ #include #include #include -#include #if defined(__FreeBSD__) #include #include @@ -29,54 +28,23 @@ void netmask_ipv4(const struct in_addr *addr, out->s_addr = addr->s_addr & mask; } - -static bool match_numa_node(const string& if_name, int numa_node) +bool matches_ipv4_in_subnet(const struct ifaddrs& addrs, + const struct sockaddr_in* net, + unsigned int prefix_len) { -#ifdef WITH_SEASTAR - return true; -#else - int if_node = -1; - int r = get_iface_numa_node(if_name, &if_node); - if (r < 0) { + if (addrs.ifa_addr == nullptr) return false; - } - return if_node == numa_node; -#endif -} - -const struct ifaddrs *find_ipv4_in_subnet(const struct ifaddrs *addrs, - const struct sockaddr_in *net, - unsigned int prefix_len, - int numa_node) { - struct in_addr want, temp; + if (addrs.ifa_addr->sa_family != net->sin_family) + return false; + struct in_addr want; netmask_ipv4(&net->sin_addr, prefix_len, &want); - for (; addrs != NULL; addrs = addrs->ifa_next) { - - if (addrs->ifa_addr == NULL) - continue; - - if (strcmp(addrs->ifa_name, "lo") == 0 || boost::starts_with(addrs->ifa_name, "lo:")) - continue; - - if (numa_node >= 0 && !match_numa_node(addrs->ifa_name, numa_node)) - continue; - - if (addrs->ifa_addr->sa_family != net->sin_family) - continue; - - struct in_addr *cur = &((struct sockaddr_in*)addrs->ifa_addr)->sin_addr; - netmask_ipv4(cur, prefix_len, &temp); - - if (temp.s_addr == want.s_addr) { - return addrs; - } - } - - return NULL; + struct in_addr *cur = &((struct sockaddr_in*)addrs.ifa_addr)->sin_addr; + struct in_addr temp; + netmask_ipv4(cur, prefix_len, &temp); + return temp.s_addr == want.s_addr; } - void netmask_ipv6(const struct in6_addr *addr, unsigned int prefix_len, struct in6_addr *out) { @@ -90,59 +58,25 @@ void netmask_ipv6(const struct in6_addr *addr, memset(out->s6_addr+prefix_len/8+1, 0, 16-prefix_len/8-1); } +bool matches_ipv6_in_subnet(const struct ifaddrs& addrs, + const struct sockaddr_in6* net, + unsigned int prefix_len) +{ + if (addrs.ifa_addr == nullptr) + return false; -const struct ifaddrs *find_ipv6_in_subnet(const struct ifaddrs *addrs, - const struct sockaddr_in6 *net, - unsigned int prefix_len, - int numa_node) { - struct in6_addr want, temp; - + if (addrs.ifa_addr->sa_family != net->sin6_family) + return false; + struct in6_addr want; 
netmask_ipv6(&net->sin6_addr, prefix_len, &want); - for (; addrs != NULL; addrs = addrs->ifa_next) { - - if (addrs->ifa_addr == NULL) - continue; - - if (strcmp(addrs->ifa_name, "lo") == 0 || boost::starts_with(addrs->ifa_name, "lo:")) - continue; - - if (numa_node >= 0 && !match_numa_node(addrs->ifa_name, numa_node)) - continue; - - if (addrs->ifa_addr->sa_family != net->sin6_family) - continue; - - struct in6_addr *cur = &((struct sockaddr_in6*)addrs->ifa_addr)->sin6_addr; - if (IN6_IS_ADDR_LINKLOCAL(cur)) - continue; - netmask_ipv6(cur, prefix_len, &temp); - - if (IN6_ARE_ADDR_EQUAL(&temp, &want)) - return addrs; - } - - return NULL; -} - - -const struct ifaddrs *find_ip_in_subnet(const struct ifaddrs *addrs, - const struct sockaddr *net, - unsigned int prefix_len, - int numa_node) { - switch (net->sa_family) { - case AF_INET: - return find_ipv4_in_subnet(addrs, (struct sockaddr_in*)net, prefix_len, - numa_node); - - case AF_INET6: - return find_ipv6_in_subnet(addrs, (struct sockaddr_in6*)net, prefix_len, - numa_node); - } - - return NULL; + struct in6_addr temp; + struct in6_addr *cur = &((struct sockaddr_in6*)addrs.ifa_addr)->sin6_addr; + if (IN6_IS_ADDR_LINKLOCAL(cur)) + return false; + netmask_ipv6(cur, prefix_len, &temp); + return IN6_ARE_ADDR_EQUAL(&temp, &want); } - bool parse_network(const char *s, struct sockaddr_storage *network, unsigned int *prefix_len) { char *slash = strchr((char*)s, '/'); if (!slash) { diff --git a/ceph/src/common/options.cc b/ceph/src/common/options.cc index b8cc55ef9..85f4203be 100644 --- a/ceph/src/common/options.cc +++ b/ceph/src/common/options.cc @@ -5256,6 +5256,20 @@ std::vector