From cd265ab1e2bb0c89e7a1001629426438754333f4 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Thu, 25 Mar 2021 11:23:20 +0100 Subject: [PATCH] import ceph 15.2.10 Signed-off-by: Thomas Lamprecht --- ceph/.github/CODEOWNERS | 3 +- ceph/.github/labeler.yml | 10 + ceph/.github/milestone.yml | 6 + ceph/.github/workflows/pr-triage.yml | 15 + ceph/CMakeLists.txt | 2 +- ceph/PendingReleaseNotes | 4 + ceph/alpine/APKBUILD | 6 +- ceph/ceph.spec | 6 +- ceph/changelog.upstream | 10 +- ceph/doc/cephfs/fs-volumes.rst | 18 + ceph/doc/mgr/dashboard.rst | 30 +- .../prometheus/alerts/ceph_default_alerts.yml | 45 +- .../qa/cephfs/overrides/whitelist_health.yaml | 2 +- .../whitelist_wrongly_marked_down.yaml | 2 +- .../tasks/cfuse_workunit_suites_ffsb.yaml | 2 +- ceph/qa/overrides/2-size-2-min-size.yaml | 2 +- .../whitelist_wrongly_marked_down.yaml | 2 +- ceph/qa/releases/luminous.yaml | 2 +- .../big/rados-thrash/thrashers/default.yaml | 2 +- .../tasks/alternate-pool.yaml | 2 +- .../basic_functional/tasks/auto-repair.yaml | 2 +- .../tasks/cephfs_scrub_tests.yaml | 2 +- .../basic_functional/tasks/client-limits.yaml | 2 +- .../tasks/client-recovery.yaml | 2 +- .../fs/basic_functional/tasks/damage.yaml | 2 +- .../fs/basic_functional/tasks/data-scan.yaml | 2 +- .../basic_functional/tasks/forward-scrub.yaml | 2 +- .../tasks/journal-repair.yaml | 2 +- .../fs/basic_functional/tasks/mds-full.yaml | 2 +- .../tasks/sessionmap/sessionmap.yaml | 2 +- .../tasks/volume-client/task/test/test.yaml | 2 +- .../fs/basic_functional/tasks/volumes.yaml | 2 +- .../client_trim_caps/tasks/trim-i22073.yaml | 2 +- .../multiclient/tasks/cephfs_misc_tests.yaml | 2 +- ceph/qa/suites/fs/multifs/tasks/failover.yaml | 2 +- ceph/qa/suites/fs/thrash/ceph-thrash/mds.yaml | 2 +- ceph/qa/suites/fs/thrash/ceph-thrash/mon.yaml | 2 +- .../thrash/msgr-failures/osd-mds-delay.yaml | 2 +- .../old_client/tasks/0-mimic.yaml | 2 +- .../old_client/tasks/2-upgrade.yaml | 2 +- .../tasks/3-compat_client/mimic.yaml | 2 +- .../upgraded_client/tasks/0-mimic.yaml | 2 +- .../upgraded_client/tasks/2-upgrade.yaml | 2 +- .../tasks/4-compat_client.yaml | 2 +- .../volumes/import-legacy/tasks/0-mimic.yaml | 2 +- .../import-legacy/tasks/2-upgrade.yaml | 2 +- .../volumes/import-legacy/tasks/3-verify.yaml | 2 +- .../suites/fs/verify/validater/valgrind.yaml | 3 +- .../tasks/kclient_workunit_suites_ffsb.yaml | 2 +- .../kcephfs/recovery/tasks/auto-repair.yaml | 2 +- .../kcephfs/recovery/tasks/client-limits.yaml | 2 +- .../recovery/tasks/client-recovery.yaml | 2 +- .../suites/kcephfs/recovery/tasks/damage.yaml | 2 +- .../kcephfs/recovery/tasks/data-scan.yaml | 2 +- .../kcephfs/recovery/tasks/failover.yaml | 2 +- .../kcephfs/recovery/tasks/forward-scrub.yaml | 2 +- .../recovery/tasks/journal-repair.yaml | 2 +- .../kcephfs/recovery/tasks/mds-full.yaml | 2 +- .../kcephfs/recovery/tasks/sessionmap.yaml | 2 +- .../kcephfs/recovery/tasks/volume-client.yaml | 2 +- .../kcephfs/thrash/thrashers/default.yaml | 2 +- .../suites/kcephfs/thrash/thrashers/mds.yaml | 2 +- .../suites/kcephfs/thrash/thrashers/mon.yaml | 2 +- .../kclient_workunit_suites_ffsb.yaml | 2 +- .../striping/default/msgr-failures/few.yaml | 2 +- .../striping/default/msgr-failures/many.yaml | 2 +- .../fsx/striping/fancy/msgr-failures/few.yaml | 2 +- .../krbd/rbd-nomount/msgr-failures/few.yaml | 2 +- .../krbd/rbd-nomount/msgr-failures/many.yaml | 2 +- .../tasks/krbd_udev_netlink_enobufs.yaml | 2 +- .../qa/suites/krbd/rbd/msgr-failures/few.yaml | 2 +- .../suites/krbd/rbd/msgr-failures/many.yaml | 2 +- 
.../krbd/singleton/msgr-failures/few.yaml | 2 +- .../krbd/singleton/msgr-failures/many.yaml | 2 +- .../suites/krbd/thrash/thrashers/backoff.yaml | 2 +- .../krbd/thrash/thrashers/mon-thrasher.yaml | 2 +- .../suites/krbd/thrash/thrashers/pggrow.yaml | 2 +- .../suites/krbd/thrash/thrashers/upmap.yaml | 2 +- .../krbd/wac/wac/verify/many-resets.yaml | 2 +- .../basic/tasks/cephfs_test_exports.yaml | 2 +- .../basic/tasks/cephfs_test_snapshots.yaml | 2 +- ceph/qa/suites/perf-basic/ceph.yaml | 2 +- .../powercycle/osd/tasks/rados_api_tests.yaml | 2 +- .../powercycle/osd/whitelist_health.yaml | 2 +- .../suites/rados/basic/msgr-failures/few.yaml | 2 +- .../rados/basic/msgr-failures/many.yaml | 2 +- .../rados/basic/tasks/rados_api_tests.yaml | 2 +- .../rados/basic/tasks/rados_python.yaml | 2 +- .../rados/basic/tasks/rados_stress_watch.yaml | 2 +- .../tasks/rados_workunit_loadgen_big.yaml | 2 +- .../tasks/rados_workunit_loadgen_mix.yaml | 2 +- .../rados_workunit_loadgen_mostlyread.yaml | 2 +- .../suites/rados/basic/tasks/repair_test.yaml | 2 +- .../suites/rados/basic/tasks/scrub_test.yaml | 2 +- .../orchestrator_cli/orchestrator_cli.yaml | 2 +- .../rados/dashboard/tasks/dashboard.yaml | 3 +- ceph/qa/suites/rados/mgr/tasks/crash.yaml | 2 +- ceph/qa/suites/rados/mgr/tasks/failover.yaml | 2 +- ceph/qa/suites/rados/mgr/tasks/insights.yaml | 2 +- .../rados/mgr/tasks/module_selftest.yaml | 2 +- ceph/qa/suites/rados/mgr/tasks/progress.yaml | 2 +- .../qa/suites/rados/mgr/tasks/prometheus.yaml | 2 +- ceph/qa/suites/rados/mgr/tasks/workunits.yaml | 2 +- ceph/qa/suites/rados/monthrash/ceph.yaml | 2 +- .../rados/monthrash/msgr-failures/few.yaml | 2 +- .../monthrash/msgr-failures/mon-delay.yaml | 2 +- .../monthrash/thrashers/force-sync-many.yaml | 2 +- .../rados/monthrash/thrashers/many.yaml | 2 +- .../suites/rados/monthrash/thrashers/one.yaml | 2 +- .../rados/monthrash/thrashers/sync-many.yaml | 2 +- .../rados/monthrash/thrashers/sync.yaml | 2 +- .../workloads/pool-create-delete.yaml | 2 +- .../rados/monthrash/workloads/rados_5925.yaml | 2 +- .../monthrash/workloads/rados_api_tests.yaml | 2 +- .../workloads/rados_mon_osdmap_prune.yaml | 2 +- .../workloads/rados_mon_workunits.yaml | 2 +- .../rados/multimon/msgr-failures/few.yaml | 2 +- .../rados/multimon/msgr-failures/many.yaml | 2 +- .../multimon/tasks/mon_clock_no_skews.yaml | 2 +- .../multimon/tasks/mon_clock_with_skews.yaml | 2 +- .../rados/multimon/tasks/mon_recovery.yaml | 2 +- .../backends/ceph_objectstore_tool.yaml | 2 +- ceph/qa/suites/rados/perf/ceph.yaml | 2 +- ceph/qa/suites/rados/rest/mgr-restful.yaml | 2 +- .../singleton-bluestore/all/cephtool.yaml | 2 +- .../all/admin_socket_output.yaml | 2 +- .../rados/singleton-nomsgr/all/balancer.yaml | 2 +- .../singleton-nomsgr/all/cache-fs-trunc.yaml | 2 +- .../all/ceph-kvstore-tool.yaml | 2 +- .../all/export-after-evict.yaml | 2 +- .../singleton-nomsgr/all/full-tiering.yaml | 2 +- .../singleton-nomsgr/all/health-warnings.yaml | 2 +- .../all/large-omap-object-warnings.yaml | 2 +- .../all/lazy_omap_stats_output.yaml | 2 +- .../all/librados_hello_world.yaml | 2 +- .../all/multi-backfill-reject.yaml | 2 +- .../singleton-nomsgr/all/osd_stale_reads.yaml | 2 +- .../all/recovery-unfound-found.yaml | 2 +- .../all/version-number-sanity.yaml | 2 +- .../suites/rados/singleton/all/deduptool.yaml | 2 +- .../rados/singleton/all/divergent_priors.yaml | 2 +- .../singleton/all/divergent_priors2.yaml | 2 +- .../rados/singleton/all/dump-stuck.yaml | 2 +- .../rados/singleton/all/ec-lost-unfound.yaml | 2 +- 
.../singleton/all/lost-unfound-delete.yaml | 2 +- .../rados/singleton/all/lost-unfound.yaml | 2 +- .../all/max-pg-per-osd.from-mon.yaml | 2 +- .../all/max-pg-per-osd.from-primary.yaml | 2 +- .../all/max-pg-per-osd.from-replica.yaml | 2 +- .../rados/singleton/all/mon-auth-caps.yaml | 2 +- .../singleton/all/mon-config-key-caps.yaml | 2 +- ...mon-memory-target-compliance.yaml.disabled | 2 +- .../rados/singleton/all/osd-backfill.yaml | 2 +- .../all/osd-recovery-incomplete.yaml | 2 +- .../rados/singleton/all/osd-recovery.yaml | 2 +- ceph/qa/suites/rados/singleton/all/peer.yaml | 2 +- .../rados/singleton/all/pg-autoscaler.yaml | 2 +- .../all/pg-removal-interruption.yaml | 2 +- .../suites/rados/singleton/all/radostool.yaml | 2 +- .../rados/singleton/all/random-eio.yaml | 2 +- .../rados/singleton/all/rebuild-mondb.yaml | 2 +- .../singleton/all/recovery-preemption.yaml | 2 +- .../singleton/all/resolve_stuck_peering.yaml | 2 +- .../rados/singleton/all/test-crash.yaml | 2 +- .../all/test_envlibrados_for_rocksdb.yaml | 2 +- .../singleton/all/thrash-backfill-full.yaml | 2 +- .../rados/singleton/all/thrash-eio.yaml | 2 +- .../all/thrash-rados/thrash-rados.yaml | 2 +- .../thrash_cache_writeback_proxy_none.yaml | 2 +- .../all/watch-notify-same-primary.yaml | 2 +- .../rados/singleton/msgr-failures/few.yaml | 2 +- .../rados/singleton/msgr-failures/many.yaml | 2 +- .../thrashers/careful.yaml | 2 +- .../thrashers/default.yaml | 2 +- .../thrashers/fastread.yaml | 2 +- .../thrashers/mapgap.yaml | 2 +- .../thrashers/morepggrow.yaml | 2 +- .../thrashers/pggrow.yaml | 2 +- .../thrashers/careful.yaml | 2 +- .../thrashers/default.yaml | 2 +- .../thrashers/careful.yaml | 2 +- .../thrashers/default.yaml | 2 +- .../thrashers/fastread.yaml | 2 +- .../thrashers/minsize_recovery.yaml | 2 +- .../thrashers/morepggrow.yaml | 2 +- .../thrash-erasure-code/thrashers/pggrow.yaml | 2 +- .../1-install/jewel-v1only.yaml | 2 +- .../thrash-old-clients/1-install/jewel.yaml | 2 +- .../1-install/luminous-v1only.yaml | 2 +- .../1-install/luminous.yaml | 2 +- .../1-install/mimic-v1only.yaml | 2 +- .../thrash-old-clients/1-install/mimic.yaml | 2 +- .../1-install/nautilus-v1only.yaml | 2 +- .../1-install/nautilus-v2only.yaml | 2 +- .../1-install/nautilus.yaml | 2 +- .../msgr-failures/fastclose.yaml | 2 +- .../thrash-old-clients/msgr-failures/few.yaml | 2 +- .../msgr-failures/osd-delay.yaml | 2 +- .../thrash-old-clients/thrashers/careful.yaml | 2 +- .../thrash-old-clients/thrashers/default.yaml | 2 +- .../thrash-old-clients/thrashers/mapgap.yaml | 2 +- .../thrashers/morepggrow.yaml | 2 +- .../thrash-old-clients/thrashers/pggrow.yaml | 2 +- .../workloads/cache-snaps.yaml | 2 +- .../crc-failures/bad_map_crc_failure.yaml | 2 +- .../rados/thrash/msgr-failures/fastclose.yaml | 2 +- .../rados/thrash/msgr-failures/few.yaml | 2 +- .../rados/thrash/msgr-failures/osd-delay.yaml | 2 +- .../rados/thrash/thrashers/careful.yaml | 2 +- .../rados/thrash/thrashers/default.yaml | 2 +- .../suites/rados/thrash/thrashers/mapgap.yaml | 2 +- .../rados/thrash/thrashers/morepggrow.yaml | 2 +- .../suites/rados/thrash/thrashers/pggrow.yaml | 2 +- .../thrash/workloads/cache-agent-big.yaml | 2 +- .../thrash/workloads/cache-agent-small.yaml | 2 +- .../workloads/cache-pool-snaps-readproxy.yaml | 2 +- .../thrash/workloads/cache-pool-snaps.yaml | 2 +- .../workloads/cache-snaps-balanced.yaml | 2 +- .../rados/thrash/workloads/cache-snaps.yaml | 2 +- .../suites/rados/thrash/workloads/cache.yaml | 2 +- .../thrash/workloads/rados_api_tests.yaml | 2 +- 
.../suites/rados/valgrind-leaks/1-start.yaml | 3 +- .../verify/d-thrash/default/default.yaml | 2 +- .../rados/verify/msgr-failures/few.yaml | 2 +- .../rados/verify/tasks/mon_recovery.yaml | 2 +- .../rados/verify/tasks/rados_api_tests.yaml | 2 +- .../rados/verify/validater/valgrind.yaml | 3 +- ceph/qa/suites/rbd/basic/cachepool/small.yaml | 2 +- .../suites/rbd/basic/msgr-failures/few.yaml | 2 +- .../basic/tasks/rbd_api_tests_old_format.yaml | 2 +- .../rbd_python_api_tests_old_format.yaml | 2 +- ceph/qa/suites/rbd/cli/msgr-failures/few.yaml | 2 +- ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml | 2 +- .../suites/rbd/cli/pool/small-cache-pool.yaml | 2 +- .../suites/rbd/cli_v1/msgr-failures/few.yaml | 2 +- .../rbd/cli_v1/pool/small-cache-pool.yaml | 2 +- .../suites/rbd/librbd/msgr-failures/few.yaml | 2 +- .../rbd/librbd/pool/small-cache-pool.yaml | 2 +- .../rbd/librbd/workloads/c_api_tests.yaml | 2 +- .../workloads/c_api_tests_with_defaults.yaml | 2 +- .../c_api_tests_with_journaling.yaml | 2 +- ...> rbd-mirror-journal-stress-workunit.yaml} | 2 + ...apshot-stress-workunit-exclusive-lock.yaml | 16 + ...or-snapshot-stress-workunit-fast-diff.yaml | 16 + ...rror-snapshot-stress-workunit-minimum.yaml | 16 + .../qa/suites/rbd/qemu/msgr-failures/few.yaml | 2 +- .../suites/rbd/qemu/pool/ec-cache-pool.yaml | 2 +- .../rbd/qemu/pool/small-cache-pool.yaml | 2 +- .../singleton-bluestore/all/issue-20295.yaml | 2 +- .../suites/rbd/singleton/all/rbd_mirror.yaml | 2 +- .../suites/rbd/singleton/all/rbd_tasks.yaml | 2 +- .../suites/rbd/thrash/msgr-failures/few.yaml | 2 +- .../qa/suites/rbd/thrash/thrashers/cache.yaml | 2 +- .../suites/rbd/thrash/thrashers/default.yaml | 2 +- .../rbd/thrash/workloads/rbd_api_tests.yaml | 2 +- .../workloads/rbd_api_tests_copy_on_read.yaml | 2 +- .../workloads/rbd_api_tests_journaling.yaml | 2 +- .../workloads/rbd_api_tests_no_locking.yaml | 2 +- .../rbd/valgrind/validator/memcheck.yaml | 1 - .../rbd/valgrind/workloads/c_api_tests.yaml | 2 +- .../workloads/c_api_tests_with_defaults.yaml | 2 +- .../c_api_tests_with_journaling.yaml | 2 +- .../rbd/valgrind/workloads/rbd_mirror.yaml | 2 +- ceph/qa/suites/rgw/multisite/valgrind.yaml | 1 - .../suites/rgw/verify/msgr-failures/few.yaml | 2 +- .../suites/rgw/verify/validater/valgrind.yaml | 1 - ceph/qa/suites/rgw/website/overrides.yaml | 1 - .../suites/smoke/basic/tasks/mon_thrash.yaml | 2 +- .../smoke/basic/tasks/rados_api_tests.yaml | 2 +- .../suites/smoke/basic/tasks/rados_bench.yaml | 2 +- .../smoke/basic/tasks/rados_cache_snaps.yaml | 2 +- .../smoke/basic/tasks/rados_ec_snaps.yaml | 2 +- .../smoke/basic/tasks/rados_python.yaml | 2 +- .../tasks/rados_workunit_loadgen_mix.yaml | 2 +- .../smoke/basic/tasks/rbd_api_tests.yaml | 2 +- ceph/qa/suites/smoke/basic/tasks/rbd_fsx.yaml | 2 +- .../stress/thrash/thrashers/default.yaml | 2 +- .../suites/stress/thrash/thrashers/fast.yaml | 2 +- .../stress/thrash/thrashers/more-down.yaml | 2 +- .../teuthology/multi-cluster/all/upgrade.yaml | 2 +- .../suites/tgt/basic/msgr-failures/few.yaml | 2 +- .../suites/tgt/basic/msgr-failures/many.yaml | 2 +- .../octopus-client-x/rbd/0-cluster/start.yaml | 2 +- .../3-workload/rbd_notification_tests.yaml | 2 +- .../mimic-x-singleton/1-install/mimic.yaml | 2 +- .../mimic-x-singleton/3-thrash/default.yaml | 2 +- .../mimic-x-singleton/6-finish-upgrade.yaml | 2 +- .../mimic-x/parallel/0-cluster/start.yaml | 2 +- .../parallel/1-ceph-install/mimic.yaml | 2 +- .../6-final-workload/rados_mon_thrash.yaml | 2 +- .../3-thrash/default.yaml | 2 +- 
.../mimic-x/stress-split/0-cluster/start.yaml | 2 +- .../stress-split/3-thrash/default.yaml | 2 +- .../1-install/nautilus.yaml | 2 +- .../3-thrash/default.yaml | 2 +- .../6-finish-upgrade.yaml | 2 +- .../nautilus-x/parallel/0-cluster/start.yaml | 2 +- .../parallel/1-ceph-install/nautilus.yaml | 2 +- .../5-final-workload/rados_mon_thrash.yaml | 2 +- .../3-thrash/default.yaml | 2 +- .../stress-split/0-cluster/start.yaml | 2 +- .../stress-split/1-ceph-install/nautilus.yaml | 2 +- .../stress-split/3-thrash/default.yaml | 2 +- .../point-to-point-upgrade.yaml | 2 +- .../0-cluster/start.yaml | 2 +- .../3-thrash/default.yaml | 2 +- .../thrashosds-health.yaml | 2 +- ceph/qa/tasks/ceph.py | 8 +- ceph/qa/tasks/cephadm.py | 4 +- ceph/qa/tasks/cephfs/filesystem.py | 38 +- ceph/qa/tasks/cephfs/mount.py | 25 + ceph/qa/tasks/cephfs/test_volume_client.py | 439 +++++++- ceph/qa/tasks/cephfs/test_volumes.py | 960 +++++++++++++++++- ceph/qa/tasks/mgr/dashboard/helper.py | 40 +- ceph/qa/tasks/mgr/dashboard/test_auth.py | 41 +- ceph/qa/tasks/mgr/dashboard/test_ganesha.py | 4 +- ceph/qa/tasks/mgr/dashboard/test_requests.py | 4 + ceph/qa/tasks/mgr/dashboard/test_rgw.py | 12 +- ceph/qa/tasks/mgr/dashboard/test_user.py | 37 +- ceph/qa/tasks/mgr/mgr_test_case.py | 12 +- ceph/qa/tasks/mgr/test_prometheus.py | 1 + ceph/qa/tasks/osd_failsafe_enospc.py | 4 +- ceph/qa/tasks/repair_test.py | 4 +- ceph/qa/tasks/scrub_test.py | 2 +- ceph/qa/tasks/thrashosds-health.yaml | 2 +- ceph/qa/tasks/vstart_runner.py | 59 +- ceph/qa/valgrind.supp | 8 + ceph/qa/workunits/rbd/rbd_mirror_helpers.sh | 7 +- ceph/qa/workunits/rbd/rbd_mirror_stress.sh | 36 +- ceph/src/.git_version | 4 +- ceph/src/ceph-volume/ceph_volume/api/lvm.py | 26 +- .../ceph_volume/devices/lvm/batch.py | 2 +- .../ceph_volume/drive_group/main.py | 2 +- .../ceph_volume/tests/api/test_lvm.py | 33 +- .../ceph-volume/ceph_volume/util/device.py | 2 +- ceph/src/ceph-volume/ceph_volume/util/disk.py | 2 +- ceph/src/cephadm/cephadm | 12 +- ceph/src/cephadm/tests/test_cephadm.py | 4 +- ceph/src/common/legacy_config_opts.h | 2 + ceph/src/common/options.cc | 14 +- ceph/src/include/rbd/librbd.h | 8 + ceph/src/include/rbd/librbd.hpp | 2 + ceph/src/librbd/Operations.cc | 49 +- ceph/src/librbd/api/DiffIterate.cc | 15 +- ceph/src/librbd/api/Mirror.cc | 92 +- ceph/src/librbd/api/Mirror.h | 5 +- ceph/src/librbd/deep_copy/ImageCopyRequest.cc | 9 +- .../src/librbd/deep_copy/ObjectCopyRequest.cc | 274 +++-- ceph/src/librbd/deep_copy/ObjectCopyRequest.h | 24 +- ceph/src/librbd/deep_copy/Types.h | 5 + ceph/src/librbd/io/CopyupRequest.cc | 7 +- ceph/src/librbd/librbd.cc | 29 +- ceph/src/librbd/mirror/GetStatusRequest.cc | 6 +- .../mirror/snapshot/SetImageStateRequest.cc | 40 +- .../mirror/snapshot/SetImageStateRequest.h | 6 + ceph/src/librbd/object_map/DiffRequest.cc | 61 +- ceph/src/librbd/object_map/DiffRequest.h | 3 +- ceph/src/librbd/object_map/Types.h | 7 +- .../operation/DisableFeaturesRequest.cc | 11 +- ceph/src/librbd/operation/MigrateRequest.cc | 7 +- ceph/src/librbd/operation/RenameRequest.cc | 49 +- ceph/src/librbd/operation/RenameRequest.h | 8 +- ceph/src/mgr/BaseMgrModule.cc | 22 +- ceph/src/mon/MonCap.cc | 2 +- ceph/src/os/bluestore/BlueFS.cc | 169 ++- ceph/src/os/bluestore/BlueFS.h | 39 +- ceph/src/os/bluestore/BlueRocksEnv.cc | 2 +- ceph/src/os/bluestore/BlueStore.cc | 17 + ceph/src/pybind/ceph_volume_client.py | 52 +- ceph/src/pybind/mgr/ceph_module.pyi | 2 +- ceph/src/pybind/mgr/cephadm/module.py | 2 +- .../mgr/cephadm/services/cephadmservice.py | 3 +- 
ceph/src/pybind/mgr/cephadm/services/iscsi.py | 2 +- .../pybind/mgr/cephadm/tests/test_cephadm.py | 3 +- .../pybind/mgr/cephadm/tests/test_services.py | 2 +- ceph/src/pybind/mgr/dashboard/constraints.txt | 2 +- .../pybind/mgr/dashboard/controllers/auth.py | 5 +- .../mgr/dashboard/controllers/cephfs.py | 2 + .../controllers/cluster_configuration.py | 3 + .../mgr/dashboard/controllers/prometheus.py | 13 +- .../cluster/configuration.e2e-spec.ts | 2 +- .../dist/en-US/1.a08d918239b8b76c4810.js | 1 - .../dist/en-US/1.ecdc99fc68ced4743e9f.js | 1 + .../dashboard/frontend/dist/en-US/index.html | 5 +- ...eda88e.js => main.d1dfb2abcfb79d8a0eaa.js} | 4 +- ... main.d1dfb2abcfb79d8a0eaa.js.LICENSE.txt} | 0 ...a71.js => runtime.b3395e26e2e5a50aa6e9.js} | 2 +- .../dashboard/frontend/src/app/app.module.ts | 5 + .../rbd-details/rbd-details.component.html | 4 +- .../rgw-user-form.component.spec.ts | 8 +- .../navigation/navigation.component.html | 4 +- .../app/shared/api/rgw-user.service.spec.ts | 20 +- .../src/app/shared/api/rgw-user.service.ts | 13 +- .../services/prometheus-alert.service.spec.ts | 22 + .../services/prometheus-alert.service.ts | 6 + .../mgr/dashboard/frontend/src/index.html | 3 +- ceph/src/pybind/mgr/dashboard/module.py | 2 +- .../mgr/dashboard/run-frontend-e2e-tests.sh | 8 +- .../mgr/dashboard/services/access_control.py | 35 +- .../src/pybind/mgr/dashboard/services/auth.py | 9 +- .../mgr/dashboard/services/iscsi_cli.py | 9 +- ceph/src/pybind/mgr/dashboard/settings.py | 31 +- .../pybind/mgr/dashboard/tests/__init__.py | 4 +- .../dashboard/tests/test_access_control.py | 46 +- .../pybind/mgr/dashboard/tests/test_auth.py | 20 + .../pybind/mgr/dashboard/tests/test_iscsi.py | 18 +- .../mgr/dashboard/tests/test_prometheus.py | 14 +- .../mgr/dashboard/tests/test_settings.py | 44 +- ceph/src/pybind/mgr/mgr_module.py | 39 +- ceph/src/pybind/mgr/prometheus/module.py | 3 + .../rbd_support/mirror_snapshot_schedule.py | 462 ++++++++- ceph/src/pybind/mgr/rbd_support/module.py | 1 + ceph/src/pybind/mgr/tests/__init__.py | 4 +- .../src/pybind/mgr/volumes/fs/async_cloner.py | 8 +- ceph/src/pybind/mgr/volumes/fs/exception.py | 26 + ceph/src/pybind/mgr/volumes/fs/fs_util.py | 28 + .../mgr/volumes/fs/operations/access.py | 139 +++ .../pybind/mgr/volumes/fs/operations/group.py | 4 +- .../mgr/volumes/fs/operations/rankevicter.py | 114 +++ .../mgr/volumes/fs/operations/subvolume.py | 16 +- .../mgr/volumes/fs/operations/template.py | 4 + .../fs/operations/versions/__init__.py | 14 +- .../fs/operations/versions/auth_metadata.py | 208 ++++ .../fs/operations/versions/subvolume_base.py | 5 +- .../fs/operations/versions/subvolume_v1.py | 427 +++++++- .../fs/operations/versions/subvolume_v2.py | 7 + .../mgr/volumes/fs/operations/volume.py | 11 + ceph/src/pybind/mgr/volumes/fs/purge_queue.py | 2 +- ceph/src/pybind/mgr/volumes/fs/volume.py | 117 ++- ceph/src/pybind/mgr/volumes/module.py | 80 ++ ceph/src/pybind/mgr/zabbix/module.py | 2 +- .../src/pybind/mgr/zabbix/zabbix_template.xml | 2 +- ceph/src/pybind/rados/rados.pxd | 1 + ceph/src/pybind/rados/rados.pyx | 206 +++- ceph/src/pybind/rbd/rbd.pyx | 271 ++++- ceph/src/rgw/CMakeLists.txt | 4 + ceph/src/rgw/rgw_putobj_processor.cc | 3 +- ceph/src/rgw/rgw_user.cc | 9 +- .../deep_copy/test_mock_ImageCopyRequest.cc | 2 +- .../deep_copy/test_mock_ObjectCopyRequest.cc | 206 +++- .../test/librbd/io/test_mock_CopyupRequest.cc | 5 +- .../object_map/test_mock_DiffRequest.cc | 19 +- ceph/src/test/librbd/test_DeepCopy.cc | 4 +- ceph/src/test/librbd/test_Migration.cc | 7 +- 
ceph/src/test/librbd/test_librbd.cc | 16 +- ceph/src/test/librbd/test_mirroring.cc | 63 ++ ceph/src/test/mgr/mgr-dashboard-smoke.sh | 5 +- ceph/src/test/objectstore/test_bluefs.cc | 59 +- ceph/src/test/pybind/test_rados.py | 94 +- ceph/src/test/pybind/test_rbd.py | 90 ++ .../src/test/rbd_mirror/test_ImageReplayer.cc | 2 +- ceph/src/tools/rbd_mirror/ImageReplayer.cc | 16 +- .../image_replayer/snapshot/Replayer.cc | 12 +- ceph/src/vstart.sh | 5 +- 454 files changed, 5827 insertions(+), 1031 deletions(-) create mode 100644 ceph/.github/labeler.yml create mode 100644 ceph/.github/milestone.yml create mode 100644 ceph/.github/workflows/pr-triage.yml rename ceph/qa/suites/rbd/mirror-thrash/workloads/{rbd-mirror-stress-workunit.yaml => rbd-mirror-journal-stress-workunit.yaml} (85%) create mode 100644 ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-exclusive-lock.yaml create mode 100644 ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-fast-diff.yaml create mode 100644 ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-minimum.yaml delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/1.a08d918239b8b76c4810.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/1.ecdc99fc68ced4743e9f.js rename ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/{main.8b6127522c6248eda88e.js => main.d1dfb2abcfb79d8a0eaa.js} (55%) rename ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/{main.8b6127522c6248eda88e.js.LICENSE.txt => main.d1dfb2abcfb79d8a0eaa.js.LICENSE.txt} (100%) rename ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/{runtime.c67af31016ccc1ccaa71.js => runtime.b3395e26e2e5a50aa6e9.js} (94%) create mode 100644 ceph/src/pybind/mgr/dashboard/tests/test_auth.py create mode 100644 ceph/src/pybind/mgr/volumes/fs/operations/access.py create mode 100644 ceph/src/pybind/mgr/volumes/fs/operations/rankevicter.py create mode 100644 ceph/src/pybind/mgr/volumes/fs/operations/versions/auth_metadata.py diff --git a/ceph/.github/CODEOWNERS b/ceph/.github/CODEOWNERS index 5b1139f7f..f14a3347d 100644 --- a/ceph/.github/CODEOWNERS +++ b/ceph/.github/CODEOWNERS @@ -3,8 +3,7 @@ /qa/suites/rados/dashboard @ceph/dashboard /qa/tasks/mgr/test_dashboard.py @ceph/dashboard /qa/tasks/mgr/dashboard @ceph/dashboard -/monitoring/grafana @ceph/dashboard -/monitoring/prometheus @ceph/dashboard +/monitoring @ceph/dashboard /doc/mgr/dashboard.rst @ceph/dashboard # Dashboard API team diff --git a/ceph/.github/labeler.yml b/ceph/.github/labeler.yml new file mode 100644 index 000000000..d4f91584c --- /dev/null +++ b/ceph/.github/labeler.yml @@ -0,0 +1,10 @@ +dashboard: + - /src/pybind/mgr/dashboard/** + - /qa/suites/rados/dashboard/** + - /qa/tasks/mgr/test_dashboard.py + - /qa/tasks/mgr/dashboard/** + - /monitoring/** + - /doc/mgr/dashboard.rst + +CI: + - /.github/** diff --git a/ceph/.github/milestone.yml b/ceph/.github/milestone.yml new file mode 100644 index 000000000..d42069501 --- /dev/null +++ b/ceph/.github/milestone.yml @@ -0,0 +1,6 @@ +base-branch: + - "(luminous)" + - "(nautilus)" + - "(octopus)" + - "(pacific)" + - "(quincy)" diff --git a/ceph/.github/workflows/pr-triage.yml b/ceph/.github/workflows/pr-triage.yml new file mode 100644 index 000000000..31791d4ae --- /dev/null +++ b/ceph/.github/workflows/pr-triage.yml @@ -0,0 +1,15 @@ +--- +name: "Pull Request Triage" +on: pull_request_target +jobs: + pr-triage: + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@9794b1493b6f1fa7b006c5f8635a19c76c98be95 
+ with: + sync-labels: '' + repo-token: "${{ secrets.GITHUB_TOKEN }}" + - uses: iyu/actions-milestone@dbf7e5348844c9ddc6b803a5721b85fa70fe3bb9 + with: + configuration-path: .github/milestone.yml + repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/ceph/CMakeLists.txt b/ceph/CMakeLists.txt index 0de31a5a8..e94f5da15 100644 --- a/ceph/CMakeLists.txt +++ b/ceph/CMakeLists.txt @@ -668,4 +668,4 @@ add_custom_target(tags DEPENDS ctags) find_package(CppCheck) find_package(IWYU) -set(VERSION 15.2.9) +set(VERSION 15.2.10) diff --git a/ceph/PendingReleaseNotes b/ceph/PendingReleaseNotes index 9715be569..cf62599de 100644 --- a/ceph/PendingReleaseNotes +++ b/ceph/PendingReleaseNotes @@ -3,6 +3,10 @@ * MGR: progress module can now be turned on/off, using the commands: ``ceph progress on`` and ``ceph progress off``. +* New bluestore_rocksdb_options_annex config parameter. Complements + bluestore_rocksdb_options and allows setting rocksdb options without repeating + the existing defaults. + 15.2.8 ------ * $pid expansion in config paths like `admin_socket` will now properly expand diff --git a/ceph/alpine/APKBUILD b/ceph/alpine/APKBUILD index 2bf1923ce..f2b431e71 100644 --- a/ceph/alpine/APKBUILD +++ b/ceph/alpine/APKBUILD @@ -1,7 +1,7 @@ # Contributor: John Coyle # Maintainer: John Coyle pkgname=ceph -pkgver=15.2.9 +pkgver=15.2.10 pkgrel=0 pkgdesc="Ceph is a distributed object store and file system" pkgusers="ceph" @@ -63,7 +63,7 @@ makedepends=" xmlstarlet yasm " -source="ceph-15.2.9.tar.bz2" +source="ceph-15.2.10.tar.bz2" subpackages=" $pkgname-base $pkgname-common @@ -116,7 +116,7 @@ _sysconfdir=/etc _udevrulesdir=/etc/udev/rules.d _python_sitelib=/usr/lib/python2.7/site-packages -builddir=$srcdir/ceph-15.2.9 +builddir=$srcdir/ceph-15.2.10 build() { export CEPH_BUILD_VIRTUALENV=$builddir diff --git a/ceph/ceph.spec b/ceph/ceph.spec index 6e5bf4a8e..09bfda181 100644 --- a/ceph/ceph.spec +++ b/ceph/ceph.spec @@ -98,7 +98,7 @@ # main package definition ################################################################################# Name: ceph -Version: 15.2.9 +Version: 15.2.10 Release: 0%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 @@ -114,7 +114,7 @@ License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD- Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-15.2.9.tar.bz2 +Source0: %{?_remote_tarball_prefix}ceph-15.2.10.tar.bz2 %if 0%{?suse_version} # _insert_obs_source_lines_here ExclusiveArch: x86_64 aarch64 ppc64le s390x @@ -1140,7 +1140,7 @@ This package provides Ceph’s default alerts for Prometheus. 
# common ################################################################################# %prep -%autosetup -p1 -n ceph-15.2.9 +%autosetup -p1 -n ceph-15.2.10 %build # LTO can be enabled as soon as the following GCC bug is fixed: diff --git a/ceph/changelog.upstream b/ceph/changelog.upstream index 51d93f8a0..9287b9a85 100644 --- a/ceph/changelog.upstream +++ b/ceph/changelog.upstream @@ -1,7 +1,13 @@ -ceph (15.2.9-1bionic) bionic; urgency=medium +ceph (15.2.10-1bionic) bionic; urgency=medium - -- Jenkins Build Slave User Tue, 23 Feb 2021 14:23:03 +0000 + -- Jenkins Build Slave User Wed, 17 Mar 2021 13:15:33 -0400 + +ceph (15.2.10-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Wed, 17 Mar 2021 17:02:38 +0000 ceph (15.2.9-1) stable; urgency=medium diff --git a/ceph/doc/cephfs/fs-volumes.rst b/ceph/doc/cephfs/fs-volumes.rst index dd38e38e4..6174a53ef 100644 --- a/ceph/doc/cephfs/fs-volumes.rst +++ b/ceph/doc/cephfs/fs-volumes.rst @@ -173,6 +173,24 @@ The command resizes the subvolume quota using the size specified by 'new_size'. The subvolume can be resized to an infinite size by passing 'inf' or 'infinite' as the new_size. +Authorize cephx auth IDs, the read/read-write access to fs subvolumes:: + + $ ceph fs subvolume authorize [--group_name=] [--access_level=] + +The 'access_level' takes 'r' or 'rw' as value. + +Deauthorize cephx auth IDs, the read/read-write access to fs subvolumes:: + + $ ceph fs subvolume deauthorize [--group_name=] + +List cephx auth IDs authorized to access fs subvolume:: + + $ ceph fs subvolume authorized_list [--group_name=] + +Evict fs clients based on auth ID and subvolume mounted:: + + $ ceph fs subvolume evict [--group_name=] + Fetch the absolute path of a subvolume using:: $ ceph fs subvolume getpath [--group_name ] diff --git a/ceph/doc/mgr/dashboard.rst b/ceph/doc/mgr/dashboard.rst index cdedfd8e8..31cbe585c 100644 --- a/ceph/doc/mgr/dashboard.rst +++ b/ceph/doc/mgr/dashboard.rst @@ -268,7 +268,7 @@ section. To create a user with the administrator role you can use the following commands:: - $ ceph dashboard ac-user-create administrator + $ ceph dashboard ac-user-create -i administrator Account Lock-out ^^^^^^^^^^^^^^^^ @@ -333,8 +333,8 @@ The credentials of an existing user can also be obtained by using Finally, provide the credentials to the dashboard:: - $ ceph dashboard set-rgw-api-access-key - $ ceph dashboard set-rgw-api-secret-key + $ ceph dashboard set-rgw-api-access-key -i + $ ceph dashboard set-rgw-api-secret-key -i In a typical default configuration with a single RGW endpoint, this is all you have to do to get the Object Gateway management functionality working. The @@ -396,7 +396,8 @@ To disable API SSL verification run the following command:: The available iSCSI gateways must be defined using the following commands:: $ ceph dashboard iscsi-gateway-list - $ ceph dashboard iscsi-gateway-add ://:@[:port] + $ # Gateway URL format for a new gateway: ://:@[:port] + $ ceph dashboard iscsi-gateway-add -i [] $ ceph dashboard iscsi-gateway-rm @@ -719,6 +720,19 @@ in order to manage silences. should not disturb each other through annoying duplicated notifications popping up. +If you are using a self-signed certificate in your Prometheus or your +Alertmanager setup, you should disable certificate verification in the +dashboard to avoid refused connections, e.g. caused by certificates signed by +unknown CA or not matching the host name. 
+ +- For Prometheus:: + + $ ceph dashboard set-prometheus-api-ssl-verify False + +- For Alertmanager:: + + $ ceph dashboard set-alertmanager-api-ssl-verify False + .. _dashboard-user-role-management: User and Role Management @@ -795,7 +809,7 @@ We provide a set of CLI commands to manage user accounts: - *Create User*:: - $ ceph dashboard ac-user-create [--enabled] [--force-password] [--pwd_update_required] [] [] [] [] [] + $ ceph dashboard ac-user-create [--enabled] [--force-password] [--pwd_update_required] -i [] [] [] [] To bypass the password policy checks use the `force-password` option. Use the option `pwd_update_required` so that a newly created user has @@ -807,11 +821,11 @@ We provide a set of CLI commands to manage user accounts: - *Change Password*:: - $ ceph dashboard ac-user-set-password [--force-password] + $ ceph dashboard ac-user-set-password [--force-password] -i - *Change Password Hash*:: - $ ceph dashboard ac-user-set-password-hash + $ ceph dashboard ac-user-set-password-hash -i The hash must be a bcrypt hash and salt, e.g. ``$2b$12$Pt3Vq/rDt2y9glTPSV.VFegiLkQeIpddtkhoFetNApYmIJOY8gau2``. This can be used to import users from an external database. @@ -948,7 +962,7 @@ view and create Ceph pools, and have read-only access to any other scopes. 1. *Create the user*:: - $ ceph dashboard ac-user-create bob mypassword + $ ceph dashboard ac-user-create bob -i 2. *Create role and specify scope permissions*:: diff --git a/ceph/monitoring/prometheus/alerts/ceph_default_alerts.yml b/ceph/monitoring/prometheus/alerts/ceph_default_alerts.yml index b07ea80ea..d4a0b8209 100644 --- a/ceph/monitoring/prometheus/alerts/ceph_default_alerts.yml +++ b/ceph/monitoring/prometheus/alerts/ceph_default_alerts.yml @@ -175,30 +175,48 @@ groups: description: > Root volume (OSD and MON store) is dangerously full: {{ $value | humanize }}% free. - # alert on nic packet errors and drops rates > 1 packet/s + # alert on nic packet errors and drops rates > 1% packets/s - alert: network packets dropped - expr: irate(node_network_receive_drop_total{device!="lo"}[5m]) + irate(node_network_transmit_drop_total{device!="lo"}[5m]) > 1 + expr: | + ( + increase(node_network_receive_drop_total{device!="lo"}[1m]) + + increase(node_network_transmit_drop_total{device!="lo"}[1m]) + ) / ( + increase(node_network_receive_packets_total{device!="lo"}[1m]) + + increase(node_network_transmit_packets_total{device!="lo"}[1m]) + ) >= 0.0001 or ( + increase(node_network_receive_drop_total{device!="lo"}[1m]) + + increase(node_network_transmit_drop_total{device!="lo"}[1m]) + ) >= 10 labels: severity: warning type: ceph_default oid: 1.3.6.1.4.1.50495.15.1.2.8.2 annotations: description: > - Node {{ $labels.instance }} experiences packet drop > 1 - packet/s on interface {{ $labels.device }}. + Node {{ $labels.instance }} experiences packet drop > 0.01% or > + 10 packets/s on interface {{ $labels.device }}. 
- alert: network packet errors expr: | - irate(node_network_receive_errs_total{device!="lo"}[5m]) + - irate(node_network_transmit_errs_total{device!="lo"}[5m]) > 1 + ( + increase(node_network_receive_errs_total{device!="lo"}[1m]) + + increase(node_network_transmit_errs_total{device!="lo"}[1m]) + ) / ( + increase(node_network_receive_packets_total{device!="lo"}[1m]) + + increase(node_network_transmit_packets_total{device!="lo"}[1m]) + ) >= 0.0001 or ( + increase(node_network_receive_errs_total{device!="lo"}[1m]) + + increase(node_network_transmit_errs_total{device!="lo"}[1m]) + ) >= 10 labels: severity: warning type: ceph_default oid: 1.3.6.1.4.1.50495.15.1.2.8.3 annotations: description: > - Node {{ $labels.instance }} experiences packet errors > 1 - packet/s on interface {{ $labels.device }}. + Node {{ $labels.instance }} experiences packet errors > 0.01% or + > 10 packets/s on interface {{ $labels.device }}. - alert: storage filling up expr: | @@ -214,6 +232,17 @@ groups: will be full in less than 5 days assuming the average fill-up rate of the past 48 hours. + - alert: MTU Mismatch + expr: node_network_mtu_bytes{device!="lo"} != on() group_left() (quantile(0.5, node_network_mtu_bytes{device!="lo"})) + labels: + severity: warning + type: ceph_default + oid: 1.3.6.1.4.1.50495.15.1.2.8.5 + annotations: + description: > + Node {{ $labels.instance }} has a different MTU size ({{ $value }}) + than the median value on device {{ $labels.device }}. + - name: pools rules: - alert: pool full diff --git a/ceph/qa/cephfs/overrides/whitelist_health.yaml b/ceph/qa/cephfs/overrides/whitelist_health.yaml index 195562995..7f0d49eab 100644 --- a/ceph/qa/cephfs/overrides/whitelist_health.yaml +++ b/ceph/qa/cephfs/overrides/whitelist_health.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(FS_DEGRADED\) - \(MDS_FAILED\) diff --git a/ceph/qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml b/ceph/qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml index 48c1b837d..41ba84f04 100644 --- a/ceph/qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml +++ b/ceph/qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSD_DOWN\) - \(OSD_ diff --git a/ceph/qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml b/ceph/qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml index 1e96e6d8a..6a2b35a18 100644 --- a/ceph/qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml +++ b/ceph/qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - SLOW_OPS - slow request conf: diff --git a/ceph/qa/overrides/2-size-2-min-size.yaml b/ceph/qa/overrides/2-size-2-min-size.yaml index f667a6ae8..d46db3fad 100644 --- a/ceph/qa/overrides/2-size-2-min-size.yaml +++ b/ceph/qa/overrides/2-size-2-min-size.yaml @@ -4,5 +4,5 @@ overrides: global: osd_pool_default_size: 2 osd_pool_default_min_size: 2 - log-whitelist: + log-ignorelist: - \(REQUEST_STUCK\) diff --git a/ceph/qa/overrides/whitelist_wrongly_marked_down.yaml b/ceph/qa/overrides/whitelist_wrongly_marked_down.yaml index 4e21dc9b8..2ce7ffd97 100644 --- a/ceph/qa/overrides/whitelist_wrongly_marked_down.yaml +++ b/ceph/qa/overrides/whitelist_wrongly_marked_down.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running conf: mds: diff --git a/ceph/qa/releases/luminous.yaml b/ceph/qa/releases/luminous.yaml index 9ed76715a..768861c21 100644 --- 
a/ceph/qa/releases/luminous.yaml +++ b/ceph/qa/releases/luminous.yaml @@ -17,5 +17,5 @@ overrides: conf: mon: mon warn on osd down out interval zero: false - log-whitelist: + log-ignorelist: - no active mgr diff --git a/ceph/qa/suites/big/rados-thrash/thrashers/default.yaml b/ceph/qa/suites/big/rados-thrash/thrashers/default.yaml index 8f2b26674..41b35926f 100644 --- a/ceph/qa/suites/big/rados-thrash/thrashers/default.yaml +++ b/ceph/qa/suites/big/rados-thrash/thrashers/default.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost tasks: diff --git a/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml b/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml index 94d5cc6f3..3bff0ffe9 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - bad backtrace - object missing on disk - error reading table object diff --git a/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml b/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml index 90d0e7bcb..c401ffd1b 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/auto-repair.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - force file system read-only - bad backtrace - MDS in read-only mode diff --git a/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml b/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml index a86612149..09e666849 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - Replacing daemon mds - Scrub error on inode - Behind on trimming diff --git a/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml b/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml index 635d0b6d8..e1ea5c157 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/client-limits.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - responding to mclientcaps\(revoke\) - not advance its oldest_client_tid - failing to advance its oldest client/flush tid diff --git a/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml b/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml index d1cef8025..cdbbb31ee 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/client-recovery.yaml @@ -3,7 +3,7 @@ # to permit OSDs to complain about that. 
overrides: ceph: - log-whitelist: + log-ignorelist: - evicting unresponsive client - but it is still running - slow request diff --git a/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml b/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml index 9ae738f01..16d56ee2f 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/damage.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - bad backtrace - object missing on disk - error reading table object diff --git a/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml b/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml index 0a2eb0d43..c720d3d7f 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/data-scan.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - bad backtrace - object missing on disk - error reading table object diff --git a/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml b/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml index b92cf1052..73090662f 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - inode wrongly marked free - bad backtrace on inode - inode table repaired for inode diff --git a/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml b/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml index 66f819d06..71c06de07 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/journal-repair.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - bad backtrace on directory inode - error reading table object - Metadata damage detected diff --git a/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml b/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml index 7e57dc6b1..9ddccf837 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/mds-full.yaml @@ -3,7 +3,7 @@ overrides: ceph: cephfs_ec_profile: - disabled - log-whitelist: + log-ignorelist: - OSD full dropping all updates - OSD near full - pausewr flag diff --git a/ceph/qa/suites/fs/basic_functional/tasks/sessionmap/sessionmap.yaml b/ceph/qa/suites/fs/basic_functional/tasks/sessionmap/sessionmap.yaml index 1d72301bd..900fb4ecd 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/sessionmap/sessionmap.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/sessionmap/sessionmap.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - client session with non-allowable root tasks: diff --git a/ceph/qa/suites/fs/basic_functional/tasks/volume-client/task/test/test.yaml b/ceph/qa/suites/fs/basic_functional/tasks/volume-client/task/test/test.yaml index 2ad97a00a..61094215f 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/volume-client/task/test/test.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/volume-client/task/test/test.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - MON_DOWN tasks: - cephfs_test_runner: diff --git a/ceph/qa/suites/fs/basic_functional/tasks/volumes.yaml b/ceph/qa/suites/fs/basic_functional/tasks/volumes.yaml index 1315980ed..1e163b5c8 100644 --- a/ceph/qa/suites/fs/basic_functional/tasks/volumes.yaml +++ b/ceph/qa/suites/fs/basic_functional/tasks/volumes.yaml @@ -3,7 +3,7 @@ overrides: conf: mgr: 
debug client: 10 - log-whitelist: + log-ignorelist: - OSD full dropping all updates - OSD near full - pausewr flag diff --git a/ceph/qa/suites/fs/bugs/client_trim_caps/tasks/trim-i22073.yaml b/ceph/qa/suites/fs/bugs/client_trim_caps/tasks/trim-i22073.yaml index a86e918e6..bc5cea98f 100644 --- a/ceph/qa/suites/fs/bugs/client_trim_caps/tasks/trim-i22073.yaml +++ b/ceph/qa/suites/fs/bugs/client_trim_caps/tasks/trim-i22073.yaml @@ -4,7 +4,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - MDS cache is too large - \(MDS_CACHE_OVERSIZED\) tasks: diff --git a/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml b/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml index 564989d6d..40d63ba79 100644 --- a/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml +++ b/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml @@ -5,7 +5,7 @@ tasks: overrides: ceph: - log-whitelist: + log-ignorelist: - evicting unresponsive client - POOL_APP_NOT_ENABLED - has not responded to cap revoke by MDS for over diff --git a/ceph/qa/suites/fs/multifs/tasks/failover.yaml b/ceph/qa/suites/fs/multifs/tasks/failover.yaml index 2af99288b..1425da720 100644 --- a/ceph/qa/suites/fs/multifs/tasks/failover.yaml +++ b/ceph/qa/suites/fs/multifs/tasks/failover.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - Replacing daemon mds - \(MDS_INSUFFICIENT_STANDBY\) - \(MDS_ALL_DOWN\) diff --git a/ceph/qa/suites/fs/thrash/ceph-thrash/mds.yaml b/ceph/qa/suites/fs/thrash/ceph-thrash/mds.yaml index 8120ab046..33748cea5 100644 --- a/ceph/qa/suites/fs/thrash/ceph-thrash/mds.yaml +++ b/ceph/qa/suites/fs/thrash/ceph-thrash/mds.yaml @@ -3,5 +3,5 @@ tasks: overrides: ceph: - log-whitelist: + log-ignorelist: - Replacing daemon mds diff --git a/ceph/qa/suites/fs/thrash/ceph-thrash/mon.yaml b/ceph/qa/suites/fs/thrash/ceph-thrash/mon.yaml index fdc9dd968..fbbe16151 100644 --- a/ceph/qa/suites/fs/thrash/ceph-thrash/mon.yaml +++ b/ceph/qa/suites/fs/thrash/ceph-thrash/mon.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) tasks: diff --git a/ceph/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml b/ceph/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml index ec583e6d3..b4ca87f51 100644 --- a/ceph/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml +++ b/ceph/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml @@ -7,5 +7,5 @@ overrides: ms inject delay probability: .005 ms inject delay max: 1 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/0-mimic.yaml b/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/0-mimic.yaml index 41630e5a1..e521782a1 100644 --- a/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/0-mimic.yaml +++ b/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/0-mimic.yaml @@ -16,7 +16,7 @@ tasks: - ceph: mon_bind_addrvec: false mon_bind_msgr2: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(FS_ - \(MDS_ diff --git a/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/2-upgrade.yaml b/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/2-upgrade.yaml index 55470718b..cc72591e8 100644 --- a/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/2-upgrade.yaml +++ b/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/2-upgrade.yaml @@ -2,7 +2,7 @@ overrides: ceph: mon_bind_msgr2: false mon_bind_addrvec: false - log-whitelist: + 
log-ignorelist: - scrub mismatch - ScrubResult - wrongly marked diff --git a/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml b/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml index a3a51ffda..46577c38f 100644 --- a/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml +++ b/ceph/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - missing required features tasks: - exec: diff --git a/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/0-mimic.yaml b/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/0-mimic.yaml index 41630e5a1..e521782a1 100644 --- a/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/0-mimic.yaml +++ b/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/0-mimic.yaml @@ -16,7 +16,7 @@ tasks: - ceph: mon_bind_addrvec: false mon_bind_msgr2: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(FS_ - \(MDS_ diff --git a/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/2-upgrade.yaml b/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/2-upgrade.yaml index aeb17e56f..88ca2789a 100644 --- a/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/2-upgrade.yaml +++ b/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/2-upgrade.yaml @@ -2,7 +2,7 @@ overrides: ceph: mon_bind_msgr2: false mon_bind_addrvec: false - log-whitelist: + log-ignorelist: - scrub mismatch - ScrubResult - wrongly marked diff --git a/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml b/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml index a553f31d5..6b356a9d7 100644 --- a/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml +++ b/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - missing required features tasks: - exec: diff --git a/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/0-mimic.yaml b/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/0-mimic.yaml index 1ca8973bb..21178355f 100644 --- a/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/0-mimic.yaml +++ b/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/0-mimic.yaml @@ -17,7 +17,7 @@ tasks: - ceph: mon_bind_addrvec: false mon_bind_msgr2: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(FS_ - \(MDS_ diff --git a/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/2-upgrade.yaml b/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/2-upgrade.yaml index 715ef4887..3e4eecfde 100644 --- a/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/2-upgrade.yaml +++ b/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/2-upgrade.yaml @@ -2,7 +2,7 @@ overrides: ceph: mon_bind_msgr2: false mon_bind_addrvec: false - log-whitelist: + log-ignorelist: - scrub mismatch - ScrubResult - wrongly marked diff --git a/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/3-verify.yaml b/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/3-verify.yaml index 003409ca3..e14b48383 100644 --- a/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/3-verify.yaml +++ b/ceph/qa/suites/fs/upgrade/volumes/import-legacy/tasks/3-verify.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + 
log-ignorelist: - missing required features tasks: - exec: diff --git a/ceph/qa/suites/fs/verify/validater/valgrind.yaml b/ceph/qa/suites/fs/verify/validater/valgrind.yaml index bf10aee69..d614d01c5 100644 --- a/ceph/qa/suites/fs/verify/validater/valgrind.yaml +++ b/ceph/qa/suites/fs/verify/validater/valgrind.yaml @@ -4,11 +4,10 @@ overrides: install: ceph: - flavor: notcmalloc debuginfo: true ceph: # Valgrind makes everything slow, so ignore slow requests and extend heartbeat grace - log-whitelist: + log-ignorelist: - slow requests are blocked conf: global: diff --git a/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml b/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml index 3eedd281e..bff1306da 100644 --- a/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml +++ b/ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - SLOW_OPS - slow request tasks: diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml b/ceph/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml index 90d0e7bcb..c401ffd1b 100644 --- a/ceph/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml +++ b/ceph/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - force file system read-only - bad backtrace - MDS in read-only mode diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/client-limits.yaml b/ceph/qa/suites/kcephfs/recovery/tasks/client-limits.yaml index f816cee9b..9bbe92f2a 100644 --- a/ceph/qa/suites/kcephfs/recovery/tasks/client-limits.yaml +++ b/ceph/qa/suites/kcephfs/recovery/tasks/client-limits.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - responding to mclientcaps\(revoke\) - not advance its oldest_client_tid - failing to advance its oldest client/flush tid diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml b/ceph/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml index 725a259d2..27f1a2352 100644 --- a/ceph/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml +++ b/ceph/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml @@ -3,7 +3,7 @@ # to permit OSDs to complain about that. 
overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - slow request - evicting unresponsive client diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/damage.yaml b/ceph/qa/suites/kcephfs/recovery/tasks/damage.yaml index 9ae738f01..16d56ee2f 100644 --- a/ceph/qa/suites/kcephfs/recovery/tasks/damage.yaml +++ b/ceph/qa/suites/kcephfs/recovery/tasks/damage.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - bad backtrace - object missing on disk - error reading table object diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/data-scan.yaml b/ceph/qa/suites/kcephfs/recovery/tasks/data-scan.yaml index 8a05e22a9..016faea12 100644 --- a/ceph/qa/suites/kcephfs/recovery/tasks/data-scan.yaml +++ b/ceph/qa/suites/kcephfs/recovery/tasks/data-scan.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - bad backtrace - object missing on disk - error reading table object diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/failover.yaml b/ceph/qa/suites/kcephfs/recovery/tasks/failover.yaml index 8bb8134eb..bc6e92aa5 100644 --- a/ceph/qa/suites/kcephfs/recovery/tasks/failover.yaml +++ b/ceph/qa/suites/kcephfs/recovery/tasks/failover.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - Replacing daemon mds - \(MDS_INSUFFICIENT_STANDBY\) - \(MDS_ALL_DOWN\) diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml b/ceph/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml index b92cf1052..73090662f 100644 --- a/ceph/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml +++ b/ceph/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - inode wrongly marked free - bad backtrace on inode - inode table repaired for inode diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml b/ceph/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml index 66f819d06..71c06de07 100644 --- a/ceph/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml +++ b/ceph/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml @@ -1,7 +1,7 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - bad backtrace on directory inode - error reading table object - Metadata damage detected diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/mds-full.yaml b/ceph/qa/suites/kcephfs/recovery/tasks/mds-full.yaml index e9744e719..c1d24db5c 100644 --- a/ceph/qa/suites/kcephfs/recovery/tasks/mds-full.yaml +++ b/ceph/qa/suites/kcephfs/recovery/tasks/mds-full.yaml @@ -3,7 +3,7 @@ overrides: ceph: cephfs_ec_profile: - disabled - log-whitelist: + log-ignorelist: - OSD full dropping all updates - OSD near full - pausewr flag diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml b/ceph/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml index 88ae6019a..d0be7b75d 100644 --- a/ceph/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml +++ b/ceph/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - client session with non-allowable root tasks: diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/volume-client.yaml b/ceph/qa/suites/kcephfs/recovery/tasks/volume-client.yaml index 9ecaaf4f6..04ee27657 100644 --- a/ceph/qa/suites/kcephfs/recovery/tasks/volume-client.yaml +++ b/ceph/qa/suites/kcephfs/recovery/tasks/volume-client.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - MON_DOWN tasks: - cephfs_test_runner: diff --git a/ceph/qa/suites/kcephfs/thrash/thrashers/default.yaml 
b/ceph/qa/suites/kcephfs/thrash/thrashers/default.yaml index 1829619bd..d69fb1402 100644 --- a/ceph/qa/suites/kcephfs/thrash/thrashers/default.yaml +++ b/ceph/qa/suites/kcephfs/thrash/thrashers/default.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost tasks: diff --git a/ceph/qa/suites/kcephfs/thrash/thrashers/mds.yaml b/ceph/qa/suites/kcephfs/thrash/thrashers/mds.yaml index 2d88dd9a0..d03d4f922 100644 --- a/ceph/qa/suites/kcephfs/thrash/thrashers/mds.yaml +++ b/ceph/qa/suites/kcephfs/thrash/thrashers/mds.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - Replacing daemon mds tasks: diff --git a/ceph/qa/suites/kcephfs/thrash/thrashers/mon.yaml b/ceph/qa/suites/kcephfs/thrash/thrashers/mon.yaml index d72a99cbc..8d68152ae 100644 --- a/ceph/qa/suites/kcephfs/thrash/thrashers/mon.yaml +++ b/ceph/qa/suites/kcephfs/thrash/thrashers/mon.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - \(MON_DOWN\) tasks: diff --git a/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml b/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml index 53e74bea3..7e4f711a2 100644 --- a/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml +++ b/ceph/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - SLOW_OPS - slow request conf: diff --git a/ceph/qa/suites/krbd/fsx/striping/default/msgr-failures/few.yaml b/ceph/qa/suites/krbd/fsx/striping/default/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/krbd/fsx/striping/default/msgr-failures/few.yaml +++ b/ceph/qa/suites/krbd/fsx/striping/default/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/krbd/fsx/striping/default/msgr-failures/many.yaml b/ceph/qa/suites/krbd/fsx/striping/default/msgr-failures/many.yaml index 31edc9159..e3855297d 100644 --- a/ceph/qa/suites/krbd/fsx/striping/default/msgr-failures/many.yaml +++ b/ceph/qa/suites/krbd/fsx/striping/default/msgr-failures/many.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 500 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/krbd/fsx/striping/fancy/msgr-failures/few.yaml b/ceph/qa/suites/krbd/fsx/striping/fancy/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/krbd/fsx/striping/fancy/msgr-failures/few.yaml +++ b/ceph/qa/suites/krbd/fsx/striping/fancy/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml b/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml +++ b/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml b/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml index 31edc9159..e3855297d 100644 --- 
a/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml +++ b/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 500 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netlink_enobufs.yaml b/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netlink_enobufs.yaml index b0530d52c..ed1b2ae63 100644 --- a/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netlink_enobufs.yaml +++ b/ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netlink_enobufs.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - pauserd,pausewr flag\(s\) set tasks: diff --git a/ceph/qa/suites/krbd/rbd/msgr-failures/few.yaml b/ceph/qa/suites/krbd/rbd/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/krbd/rbd/msgr-failures/few.yaml +++ b/ceph/qa/suites/krbd/rbd/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/krbd/rbd/msgr-failures/many.yaml b/ceph/qa/suites/krbd/rbd/msgr-failures/many.yaml index 31edc9159..e3855297d 100644 --- a/ceph/qa/suites/krbd/rbd/msgr-failures/many.yaml +++ b/ceph/qa/suites/krbd/rbd/msgr-failures/many.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 500 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/krbd/singleton/msgr-failures/few.yaml b/ceph/qa/suites/krbd/singleton/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/krbd/singleton/msgr-failures/few.yaml +++ b/ceph/qa/suites/krbd/singleton/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/krbd/singleton/msgr-failures/many.yaml b/ceph/qa/suites/krbd/singleton/msgr-failures/many.yaml index 31edc9159..e3855297d 100644 --- a/ceph/qa/suites/krbd/singleton/msgr-failures/many.yaml +++ b/ceph/qa/suites/krbd/singleton/msgr-failures/many.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 500 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/krbd/thrash/thrashers/backoff.yaml b/ceph/qa/suites/krbd/thrash/thrashers/backoff.yaml index 48a7a2a29..a98fec611 100644 --- a/ceph/qa/suites/krbd/thrash/thrashers/backoff.yaml +++ b/ceph/qa/suites/krbd/thrash/thrashers/backoff.yaml @@ -4,7 +4,7 @@ overrides: osd: osd backoff on peering: true osd backoff on degraded: true - log-whitelist: + log-ignorelist: - wrongly marked me down - objects unfound and apparently lost tasks: diff --git a/ceph/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml b/ceph/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml index 415684dee..4ef5fcaea 100644 --- a/ceph/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml +++ b/ceph/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - \(MON_DOWN\) tasks: - mon_thrash: diff --git a/ceph/qa/suites/krbd/thrash/thrashers/pggrow.yaml b/ceph/qa/suites/krbd/thrash/thrashers/pggrow.yaml index 14346a26e..07a227325 100644 --- a/ceph/qa/suites/krbd/thrash/thrashers/pggrow.yaml +++ b/ceph/qa/suites/krbd/thrash/thrashers/pggrow.yaml @@ -1,6 +1,6 @@ overrides: ceph: 
- log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost tasks: diff --git a/ceph/qa/suites/krbd/thrash/thrashers/upmap.yaml b/ceph/qa/suites/krbd/thrash/thrashers/upmap.yaml index 86b517097..f7d456627 100644 --- a/ceph/qa/suites/krbd/thrash/thrashers/upmap.yaml +++ b/ceph/qa/suites/krbd/thrash/thrashers/upmap.yaml @@ -4,7 +4,7 @@ overrides: conf: mon: mon osd initial require min compat client: luminous - log-whitelist: + log-ignorelist: - wrongly marked me down - objects unfound and apparently lost tasks: diff --git a/ceph/qa/suites/krbd/wac/wac/verify/many-resets.yaml b/ceph/qa/suites/krbd/wac/wac/verify/many-resets.yaml index fab0abd65..1f434fd28 100644 --- a/ceph/qa/suites/krbd/wac/wac/verify/many-resets.yaml +++ b/ceph/qa/suites/krbd/wac/wac/verify/many-resets.yaml @@ -4,7 +4,7 @@ overrides: global: ms inject socket failures: 500 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME tasks: - exec: diff --git a/ceph/qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml b/ceph/qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml index 6eb6c987c..76819fee9 100644 --- a/ceph/qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml +++ b/ceph/qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - Replacing daemon mds tasks: - cephfs_test_runner: diff --git a/ceph/qa/suites/multimds/basic/tasks/cephfs_test_snapshots.yaml b/ceph/qa/suites/multimds/basic/tasks/cephfs_test_snapshots.yaml index c83c12821..d5951468b 100644 --- a/ceph/qa/suites/multimds/basic/tasks/cephfs_test_snapshots.yaml +++ b/ceph/qa/suites/multimds/basic/tasks/cephfs_test_snapshots.yaml @@ -2,7 +2,7 @@ overrides: check-counter: dry_run: true ceph: - log-whitelist: + log-ignorelist: - evicting unresponsive client - RECENT_CRASH diff --git a/ceph/qa/suites/perf-basic/ceph.yaml b/ceph/qa/suites/perf-basic/ceph.yaml index 43807c693..f08ce1a53 100644 --- a/ceph/qa/suites/perf-basic/ceph.yaml +++ b/ceph/qa/suites/perf-basic/ceph.yaml @@ -15,7 +15,7 @@ tasks: - ceph: fs: xfs wait-for-scrub: false - log-whitelist: + log-ignorelist: - \(PG_ - \(OSD_ - \(OBJECT_ diff --git a/ceph/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml b/ceph/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml index 4899bf164..5df88b380 100644 --- a/ceph/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml +++ b/ceph/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - reached quota - \(POOL_APP_NOT_ENABLED\) - \(PG_AVAILABILITY\) diff --git a/ceph/qa/suites/powercycle/osd/whitelist_health.yaml b/ceph/qa/suites/powercycle/osd/whitelist_health.yaml index f724302a4..bce5e9588 100644 --- a/ceph/qa/suites/powercycle/osd/whitelist_health.yaml +++ b/ceph/qa/suites/powercycle/osd/whitelist_health.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - \(MDS_TRIM\) - \(MDS_SLOW_REQUEST\) - MDS_SLOW_METADATA_IO diff --git a/ceph/qa/suites/rados/basic/msgr-failures/few.yaml b/ceph/qa/suites/rados/basic/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/rados/basic/msgr-failures/few.yaml +++ b/ceph/qa/suites/rados/basic/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/basic/msgr-failures/many.yaml 
b/ceph/qa/suites/rados/basic/msgr-failures/many.yaml index dcf7a471f..075d959a7 100644 --- a/ceph/qa/suites/rados/basic/msgr-failures/many.yaml +++ b/ceph/qa/suites/rados/basic/msgr-failures/many.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 1500 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml b/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml index 35b832bc3..a54e03b1a 100644 --- a/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml +++ b/ceph/qa/suites/rados/basic/tasks/rados_api_tests.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - reached quota - but it is still running - overall HEALTH_ diff --git a/ceph/qa/suites/rados/basic/tasks/rados_python.yaml b/ceph/qa/suites/rados/basic/tasks/rados_python.yaml index 8c70304d0..965909450 100644 --- a/ceph/qa/suites/rados/basic/tasks/rados_python.yaml +++ b/ceph/qa/suites/rados/basic/tasks/rados_python.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml b/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml index bee513eb9..7b5c89b9b 100644 --- a/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml +++ b/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(TOO_FEW_PGS\) diff --git a/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml b/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml index 2dade6dee..53effb42d 100644 --- a/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml +++ b/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml b/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml index 6b764a875..847aedb21 100644 --- a/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml +++ b/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml b/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml index c82023c2a..b25392ffa 100644 --- a/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml +++ b/ceph/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rados/basic/tasks/repair_test.yaml b/ceph/qa/suites/rados/basic/tasks/repair_test.yaml index f3a7868d1..0d3749f4d 100644 --- a/ceph/qa/suites/rados/basic/tasks/repair_test.yaml +++ b/ceph/qa/suites/rados/basic/tasks/repair_test.yaml @@ -1,7 +1,7 @@ overrides: ceph: wait-for-scrub: false - log-whitelist: + log-ignorelist: - candidate had a stat error - candidate had a read error - deep-scrub 0 missing, 1 inconsistent objects diff --git a/ceph/qa/suites/rados/basic/tasks/scrub_test.yaml 
b/ceph/qa/suites/rados/basic/tasks/scrub_test.yaml index 00e85f9e5..dde468940 100644 --- a/ceph/qa/suites/rados/basic/tasks/scrub_test.yaml +++ b/ceph/qa/suites/rados/basic/tasks/scrub_test.yaml @@ -1,7 +1,7 @@ overrides: ceph: wait-for-scrub: false - log-whitelist: + log-ignorelist: - '!= data_digest' - '!= omap_digest' - '!= size' diff --git a/ceph/qa/suites/rados/cephadm/orchestrator_cli/orchestrator_cli.yaml b/ceph/qa/suites/rados/cephadm/orchestrator_cli/orchestrator_cli.yaml index d585616e5..564a2eb02 100644 --- a/ceph/qa/suites/rados/cephadm/orchestrator_cli/orchestrator_cli.yaml +++ b/ceph/qa/suites/rados/cephadm/orchestrator_cli/orchestrator_cli.yaml @@ -5,7 +5,7 @@ tasks: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(DEVICE_IDENT_ON\) diff --git a/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml b/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml index 27f466ebd..317c5de17 100644 --- a/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml +++ b/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml @@ -5,7 +5,7 @@ tasks: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ @@ -23,6 +23,7 @@ tasks: - \(POOL_APP_NOT_ENABLED\) - \(OSDMAP_FLAGS\) - \(OSD_FLAGS\) + - \(TELEMETRY_CHANGED\) - pauserd,pausewr flag\(s\) set - Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running - evicting unresponsive client .+ diff --git a/ceph/qa/suites/rados/mgr/tasks/crash.yaml b/ceph/qa/suites/rados/mgr/tasks/crash.yaml index 7b4c44460..af4c40642 100644 --- a/ceph/qa/suites/rados/mgr/tasks/crash.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/crash.yaml @@ -5,7 +5,7 @@ tasks: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ diff --git a/ceph/qa/suites/rados/mgr/tasks/failover.yaml b/ceph/qa/suites/rados/mgr/tasks/failover.yaml index 34be47151..42c2f5c5b 100644 --- a/ceph/qa/suites/rados/mgr/tasks/failover.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/failover.yaml @@ -5,7 +5,7 @@ tasks: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ diff --git a/ceph/qa/suites/rados/mgr/tasks/insights.yaml b/ceph/qa/suites/rados/mgr/tasks/insights.yaml index 521606656..5cb124bf7 100644 --- a/ceph/qa/suites/rados/mgr/tasks/insights.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/insights.yaml @@ -5,7 +5,7 @@ tasks: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(MGR_INSIGHTS_WARNING\) diff --git a/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml b/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml index 11053d6a2..deab01adb 100644 --- a/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml @@ -5,7 +5,7 @@ tasks: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. 
wait-for-scrub: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ diff --git a/ceph/qa/suites/rados/mgr/tasks/progress.yaml b/ceph/qa/suites/rados/mgr/tasks/progress.yaml index 784625756..4a0e802b2 100644 --- a/ceph/qa/suites/rados/mgr/tasks/progress.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/progress.yaml @@ -9,7 +9,7 @@ tasks: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(MDS_ALL_DOWN\) diff --git a/ceph/qa/suites/rados/mgr/tasks/prometheus.yaml b/ceph/qa/suites/rados/mgr/tasks/prometheus.yaml index 1a7776819..388e19678 100644 --- a/ceph/qa/suites/rados/mgr/tasks/prometheus.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/prometheus.yaml @@ -5,7 +5,7 @@ tasks: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ diff --git a/ceph/qa/suites/rados/mgr/tasks/workunits.yaml b/ceph/qa/suites/rados/mgr/tasks/workunits.yaml index d7261f44b..6074de0ed 100644 --- a/ceph/qa/suites/rados/mgr/tasks/workunits.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/workunits.yaml @@ -4,7 +4,7 @@ tasks: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ diff --git a/ceph/qa/suites/rados/monthrash/ceph.yaml b/ceph/qa/suites/rados/monthrash/ceph.yaml index 6c53b3150..124ac3850 100644 --- a/ceph/qa/suites/rados/monthrash/ceph.yaml +++ b/ceph/qa/suites/rados/monthrash/ceph.yaml @@ -11,7 +11,7 @@ overrides: mon scrub inject crc mismatch: 0.01 mon scrub inject missing keys: 0.05 # thrashing monitors may make mgr have trouble w/ its keepalive - log-whitelist: + log-ignorelist: - ScrubResult - scrub mismatch - overall HEALTH_ diff --git a/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml b/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml +++ b/ceph/qa/suites/rados/monthrash/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml b/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml index c4bc304fc..83b136518 100644 --- a/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml +++ b/ceph/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml @@ -10,5 +10,5 @@ overrides: mon client directed command retry: 5 mgr: debug monc: 10 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml b/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml index 829e3592b..c2ec78fd3 100644 --- a/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml +++ b/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) - \(TOO_FEW_PGS\) diff --git a/ceph/qa/suites/rados/monthrash/thrashers/many.yaml b/ceph/qa/suites/rados/monthrash/thrashers/many.yaml index fa829b34b..958831232 100644 --- a/ceph/qa/suites/rados/monthrash/thrashers/many.yaml +++ 
b/ceph/qa/suites/rados/monthrash/thrashers/many.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) conf: diff --git a/ceph/qa/suites/rados/monthrash/thrashers/one.yaml b/ceph/qa/suites/rados/monthrash/thrashers/one.yaml index 041cee0b3..e969a0d8d 100644 --- a/ceph/qa/suites/rados/monthrash/thrashers/one.yaml +++ b/ceph/qa/suites/rados/monthrash/thrashers/one.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) tasks: diff --git a/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml b/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml index 14f41f7fb..e721b9b38 100644 --- a/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml +++ b/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) conf: diff --git a/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml b/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml index 08b1522c7..8fdd1ad48 100644 --- a/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml +++ b/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) conf: diff --git a/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml b/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml index c6b00b486..522302cd7 100644 --- a/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml +++ b/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - slow request - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml b/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml index 940d3a8e4..ad19bd341 100644 --- a/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml +++ b/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) tasks: diff --git a/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml b/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml index f0bd56856..e5ce0f6fd 100644 --- a/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml +++ b/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - reached quota - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) diff --git a/ceph/qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml b/ceph/qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml index cca902af2..372bf2561 100644 --- a/ceph/qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml +++ b/ceph/qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml @@ -10,7 +10,7 @@ overrides: mon osdmap full prune txsize: 100 osd: osd beacon report interval: 10 - log-whitelist: + log-ignorelist: # setting/unsetting noup will trigger health warns, # causing tests to fail due to health warns, even if # the tests themselves are successful. 
diff --git a/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml b/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml index 63b88c0dd..2f9729c92 100644 --- a/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml +++ b/ceph/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(PG_ diff --git a/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml b/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml +++ b/ceph/qa/suites/rados/multimon/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml b/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml index b7104d313..d47b466b9 100644 --- a/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml +++ b/ceph/qa/suites/rados/multimon/msgr-failures/many.yaml @@ -5,5 +5,5 @@ overrides: ms inject socket failures: 1000 mon client directed command retry: 5 mon mgr beacon grace: 90 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml b/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml index a4cea8f3a..2a4bf2bac 100644 --- a/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml +++ b/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml @@ -1,7 +1,7 @@ tasks: - install: - ceph: - log-whitelist: + log-ignorelist: - slow request - .*clock.*skew.* - clocks not synchronized diff --git a/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml b/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml index abfc3a1c4..41749349e 100644 --- a/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml +++ b/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml @@ -9,7 +9,7 @@ tasks: - date -u -s @$(expr $(date -u +%s) + 2) - ceph: wait-for-healthy: false - log-whitelist: + log-ignorelist: - .*clock.*skew.* - clocks not synchronized - overall HEALTH_ diff --git a/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml b/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml index 14da275e5..6373663c5 100644 --- a/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml +++ b/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml @@ -1,7 +1,7 @@ tasks: - install: - ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) - \(PG_AVAILABILITY\) diff --git a/ceph/qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml b/ceph/qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml index 042bd0657..72b143ad9 100644 --- a/ceph/qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml +++ b/ceph/qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml @@ -14,7 +14,7 @@ tasks: osd max object namespace len: 64 osd: osd objectstore: filestore - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/rados/perf/ceph.yaml b/ceph/qa/suites/rados/perf/ceph.yaml index 912dcbdca..59e8029b1 100644 --- a/ceph/qa/suites/rados/perf/ceph.yaml +++ b/ceph/qa/suites/rados/perf/ceph.yaml @@ -5,7 +5,7 @@ tasks: - ceph: fs: xfs wait-for-scrub: false - log-whitelist: + log-ignorelist: - \(PG_ - \(OSD_ - \(OBJECT_ diff --git 
a/ceph/qa/suites/rados/rest/mgr-restful.yaml b/ceph/qa/suites/rados/rest/mgr-restful.yaml index c579592cb..c863463de 100644 --- a/ceph/qa/suites/rados/rest/mgr-restful.yaml +++ b/ceph/qa/suites/rados/rest/mgr-restful.yaml @@ -7,7 +7,7 @@ roles: tasks: - install: - ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ diff --git a/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml b/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml index 2b84c716e..f86be3459 100644 --- a/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml +++ b/ceph/qa/suites/rados/singleton-bluestore/all/cephtool.yaml @@ -14,7 +14,7 @@ openstack: tasks: - install: - ceph: - log-whitelist: + log-ignorelist: - but it is still running - had wrong client addr - had wrong cluster addr diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml index 3aab0add8..04c40197a 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml @@ -6,7 +6,7 @@ roles: - [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0] overrides: ceph: - log-whitelist: + log-ignorelist: - MDS in read-only mode - force file system read-only - overall HEALTH_ diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/balancer.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/balancer.yaml index 5d88445bb..d4c6e3ca5 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/balancer.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/balancer.yaml @@ -6,7 +6,7 @@ tasks: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force fs: xfs - log-whitelist: + log-ignorelist: - \(PG_AVAILABILITY\) - cram: clients: diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml index dce6f51ba..f998c51c9 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml @@ -9,7 +9,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) conf: diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml index 596cf3e5c..e116b5ae0 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml @@ -9,7 +9,7 @@ overrides: ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml index 65e085b65..ee800e5a7 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml @@ -14,7 +14,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) conf: diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml 
b/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml index af0a47a05..8d26cd323 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml @@ -7,7 +7,7 @@ roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] overrides: ceph: - log-whitelist: + log-ignorelist: - is full - overall HEALTH_ - \(POOL_FULL\) diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml index e7273031e..bc57a9cd9 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml @@ -10,7 +10,7 @@ tasks: # we may land on ext4 osd max object name len: 400 osd max object namespace len: 64 - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml index a8c688543..b08ab343f 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml @@ -8,7 +8,7 @@ overrides: ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - \(OSDMAP_FLAGS\) - \(OSD_FULL\) - \(MDS_READ_ONLY\) diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml index 0494c71bd..7228522be 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml @@ -6,7 +6,7 @@ roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] overrides: ceph: - log-whitelist: + log-ignorelist: - \(POOL_APP_NOT_ENABLED\) tasks: - install: diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml index 5eca3fa22..f670a0849 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml @@ -2,7 +2,7 @@ roles: - [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0] overrides: ceph: - log-whitelist: + log-ignorelist: - \(POOL_APP_NOT_ENABLED\) tasks: - install: diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml index e0e972192..a3ce46e6a 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml @@ -17,7 +17,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(PG_ - \(OSD_ diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml index 3d3003871..408268a09 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml @@ -6,7 +6,7 @@ roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] overrides: ceph: - log-whitelist: + log-ignorelist: - \(OSD_DOWN\) - \(POOL_APP_NOT_ENABLED\) - \(SLOW_OPS\) diff --git 
a/ceph/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml index eb29d297a..9cf4eec89 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml @@ -19,7 +19,7 @@ tasks: osd: osd recovery sleep: .1 osd objectstore: filestore - log-whitelist: + log-ignorelist: - \(POOL_APP_NOT_ENABLED\) - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml index 21d99cf57..6d48796f0 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml @@ -2,7 +2,7 @@ roles: - [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0] overrides: ceph: - log-whitelist: + log-ignorelist: - \(POOL_APP_NOT_ENABLED\) tasks: - install: diff --git a/ceph/qa/suites/rados/singleton/all/deduptool.yaml b/ceph/qa/suites/rados/singleton/all/deduptool.yaml index 4ae0fef39..616a0b33c 100644 --- a/ceph/qa/suites/rados/singleton/all/deduptool.yaml +++ b/ceph/qa/suites/rados/singleton/all/deduptool.yaml @@ -14,7 +14,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - but it is still running - had wrong client addr - had wrong cluster addr diff --git a/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml b/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml index a09cde97d..24b42557f 100644 --- a/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml +++ b/ceph/qa/suites/rados/singleton/all/divergent_priors.yaml @@ -12,7 +12,7 @@ openstack: overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml b/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml index a9dc87ca1..6bef63958 100644 --- a/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml +++ b/ceph/qa/suites/rados/singleton/all/divergent_priors2.yaml @@ -12,7 +12,7 @@ openstack: overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml b/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml index 43ac2a092..c1d28ee8e 100644 --- a/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml +++ b/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml @@ -12,7 +12,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml index a74ff624a..9c423c8d8 100644 --- a/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml +++ b/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml @@ -17,7 +17,7 @@ tasks: create_rbd_pool: false pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml index 2ab4df6a3..bb170b506 100644 --- 
a/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml +++ b/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml @@ -15,7 +15,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml b/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml index 6117b373f..fceee20c0 100644 --- a/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml +++ b/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml @@ -15,7 +15,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml index 7877310bf..e5999bc9b 100644 --- a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml +++ b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml @@ -18,7 +18,7 @@ overrides: osd: mon max pg per osd : 2 osd max pg per osd hard ratio : 1 - log-whitelist: + log-ignorelist: - \(TOO_FEW_PGS\) - \(PENDING_CREATING_PGS\) tasks: diff --git a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml index 633bf82f0..075d6be1f 100644 --- a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml +++ b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml @@ -20,7 +20,7 @@ overrides: osd: mon max pg per osd : 1 osd max pg per osd hard ratio : 1 - log-whitelist: + log-ignorelist: - \(TOO_FEW_PGS\) - \(PG_ - \(PENDING_CREATING_PGS\) diff --git a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml index fcae175c6..db2856484 100644 --- a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml +++ b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml @@ -20,7 +20,7 @@ overrides: osd: mon max pg per osd : 1 osd max pg per osd hard ratio : 1 - log-whitelist: + log-ignorelist: - \(TOO_FEW_PGS\) - \(PG_ - \(PENDING_CREATING_PGS\) diff --git a/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml b/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml index f38a36b9a..8c23c0bc9 100644 --- a/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml +++ b/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml @@ -10,7 +10,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(AUTH_BAD_CAPS\) - workunit: diff --git a/ceph/qa/suites/rados/singleton/all/mon-config-key-caps.yaml b/ceph/qa/suites/rados/singleton/all/mon-config-key-caps.yaml index 534e6db98..f987f3c98 100644 --- a/ceph/qa/suites/rados/singleton/all/mon-config-key-caps.yaml +++ b/ceph/qa/suites/rados/singleton/all/mon-config-key-caps.yaml @@ -10,7 +10,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(AUTH_BAD_CAPS\) - workunit: diff --git a/ceph/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled 
b/ceph/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled index 46f0ab1b4..120e073a7 100644 --- a/ceph/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled +++ b/ceph/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled @@ -44,7 +44,7 @@ tasks: create_rbd_pool: false pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml b/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml index e9a690503..bbbd9b4b3 100644 --- a/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml +++ b/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml @@ -15,7 +15,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml index 74f9cfad4..15a0ea342 100644 --- a/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml +++ b/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml @@ -16,7 +16,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml b/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml index a5ffc3067..75cea6a94 100644 --- a/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml +++ b/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml @@ -15,7 +15,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/singleton/all/peer.yaml b/ceph/qa/suites/rados/singleton/all/peer.yaml index 3e6f6bcfc..24fd74b82 100644 --- a/ceph/qa/suites/rados/singleton/all/peer.yaml +++ b/ceph/qa/suites/rados/singleton/all/peer.yaml @@ -18,7 +18,7 @@ tasks: config: global: osd pool default min size : 1 - log-whitelist: + log-ignorelist: - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/singleton/all/pg-autoscaler.yaml b/ceph/qa/suites/rados/singleton/all/pg-autoscaler.yaml index 667f6cc24..abc6f7bd1 100644 --- a/ceph/qa/suites/rados/singleton/all/pg-autoscaler.yaml +++ b/ceph/qa/suites/rados/singleton/all/pg-autoscaler.yaml @@ -22,7 +22,7 @@ tasks: create_rbd_pool: false pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml index f863ffe25..b3f11264f 100644 --- a/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml +++ b/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml @@ -14,7 +14,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - but it is still running - slow request - overall HEALTH_ diff --git 
a/ceph/qa/suites/rados/singleton/all/radostool.yaml b/ceph/qa/suites/rados/singleton/all/radostool.yaml index 1342b2842..fa3a1b0f7 100644 --- a/ceph/qa/suites/rados/singleton/all/radostool.yaml +++ b/ceph/qa/suites/rados/singleton/all/radostool.yaml @@ -14,7 +14,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - but it is still running - had wrong client addr - had wrong cluster addr diff --git a/ceph/qa/suites/rados/singleton/all/random-eio.yaml b/ceph/qa/suites/rados/singleton/all/random-eio.yaml index 1d5adae75..782b906d6 100644 --- a/ceph/qa/suites/rados/singleton/all/random-eio.yaml +++ b/ceph/qa/suites/rados/singleton/all/random-eio.yaml @@ -17,7 +17,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - missing primary copy of - objects unfound and apparently lost - had a read error diff --git a/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml index 875e5930a..f678d08ce 100644 --- a/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml +++ b/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml @@ -16,7 +16,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - no reply from - overall HEALTH_ - \(MON_DOWN\) diff --git a/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml b/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml index bb8d3de5d..7438f9e77 100644 --- a/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml +++ b/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml @@ -23,7 +23,7 @@ tasks: osd max pg log entries: 1000 osd_target_pg_log_entries_per_osd: 0 osd pg log trim min: 10 - log-whitelist: + log-ignorelist: - \(POOL_APP_NOT_ENABLED\) - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml b/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml index 17fdae044..2756ebe82 100644 --- a/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml +++ b/ceph/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml @@ -8,7 +8,7 @@ tasks: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force fs: xfs - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/rados/singleton/all/test-crash.yaml b/ceph/qa/suites/rados/singleton/all/test-crash.yaml index 7a0d0d23d..beb83f0bb 100644 --- a/ceph/qa/suites/rados/singleton/all/test-crash.yaml +++ b/ceph/qa/suites/rados/singleton/all/test-crash.yaml @@ -6,7 +6,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - Reduced data availability - OSD_.*DOWN - \(RECENT_CRASH\) diff --git a/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml b/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml index 2f1d42980..0e3c92330 100644 --- a/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml +++ b/ceph/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml @@ -12,7 +12,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) - workunit: diff 
--git a/ceph/qa/suites/rados/singleton/all/thrash-backfill-full.yaml b/ceph/qa/suites/rados/singleton/all/thrash-backfill-full.yaml index a318eafc4..0f2924db3 100644 --- a/ceph/qa/suites/rados/singleton/all/thrash-backfill-full.yaml +++ b/ceph/qa/suites/rados/singleton/all/thrash-backfill-full.yaml @@ -24,7 +24,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - but it is still running - missing primary copy of - objects unfound and apparently lost diff --git a/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml b/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml index 96560bade..5d9770061 100644 --- a/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml +++ b/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml @@ -22,7 +22,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - but it is still running - missing primary copy of - objects unfound and apparently lost diff --git a/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml b/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml index 37be8df98..1caef6db5 100644 --- a/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml +++ b/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml @@ -15,7 +15,7 @@ openstack: tasks: - install: - ceph: - log-whitelist: + log-ignorelist: - but it is still running - thrashosds: op_delay: 30 diff --git a/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml index 5576404db..5f9e1029d 100644 --- a/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml +++ b/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml @@ -17,7 +17,7 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force - log-whitelist: + log-ignorelist: - but it is still running - slow request - overall HEALTH_ diff --git a/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml index c0cecde32..eeb585c88 100644 --- a/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml +++ b/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml @@ -23,7 +23,7 @@ tasks: debug ms: 1 debug objecter: 20 debug rados: 20 - log-whitelist: + log-ignorelist: - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml b/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml +++ b/ceph/qa/suites/rados/singleton/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml b/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml index 7185596c2..80b86bfb8 100644 --- a/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml +++ b/ceph/qa/suites/rados/singleton/msgr-failures/many.yaml @@ -8,5 +8,5 @@ overrides: mon client directed command retry: 5 mgr: debug monc: 10 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git 
a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml index 426943596..f032d88e7 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - slow request diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml index 13ca050fe..c36175c50 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - slow request diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml index 17087078e..371ed570b 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml index fb3af9825..318b20266 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - osd_map_cache_size diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml index 572832d8c..23c9f7d84 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml @@ -5,7 +5,7 @@ overrides: osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 9 - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost tasks: diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml index 148d9fe59..772f2698b 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml b/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml index 6f2f7a447..94b5d07f8 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - slow request diff --git a/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml 
b/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml index a438f43fc..3dab3f9c5 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - slow request diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml index 018267f0e..9a708db31 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml index 31c19704c..e66dab90d 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml index 4701fae56..4ba4eb2a2 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml index 15f89c3fc..8362b6b1d 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost create_rbd_pool: False diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml index 12c11fa3c..a3e66e693 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml @@ -5,7 +5,7 @@ overrides: osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 9 - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost tasks: diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml index 2bbe5e5fb..98f87d6df 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash-old-clients/1-install/jewel-v1only.yaml b/ceph/qa/suites/rados/thrash-old-clients/1-install/jewel-v1only.yaml index 5c5fdc0b2..b245534eb 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/1-install/jewel-v1only.yaml +++ 
b/ceph/qa/suites/rados/thrash-old-clients/1-install/jewel-v1only.yaml @@ -1,7 +1,7 @@ overrides: ceph: mon_bind_msgr2: false - log-whitelist: + log-ignorelist: - \(MON_DOWN\) conf: global: diff --git a/ceph/qa/suites/rados/thrash-old-clients/1-install/jewel.yaml b/ceph/qa/suites/rados/thrash-old-clients/1-install/jewel.yaml index fea79d4a7..c74015b09 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/1-install/jewel.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/1-install/jewel.yaml @@ -1,7 +1,7 @@ overrides: ceph: mon_bind_msgr2: false - log-whitelist: + log-ignorelist: - \(MON_DOWN\) conf: global: diff --git a/ceph/qa/suites/rados/thrash-old-clients/1-install/luminous-v1only.yaml b/ceph/qa/suites/rados/thrash-old-clients/1-install/luminous-v1only.yaml index 8a720404d..f29bb3527 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/1-install/luminous-v1only.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/1-install/luminous-v1only.yaml @@ -1,7 +1,7 @@ overrides: ceph: mon_bind_msgr2: false - log-whitelist: + log-ignorelist: - \(MON_DOWN\) conf: global: diff --git a/ceph/qa/suites/rados/thrash-old-clients/1-install/luminous.yaml b/ceph/qa/suites/rados/thrash-old-clients/1-install/luminous.yaml index 54d4791d7..f51a883fa 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/1-install/luminous.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/1-install/luminous.yaml @@ -1,7 +1,7 @@ overrides: ceph: mon_bind_msgr2: false - log-whitelist: + log-ignorelist: - \(MON_DOWN\) conf: global: diff --git a/ceph/qa/suites/rados/thrash-old-clients/1-install/mimic-v1only.yaml b/ceph/qa/suites/rados/thrash-old-clients/1-install/mimic-v1only.yaml index 8eb1cca93..a37b451e5 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/1-install/mimic-v1only.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/1-install/mimic-v1only.yaml @@ -1,7 +1,7 @@ overrides: ceph: mon_bind_msgr2: false - log-whitelist: + log-ignorelist: - \(MON_DOWN\) conf: global: diff --git a/ceph/qa/suites/rados/thrash-old-clients/1-install/mimic.yaml b/ceph/qa/suites/rados/thrash-old-clients/1-install/mimic.yaml index b6032463f..07aefd15c 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/1-install/mimic.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/1-install/mimic.yaml @@ -1,7 +1,7 @@ overrides: ceph: mon_bind_msgr2: false - log-whitelist: + log-ignorelist: - \(MON_DOWN\) conf: global: diff --git a/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus-v1only.yaml b/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus-v1only.yaml index 7e32e8c26..68cc30d47 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus-v1only.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus-v1only.yaml @@ -1,7 +1,7 @@ overrides: ceph: mon_bind_msgr2: false - log-whitelist: + log-ignorelist: - \(MON_DOWN\) conf: global: diff --git a/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus-v2only.yaml b/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus-v2only.yaml index c75b9cb18..8e3f2956a 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus-v2only.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus-v2only.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - \(MON_DOWN\) conf: global: diff --git a/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus.yaml b/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus.yaml index f98c716be..c1aca646f 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus.yaml +++ 
b/ceph/qa/suites/rados/thrash-old-clients/1-install/nautilus.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - \(MON_DOWN\) tasks: - install: diff --git a/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml b/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml index 5e0b180c6..ec45f8882 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml @@ -5,5 +5,5 @@ overrides: ms inject socket failures: 2500 ms tcp read timeout: 5 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml b/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml index 0bd752f21..cc9a3ae69 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml @@ -6,5 +6,5 @@ overrides: mon client directed command retry: 5 osd: osd heartbeat use min delay socket: true - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml b/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml index b3f2f37f4..d7cec6f36 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml @@ -8,5 +8,5 @@ overrides: ms inject delay max: 1 ms inject internal delays: .002 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml b/ceph/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml index df77f73a1..cc232ab88 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash-old-clients/thrashers/default.yaml b/ceph/qa/suites/rados/thrash-old-clients/thrashers/default.yaml index e8e2007f8..c04f9535c 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/thrashers/default.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/thrashers/default.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml b/ceph/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml index 7b55097f7..27881d218 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - osd_map_cache_size diff --git a/ceph/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml b/ceph/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml index 91d2173e8..f18a88711 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml @@ -9,7 +9,7 @@ overrides: filestore queue throttle high multiple: 2 filestore queue throttle max multiple: 10 osd max backfills: 9 - log-whitelist: + log-ignorelist: - but it is still 
running - objects unfound and apparently lost tasks: diff --git a/ceph/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml b/ceph/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml index 8721fd181..54498d0cf 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml b/ceph/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml index fc1f5b45c..33f667ffd 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - must scrub before tier agent can activate tasks: - exec: diff --git a/ceph/qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml b/ceph/qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml index 1e04fb369..5bbb4385e 100644 --- a/ceph/qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml +++ b/ceph/qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml @@ -3,5 +3,5 @@ overrides: conf: osd: osd inject bad map crc probability: 0.1 - log-whitelist: + log-ignorelist: - failed to encode map diff --git a/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml b/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml index 5e0b180c6..ec45f8882 100644 --- a/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml +++ b/ceph/qa/suites/rados/thrash/msgr-failures/fastclose.yaml @@ -5,5 +5,5 @@ overrides: ms inject socket failures: 2500 ms tcp read timeout: 5 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml b/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml index 0bd752f21..cc9a3ae69 100644 --- a/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml +++ b/ceph/qa/suites/rados/thrash/msgr-failures/few.yaml @@ -6,5 +6,5 @@ overrides: mon client directed command retry: 5 osd: osd heartbeat use min delay socket: true - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml b/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml index b3f2f37f4..d7cec6f36 100644 --- a/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml +++ b/ceph/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml @@ -8,5 +8,5 @@ overrides: ms inject delay max: 1 ms inject internal delays: .002 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/thrash/thrashers/careful.yaml b/ceph/qa/suites/rados/thrash/thrashers/careful.yaml index 85e0c2689..8190657f3 100644 --- a/ceph/qa/suites/rados/thrash/thrashers/careful.yaml +++ b/ceph/qa/suites/rados/thrash/thrashers/careful.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash/thrashers/default.yaml b/ceph/qa/suites/rados/thrash/thrashers/default.yaml index 536e85cb9..05e0f8e76 100644 --- a/ceph/qa/suites/rados/thrash/thrashers/default.yaml +++ b/ceph/qa/suites/rados/thrash/thrashers/default.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects 
unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml b/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml index bbc3dbdcd..3b34f5b6b 100644 --- a/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml +++ b/ceph/qa/suites/rados/thrash/thrashers/mapgap.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - osd_map_cache_size diff --git a/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml b/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml index 91d2173e8..f18a88711 100644 --- a/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml +++ b/ceph/qa/suites/rados/thrash/thrashers/morepggrow.yaml @@ -9,7 +9,7 @@ overrides: filestore queue throttle high multiple: 2 filestore queue throttle max multiple: 10 osd max backfills: 9 - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost tasks: diff --git a/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml b/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml index 8721fd181..54498d0cf 100644 --- a/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml +++ b/ceph/qa/suites/rados/thrash/thrashers/pggrow.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost conf: diff --git a/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml index 31a964d1d..3f377858a 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - must scrub before tier agent can activate conf: osd: diff --git a/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml index f082b0b97..29219a7e8 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - must scrub before tier agent can activate conf: osd: diff --git a/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml index b84d4d957..808968d6b 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - must scrub before tier agent can activate conf: osd: diff --git a/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml index 8d712e866..4aec8611c 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - must scrub before tier agent can activate conf: osd: diff --git a/ceph/qa/suites/rados/thrash/workloads/cache-snaps-balanced.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-snaps-balanced.yaml index 18791a1c4..1e55f573f 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-snaps-balanced.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-snaps-balanced.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - must scrub before tier agent can activate conf: osd: diff --git 
a/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml index 7ece997e1..11401d7e2 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - must scrub before tier agent can activate conf: osd: diff --git a/ceph/qa/suites/rados/thrash/workloads/cache.yaml b/ceph/qa/suites/rados/thrash/workloads/cache.yaml index 42cfa6cb9..c557a6751 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - must scrub before tier agent can activate conf: osd: diff --git a/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml b/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml index 7c23a5ca9..eb4c9b1ee 100644 --- a/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/rados_api_tests.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - reached quota - \(POOL_APP_NOT_ENABLED\) - \(PG_AVAILABILITY\) diff --git a/ceph/qa/suites/rados/valgrind-leaks/1-start.yaml b/ceph/qa/suites/rados/valgrind-leaks/1-start.yaml index 4f4fc33cc..9263f2a83 100644 --- a/ceph/qa/suites/rados/valgrind-leaks/1-start.yaml +++ b/ceph/qa/suites/rados/valgrind-leaks/1-start.yaml @@ -6,10 +6,9 @@ openstack: overrides: install: ceph: - flavor: notcmalloc debuginfo: true ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(PG_ conf: diff --git a/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml b/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml index 8f2b26674..41b35926f 100644 --- a/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml +++ b/ceph/qa/suites/rados/verify/d-thrash/default/default.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost tasks: diff --git a/ceph/qa/suites/rados/verify/msgr-failures/few.yaml b/ceph/qa/suites/rados/verify/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/rados/verify/msgr-failures/few.yaml +++ b/ceph/qa/suites/rados/verify/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml b/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml index 534474cb5..06d9602e6 100644 --- a/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml +++ b/ceph/qa/suites/rados/verify/tasks/mon_recovery.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) - \(OSDMAP_FLAGS\) diff --git a/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml b/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml index 79f24479a..f89109aa1 100644 --- a/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml +++ b/ceph/qa/suites/rados/verify/tasks/rados_api_tests.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - reached quota - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) diff --git a/ceph/qa/suites/rados/verify/validater/valgrind.yaml b/ceph/qa/suites/rados/verify/validater/valgrind.yaml index 83eb2add5..b15b14bd4 100644 --- a/ceph/qa/suites/rados/verify/validater/valgrind.yaml +++ b/ceph/qa/suites/rados/verify/validater/valgrind.yaml @@ 
-4,7 +4,6 @@ os_type: centos overrides: install: ceph: - flavor: notcmalloc debuginfo: true ceph: conf: @@ -16,7 +15,7 @@ overrides: osd fast shutdown: false debug bluestore: 1 debug bluefs: 1 - log-whitelist: + log-ignorelist: - overall HEALTH_ # valgrind is slow.. we might get PGs stuck peering etc - \(PG_ diff --git a/ceph/qa/suites/rbd/basic/cachepool/small.yaml b/ceph/qa/suites/rbd/basic/cachepool/small.yaml index 1b5056573..bad95eadd 100644 --- a/ceph/qa/suites/rbd/basic/cachepool/small.yaml +++ b/ceph/qa/suites/rbd/basic/cachepool/small.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NEAR_FULL\) - \(CACHE_POOL_NO_HIT_SET\) diff --git a/ceph/qa/suites/rbd/basic/msgr-failures/few.yaml b/ceph/qa/suites/rbd/basic/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/rbd/basic/msgr-failures/few.yaml +++ b/ceph/qa/suites/rbd/basic/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml b/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml index fe1e26d53..36393831a 100644 --- a/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml +++ b/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml b/ceph/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml index 90bc152e8..441ac9727 100644 --- a/ceph/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml +++ b/ceph/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - \(SLOW_OPS\) - slow request tasks: diff --git a/ceph/qa/suites/rbd/cli/msgr-failures/few.yaml b/ceph/qa/suites/rbd/cli/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/rbd/cli/msgr-failures/few.yaml +++ b/ceph/qa/suites/rbd/cli/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml b/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml index 376bf08ed..db289c7e7 100644 --- a/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml +++ b/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml @@ -12,7 +12,7 @@ overrides: bdev_inject_crash_probability: .5 ceph: fs: xfs - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) conf: diff --git a/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml b/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml index 1b5056573..bad95eadd 100644 --- a/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml +++ b/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NEAR_FULL\) - \(CACHE_POOL_NO_HIT_SET\) diff --git a/ceph/qa/suites/rbd/cli_v1/msgr-failures/few.yaml b/ceph/qa/suites/rbd/cli_v1/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/rbd/cli_v1/msgr-failures/few.yaml +++ b/ceph/qa/suites/rbd/cli_v1/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client 
directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml b/ceph/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml index 1b5056573..bad95eadd 100644 --- a/ceph/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml +++ b/ceph/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NEAR_FULL\) - \(CACHE_POOL_NO_HIT_SET\) diff --git a/ceph/qa/suites/rbd/librbd/msgr-failures/few.yaml b/ceph/qa/suites/rbd/librbd/msgr-failures/few.yaml index c1b4047d1..df2a313a6 100644 --- a/ceph/qa/suites/rbd/librbd/msgr-failures/few.yaml +++ b/ceph/qa/suites/rbd/librbd/msgr-failures/few.yaml @@ -4,6 +4,6 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - but it is still running - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml b/ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml index 1b5056573..bad95eadd 100644 --- a/ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml +++ b/ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NEAR_FULL\) - \(CACHE_POOL_NO_HIT_SET\) diff --git a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml index 04af9c85b..3073d77a8 100644 --- a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml +++ b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml index 6ae7f4627..35c4b0848 100644 --- a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml +++ b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml index 578115ee6..6fae0fea9 100644 --- a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml +++ b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-stress-workunit.yaml b/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-journal-stress-workunit.yaml similarity index 85% rename from ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-stress-workunit.yaml rename to ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-journal-stress-workunit.yaml index 62bda8817..9579b70d6 100644 --- a/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-stress-workunit.yaml +++ b/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-journal-stress-workunit.yaml @@ -10,4 +10,6 @@ tasks: RBD_MIRROR_INSTANCES: '4' RBD_MIRROR_USE_EXISTING_CLUSTER: '1' RBD_MIRROR_USE_RBD_MIRROR: '1' + MIRROR_POOL_MODE: 'pool' + MIRROR_IMAGE_MODE: 'journal' timeout: 6h diff --git 
a/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-exclusive-lock.yaml b/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-exclusive-lock.yaml new file mode 100644 index 000000000..87632483d --- /dev/null +++ b/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-exclusive-lock.yaml @@ -0,0 +1,16 @@ +meta: +- desc: run the rbd_mirror_stress.sh workunit to test the rbd-mirror daemon +tasks: +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror_stress.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + MIRROR_POOL_MODE: 'image' + MIRROR_IMAGE_MODE: 'snapshot' + RBD_IMAGE_FEATURES: 'layering,exclusive-lock' + RBD_MIRROR_INSTANCES: '4' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' + RBD_MIRROR_USE_RBD_MIRROR: '1' + timeout: 6h diff --git a/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-fast-diff.yaml b/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-fast-diff.yaml new file mode 100644 index 000000000..fc43b0ec2 --- /dev/null +++ b/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-fast-diff.yaml @@ -0,0 +1,16 @@ +meta: +- desc: run the rbd_mirror_stress.sh workunit to test the rbd-mirror daemon +tasks: +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror_stress.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + MIRROR_POOL_MODE: 'image' + MIRROR_IMAGE_MODE: 'snapshot' + RBD_IMAGE_FEATURES: 'layering,exclusive-lock,object-map,fast-diff' + RBD_MIRROR_INSTANCES: '4' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' + RBD_MIRROR_USE_RBD_MIRROR: '1' + timeout: 6h diff --git a/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-minimum.yaml b/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-minimum.yaml new file mode 100644 index 000000000..af0ea1240 --- /dev/null +++ b/ceph/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-minimum.yaml @@ -0,0 +1,16 @@ +meta: +- desc: run the rbd_mirror_stress.sh workunit to test the rbd-mirror daemon +tasks: +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror_stress.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + MIRROR_POOL_MODE: 'image' + MIRROR_IMAGE_MODE: 'snapshot' + RBD_IMAGE_FEATURES: 'layering' + RBD_MIRROR_INSTANCES: '4' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' + RBD_MIRROR_USE_RBD_MIRROR: '1' + timeout: 6h diff --git a/ceph/qa/suites/rbd/qemu/msgr-failures/few.yaml b/ceph/qa/suites/rbd/qemu/msgr-failures/few.yaml index 2891ec15c..ca8e09853 100644 --- a/ceph/qa/suites/rbd/qemu/msgr-failures/few.yaml +++ b/ceph/qa/suites/rbd/qemu/msgr-failures/few.yaml @@ -4,6 +4,6 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - but it is still running - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml b/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml index c75e6fd47..a0f88b409 100644 --- a/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml +++ b/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NEAR_FULL\) - \(CACHE_POOL_NO_HIT_SET\) diff --git a/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml b/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml index 1b5056573..bad95eadd 100644 --- 
a/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml +++ b/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NEAR_FULL\) - \(CACHE_POOL_NO_HIT_SET\) diff --git a/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml b/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml index 9af52e0ed..b41f92d52 100644 --- a/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml +++ b/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml @@ -6,7 +6,7 @@ roles: tasks: - install: - ceph: - log-whitelist: + log-ignorelist: - 'application not enabled' - workunit: timeout: 30m diff --git a/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml b/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml index 0800cbfce..954760159 100644 --- a/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml +++ b/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml @@ -4,7 +4,7 @@ tasks: - install: - ceph: fs: xfs - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/singleton/all/rbd_tasks.yaml b/ceph/qa/suites/rbd/singleton/all/rbd_tasks.yaml index b920cfc7c..b06ede605 100644 --- a/ceph/qa/suites/rbd/singleton/all/rbd_tasks.yaml +++ b/ceph/qa/suites/rbd/singleton/all/rbd_tasks.yaml @@ -4,7 +4,7 @@ tasks: - install: - ceph: fs: xfs - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/thrash/msgr-failures/few.yaml b/ceph/qa/suites/rbd/thrash/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/rbd/thrash/msgr-failures/few.yaml +++ b/ceph/qa/suites/rbd/thrash/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rbd/thrash/thrashers/cache.yaml b/ceph/qa/suites/rbd/thrash/thrashers/cache.yaml index 249564841..b434e28be 100644 --- a/ceph/qa/suites/rbd/thrash/thrashers/cache.yaml +++ b/ceph/qa/suites/rbd/thrash/thrashers/cache.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - overall HEALTH_ diff --git a/ceph/qa/suites/rbd/thrash/thrashers/default.yaml b/ceph/qa/suites/rbd/thrash/thrashers/default.yaml index 3f1615c8a..3e2bf7fe1 100644 --- a/ceph/qa/suites/rbd/thrash/thrashers/default.yaml +++ b/ceph/qa/suites/rbd/thrash/thrashers/default.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost tasks: diff --git a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml index 6ae7f4627..35c4b0848 100644 --- a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml +++ b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml index a9021548d..8f929162e 100644 --- a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml +++ b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml @@ -7,7 +7,7 @@ tasks: RBD_FEATURES: "61" overrides: ceph: - 
log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml index 578115ee6..6fae0fea9 100644 --- a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml +++ b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml index 04af9c85b..3073d77a8 100644 --- a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml +++ b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/valgrind/validator/memcheck.yaml b/ceph/qa/suites/rbd/valgrind/validator/memcheck.yaml index c660dce62..fcea1b88c 100644 --- a/ceph/qa/suites/rbd/valgrind/validator/memcheck.yaml +++ b/ceph/qa/suites/rbd/valgrind/validator/memcheck.yaml @@ -4,7 +4,6 @@ os_type: centos overrides: install: ceph: - flavor: notcmalloc debuginfo: true rbd_fsx: valgrind: ["--tool=memcheck"] diff --git a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml index 04af9c85b..3073d77a8 100644 --- a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml +++ b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml index 6ae7f4627..35c4b0848 100644 --- a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml +++ b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml index 578115ee6..6fae0fea9 100644 --- a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml +++ b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml b/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml index e0943439a..251de1c1f 100644 --- a/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml +++ b/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/rgw/multisite/valgrind.yaml b/ceph/qa/suites/rgw/multisite/valgrind.yaml index d6686ef2a..71c56fae4 100644 --- a/ceph/qa/suites/rgw/multisite/valgrind.yaml +++ b/ceph/qa/suites/rgw/multisite/valgrind.yaml @@ -6,7 +6,6 @@ os_type: ubuntu overrides: install: ceph: -# flavor: 
notcmalloc ceph: conf: global: diff --git a/ceph/qa/suites/rgw/verify/msgr-failures/few.yaml b/ceph/qa/suites/rgw/verify/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/rgw/verify/msgr-failures/few.yaml +++ b/ceph/qa/suites/rgw/verify/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/rgw/verify/validater/valgrind.yaml b/ceph/qa/suites/rgw/verify/validater/valgrind.yaml index 42af3adf5..b3f412748 100644 --- a/ceph/qa/suites/rgw/verify/validater/valgrind.yaml +++ b/ceph/qa/suites/rgw/verify/validater/valgrind.yaml @@ -5,7 +5,6 @@ os_version: "8.0" overrides: install: ceph: -# flavor: notcmalloc #debuginfo: true ceph: conf: diff --git a/ceph/qa/suites/rgw/website/overrides.yaml b/ceph/qa/suites/rgw/website/overrides.yaml index 0eb474980..e1ac1a983 100644 --- a/ceph/qa/suites/rgw/website/overrides.yaml +++ b/ceph/qa/suites/rgw/website/overrides.yaml @@ -4,7 +4,6 @@ overrides: install: -# flavor: notcmalloc ceph: conf: global: diff --git a/ceph/qa/suites/smoke/basic/tasks/mon_thrash.yaml b/ceph/qa/suites/smoke/basic/tasks/mon_thrash.yaml index 322908aa3..6bc0957a0 100644 --- a/ceph/qa/suites/smoke/basic/tasks/mon_thrash.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/mon_thrash.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - reached quota - mons down - overall HEALTH_ diff --git a/ceph/qa/suites/smoke/basic/tasks/rados_api_tests.yaml b/ceph/qa/suites/smoke/basic/tasks/rados_api_tests.yaml index 3c7724811..aec932999 100644 --- a/ceph/qa/suites/smoke/basic/tasks/rados_api_tests.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/rados_api_tests.yaml @@ -2,7 +2,7 @@ tasks: - install: null - ceph: fs: ext4 - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/smoke/basic/tasks/rados_bench.yaml b/ceph/qa/suites/smoke/basic/tasks/rados_bench.yaml index 68d450a5f..1268e3652 100644 --- a/ceph/qa/suites/smoke/basic/tasks/rados_bench.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/rados_bench.yaml @@ -12,7 +12,7 @@ tasks: - install: null - ceph: fs: xfs - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml b/ceph/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml index f34d3b83f..e433a4e05 100644 --- a/ceph/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml @@ -1,7 +1,7 @@ tasks: - install: null - ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml b/ceph/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml index 3e87eefb7..c60d110c0 100644 --- a/ceph/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml @@ -2,7 +2,7 @@ tasks: - install: null - ceph: fs: xfs - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/smoke/basic/tasks/rados_python.yaml b/ceph/qa/suites/smoke/basic/tasks/rados_python.yaml index 2a802cd63..7df4616bb 100644 --- a/ceph/qa/suites/smoke/basic/tasks/rados_python.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/rados_python.yaml @@ -1,7 +1,7 @@ tasks: - install: - ceph: - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) diff --git 
a/ceph/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml b/ceph/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml index 5e82e9842..cda4df2a3 100644 --- a/ceph/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml @@ -2,7 +2,7 @@ tasks: - install: - ceph: fs: ext4 - log-whitelist: + log-ignorelist: - but it is still running - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml b/ceph/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml index f7245bab6..b3aed5c84 100644 --- a/ceph/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml @@ -1,7 +1,7 @@ tasks: - install: - ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/smoke/basic/tasks/rbd_fsx.yaml b/ceph/qa/suites/smoke/basic/tasks/rbd_fsx.yaml index 060d3df7a..a84d184d4 100644 --- a/ceph/qa/suites/smoke/basic/tasks/rbd_fsx.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/rbd_fsx.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/suites/stress/thrash/thrashers/default.yaml b/ceph/qa/suites/stress/thrash/thrashers/default.yaml index e628ba6da..47fa40480 100644 --- a/ceph/qa/suites/stress/thrash/thrashers/default.yaml +++ b/ceph/qa/suites/stress/thrash/thrashers/default.yaml @@ -1,7 +1,7 @@ tasks: - install: - ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - thrashosds: diff --git a/ceph/qa/suites/stress/thrash/thrashers/fast.yaml b/ceph/qa/suites/stress/thrash/thrashers/fast.yaml index 6bc9dff03..b2466dbe8 100644 --- a/ceph/qa/suites/stress/thrash/thrashers/fast.yaml +++ b/ceph/qa/suites/stress/thrash/thrashers/fast.yaml @@ -1,7 +1,7 @@ tasks: - install: - ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - thrashosds: diff --git a/ceph/qa/suites/stress/thrash/thrashers/more-down.yaml b/ceph/qa/suites/stress/thrash/thrashers/more-down.yaml index 6042bf6dd..8ba738d1f 100644 --- a/ceph/qa/suites/stress/thrash/thrashers/more-down.yaml +++ b/ceph/qa/suites/stress/thrash/thrashers/more-down.yaml @@ -1,7 +1,7 @@ tasks: - install: - ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - thrashosds: diff --git a/ceph/qa/suites/teuthology/multi-cluster/all/upgrade.yaml b/ceph/qa/suites/teuthology/multi-cluster/all/upgrade.yaml index 42cd93b2c..0973fc390 100644 --- a/ceph/qa/suites/teuthology/multi-cluster/all/upgrade.yaml +++ b/ceph/qa/suites/teuthology/multi-cluster/all/upgrade.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - failed to encode map conf: mon: diff --git a/ceph/qa/suites/tgt/basic/msgr-failures/few.yaml b/ceph/qa/suites/tgt/basic/msgr-failures/few.yaml index dea65948b..519288992 100644 --- a/ceph/qa/suites/tgt/basic/msgr-failures/few.yaml +++ b/ceph/qa/suites/tgt/basic/msgr-failures/few.yaml @@ -4,5 +4,5 @@ overrides: global: ms inject socket failures: 5000 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/tgt/basic/msgr-failures/many.yaml b/ceph/qa/suites/tgt/basic/msgr-failures/many.yaml index 31edc9159..e3855297d 100644 --- a/ceph/qa/suites/tgt/basic/msgr-failures/many.yaml +++ b/ceph/qa/suites/tgt/basic/msgr-failures/many.yaml @@ 
-4,5 +4,5 @@ overrides: global: ms inject socket failures: 500 mon client directed command retry: 5 - log-whitelist: + log-ignorelist: - \(OSD_SLOW_PING_TIME diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/0-cluster/start.yaml b/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/0-cluster/start.yaml index c631b0ed2..8354b1772 100644 --- a/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/0-cluster/start.yaml +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/0-cluster/start.yaml @@ -16,6 +16,6 @@ roles: - - client.1 overrides: ceph: - #log-whitelist: + #log-ignorelist: #- failed to encode map fs: xfs diff --git a/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/3-workload/rbd_notification_tests.yaml b/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/3-workload/rbd_notification_tests.yaml index 10b86530a..212846d4d 100644 --- a/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/3-workload/rbd_notification_tests.yaml +++ b/ceph/qa/suites/upgrade-clients/client-upgrade-octopus-pacific/octopus-client-x/rbd/3-workload/rbd_notification_tests.yaml @@ -27,7 +27,7 @@ tasks: RBD_FEATURES: "61" - workunit: #The line below to change to 'pacific' - branch: master + branch: pacific clients: client.1: - rbd/notify_master.sh diff --git a/ceph/qa/suites/upgrade/mimic-x-singleton/1-install/mimic.yaml b/ceph/qa/suites/upgrade/mimic-x-singleton/1-install/mimic.yaml index bfc14e560..f1ae68180 100644 --- a/ceph/qa/suites/upgrade/mimic-x-singleton/1-install/mimic.yaml +++ b/ceph/qa/suites/upgrade/mimic-x-singleton/1-install/mimic.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - \(MON_DOWN\) - \(MGR_DOWN\) - slow request diff --git a/ceph/qa/suites/upgrade/mimic-x-singleton/3-thrash/default.yaml b/ceph/qa/suites/upgrade/mimic-x-singleton/3-thrash/default.yaml index e36882da4..5360e867f 100644 --- a/ceph/qa/suites/upgrade/mimic-x-singleton/3-thrash/default.yaml +++ b/ceph/qa/suites/upgrade/mimic-x-singleton/3-thrash/default.yaml @@ -4,7 +4,7 @@ meta: small chance to increase the number of pgs overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - log bound mismatch diff --git a/ceph/qa/suites/upgrade/mimic-x-singleton/6-finish-upgrade.yaml b/ceph/qa/suites/upgrade/mimic-x-singleton/6-finish-upgrade.yaml index e7fa4b2f4..3f72ccaa6 100644 --- a/ceph/qa/suites/upgrade/mimic-x-singleton/6-finish-upgrade.yaml +++ b/ceph/qa/suites/upgrade/mimic-x-singleton/6-finish-upgrade.yaml @@ -4,7 +4,7 @@ meta: restartin remaining osds overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(FS_DEGRADED\) - \(MDS_ diff --git a/ceph/qa/suites/upgrade/mimic-x/parallel/0-cluster/start.yaml b/ceph/qa/suites/upgrade/mimic-x/parallel/0-cluster/start.yaml index 099b70c30..f7f8f4a98 100644 --- a/ceph/qa/suites/upgrade/mimic-x/parallel/0-cluster/start.yaml +++ b/ceph/qa/suites/upgrade/mimic-x/parallel/0-cluster/start.yaml @@ -29,7 +29,7 @@ overrides: ceph: mon_bind_msgr2: false mon_bind_addrvec: false - log-whitelist: + log-ignorelist: - scrub mismatch - ScrubResult - wrongly marked diff --git a/ceph/qa/suites/upgrade/mimic-x/parallel/1-ceph-install/mimic.yaml b/ceph/qa/suites/upgrade/mimic-x/parallel/1-ceph-install/mimic.yaml index f4a882583..0abc11e7f 100644 --- 
a/ceph/qa/suites/upgrade/mimic-x/parallel/1-ceph-install/mimic.yaml +++ b/ceph/qa/suites/upgrade/mimic-x/parallel/1-ceph-install/mimic.yaml @@ -16,7 +16,7 @@ tasks: extra_packages: ['librados2'] - print: "**** done installing mimic" - ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(FS_ - \(MDS_ diff --git a/ceph/qa/suites/upgrade/mimic-x/parallel/6-final-workload/rados_mon_thrash.yaml b/ceph/qa/suites/upgrade/mimic-x/parallel/6-final-workload/rados_mon_thrash.yaml index 129d13866..08706dfc1 100644 --- a/ceph/qa/suites/upgrade/mimic-x/parallel/6-final-workload/rados_mon_thrash.yaml +++ b/ceph/qa/suites/upgrade/mimic-x/parallel/6-final-workload/rados_mon_thrash.yaml @@ -3,7 +3,7 @@ meta: librados C and C++ api tests overrides: ceph: - log-whitelist: + log-ignorelist: - reached quota - \(REQUEST_SLOW\) tasks: diff --git a/ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/default.yaml b/ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/default.yaml index b12d76e9f..4f15c6484 100644 --- a/ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/default.yaml +++ b/ceph/qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/default.yaml @@ -4,7 +4,7 @@ meta: small chance to increase the number of pgs overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - wrongly marked me down - objects unfound and apparently lost diff --git a/ceph/qa/suites/upgrade/mimic-x/stress-split/0-cluster/start.yaml b/ceph/qa/suites/upgrade/mimic-x/stress-split/0-cluster/start.yaml index 433f35345..2583082bb 100644 --- a/ceph/qa/suites/upgrade/mimic-x/stress-split/0-cluster/start.yaml +++ b/ceph/qa/suites/upgrade/mimic-x/stress-split/0-cluster/start.yaml @@ -11,7 +11,7 @@ overrides: mon_bind_msgr2: false mon_bind_addrvec: false fs: xfs - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) - \(MGR_DOWN\) diff --git a/ceph/qa/suites/upgrade/mimic-x/stress-split/3-thrash/default.yaml b/ceph/qa/suites/upgrade/mimic-x/stress-split/3-thrash/default.yaml index e0f317b0c..2be9c1f29 100644 --- a/ceph/qa/suites/upgrade/mimic-x/stress-split/3-thrash/default.yaml +++ b/ceph/qa/suites/upgrade/mimic-x/stress-split/3-thrash/default.yaml @@ -4,7 +4,7 @@ meta: small chance to increase the number of pgs overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - wrongly marked me down - objects unfound and apparently lost diff --git a/ceph/qa/suites/upgrade/nautilus-x-singleton/1-install/nautilus.yaml b/ceph/qa/suites/upgrade/nautilus-x-singleton/1-install/nautilus.yaml index e869063ca..f108667d8 100644 --- a/ceph/qa/suites/upgrade/nautilus-x-singleton/1-install/nautilus.yaml +++ b/ceph/qa/suites/upgrade/nautilus-x-singleton/1-install/nautilus.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - \(MON_DOWN\) - \(MGR_DOWN\) - slow request diff --git a/ceph/qa/suites/upgrade/nautilus-x-singleton/3-thrash/default.yaml b/ceph/qa/suites/upgrade/nautilus-x-singleton/3-thrash/default.yaml index e36882da4..5360e867f 100644 --- a/ceph/qa/suites/upgrade/nautilus-x-singleton/3-thrash/default.yaml +++ b/ceph/qa/suites/upgrade/nautilus-x-singleton/3-thrash/default.yaml @@ -4,7 +4,7 @@ meta: small chance to increase the number of pgs overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - objects unfound and apparently lost - log bound mismatch diff --git a/ceph/qa/suites/upgrade/nautilus-x-singleton/6-finish-upgrade.yaml b/ceph/qa/suites/upgrade/nautilus-x-singleton/6-finish-upgrade.yaml 
index a4ea09655..222ba4878 100644 --- a/ceph/qa/suites/upgrade/nautilus-x-singleton/6-finish-upgrade.yaml +++ b/ceph/qa/suites/upgrade/nautilus-x-singleton/6-finish-upgrade.yaml @@ -4,7 +4,7 @@ meta: restartin remaining osds overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(FS_DEGRADED\) - \(MDS_ diff --git a/ceph/qa/suites/upgrade/nautilus-x/parallel/0-cluster/start.yaml b/ceph/qa/suites/upgrade/nautilus-x/parallel/0-cluster/start.yaml index 1a3eef867..71a64bbc1 100644 --- a/ceph/qa/suites/upgrade/nautilus-x/parallel/0-cluster/start.yaml +++ b/ceph/qa/suites/upgrade/nautilus-x/parallel/0-cluster/start.yaml @@ -28,7 +28,7 @@ roles: - client.3 overrides: ceph: - log-whitelist: + log-ignorelist: - scrub mismatch - ScrubResult - wrongly marked diff --git a/ceph/qa/suites/upgrade/nautilus-x/parallel/1-ceph-install/nautilus.yaml b/ceph/qa/suites/upgrade/nautilus-x/parallel/1-ceph-install/nautilus.yaml index 2bbbfa9d5..c26350ceb 100644 --- a/ceph/qa/suites/upgrade/nautilus-x/parallel/1-ceph-install/nautilus.yaml +++ b/ceph/qa/suites/upgrade/nautilus-x/parallel/1-ceph-install/nautilus.yaml @@ -12,7 +12,7 @@ tasks: branch: nautilus - print: "**** done installing nautilus" - ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(FS_ - \(MDS_ diff --git a/ceph/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados_mon_thrash.yaml b/ceph/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados_mon_thrash.yaml index 129d13866..08706dfc1 100644 --- a/ceph/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados_mon_thrash.yaml +++ b/ceph/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados_mon_thrash.yaml @@ -3,7 +3,7 @@ meta: librados C and C++ api tests overrides: ceph: - log-whitelist: + log-ignorelist: - reached quota - \(REQUEST_SLOW\) tasks: diff --git a/ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3-thrash/default.yaml b/ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3-thrash/default.yaml index 856254595..3290918ac 100644 --- a/ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3-thrash/default.yaml +++ b/ceph/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3-thrash/default.yaml @@ -4,7 +4,7 @@ meta: small chance to increase the number of pgs overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - wrongly marked me down - objects unfound and apparently lost diff --git a/ceph/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/start.yaml b/ceph/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/start.yaml index e2096debf..5d61bb7d9 100644 --- a/ceph/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/start.yaml +++ b/ceph/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/start.yaml @@ -8,7 +8,7 @@ overrides: mon_bind_msgr2: false mon_bind_addrvec: false fs: xfs - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) - \(MGR_DOWN\) diff --git a/ceph/qa/suites/upgrade/nautilus-x/stress-split/1-ceph-install/nautilus.yaml b/ceph/qa/suites/upgrade/nautilus-x/stress-split/1-ceph-install/nautilus.yaml index 59b666315..62469c090 100644 --- a/ceph/qa/suites/upgrade/nautilus-x/stress-split/1-ceph-install/nautilus.yaml +++ b/ceph/qa/suites/upgrade/nautilus-x/stress-split/1-ceph-install/nautilus.yaml @@ -14,7 +14,7 @@ tasks: bluestore_warn_on_legacy_statfs: false bluestore warn on no per pool omap: false mon pg warn min per osd: 0 - log-whitelist: + log-ignorelist: - evicting unresponsive client - exec: osd.0: diff --git 
a/ceph/qa/suites/upgrade/nautilus-x/stress-split/3-thrash/default.yaml b/ceph/qa/suites/upgrade/nautilus-x/stress-split/3-thrash/default.yaml index e0f317b0c..2be9c1f29 100644 --- a/ceph/qa/suites/upgrade/nautilus-x/stress-split/3-thrash/default.yaml +++ b/ceph/qa/suites/upgrade/nautilus-x/stress-split/3-thrash/default.yaml @@ -4,7 +4,7 @@ meta: small chance to increase the number of pgs overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - wrongly marked me down - objects unfound and apparently lost diff --git a/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-parallel/point-to-point-upgrade.yaml b/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-parallel/point-to-point-upgrade.yaml index 165076b01..ad97ea422 100644 --- a/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-parallel/point-to-point-upgrade.yaml +++ b/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-parallel/point-to-point-upgrade.yaml @@ -11,7 +11,7 @@ meta: run workload and upgrade-sequence in parallel overrides: ceph: - log-whitelist: + log-ignorelist: - reached quota - scrub - osd_map_max_advance diff --git a/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/0-cluster/start.yaml b/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/0-cluster/start.yaml index 5ebce3ced..1271edd8b 100644 --- a/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/0-cluster/start.yaml +++ b/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/0-cluster/start.yaml @@ -6,7 +6,7 @@ meta: overrides: ceph: fs: xfs - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) - \(MGR_DOWN\) diff --git a/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/3-thrash/default.yaml b/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/3-thrash/default.yaml index 49e6f84f8..c739d8fea 100644 --- a/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/3-thrash/default.yaml +++ b/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/3-thrash/default.yaml @@ -4,7 +4,7 @@ meta: small chance to increase the number of pgs overrides: ceph: - log-whitelist: + log-ignorelist: - but it is still running - wrongly marked me down - objects unfound and apparently lost diff --git a/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/thrashosds-health.yaml b/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/thrashosds-health.yaml index 914f6e25e..9903fa578 100644 --- a/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/thrashosds-health.yaml +++ b/ceph/qa/suites/upgrade/octopus-p2p/octopus-p2p-stress-split/thrashosds-health.yaml @@ -1,6 +1,6 @@ overrides: ceph: - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/tasks/ceph.py b/ceph/qa/tasks/ceph.py index 3654ffa27..848cdae3e 100644 --- a/ceph/qa/tasks/ceph.py +++ b/ceph/qa/tasks/ceph.py @@ -1085,13 +1085,13 @@ def cluster(ctx, config): return stdout or None if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]', - config['log_whitelist']) is not None: + config['log_ignorelist']) is not None: log.warning('Found errors (ERR|WRN|SEC) in cluster log') ctx.summary['success'] = False # use the most severe problem as the failure reason if 'failure_reason' not in ctx.summary: for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']: - match = first_in_ceph_log(pattern, config['log_whitelist']) + match = first_in_ceph_log(pattern, config['log_ignorelist']) if match is not None: ctx.summary['failure_reason'] = \ '"{match}" in cluster log'.format( @@ -1732,7 +1732,7 @@ def task(ctx, config): 
tasks: - ceph: - log-whitelist: ['foo.*bar', 'bad message'] + log-ignorelist: ['foo.*bar', 'bad message'] To run multiple ceph clusters, use multiple ceph tasks, and roles with a cluster name prefix, e.g. cluster1.client.0. Roles with no @@ -1804,7 +1804,7 @@ def task(ctx, config): mkfs_options=config.get('mkfs_options', None), mount_options=config.get('mount_options', None), skip_mgr_daemons=config.get('skip_mgr_daemons', False), - log_whitelist=config.get('log-whitelist', []), + log_ignorelist=config.get('log-ignorelist', []), cpu_profile=set(config.get('cpu_profile', []),), cluster=config['cluster'], mon_bind_msgr2=config.get('mon_bind_msgr2', True), diff --git a/ceph/qa/tasks/cephadm.py b/ceph/qa/tasks/cephadm.py index aaf0e68ff..431392745 100644 --- a/ceph/qa/tasks/cephadm.py +++ b/ceph/qa/tasks/cephadm.py @@ -212,13 +212,13 @@ def ceph_log(ctx, config): return None if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]', - config.get('log-whitelist')) is not None: + config.get('log-ignorelist')) is not None: log.warning('Found errors (ERR|WRN|SEC) in cluster log') ctx.summary['success'] = False # use the most severe problem as the failure reason if 'failure_reason' not in ctx.summary: for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']: - match = first_in_ceph_log(pattern, config['log-whitelist']) + match = first_in_ceph_log(pattern, config['log-ignorelist']) if match is not None: ctx.summary['failure_reason'] = \ '"{match}" in cluster log'.format( diff --git a/ceph/qa/tasks/cephfs/filesystem.py b/ceph/qa/tasks/cephfs/filesystem.py index bf337f84f..7f01b0ff4 100644 --- a/ceph/qa/tasks/cephfs/filesystem.py +++ b/ceph/qa/tasks/cephfs/filesystem.py @@ -510,7 +510,7 @@ class Filesystem(MDSCluster): while count > max_mds: targets = sorted(self.get_ranks(status=status), key=lambda r: r['rank'], reverse=True) target = targets[0] - log.info("deactivating rank %d" % target['rank']) + log.debug("deactivating rank %d" % target['rank']) self.deactivate(target['rank']) status = self.wait_for_daemons(skip_max_mds_check=True) count = len(list(self.get_ranks(status=status))) @@ -571,7 +571,7 @@ class Filesystem(MDSCluster): else: data_pool_name = self.data_pool_name - log.info("Creating filesystem '{0}'".format(self.name)) + log.debug("Creating filesystem '{0}'".format(self.name)) self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', self.metadata_pool_name, self.pgs_per_fs_pool.__str__()) @@ -581,7 +581,7 @@ class Filesystem(MDSCluster): '--allow-dangerous-metadata-overlay') else: if self.ec_profile and 'disabled' not in self.ec_profile: - log.info("EC profile is %s", self.ec_profile) + log.debug("EC profile is %s", self.ec_profile) cmd = ['osd', 'erasure-code-profile', 'set', data_pool_name] cmd.extend(self.ec_profile) self.mon_manager.raw_cluster_cmd(*cmd) @@ -782,7 +782,7 @@ class Filesystem(MDSCluster): else: raise - log.info("are_daemons_healthy: mds map: {0}".format(mds_map)) + log.debug("are_daemons_healthy: mds map: {0}".format(mds_map)) for mds_id, mds_status in mds_map['info'].items(): if mds_status['state'] not in ["up:active", "up:standby", "up:standby-replay"]: @@ -791,13 +791,13 @@ class Filesystem(MDSCluster): elif mds_status['state'] == 'up:active': active_count += 1 - log.info("are_daemons_healthy: {0}/{1}".format( + log.debug("are_daemons_healthy: {0}/{1}".format( active_count, mds_map['max_mds'] )) if not skip_max_mds_check: if active_count > mds_map['max_mds']: - log.info("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map)) + 
log.debug("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map)) return False elif active_count == mds_map['max_mds']: # The MDSMap says these guys are active, but let's check they really are @@ -821,7 +821,7 @@ class Filesystem(MDSCluster): else: return False else: - log.info("are_daemons_healthy: skipping max_mds check") + log.debug("are_daemons_healthy: skipping max_mds check") return True def get_daemon_names(self, state=None, status=None): @@ -932,7 +932,7 @@ class Filesystem(MDSCluster): elapsed += 1 if elapsed > timeout: - log.info("status = {0}".format(status)) + log.debug("status = {0}".format(status)) raise RuntimeError("Timed out waiting for MDS daemons to become healthy") status = self.status() @@ -1014,7 +1014,7 @@ class Filesystem(MDSCluster): journal_header_dump = self.get_metadata_object('Journaler::Header', journal_header_object) version = journal_header_dump['journal_header']['stream_format'] - log.info("Read journal version {0}".format(version)) + log.debug("Read journal version {0}".format(version)) return version @@ -1079,11 +1079,11 @@ class Filesystem(MDSCluster): try: mds_info = status.get_rank(self.id, rank) current_state = mds_info['state'] if mds_info else None - log.info("Looked up MDS state for mds.{0}: {1}".format(rank, current_state)) + log.debug("Looked up MDS state for mds.{0}: {1}".format(rank, current_state)) except: mdsmap = self.get_mds_map(status=status) if rank in mdsmap['failed']: - log.info("Waiting for rank {0} to come back.".format(rank)) + log.debug("Waiting for rank {0} to come back.".format(rank)) current_state = None else: raise @@ -1091,7 +1091,7 @@ class Filesystem(MDSCluster): # mds_info is None if no daemon with this ID exists in the map mds_info = status.get_mds(mds_id) current_state = mds_info['state'] if mds_info else None - log.info("Looked up MDS state for {0}: {1}".format(mds_id, current_state)) + log.debug("Looked up MDS state for {0}: {1}".format(mds_id, current_state)) else: # In general, look for a single MDS states = [m['state'] for m in status.get_ranks(self.id)] @@ -1101,11 +1101,11 @@ class Filesystem(MDSCluster): current_state = reject else: current_state = None - log.info("mapped states {0} to {1}".format(states, current_state)) + log.debug("mapped states {0} to {1}".format(states, current_state)) elapsed = time.time() - started_at if current_state == goal_state: - log.info("reached state '{0}' in {1}s".format(current_state, elapsed)) + log.debug("reached state '{0}' in {1}s".format(current_state, elapsed)) return elapsed elif reject is not None and current_state == reject: raise RuntimeError("MDS in reject state {0}".format(current_state)) @@ -1237,12 +1237,12 @@ class Filesystem(MDSCluster): missing = set(want_objects) - set(exist_objects) if missing: - log.info("Objects missing (ino {0}, size {1}): {2}".format( + log.debug("Objects missing (ino {0}, size {1}): {2}".format( ino, size, missing )) return False else: - log.info("All objects for ino {0} size {1} found".format(ino, size)) + log.debug("All objects for ino {0} size {1} found".format(ino, size)) return True def data_objects_absent(self, ino, size): @@ -1250,12 +1250,12 @@ class Filesystem(MDSCluster): present = set(want_objects) & set(exist_objects) if present: - log.info("Objects not absent (ino {0}, size {1}): {2}".format( + log.debug("Objects not absent (ino {0}, size {1}): {2}".format( ino, size, present )) return False else: - log.info("All objects for ino {0} size {1} are absent".format(ino, size)) + log.debug("All objects 
for ino {0} size {1} are absent".format(ino, size)) return True def dirfrag_exists(self, ino, frag): @@ -1374,7 +1374,7 @@ class Filesystem(MDSCluster): t1 = datetime.datetime.now() r = self.tool_remote.sh(script=base_args + args, stdout=StringIO()).strip() duration = datetime.datetime.now() - t1 - log.info("Ran {0} in time {1}, result:\n{2}".format( + log.debug("Ran {0} in time {1}, result:\n{2}".format( base_args + args, duration, r )) return r diff --git a/ceph/qa/tasks/cephfs/mount.py b/ceph/qa/tasks/cephfs/mount.py index 7d9edda27..e4d6d659e 100644 --- a/ceph/qa/tasks/cephfs/mount.py +++ b/ceph/qa/tasks/cephfs/mount.py @@ -7,6 +7,8 @@ import time from six import StringIO from textwrap import dedent import os + +from teuthology.misc import sudo_write_file from teuthology.orchestra import run from teuthology.orchestra.run import CommandFailedError, ConnectionLostError, Raw from tasks.cephfs.filesystem import Filesystem @@ -173,6 +175,29 @@ class CephFSMount(object): if r.exitstatus != 0: raise RuntimeError("Expected file {0} not found".format(suffix)) + def write_file(self, path, data, perms=None): + """ + Write the given data at the given path and set the given perms to the + file on the path. + """ + if path.find(self.mountpoint) == -1: + path = os.path.join(self.mountpoint, path) + + sudo_write_file(self.client_remote, path, data) + + if perms: + self.run_shell(args=f'chmod {perms} {path}') + + def read_file(self, path): + """ + Return the data from the file on given path. + """ + if path.find(self.mountpoint) == -1: + path = os.path.join(self.mountpoint, path) + + return self.run_shell(args=['sudo', 'cat', path], omit_sudo=False).\ + stdout.getvalue().strip() + def create_destroy(self): assert(self.is_mounted()) diff --git a/ceph/qa/tasks/cephfs/test_volume_client.py b/ceph/qa/tasks/cephfs/test_volume_client.py index 3e6c7d63b..37ca67d15 100644 --- a/ceph/qa/tasks/cephfs/test_volume_client.py +++ b/ceph/qa/tasks/cephfs/test_volume_client.py @@ -745,10 +745,10 @@ vc.disconnect() # for different volumes, versioning details, etc. expected_auth_metadata = { "version": 2, - "compat_version": 1, + "compat_version": 6, "dirty": False, "tenant_id": "tenant1", - "volumes": { + "subvolumes": { "groupid/volumeid": { "dirty": False, "access_level": "rw" @@ -1100,6 +1100,441 @@ vc.disconnect() auth_id=guestclient["auth_id"], ))) + def test_update_old_style_auth_metadata_to_new_during_recover(self): + """ + From nautilus onwards 'volumes' created by ceph_volume_client were + renamed and used as CephFS subvolumes accessed via the ceph-mgr + interface. Hence it makes sense to store the subvolume data in + auth-metadata file with 'subvolumes' key instead of 'volumes' key. + This test validates the transparent update of 'volumes' key to + 'subvolumes' key in auth metadata file during recover. + """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "groupid" + volume_id = "volumeid" + + guestclient = { + "auth_id": "guest", + "tenant_id": "tenant", + } + + # Create a volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.create_volume(vp, 1024*1024*10) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + # Check that volume metadata file is created on volume creation. 
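For orientation, a minimal usage sketch of the write_file()/read_file() helpers added to CephFSMount above; the mount handle, file name and contents are illustrative and not taken from any test:

    mount = self.mounts[0]                                 # any mounted CephFSMount (assumed handle)
    mount.write_file('sketch.txt', 'hello', perms='644')   # relative paths are joined under the mountpoint; written via sudo
    assert mount.read_file('sketch.txt') == 'hello'        # read back with 'sudo cat', surrounding whitespace stripped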
+ vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id) + self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + + # Authorize 'guestclient' access to the volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient["auth_id"], + tenant_id=guestclient["tenant_id"] + ))) + + # Check that auth metadata file for auth ID 'guest' is created. + auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"]) + self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Replace 'subvolumes' to 'volumes', old style auth-metadata file + self.mounts[0].run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Verify that the auth metadata file stores the tenant ID that the + # auth ID belongs to, the auth ID's authorized access levels + # for different volumes, versioning details, etc. + expected_auth_metadata = { + "version": 2, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant", + "subvolumes": { + "groupid/volumeid": { + "dirty": False, + "access_level": "rw" + } + } + } + + # Induce partial auth update state by modifying the auth metadata file, + # and then run recovery procedure. This should also update 'volumes' key + # to 'subvolumes'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + auth_metadata = vc._auth_metadata_get("{auth_id}") + auth_metadata['dirty'] = True + vc._auth_metadata_set("{auth_id}", auth_metadata) + vc.recover() + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient["auth_id"], + ))) + + auth_metadata = self._volume_client_python(volumeclient_mount, dedent(""" + import json + auth_metadata = vc._auth_metadata_get("{auth_id}") + print(json.dumps(auth_metadata)) + """.format( + auth_id=guestclient["auth_id"], + ))) + auth_metadata = json.loads(auth_metadata) + + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # Check that auth metadata file is cleaned up on removing + # auth ID's access to volumes 'volumeid'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id, + guest_entity=guestclient["auth_id"] + ))) + self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Check that volume metadata file is cleaned up on volume deletion. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + + def test_update_old_style_auth_metadata_to_new_during_authorize(self): + """ + From nautilus onwards 'volumes' created by ceph_volume_client were + renamed and used as CephFS subvolumes accessed via the ceph-mgr + interface. Hence it makes sense to store the subvolume data in + auth-metadata file with 'subvolumes' key instead of 'volumes' key. + This test validates the transparent update of 'volumes' key to + 'subvolumes' key in auth metadata file during authorize. 
+ """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "groupid" + volume_id1 = "volumeid1" + volume_id2 = "volumeid2" + + auth_id = "guest" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # Create a volume volumeid1. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 10*1024*1024) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id1, + ))) + + # Create a volume volumeid2. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 10*1024*1024) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id2, + ))) + + # Check that volume metadata file is created on volume creation. + vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id1) + self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + vol_metadata_filename2 = "_{0}:{1}.meta".format(group_id, volume_id2) + self.assertIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) + + # Authorize 'guestclient_1', using auth ID 'guest' and belonging to + # 'tenant1', with 'rw' access to the volume 'volumeid1'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id1, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"] + ))) + + # Check that auth metadata file for auth ID 'guest', is + # created on authorizing 'guest' access to the volume. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Replace 'subvolumes' to 'volumes', old style auth-metadata file + self.mounts[0].run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Authorize 'guestclient_1', using auth ID 'guest' and belonging to + # 'tenant1', with 'rw' access to the volume 'volumeid2'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id2, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"] + ))) + + # Verify that the auth metadata file stores the tenant ID that the + # auth ID belongs to, the auth ID's authorized access levels + # for different volumes, versioning details, etc. 
+ expected_auth_metadata = { + "version": 2, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant1", + "subvolumes": { + "groupid/volumeid1": { + "dirty": False, + "access_level": "rw" + }, + "groupid/volumeid2": { + "dirty": False, + "access_level": "rw" + } + } + } + + auth_metadata = self._volume_client_python(volumeclient_mount, dedent(""" + import json + auth_metadata = vc._auth_metadata_get("{auth_id}") + print(json.dumps(auth_metadata)) + """.format( + auth_id=guestclient_1["auth_id"], + ))) + auth_metadata = json.loads(auth_metadata) + + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # Check that auth metadata file is cleaned up on removing + # auth ID's access to volumes 'volumeid1' and 'volumeid2'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id1, + guest_entity=guestclient_1["auth_id"] + ))) + + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id2, + guest_entity=guestclient_1["auth_id"] + ))) + self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Check that volume metadata file is cleaned up on volume deletion. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id1, + ))) + self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + + # Check that volume metadata file is cleaned up on volume deletion. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id2, + ))) + self.assertNotIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) + + def test_update_old_style_auth_metadata_to_new_during_deauthorize(self): + """ + From nautilus onwards 'volumes' created by ceph_volume_client were + renamed and used as CephFS subvolumes accessed via the ceph-mgr + interface. Hence it makes sense to store the subvolume data in + auth-metadata file with 'subvolumes' key instead of 'volumes' key. + This test validates the transparent update of 'volumes' key to + 'subvolumes' key in auth metadata file during de-authorize. + """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "groupid" + volume_id1 = "volumeid1" + volume_id2 = "volumeid2" + + auth_id = "guest" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # Create a volume volumeid1. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 10*1024*1024) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id1, + ))) + + # Create a volume volumeid2. 
+ self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 10*1024*1024) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id2, + ))) + + # Check that volume metadata file is created on volume creation. + vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id1) + self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + vol_metadata_filename2 = "_{0}:{1}.meta".format(group_id, volume_id2) + self.assertIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) + + # Authorize 'guestclient_1', using auth ID 'guest' and belonging to + # 'tenant1', with 'rw' access to the volume 'volumeid1'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id1, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"] + ))) + + # Authorize 'guestclient_1', using auth ID 'guest' and belonging to + # 'tenant1', with 'rw' access to the volume 'volumeid2'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id2, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"] + ))) + + # Check that auth metadata file for auth ID 'guest', is + # created on authorizing 'guest' access to the volume. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Replace 'subvolumes' to 'volumes', old style auth-metadata file + self.mounts[0].run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Deauthorize 'guestclient_1' to access 'volumeid2'. This should update + # 'volumes' key to 'subvolumes' + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id2, + guest_entity=guestclient_1["auth_id"], + ))) + + # Verify that the auth metadata file stores the tenant ID that the + # auth ID belongs to, the auth ID's authorized access levels + # for different volumes, versioning details, etc. 
+ expected_auth_metadata = { + "version": 2, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant1", + "subvolumes": { + "groupid/volumeid1": { + "dirty": False, + "access_level": "rw" + } + } + } + + auth_metadata = self._volume_client_python(volumeclient_mount, dedent(""" + import json + auth_metadata = vc._auth_metadata_get("{auth_id}") + print(json.dumps(auth_metadata)) + """.format( + auth_id=guestclient_1["auth_id"], + ))) + auth_metadata = json.loads(auth_metadata) + + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # Check that auth metadata file is cleaned up on removing + # auth ID's access to volumes 'volumeid1' and 'volumeid2' + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id1, + guest_entity=guestclient_1["auth_id"] + ))) + self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Check that volume metadata file is cleaned up on 'volumeid1' deletion. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id1, + ))) + self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + + # Check that volume metadata file is cleaned up on 'volumeid2' deletion. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id2, + ))) + self.assertNotIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) + def test_put_object(self): vc_mount = self.mounts[1] vc_mount.umount_wait() diff --git a/ceph/qa/tasks/cephfs/test_volumes.py b/ceph/qa/tasks/cephfs/test_volumes.py index ed78775b6..735a59c0b 100644 --- a/ceph/qa/tasks/cephfs/test_volumes.py +++ b/ceph/qa/tasks/cephfs/test_volumes.py @@ -25,7 +25,7 @@ class TestVolumes(CephFSTestCase): TEST_FILE_NAME_PREFIX="subvolume_file" # for filling subvolume with data - CLIENTS_REQUIRED = 1 + CLIENTS_REQUIRED = 2 MDSS_REQUIRED = 2 # io defaults @@ -346,6 +346,36 @@ class TestVolumes(CephFSTestCase): else: self.mount_a.run_shell(['rmdir', trashpath]) + def _configure_guest_auth(self, guest_mount, authid, key): + """ + Set up auth credentials for a guest client. + """ + # Create keyring file for the guest client. + keyring_txt = dedent(""" + [client.{authid}] + key = {key} + + """.format(authid=authid,key=key)) + + guest_mount.client_id = authid + guest_mount.client_remote.write_file(guest_mount.get_keyring_path(), + keyring_txt, sudo=True) + # Add a guest client section to the ceph config file. 
+ self.config_set("client.{0}".format(authid), "debug client", 20) + self.config_set("client.{0}".format(authid), "debug objecter", 20) + self.set_conf("client.{0}".format(authid), + "keyring", guest_mount.get_keyring_path()) + + def _auth_metadata_get(self, filedata): + """ + Return a deserialized JSON object, or None + """ + try: + data = json.loads(filedata) + except json.decoder.JSONDecodeError: + data = None + return data + def setUp(self): super(TestVolumes, self).setUp() self.volname = None @@ -366,7 +396,8 @@ class TestVolumes(CephFSTestCase): def test_connection_expiration(self): # unmount any cephfs mounts - self.mount_a.umount_wait() + for i in range(0, self.CLIENTS_REQUIRED): + self.mounts[i].umount_wait() sessions = self._session_list() self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted @@ -402,7 +433,7 @@ class TestVolumes(CephFSTestCase): volumes = [volume['name'] for volume in vls] #create new volumes and add it to the existing list of volumes - volumenames = self._generate_random_volume_name(3) + volumenames = self._generate_random_volume_name(2) for volumename in volumenames: self._fs_cmd("volume", "create", volumename) volumes.extend(volumenames) @@ -1093,6 +1124,122 @@ class TestVolumes(CephFSTestCase): # verify trash dir is clean self._wait_for_trash_empty() + ### authorize operations + + def test_authorize_deauthorize_legacy_subvolume(self): + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + authid = "alice" + + guest_mount = self.mount_b + guest_mount.umount_wait() + + # emulate a old-fashioned subvolume in a custom group + createpath = os.path.join(".", "volumes", group, subvolume) + self.mount_a.run_shell(['mkdir', '-p', createpath]) + + # add required xattrs to subvolume + default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") + self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool) + + mount_path = os.path.join("/", "volumes", group, subvolume) + + # authorize guest authID read-write access to subvolume + key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid, + "--group_name", group, "--tenant_id", "tenant_id") + + # guest authID should exist + existing_ids = [a['entity'] for a in self.auth_list()] + self.assertIn("client.{0}".format(authid), existing_ids) + + # configure credentials for guest client + self._configure_guest_auth(guest_mount, authid, key) + + # mount the subvolume, and write to it + guest_mount.mount(mount_path=mount_path) + guest_mount.write_n_mb("data.bin", 1) + + # authorize guest authID read access to subvolume + key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid, + "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r") + + # guest client sees the change in access level to read only after a + # remount of the subvolume. 
+ guest_mount.umount_wait() + guest_mount.mount(mount_path=mount_path) + + # read existing content of the subvolume + self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"]) + # cannot write into read-only subvolume + with self.assertRaises(CommandFailedError): + guest_mount.write_n_mb("rogue.bin", 1) + + # cleanup + guest_mount.umount_wait() + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid, + "--group_name", group) + # guest authID should no longer exist + existing_ids = [a['entity'] for a in self.auth_list()] + self.assertNotIn("client.{0}".format(authid), existing_ids) + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_authorize_deauthorize_subvolume(self): + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + authid = "alice" + + guest_mount = self.mount_b + guest_mount.umount_wait() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, + "--group_name", group).rstrip() + + # authorize guest authID read-write access to subvolume + key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid, + "--group_name", group, "--tenant_id", "tenant_id") + + # guest authID should exist + existing_ids = [a['entity'] for a in self.auth_list()] + self.assertIn("client.{0}".format(authid), existing_ids) + + # configure credentials for guest client + self._configure_guest_auth(guest_mount, authid, key) + + # mount the subvolume, and write to it + guest_mount.mount(mount_path=mount_path) + guest_mount.write_n_mb("data.bin", 1) + + # authorize guest authID read access to subvolume + key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid, + "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r") + + # guest client sees the change in access level to read only after a + # remount of the subvolume. + guest_mount.umount_wait() + guest_mount.mount(mount_path=mount_path) + + # read existing content of the subvolume + self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"]) + # cannot write into read-only subvolume + with self.assertRaises(CommandFailedError): + guest_mount.write_n_mb("rogue.bin", 1) + + # cleanup + guest_mount.umount_wait() + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid, + "--group_name", group) + # guest authID should no longer exist + existing_ids = [a['entity'] for a in self.auth_list()] + self.assertNotIn("client.{0}".format(authid), existing_ids) + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + def test_subvolume_resize_infinite_size_future_writes(self): """ That a subvolume can be resized to an infinite size and the future writes succeed. @@ -1134,6 +1281,556 @@ class TestVolumes(CephFSTestCase): # verify trash dir is clean self._wait_for_trash_empty() + def test_multitenant_subvolumes(self): + """ + That subvolume access can be restricted to a tenant. + + That metadata used to enforce tenant isolation of + subvolumes is stored as a two-way mapping between auth + IDs and subvolumes that they're authorized to access. 
+ """ + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + guest_mount = self.mount_b + + # Guest clients belonging to different tenants, but using the same + # auth ID. + auth_id = "alice" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + guestclient_2 = { + "auth_id": auth_id, + "tenant_id": "tenant2", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Check that subvolume metadata file is created on subvolume creation. + subvol_metadata_filename = "_{0}:{1}.meta".format(group, subvolume) + self.assertIn(subvol_metadata_filename, guest_mount.ls("volumes")) + + # Authorize 'guestclient_1', using auth ID 'alice' and belonging to + # 'tenant1', with 'rw' access to the volume. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Check that auth metadata file for auth ID 'alice', is + # created on authorizing 'alice' access to the subvolume. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, guest_mount.ls("volumes")) + + # Verify that the auth metadata file stores the tenant ID that the + # auth ID belongs to, the auth ID's authorized access levels + # for different subvolumes, versioning details, etc. + expected_auth_metadata = { + "version": 5, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant1", + "subvolumes": { + "{0}/{1}".format(group,subvolume): { + "dirty": False, + "access_level": "rw" + } + } + } + + auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename))) + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # Verify that the subvolume metadata file stores info about auth IDs + # and their access levels to the subvolume, versioning details, etc. + expected_subvol_metadata = { + "version": 1, + "compat_version": 1, + "auths": { + "alice": { + "dirty": False, + "access_level": "rw" + } + } + } + subvol_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(subvol_metadata_filename))) + + self.assertGreaterEqual(subvol_metadata["version"], expected_subvol_metadata["version"]) + del expected_subvol_metadata["version"] + del subvol_metadata["version"] + self.assertEqual(expected_subvol_metadata, subvol_metadata) + + # Cannot authorize 'guestclient_2' to access the volume. + # It uses auth ID 'alice', which has already been used by a + # 'guestclient_1' belonging to an another tenant for accessing + # the volume. + + try: + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_2["auth_id"], + "--group_name", group, "--tenant_id", guestclient_2["tenant_id"]) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EPERM, + "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id") + else: + self.fail("expected the 'fs subvolume authorize' command to fail") + + # Check that auth metadata file is cleaned up on removing + # auth ID's only access to a volume. 
+ + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, + "--group_name", group) + self.assertNotIn(auth_metadata_filename, guest_mount.ls("volumes")) + + # Check that subvolume metadata file is cleaned up on subvolume deletion. + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self.assertNotIn(subvol_metadata_filename, guest_mount.ls("volumes")) + + # clean up + guest_mount.umount_wait() + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_authorized_list(self): + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + authid1 = "alice" + authid2 = "guest1" + authid3 = "guest2" + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # authorize alice authID read-write access to subvolume + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid1, + "--group_name", group) + # authorize guest1 authID read-write access to subvolume + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid2, + "--group_name", group) + # authorize guest2 authID read access to subvolume + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid3, + "--group_name", group, "--access_level", "r") + + # list authorized-ids of the subvolume + expected_auth_list = [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}] + auth_list = json.loads(self._fs_cmd('subvolume', 'authorized_list', self.volname, subvolume, "--group_name", group)) + self.assertCountEqual(expected_auth_list, auth_list) + + # cleanup + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid1, + "--group_name", group) + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid2, + "--group_name", group) + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid3, + "--group_name", group) + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_authorize_auth_id_not_created_by_mgr_volumes(self): + """ + If the auth_id already exists and is not created by mgr plugin, + it's not allowed to authorize the auth-id by default. 
+ """ + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # Create auth_id + self.fs.mon_manager.raw_cluster_cmd( + "auth", "get-or-create", "client.guest1", + "mds", "allow *", + "osd", "allow rw", + "mon", "allow *" + ) + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + try: + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EPERM, + "Invalid error code returned on authorize of subvolume for auth_id created out of band") + else: + self.fail("expected the 'fs subvolume authorize' command to fail") + + # clean up + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_authorize_allow_existing_id_option(self): + """ + If the auth_id already exists and is not created by mgr volumes, + it's not allowed to authorize the auth-id by default but is + allowed with option allow_existing_id. + """ + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # Create auth_id + self.fs.mon_manager.raw_cluster_cmd( + "auth", "get-or-create", "client.guest1", + "mds", "allow *", + "osd", "allow rw", + "mon", "allow *" + ) + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Cannot authorize 'guestclient_1' to access the volume by default, + # which already exists and not created by mgr volumes but is allowed + # with option 'allow_existing_id'. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"], "--allow-existing-id") + + # clean up + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, + "--group_name", group) + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_deauthorize_auth_id_after_out_of_band_update(self): + """ + If the auth_id authorized by mgr/volumes plugin is updated + out of band, the auth_id should not be deleted after a + deauthorize. It should only remove caps associated with it. + """ + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Authorize 'guestclient_1' to access the subvolume. 
+ self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + subvol_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, + "--group_name", group).rstrip() + + # Update caps for guestclient_1 out of band + out = self.fs.mon_manager.raw_cluster_cmd( + "auth", "caps", "client.guest1", + "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path), + "osd", "allow rw pool=cephfs_data", + "mon", "allow r", + "mgr", "allow *" + ) + + # Deauthorize guestclient_1 + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group) + + # Validate the caps of guestclient_1 after deauthorize. It should not have deleted + # guestclient_1. The mgr and mds caps should be present which was updated out of band. + out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty")) + + self.assertEqual("client.guest1", out[0]["entity"]) + self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"]) + self.assertEqual("allow *", out[0]["caps"]["mgr"]) + self.assertNotIn("osd", out[0]["caps"]) + + # clean up + out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_recover_auth_metadata_during_authorize(self): + """ + That auth metadata manager can recover from partial auth updates using + metadata files, which store auth info and its update status info. This + test validates the recovery during authorize. + """ + + guest_mount = self.mount_b + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Authorize 'guestclient_1' to access the subvolume. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Check that auth metadata file for auth ID 'guest1', is + # created on authorizing 'guest1' access to the subvolume. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, guest_mount.ls("volumes")) + expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename))) + + # Induce partial auth update state by modifying the auth metadata file, + # and then run authorize again. + guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Authorize 'guestclient_1' to access the subvolume. 
+ self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename))) + self.assertEqual(auth_metadata_content, expected_auth_metadata_content) + + # clean up + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group) + guest_mount.umount_wait() + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_recover_auth_metadata_during_deauthorize(self): + """ + That auth metadata manager can recover from partial auth updates using + metadata files, which store auth info and its update status info. This + test validates the recovery during deauthorize. + """ + + guest_mount = self.mount_b + + subvolume1, subvolume2 = self._generate_random_subvolume_name(2) + group = self._generate_random_group_name() + + guestclient_1 = { + "auth_id": "guest1", + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolumes in group + self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group) + + # Authorize 'guestclient_1' to access the subvolume1. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Check that auth metadata file for auth ID 'guest1', is + # created on authorizing 'guest1' access to the subvolume1. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, guest_mount.ls("volumes")) + expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename))) + + # Authorize 'guestclient_1' to access the subvolume2. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Induce partial auth update state by modifying the auth metadata file, + # and then run de-authorize. + guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Deauthorize 'guestclient_1' to access the subvolume2. + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"], + "--group_name", group) + + auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename))) + self.assertEqual(auth_metadata_content, expected_auth_metadata_content) + + # clean up + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group) + guest_mount.umount_wait() + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_update_old_style_auth_metadata_to_new_during_authorize(self): + """ + CephVolumeClient stores the subvolume data in auth metadata file with + 'volumes' key as there was no subvolume namespace. 
It doesn't makes sense + with mgr/volumes. This test validates the transparent update of 'volumes' + key to 'subvolumes' key in auth metadata file during authorize. + """ + + guest_mount = self.mount_b + + subvolume1, subvolume2 = self._generate_random_subvolume_name(2) + group = self._generate_random_group_name() + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolumes in group + self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group) + + # Authorize 'guestclient_1' to access the subvolume1. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Check that auth metadata file for auth ID 'guest1', is + # created on authorizing 'guest1' access to the subvolume1. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, guest_mount.ls("volumes")) + + # Replace 'subvolumes' to 'volumes', old style auth-metadata file + guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes' + self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + expected_auth_metadata = { + "version": 5, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant1", + "subvolumes": { + "{0}/{1}".format(group,subvolume1): { + "dirty": False, + "access_level": "rw" + }, + "{0}/{1}".format(group,subvolume2): { + "dirty": False, + "access_level": "rw" + } + } + } + + auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename))) + + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # clean up + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group) + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group) + guest_mount.umount_wait() + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_update_old_style_auth_metadata_to_new_during_deauthorize(self): + """ + CephVolumeClient stores the subvolume data in auth metadata file with + 'volumes' key as there was no subvolume namespace. It doesn't makes sense + with mgr/volumes. This test validates the transparent update of 'volumes' + key to 'subvolumes' key in auth metadata file during deauthorize. 
+ """ + + guest_mount = self.mount_b + + subvolume1, subvolume2 = self._generate_random_subvolume_name(2) + group = self._generate_random_group_name() + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolumes in group + self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group) + + # Authorize 'guestclient_1' to access the subvolume1. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Authorize 'guestclient_1' to access the subvolume2. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Check that auth metadata file for auth ID 'guest1', is created. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, guest_mount.ls("volumes")) + + # Replace 'subvolumes' to 'volumes', old style auth-metadata file + guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Deauthorize 'guestclient_1' to access the subvolume2. This should update 'volumes' to subvolumes' + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group) + + expected_auth_metadata = { + "version": 5, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant1", + "subvolumes": { + "{0}/{1}".format(group,subvolume1): { + "dirty": False, + "access_level": "rw" + } + } + } + + auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename))) + + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # clean up + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group) + guest_mount.umount_wait() + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + def test_subvolume_info(self): # tests the 'fs subvolume info' command @@ -1184,6 +1881,75 @@ class TestVolumes(CephFSTestCase): # verify trash dir is clean self._wait_for_trash_empty() + def test_subvolume_evict_client(self): + """ + That a subvolume client can be evicted based on the auth ID + """ + + subvolumes = self._generate_random_subvolume_name(2) + group = self._generate_random_group_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # mounts[0] and mounts[1] would be used as guests to mount the volumes/shares. + for i in range(0, 2): + self.mounts[i].umount_wait() + guest_mounts = (self.mounts[0], self.mounts[1]) + auth_id = "guest" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # Create two subvolumes. Authorize 'guest' auth ID to mount the two + # subvolumes. Mount the two subvolumes. Write data to the volumes. + for i in range(2): + # Create subvolume. 
+ self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group) + + # authorize guest authID read-write access to subvolume + key = self._fs_cmd("subvolume", "authorize", self.volname, subvolumes[i], guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolumes[i], + "--group_name", group).rstrip() + # configure credentials for guest client + self._configure_guest_auth(guest_mounts[i], auth_id, key) + + # mount the subvolume, and write to it + guest_mounts[i].mount(mount_path=mount_path) + guest_mounts[i].write_n_mb("data.bin", 1) + + # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted + # one volume. + self._fs_cmd("subvolume", "evict", self.volname, subvolumes[0], auth_id, "--group_name", group) + + # Evicted guest client, guest_mounts[0], should not be able to do + # anymore metadata ops. It should start failing all operations + # when it sees that its own address is in the blocklist. + try: + guest_mounts[0].write_n_mb("rogue.bin", 1) + except CommandFailedError: + pass + else: + raise RuntimeError("post-eviction write should have failed!") + + # The blocklisted guest client should now be unmountable + guest_mounts[0].umount_wait() + + # Guest client, guest_mounts[1], using the same auth ID 'guest', but + # has mounted the other volume, should be able to use its volume + # unaffected. + guest_mounts[1].write_n_mb("data.bin.1", 1) + + # Cleanup. + guest_mounts[1].umount_wait() + for i in range(2): + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolumes[i], auth_id, "--group_name", group) + self._fs_cmd("subvolume", "rm", self.volname, subvolumes[i], "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + def test_clone_subvolume_info(self): # tests the 'fs subvolume info' command for a clone @@ -1716,6 +2482,191 @@ class TestVolumes(CephFSTestCase): # remove group self._fs_cmd("subvolumegroup", "rm", self.volname, group) + def test_subvolume_inherited_snapshot_ls(self): + # tests the scenario where 'fs subvolume snapshot ls' command + # should not list inherited snapshots created as part of snapshot + # at ancestral level + + snapshots = [] + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + snap_count = 3 + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # create subvolume snapshots + snapshots = self._generate_random_snapshot_name(snap_count) + for snapshot in snapshots: + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) + + # Create snapshot at ancestral level + ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1") + ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2") + self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1, ancestral_snappath2]) + + subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group)) + self.assertEqual(len(subvolsnapshotls), snap_count) + + # remove ancestral snapshots + self.mount_a.run_shell(['rmdir', ancestral_snappath1, ancestral_snappath2]) + + # remove snapshot + for snapshot in snapshots: + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) + + # remove subvolume + 
self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_inherited_snapshot_info(self): + """ + tests the scenario where 'fs subvolume snapshot info' command + should fail for inherited snapshots created as part of snapshot + at ancestral level + """ + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Create snapshot at ancestral level + ancestral_snap_name = "ancestral_snap_1" + ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name) + self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1]) + + # Validate existence of inherited snapshot + group_path = os.path.join(".", "volumes", group) + inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip()) + inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir) + inherited_snappath = os.path.join(".", "volumes", group, subvolume,".snap", inherited_snap) + self.mount_a.run_shell(['ls', inherited_snappath]) + + # snapshot info on inherited snapshot + try: + self._get_subvolume_snapshot_info(self.volname, subvolume, inherited_snap, group) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on snapshot info of inherited snapshot") + else: + self.fail("expected snapshot info of inherited snapshot to fail") + + # remove ancestral snapshots + self.mount_a.run_shell(['rmdir', ancestral_snappath1]) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_inherited_snapshot_rm(self): + """ + tests the scenario where 'fs subvolume snapshot rm' command + should fail for inherited snapshots created as part of snapshot + at ancestral level + """ + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Create snapshot at ancestral level + ancestral_snap_name = "ancestral_snap_1" + ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name) + self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1]) + + # Validate existence of inherited snap + group_path = os.path.join(".", "volumes", group) + inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip()) + inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir) + inherited_snappath = os.path.join(".", "volumes", group, subvolume,".snap", inherited_snap) + self.mount_a.run_shell(['ls', inherited_snappath]) + + # inherited snapshot should not be deletable + try: + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, inherited_snap, "--group_name", group) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code 
when removing inherited snapshot") + else: + self.fail("expected removing inheirted snapshot to fail") + + # remove ancestral snapshots + self.mount_a.run_shell(['rmdir', ancestral_snappath1]) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_subvolumegroup_snapshot_name_conflict(self): + """ + tests the scenario where creation of subvolume snapshot name + with same name as it's subvolumegroup snapshot name. This should + fail. + """ + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + group_snapshot = self._generate_random_snapshot_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Create subvolumegroup snapshot + group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot) + self.mount_a.run_shell(['mkdir', '-p', group_snapshot_path]) + + # Validate existence of subvolumegroup snapshot + self.mount_a.run_shell(['ls', group_snapshot_path]) + + # Creation of subvolume snapshot with it's subvolumegroup snapshot name should fail + try: + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, group_snapshot, "--group_name", group) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot") + else: + self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail") + + # remove subvolumegroup snapshot + self.mount_a.run_shell(['rmdir', group_snapshot_path]) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + @unittest.skip("skipping subvolumegroup snapshot tests") def test_subvolume_group_snapshot_create_and_rm(self): subvolume = self._generate_random_subvolume_name() @@ -1864,7 +2815,8 @@ class TestVolumes(CephFSTestCase): def test_mgr_eviction(self): # unmount any cephfs mounts - self.mount_a.umount_wait() + for i in range(0, self.CLIENTS_REQUIRED): + self.mounts[i].umount_wait() sessions = self._session_list() self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted diff --git a/ceph/qa/tasks/mgr/dashboard/helper.py b/ceph/qa/tasks/mgr/dashboard/helper.py index 64cbba9e3..4c5cd8d72 100644 --- a/ceph/qa/tasks/mgr/dashboard/helper.py +++ b/ceph/qa/tasks/mgr/dashboard/helper.py @@ -4,8 +4,12 @@ from __future__ import absolute_import import json import logging -from collections import namedtuple +import random +import re +import string import time +from collections import namedtuple +from typing import List import requests import six @@ -65,14 +69,13 @@ class DashboardTestCase(MgrTestCase): raise ex user_create_args = [ - 'dashboard', 'ac-user-create', username, password + 'dashboard', 'ac-user-create', username ] if force_password: user_create_args.append('--force-password') if cmd_args: user_create_args.extend(cmd_args) - cls._ceph_cmd(user_create_args) - + cls._ceph_cmd_with_secret(user_create_args, password) if roles: set_roles_args = ['dashboard', 'ac-user-set-roles', username] for idx, role in 
enumerate(roles): @@ -200,7 +203,7 @@ class DashboardTestCase(MgrTestCase): @classmethod def _request(cls, url, method, data=None, params=None, set_cookies=False): url = "{}{}".format(cls._base_uri, url) - log.info("Request %s to %s", method, url) + log.debug("Request %s to %s", method, url) headers = {} cookies = {} if cls._token: @@ -310,7 +313,7 @@ class DashboardTestCase(MgrTestCase): return None if cls._resp.status_code != 202: - log.info("task finished immediately") + log.debug("task finished immediately") return res cls._assertIn('name', res) @@ -322,8 +325,7 @@ class DashboardTestCase(MgrTestCase): res_task = None while retries > 0 and not res_task: retries -= 1 - log.info("task (%s, %s) is still executing", task_name, - task_metadata) + log.debug("task (%s, %s) is still executing", task_name, task_metadata) time.sleep(1) _res = cls._get('/api/task?name={}'.format(task_name)) cls._assertEq(cls._resp.status_code, 200) @@ -338,7 +340,7 @@ class DashboardTestCase(MgrTestCase): raise Exception("Waiting for task ({}, {}) to finish timed out. {}" .format(task_name, task_metadata, _res)) - log.info("task (%s, %s) finished", task_name, task_metadata) + log.debug("task (%s, %s) finished", task_name, task_metadata) if res_task['success']: if method == 'POST': cls._resp.status_code = 201 @@ -424,15 +426,31 @@ class DashboardTestCase(MgrTestCase): @classmethod def _ceph_cmd(cls, cmd): res = cls.mgr_cluster.mon_manager.raw_cluster_cmd(*cmd) - log.info("command result: %s", res) + log.debug("command result: %s", res) return res @classmethod def _ceph_cmd_result(cls, cmd): exitstatus = cls.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd) - log.info("command exit status: %d", exitstatus) + log.debug("command exit status: %d", exitstatus) return exitstatus + @classmethod + def _ceph_cmd_with_secret(cls, cmd: List[str], secret: str, return_exit_code: bool = False): + cmd.append('-i') + cmd.append('{}'.format(cls._ceph_create_tmp_file(secret))) + if return_exit_code: + return cls._ceph_cmd_result(cmd) + return cls._ceph_cmd(cmd) + + @classmethod + def _ceph_create_tmp_file(cls, content: str) -> str: + """Create a temporary file in the remote cluster""" + file_name = ''.join(random.choices(string.ascii_letters + string.digits, k=20)) + file_path = '/tmp/{}'.format(file_name) + cls._cmd(['sh', '-c', 'echo -n {} > {}'.format(content, file_path)]) + return file_path + def set_config_key(self, key, value): self._ceph_cmd(['config-key', 'set', key, value]) diff --git a/ceph/qa/tasks/mgr/dashboard/test_auth.py b/ceph/qa/tasks/mgr/dashboard/test_auth.py index e1c9b8e63..12dacf130 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_auth.py +++ b/ceph/qa/tasks/mgr/dashboard/test_auth.py @@ -5,6 +5,8 @@ from __future__ import absolute_import import time import jwt +from teuthology.orchestra.run import \ + CommandFailedError # pylint: disable=import-error from .helper import DashboardTestCase, JObj, JLeaf @@ -18,7 +20,7 @@ class AuthTest(DashboardTestCase): self.reset_session() def _validate_jwt_token(self, token, username, permissions): - payload = jwt.decode(token, verify=False) + payload = jwt.decode(token, options={'verify_signature': False}) self.assertIn('username', payload) self.assertEqual(payload['username'], username) @@ -29,6 +31,10 @@ class AuthTest(DashboardTestCase): self.assertIn('create', perms) self.assertIn('delete', perms) + def test_login_without_password(self): + with self.assertRaises(CommandFailedError): + self.create_user('admin2', '', ['administrator'], force_password=True) + def 
test_a_set_login_credentials(self): # test with Authorization header self.create_user('admin2', 'admin2', ['administrator']) @@ -94,29 +100,6 @@ class AuthTest(DashboardTestCase): "detail": "Invalid credentials" }) - def test_login_without_password(self): - # test with Authorization header - self.create_user('admin2', '', ['administrator']) - self._post("/api/auth", {'username': 'admin2', 'password': ''}) - self.assertStatus(400) - self.assertJsonBody({ - "component": "auth", - "code": "invalid_credentials", - "detail": "Invalid credentials" - }) - self.delete_user('admin2') - - # test with Cookies set - self.create_user('admin2', '', ['administrator']) - self._post("/api/auth", {'username': 'admin2', 'password': ''}, set_cookies=True) - self.assertStatus(400) - self.assertJsonBody({ - "component": "auth", - "code": "invalid_credentials", - "detail": "Invalid credentials" - }) - self.delete_user('admin2') - def test_lockout_user(self): # test with Authorization header self._ceph_cmd(['dashboard', 'set-account-lockout-attempts', '3']) @@ -288,8 +271,9 @@ class AuthTest(DashboardTestCase): self._get("/api/host") self.assertStatus(200) time.sleep(1) - self._ceph_cmd(['dashboard', 'ac-user-set-password', '--force-password', - 'user', 'user2']) + self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password', '--force-password', + 'user'], + 'user2') time.sleep(1) self._get("/api/host") self.assertStatus(401) @@ -312,8 +296,9 @@ class AuthTest(DashboardTestCase): self._get("/api/host", set_cookies=True) self.assertStatus(200) time.sleep(1) - self._ceph_cmd(['dashboard', 'ac-user-set-password', '--force-password', - 'user', 'user2']) + self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password', '--force-password', + 'user'], + 'user2') time.sleep(1) self._get("/api/host", set_cookies=True) self.assertStatus(401) diff --git a/ceph/qa/tasks/mgr/dashboard/test_ganesha.py b/ceph/qa/tasks/mgr/dashboard/test_ganesha.py index 8583f3d66..c1acda143 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_ganesha.py +++ b/ceph/qa/tasks/mgr/dashboard/test_ganesha.py @@ -40,8 +40,8 @@ class GaneshaTest(DashboardTestCase): 'user', 'create', '--uid', 'admin', '--display-name', 'admin', '--system', '--access-key', 'admin', '--secret', 'admin' ]) - cls._ceph_cmd(['dashboard', 'set-rgw-api-secret-key', 'admin']) - cls._ceph_cmd(['dashboard', 'set-rgw-api-access-key', 'admin']) + cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin') + cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin') @classmethod def tearDownClass(cls): diff --git a/ceph/qa/tasks/mgr/dashboard/test_requests.py b/ceph/qa/tasks/mgr/dashboard/test_requests.py index 0d9f8d9ba..eba81d673 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_requests.py +++ b/ceph/qa/tasks/mgr/dashboard/test_requests.py @@ -20,4 +20,8 @@ class RequestsTest(DashboardTestCase): self.assertNotIn('Content-Encoding', self._resp.headers) self.assertHeaders({ 'Content-Type': 'application/json', + 'server': 'Ceph-Dashboard', + 'Content-Security-Policy': "frame-ancestors 'self';", + 'X-Content-Type-Options': 'nosniff', + 'Strict-Transport-Security': 'max-age=63072000; includeSubDomains; preload' }) diff --git a/ceph/qa/tasks/mgr/dashboard/test_rgw.py b/ceph/qa/tasks/mgr/dashboard/test_rgw.py index 6f1acebec..9760276a3 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_rgw.py +++ b/ceph/qa/tasks/mgr/dashboard/test_rgw.py @@ -32,8 +32,8 @@ class RgwTestCase(DashboardTestCase): ]) # Update the dashboard configuration. 
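# Illustrative sketch, not part of the applied patch. The dashboard hunks in this import
# stop passing secrets (user passwords, RGW API keys) as positional CLI arguments and
# instead feed them to `ceph dashboard ... -i <file>` through a temporary file, via the
# new _ceph_cmd_with_secret()/_ceph_create_tmp_file() helpers added to helper.py above.
# A minimal standalone approximation of that pattern follows; `ceph_cmd` and `run_shell`
# are assumed stand-ins for the test-framework helpers, not names from the patch.
import random
import string

def create_tmp_secret_file(secret, run_shell):
    """Write `secret` to a randomly named file under /tmp and return its path."""
    name = ''.join(random.choices(string.ascii_letters + string.digits, k=20))
    path = '/tmp/{}'.format(name)
    # run_shell is assumed to execute a command on the test node, as the helpers do
    run_shell(['sh', '-c', 'echo -n {} > {}'.format(secret, path)])
    return path

def ceph_cmd_with_secret(ceph_cmd, cmd, secret, run_shell):
    """Run e.g. ['dashboard', 'set-rgw-api-secret-key'] with the secret read from a file,
    so the secret never appears in argv or in the command log."""
    return ceph_cmd(cmd + ['-i', create_tmp_secret_file(secret, run_shell)])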
cls._ceph_cmd(['dashboard', 'set-rgw-api-user-id', 'admin']) - cls._ceph_cmd(['dashboard', 'set-rgw-api-secret-key', 'admin']) - cls._ceph_cmd(['dashboard', 'set-rgw-api-access-key', 'admin']) + cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin') + cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin') # Create a test user? if cls.create_test_user: cls._radosgw_admin_cmd([ @@ -80,13 +80,13 @@ class RgwApiCredentialsTest(RgwTestCase): self._ceph_cmd(['mgr', 'module', 'enable', 'dashboard', '--force']) # Set the default credentials. self._ceph_cmd(['dashboard', 'set-rgw-api-user-id', '']) - self._ceph_cmd(['dashboard', 'set-rgw-api-secret-key', 'admin']) - self._ceph_cmd(['dashboard', 'set-rgw-api-access-key', 'admin']) + self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin') + self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin') super(RgwApiCredentialsTest, self).setUp() def test_no_access_secret_key(self): - self._ceph_cmd(['dashboard', 'set-rgw-api-secret-key', '']) - self._ceph_cmd(['dashboard', 'set-rgw-api-access-key', '']) + self._ceph_cmd(['dashboard', 'reset-rgw-api-secret-key']) + self._ceph_cmd(['dashboard', 'reset-rgw-api-access-key']) resp = self._get('/api/rgw/user') self.assertStatus(500) self.assertIn('detail', resp) diff --git a/ceph/qa/tasks/mgr/dashboard/test_user.py b/ceph/qa/tasks/mgr/dashboard/test_user.py index ea7beee6d..171a8f3ab 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_user.py +++ b/ceph/qa/tasks/mgr/dashboard/test_user.py @@ -286,39 +286,50 @@ class UserTest(DashboardTestCase): self.assertError(code='invalid_credentials', component='auth') def test_create_user_password_cli(self): - exitcode = self._ceph_cmd_result(['dashboard', 'ac-user-create', - 'test1', 'mypassword10#']) + exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-create', + 'test1'], + 'mypassword10#', + return_exit_code=True) self.assertEqual(exitcode, 0) self.delete_user('test1') @DashboardTestCase.RunAs('test2', 'foo_bar_10#', force_password=False, login=False) def test_change_user_password_cli(self): - exitcode = self._ceph_cmd_result(['dashboard', 'ac-user-set-password', - 'test2', 'foo_new-password01#']) + exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password', + 'test2'], + 'foo_new-password01#', + return_exit_code=True) self.assertEqual(exitcode, 0) def test_create_user_password_force_cli(self): - exitcode = self._ceph_cmd_result(['dashboard', 'ac-user-create', - '--force-password', 'test11', - 'bar']) + exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-create', + '--force-password', 'test11'], + 'bar', + return_exit_code=True) self.assertEqual(exitcode, 0) self.delete_user('test11') @DashboardTestCase.RunAs('test22', 'foo_bar_10#', force_password=False, login=False) def test_change_user_password_force_cli(self): - exitcode = self._ceph_cmd_result(['dashboard', 'ac-user-set-password', - '--force-password', 'test22', - 'bar']) + exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password', + '--force-password', 'test22'], + 'bar', + return_exit_code=True) self.assertEqual(exitcode, 0) def test_create_user_password_cli_fail(self): - exitcode = self._ceph_cmd_result(['dashboard', 'ac-user-create', 'test3', 'foo']) + exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-create', + 'test3'], + 'foo', + return_exit_code=True) self.assertNotEqual(exitcode, 0) @DashboardTestCase.RunAs('test4', 'x1z_tst+_10#', force_password=False, login=False) def 
test_change_user_password_cli_fail(self): - exitcode = self._ceph_cmd_result(['dashboard', 'ac-user-set-password', - 'test4', 'bar']) + exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password', + 'test4'], + 'bar', + return_exit_code=True) self.assertNotEqual(exitcode, 0) def test_create_user_with_pwd_expiration_date(self): diff --git a/ceph/qa/tasks/mgr/mgr_test_case.py b/ceph/qa/tasks/mgr/mgr_test_case.py index 8687b5f29..f5392d3ba 100644 --- a/ceph/qa/tasks/mgr/mgr_test_case.py +++ b/ceph/qa/tasks/mgr/mgr_test_case.py @@ -117,7 +117,7 @@ class MgrTestCase(CephTestCase): if is_disabled(): return - log.info("Unloading Mgr module %s ...", module_name) + log.debug("Unloading Mgr module %s ...", module_name) cls.mgr_cluster.mon_manager.raw_cluster_cmd('mgr', 'module', 'disable', module_name) cls.wait_until_true(is_disabled, timeout=30) @@ -143,7 +143,7 @@ class MgrTestCase(CephTestCase): if module_name in always_on: return - log.info("Loading Mgr module %s ...", module_name) + log.debug("Loading Mgr module %s ...", module_name) initial_gid = initial_mgr_map['active_gid'] cls.mgr_cluster.mon_manager.raw_cluster_cmd( "mgr", "module", "enable", module_name, "--force") @@ -153,7 +153,7 @@ class MgrTestCase(CephTestCase): mgr_map = cls.mgr_cluster.get_mgr_map() done = mgr_map['active_gid'] != initial_gid and mgr_map['available'] if done: - log.info("Restarted after module load (new active {0}/{1})".format( + log.debug("Restarted after module load (new active {0}/{1})".format( mgr_map['active_name'], mgr_map['active_gid'])) return done cls.wait_until_true(has_restarted, timeout=30) @@ -174,7 +174,7 @@ class MgrTestCase(CephTestCase): uri = mgr_map['x']['services'][service_name] - log.info("Found {0} at {1} (daemon {2}/{3})".format( + log.debug("Found {0} at {1} (daemon {2}/{3})".format( service_name, uri, mgr_map['x']['active_name'], mgr_map['x']['active_gid'])) @@ -198,7 +198,7 @@ class MgrTestCase(CephTestCase): cls.mgr_cluster.mgr_fail(mgr_id) for mgr_id in cls.mgr_cluster.mgr_ids: - log.info("Using port {0} for {1} on mgr.{2}".format( + log.debug("Using port {0} for {1} on mgr.{2}".format( assign_port, module_name, mgr_id )) cls.mgr_cluster.set_module_localized_conf(module_name, mgr_id, @@ -214,7 +214,7 @@ class MgrTestCase(CephTestCase): mgr_map = cls.mgr_cluster.get_mgr_map() done = mgr_map['available'] if done: - log.info("Available after assign ports (new active {0}/{1})".format( + log.debug("Available after assign ports (new active {0}/{1})".format( mgr_map['active_name'], mgr_map['active_gid'])) return done cls.wait_until_true(is_available, timeout=30) diff --git a/ceph/qa/tasks/mgr/test_prometheus.py b/ceph/qa/tasks/mgr/test_prometheus.py index 867d5cd5d..7fe37a1a5 100644 --- a/ceph/qa/tasks/mgr/test_prometheus.py +++ b/ceph/qa/tasks/mgr/test_prometheus.py @@ -49,6 +49,7 @@ class TestPrometheus(MgrTestCase): r = requests.get(original_uri + "metrics", allow_redirects=False) self.assertEqual(r.status_code, 200) self.assertEqual(r.headers["content-type"], "text/plain;charset=utf-8") + self.assertEqual(r.headers["server"], "Ceph-Prometheus") def test_urls(self): self._assign_ports("prometheus", "server_port") diff --git a/ceph/qa/tasks/osd_failsafe_enospc.py b/ceph/qa/tasks/osd_failsafe_enospc.py index 4b2cdb983..da43ee9c5 100644 --- a/ceph/qa/tasks/osd_failsafe_enospc.py +++ b/ceph/qa/tasks/osd_failsafe_enospc.py @@ -17,13 +17,13 @@ def task(ctx, config): Test handling of osd_failsafe_nearfull_ratio and osd_failsafe_full_ratio configuration settings - In order for test to 
pass must use log-whitelist as follows + In order for test to pass must use log-ignorelist as follows tasks: - chef: - install: - ceph: - log-whitelist: ['OSD near full', 'OSD full dropping all updates'] + log-ignorelist: ['OSD near full', 'OSD full dropping all updates'] - osd_failsafe_enospc: """ diff --git a/ceph/qa/tasks/repair_test.py b/ceph/qa/tasks/repair_test.py index 973273bbe..0fbe6cb77 100644 --- a/ceph/qa/tasks/repair_test.py +++ b/ceph/qa/tasks/repair_test.py @@ -251,7 +251,7 @@ def task(ctx, config): The config should be as follows: - Must include the log-whitelist below + Must include the log-ignorelist below Must enable filestore_debug_inject_read_err config example: @@ -260,7 +260,7 @@ def task(ctx, config): - chef: - install: - ceph: - log-whitelist: + log-ignorelist: - 'candidate had a stat error' - 'candidate had a read error' - 'deep-scrub 0 missing, 1 inconsistent objects' diff --git a/ceph/qa/tasks/scrub_test.py b/ceph/qa/tasks/scrub_test.py index 3d71708ed..d301bea0b 100644 --- a/ceph/qa/tasks/scrub_test.py +++ b/ceph/qa/tasks/scrub_test.py @@ -324,7 +324,7 @@ def task(ctx, config): - chef: - install: - ceph: - log-whitelist: + log-ignorelist: - '!= data_digest' - '!= omap_digest' - '!= size' diff --git a/ceph/qa/tasks/thrashosds-health.yaml b/ceph/qa/tasks/thrashosds-health.yaml index 611d867e7..1b2560d4e 100644 --- a/ceph/qa/tasks/thrashosds-health.yaml +++ b/ceph/qa/tasks/thrashosds-health.yaml @@ -3,7 +3,7 @@ overrides: conf: osd: osd max markdown count: 1000 - log-whitelist: + log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ diff --git a/ceph/qa/tasks/vstart_runner.py b/ceph/qa/tasks/vstart_runner.py index 14cc5939d..741d464c0 100644 --- a/ceph/qa/tasks/vstart_runner.py +++ b/ceph/qa/tasks/vstart_runner.py @@ -53,6 +53,11 @@ from teuthology.orchestra.daemon import DaemonGroup from teuthology.config import config as teuth_config import six import logging +try: + import urllib3 + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) +except: + pass def init_log(): global log @@ -210,13 +215,13 @@ class LocalRemoteProcess(object): return False def kill(self): - log.info("kill ") + log.debug("kill ") if self.subproc.pid and not self.finished: - log.info("kill: killing pid {0} ({1})".format( + log.debug("kill: killing pid {0} ({1})".format( self.subproc.pid, self.args)) safe_kill(self.subproc.pid) else: - log.info("kill: already terminated ({0})".format(self.args)) + log.debug("kill: already terminated ({0})".format(self.args)) @property def stdin(self): @@ -360,7 +365,7 @@ class LocalRemote(object): args[0] )) - log.info("Running {0}".format(args)) + log.debug("Running {0}".format(args)) if shell: subproc = subprocess.Popen(quote(args), @@ -454,13 +459,13 @@ class LocalDaemon(object): for line in lines: if line.find("ceph-{0} -i {1}".format(self.daemon_type, self.daemon_id)) != -1: - log.info("Found ps line for daemon: {0}".format(line)) + log.debug("Found ps line for daemon: {0}".format(line)) return int(line.split()[0]) if opt_log_ps_output: - log.info("No match for {0} {1}: {2}".format( + log.debug("No match for {0} {1}: {2}".format( self.daemon_type, self.daemon_id, ps_txt)) else: - log.info("No match for {0} {1}".format(self.daemon_type, + log.debug("No match for {0} {1}".format(self.daemon_type, self.daemon_id)) return None @@ -478,14 +483,14 @@ class LocalDaemon(object): return pid = self._get_pid() - log.info("Killing PID {0} for {1}.{2}".format(pid, self.daemon_type, self.daemon_id)) + log.debug("Killing PID {0} for 
{1}.{2}".format(pid, self.daemon_type, self.daemon_id)) os.kill(pid, signal.SIGTERM) waited = 0 while pid is not None: new_pid = self._get_pid() if new_pid is not None and new_pid != pid: - log.info("Killing new PID {0}".format(new_pid)) + log.debug("Killing new PID {0}".format(new_pid)) pid = new_pid os.kill(pid, signal.SIGTERM) @@ -515,7 +520,7 @@ class LocalDaemon(object): os.kill(self._get_pid(), sig) if not silent: - log.info("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id)) + log.debug("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id)) def safe_kill(pid): @@ -617,9 +622,9 @@ class LocalKernelMount(KernelMount): # Previous mount existed, reuse the old name name = self.fs.name self.fs = LocalFilesystem(self.ctx, name=name) - log.info('Wait for MDS to reach steady state...') + log.debug('Wait for MDS to reach steady state...') self.fs.wait_for_daemons() - log.info('Ready to start {}...'.format(type(self).__name__)) + log.debug('Ready to start {}...'.format(type(self).__name__)) @property def _prefix(self): @@ -778,9 +783,9 @@ class LocalFuseMount(FuseMount): # Previous mount existed, reuse the old name name = self.fs.name self.fs = LocalFilesystem(self.ctx, name=name) - log.info('Wait for MDS to reach steady state...') + log.debug('Wait for MDS to reach steady state...') self.fs.wait_for_daemons() - log.info('Ready to start {}...'.format(type(self).__name__)) + log.debug('Ready to start {}...'.format(type(self).__name__)) @property def _prefix(self): @@ -836,7 +841,7 @@ class LocalFuseMount(FuseMount): # Before starting ceph-fuse process, note the contents of # /sys/fs/fuse/connections pre_mount_conns = list_connections() - log.info("Pre-mount connections: {0}".format(pre_mount_conns)) + log.debug("Pre-mount connections: {0}".format(pre_mount_conns)) prefix = [os.path.join(BIN_PREFIX, "ceph-fuse")] if os.getuid() != 0: @@ -858,7 +863,7 @@ class LocalFuseMount(FuseMount): self.mountpoint ], wait=False) - log.info("Mounting client.{0} with pid {1}".format(self.client_id, self.fuse_daemon.subproc.pid)) + log.debug("Mounting client.{0} with pid {1}".format(self.client_id, self.fuse_daemon.subproc.pid)) # Wait for the connection reference to appear in /sys waited = 0 @@ -876,7 +881,7 @@ class LocalFuseMount(FuseMount): )) post_mount_conns = list_connections() - log.info("Post-mount connections: {0}".format(post_mount_conns)) + log.debug("Post-mount connections: {0}".format(post_mount_conns)) # Record our fuse connection number so that we can use it when # forcing an unmount @@ -912,7 +917,7 @@ class LocalCephManager(CephManager): # certain teuthology tests want to run tasks in parallel self.lock = threading.RLock() - self.log = lambda x: log.info(x) + self.log = lambda x: log.debug(x) # Don't bother constructing a map of pools: it should be empty # at test cluster start, and in any case it would be out of date @@ -1054,7 +1059,7 @@ class LocalCephCluster(CephCluster): existing_str += "\n[{0}]\n".format(subsys) for key, val in kvs.items(): # Comment out existing instance if it exists - log.info("Searching for existing instance {0}/{1}".format( + log.debug("Searching for existing instance {0}/{1}".format( key, subsys )) existing_section = re.search("^\[{0}\]$([\n]|[^\[])+".format( @@ -1066,7 +1071,7 @@ class LocalCephCluster(CephCluster): existing_val = re.search("^\s*[^#]({0}) =".format(key), section_str, re.MULTILINE) if existing_val: start = existing_section.start() + existing_val.start(1) - log.info("Found string to replace at {0}".format( 
+ log.debug("Found string to replace at {0}".format( start )) existing_str = existing_str[0:start] + "#" + existing_str[start:] @@ -1133,7 +1138,7 @@ class LocalFilesystem(Filesystem, LocalMDSCluster): self.mds_ids = list(self.mds_ids) - log.info("Discovered MDS IDs: {0}".format(self.mds_ids)) + log.debug("Discovered MDS IDs: {0}".format(self.mds_ids)) self.mon_manager = LocalCephManager() @@ -1190,7 +1195,7 @@ class InteractiveFailureResult(unittest.TextTestResult): def enumerate_methods(s): - log.info("e: {0}".format(s)) + log.debug("e: {0}".format(s)) for t in s._tests: if isinstance(t, suite.BaseTestSuite): for sub in enumerate_methods(t): @@ -1201,15 +1206,15 @@ def enumerate_methods(s): def load_tests(modules, loader): if modules: - log.info("Executing modules: {0}".format(modules)) + log.debug("Executing modules: {0}".format(modules)) module_suites = [] for mod_name in modules: # Test names like cephfs.test_auto_repair module_suites.append(loader.loadTestsFromName(mod_name)) - log.info("Loaded: {0}".format(list(module_suites))) + log.debug("Loaded: {0}".format(list(module_suites))) return suite.TestSuite(module_suites) else: - log.info("Executing all cephfs tests") + log.debug("Executing all cephfs tests") return loader.discover( os.path.join(os.path.dirname(os.path.abspath(__file__)), "cephfs") ) @@ -1292,7 +1297,7 @@ def clear_old_log(): with open(logpath, 'w') as logfile: logfile.write('') init_log() - log.info('logging in a fresh file now...') + log.debug('logging in a fresh file now...') def exec_test(): # Parse arguments @@ -1509,7 +1514,7 @@ def exec_test(): if not is_named: victims.append((case, method)) - log.info("Disabling {0} tests because of is_for_teuthology or needs_trimming".format(len(victims))) + log.debug("Disabling {0} tests because of is_for_teuthology or needs_trimming".format(len(victims))) for s, method in victims: s._tests.remove(method) diff --git a/ceph/qa/valgrind.supp b/ceph/qa/valgrind.supp index 1e5fc3262..a7257faaf 100644 --- a/ceph/qa/valgrind.supp +++ b/ceph/qa/valgrind.supp @@ -1,3 +1,11 @@ + +{ + + Memcheck:Free + fun:free + ... 
+} + { older boost mersenne twister uses uninitialized memory for randomness Memcheck:Cond diff --git a/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh b/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh index 5a214689d..3e26f977c 100755 --- a/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh +++ b/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh @@ -542,6 +542,9 @@ status() echo "image ${image} journal status" rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" journal status --image ${image} echo + echo "image ${image} snapshots" + rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" snap ls --all ${image} + echo done echo "${cluster} ${image_pool} ${image_ns} rbd_mirroring omap vals" @@ -902,7 +905,9 @@ create_image_and_enable_mirror() fi create_image ${cluster} ${pool} ${image} $@ - enable_mirror ${cluster} ${pool} ${image} ${mode} + if [ "${MIRROR_POOL_MODE}" = "image" ] || [ "$pool" = "${PARENT_POOL}" ]; then + enable_mirror ${cluster} ${pool} ${image} ${mode} + fi } enable_journaling() diff --git a/ceph/qa/workunits/rbd/rbd_mirror_stress.sh b/ceph/qa/workunits/rbd/rbd_mirror_stress.sh index cb4f66b25..a17ad75e1 100755 --- a/ceph/qa/workunits/rbd/rbd_mirror_stress.sh +++ b/ceph/qa/workunits/rbd/rbd_mirror_stress.sh @@ -31,6 +31,7 @@ compare_image_snaps() local pool=$1 local image=$2 local snap_name=$3 + local ret=0 local rmt_export=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.export local loc_export=${TEMPDIR}/${CLUSTER1}-${pool}-${image}.export @@ -38,8 +39,13 @@ compare_image_snaps() rm -f ${rmt_export} ${loc_export} rbd --cluster ${CLUSTER2} -p ${pool} export ${image}@${snap_name} ${rmt_export} rbd --cluster ${CLUSTER1} -p ${pool} export ${image}@${snap_name} ${loc_export} - cmp ${rmt_export} ${loc_export} + if ! cmp ${rmt_export} ${loc_export} + then + show_diff ${rmt_export} ${loc_export} + ret=1 + fi rm -f ${rmt_export} ${loc_export} + return ${ret} } wait_for_pool_images() @@ -90,9 +96,10 @@ start_mirrors ${CLUSTER2} testlog "TEST: add image and test replay after client crashes" image=test -create_image ${CLUSTER2} ${POOL} ${image} '512M' +create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} ${MIRROR_IMAGE_MODE} '512M' wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image} +clean_snap_name= for i in `seq 1 10` do stress_write_image ${CLUSTER2} ${POOL} ${image} @@ -104,12 +111,35 @@ do wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image} wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image} wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${snap_name} + + if [ -n "${clean_snap_name}" ]; then + compare_image_snaps ${POOL} ${image} ${clean_snap_name} + fi compare_image_snaps ${POOL} ${image} ${snap_name} + + clean_snap_name="snap${i}-clean" + create_snap ${CLUSTER2} ${POOL} ${image} ${clean_snap_name} done +wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image} +wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image} +wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${clean_snap_name} + for i in `seq 1 10` do snap_name="snap${i}" + compare_image_snaps ${POOL} ${image} ${snap_name} + + snap_name="snap${i}-clean" + compare_image_snaps ${POOL} ${image} ${snap_name} +done + +for i in `seq 1 10` +do + snap_name="snap${i}" + remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name} + + snap_name="snap${i}-clean" remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name} done @@ -121,7 +151,7 @@ snap_name="snap" for i in `seq 1 ${IMAGE_COUNT}` do image="image_${i}" - create_image ${CLUSTER2} ${POOL} ${image} '128M' + 
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} ${MIRROR_IMAGE_MODE} '128M' if [ -n "${RBD_MIRROR_REDUCE_WRITES}" ]; then write_image ${CLUSTER2} ${POOL} ${image} 100 else diff --git a/ceph/src/.git_version b/ceph/src/.git_version index 066da7f32..0ca509a9f 100644 --- a/ceph/src/.git_version +++ b/ceph/src/.git_version @@ -1,2 +1,2 @@ -357616cbf726abb779ca75a551e8d02568e15b17 -15.2.9 +27917a557cca91e4da407489bbaa64ad4352cc02 +15.2.10 diff --git a/ceph/src/ceph-volume/ceph_volume/api/lvm.py b/ceph/src/ceph-volume/ceph_volume/api/lvm.py index 461e01050..30362f1bd 100644 --- a/ceph/src/ceph-volume/ceph_volume/api/lvm.py +++ b/ceph/src/ceph-volume/ceph_volume/api/lvm.py @@ -588,9 +588,31 @@ class VolumeGroup(object): def bytes_to_extents(self, size): ''' - Return a how many extents we can fit into a size in bytes. + Return how many free extents we can fit into a size in bytes. This has + some uncertainty involved. If size/extent_size is within 1% of the + actual free extents we will return the extent count, otherwise we'll + throw an error. + This accommodates the size calculation in batch. We need to report + the OSD layout but have not yet created any LVM structures. We use the + disk size in batch if no VG is present and that will overshoot the + actual free_extent count due to LVM overhead. + ''' - return int(size / int(self.vg_extent_size)) + b_to_ext = int(size / int(self.vg_extent_size)) + if b_to_ext < int(self.vg_free_count): + # return bytes in extents if there is more space + return b_to_ext + elif b_to_ext / int(self.vg_free_count) - 1 < 0.01: + # return vg_free_count if it's less than 1% off + logger.info( + 'bytes_to_extents results in {} but only {} ' + 'are available, adjusting the latter'.format(b_to_ext, + self.vg_free_count)) + return int(self.vg_free_count) + # else raise an exception + raise RuntimeError('Can\'t convert {} to free extents, only {} ({} ' + 'bytes) are free'.format(size, self.vg_free_count, + self.free)) def slots_to_extents(self, slots): ''' diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py index a6f7632b3..40c0fea4e 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py @@ -102,7 +102,7 @@ def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, ar requested_slots = getattr(args, '{}_slots'.format(type_)) if not requested_slots or requested_slots < fast_slots_per_device: if requested_slots: - mlogger.info('{}_slots argument is to small, ignoring'.format(type_)) + mlogger.info('{}_slots argument is too small, ignoring'.format(type_)) requested_slots = fast_slots_per_device requested_size = getattr(args, '{}_size'.format(type_), 0) diff --git a/ceph/src/ceph-volume/ceph_volume/drive_group/main.py b/ceph/src/ceph-volume/ceph_volume/drive_group/main.py index a224b21a5..9e93bc759 100644 --- a/ceph/src/ceph-volume/ceph_volume/drive_group/main.py +++ b/ceph/src/ceph-volume/ceph_volume/drive_group/main.py @@ -90,7 +90,7 @@ class Deploy(object): def get_dg_spec(self, dg): dg_spec = DriveGroupSpec._from_json_impl(dg) dg_spec.validate() - i = Inventory([]) + i = Inventory(['--filter-for-batch']) i.main() inventory = i.get_report() devices = [Device.from_json(i) for i in inventory] diff --git a/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py b/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py index cd2f9d9cb..f01ceb4f3 100644 --- 
a/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py @@ -185,19 +185,42 @@ class TestCreateLV(object): def setup(self): self.foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='') self.foo_group = api.VolumeGroup(vg_name='foo_group', - vg_extent_size=4194304, - vg_extent_count=100, - vg_free_count=100) + vg_extent_size="4194304", + vg_extent_count="100", + vg_free_count="100") @patch('ceph_volume.api.lvm.process.run') @patch('ceph_volume.api.lvm.process.call') @patch('ceph_volume.api.lvm.get_first_lv') def test_uses_size(self, m_get_first_lv, m_call, m_run, monkeypatch): m_get_first_lv.return_value = self.foo_volume - api.create_lv('foo', 0, vg=self.foo_group, size=5368709120, tags={'ceph.type': 'data'}) - expected = ['lvcreate', '--yes', '-l', '1280', '-n', 'foo-0', 'foo_group'] + api.create_lv('foo', 0, vg=self.foo_group, size=419430400, tags={'ceph.type': 'data'}) + expected = ['lvcreate', '--yes', '-l', '100', '-n', 'foo-0', 'foo_group'] m_run.assert_called_with(expected) + @patch('ceph_volume.api.lvm.process.run') + @patch('ceph_volume.api.lvm.process.call') + @patch('ceph_volume.api.lvm.get_first_lv') + def test_uses_size_adjust_if_1percent_over(self, m_get_first_lv, m_call, m_run, monkeypatch): + foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='') + foo_group = api.VolumeGroup(vg_name='foo_group', + vg_extent_size="4194304", + vg_extent_count="1000", + vg_free_count="1000") + m_get_first_lv.return_value = foo_volume + # 423624704 should be just under 1% off of the available size 419430400 + api.create_lv('foo', 0, vg=foo_group, size=4232052736, tags={'ceph.type': 'data'}) + expected = ['lvcreate', '--yes', '-l', '1000', '-n', 'foo-0', 'foo_group'] + m_run.assert_called_with(expected) + + @patch('ceph_volume.api.lvm.process.run') + @patch('ceph_volume.api.lvm.process.call') + @patch('ceph_volume.api.lvm.get_first_lv') + def test_uses_size_too_large(self, m_get_first_lv, m_call, m_run, monkeypatch): + m_get_first_lv.return_value = self.foo_volume + with pytest.raises(RuntimeError): + api.create_lv('foo', 0, vg=self.foo_group, size=5368709120, tags={'ceph.type': 'data'}) + @patch('ceph_volume.api.lvm.process.run') @patch('ceph_volume.api.lvm.process.call') @patch('ceph_volume.api.lvm.get_first_lv') diff --git a/ceph/src/ceph-volume/ceph_volume/util/device.py b/ceph/src/ceph-volume/ceph_volume/util/device.py index ad7950d21..830f3bbe2 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/device.py +++ b/ceph/src/ceph-volume/ceph_volume/util/device.py @@ -454,7 +454,7 @@ class Device(object): # assuming 4M extents here extent_size = 4194304 vg_free = int(self.size / extent_size) * extent_size - if self.size % 4194304 == 0: + if self.size % extent_size == 0: # If the extent size divides size exactly, deduct on extent for # LVM metadata vg_free -= extent_size diff --git a/ceph/src/ceph-volume/ceph_volume/util/disk.py b/ceph/src/ceph-volume/ceph_volume/util/disk.py index e022c9e51..df016c4e8 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/disk.py +++ b/ceph/src/ceph-volume/ceph_volume/util/disk.py @@ -760,7 +760,7 @@ def get_devices(_sys_block_path='/sys/block'): # If the mapper device is a logical volume it gets excluded if is_mapper_device(diskname): - if lvm.is_lv(diskname): + if lvm.get_device_lvs(diskname): continue # all facts that have no defaults diff --git a/ceph/src/cephadm/cephadm b/ceph/src/cephadm/cephadm index 8de809d75..1b4163b82 100755 --- 
a/ceph/src/cephadm/cephadm +++ b/ceph/src/cephadm/cephadm @@ -182,7 +182,7 @@ class Monitoring(object): ], }, "grafana": { - "image": "docker.io/ceph/ceph-grafana:6.6.2", + "image": "docker.io/ceph/ceph-grafana:6.7.4", "cpus": "2", "memory": "4GB", "args": [], @@ -2381,7 +2381,6 @@ class Firewalld(object): else: logger.debug('firewalld port %s is enabled in current zone' % tcp_port) - out, err, ret = call([self.cmd, '--permanent', '--query-port', tcp_port], verbose_on_failure=False) def apply_rules(self): # type: () -> None if not self.available: @@ -2723,7 +2722,7 @@ def command_inspect_image(): # type: () -> int out, err, ret = call_throws([ container_path, 'inspect', - '--format', '{{.ID}},{{json .RepoDigests}}', + '--format', '{{.ID}},{{.RepoDigests}}', args.image]) if ret: return errno.ENOENT @@ -2745,7 +2744,7 @@ def get_image_info_from_inspect(out, image): 'image_id': normalize_container_id(image_id) } if digests: - json_digests = json.loads(digests) + json_digests = digests[1:-1].split(' ') if json_digests: r['repo_digest'] = json_digests[0] return r @@ -3253,10 +3252,11 @@ def command_bootstrap(): logger.info('Creating initial admin user...') password = args.initial_dashboard_password or generate_password() - cmd = ['dashboard', 'ac-user-create', args.initial_dashboard_user, password, 'administrator', '--force-password'] + tmp_password_file = write_tmp(password, uid, gid) + cmd = ['dashboard', 'ac-user-create', args.initial_dashboard_user, '-i', '/tmp/dashboard.pw', 'administrator', '--force-password'] if not args.dashboard_password_noupdate: cmd.append('--pwd-update-required') - cli(cmd) + cli(cmd, extra_mounts={pathify(tmp_password_file.name): '/tmp/dashboard.pw:z'}) logger.info('Fetching dashboard port number...') out = cli(['config', 'get', 'mgr', 'mgr/dashboard/ssl_server_port']) port = int(out) diff --git a/ceph/src/cephadm/tests/test_cephadm.py b/ceph/src/cephadm/tests/test_cephadm.py index 5487f43b3..74a93d8dd 100644 --- a/ceph/src/cephadm/tests/test_cephadm.py +++ b/ceph/src/cephadm/tests/test_cephadm.py @@ -241,7 +241,7 @@ default via fe80::2480:28ec:5097:3fe2 dev wlp2s0 proto ra metric 20600 pref medi def test_get_image_info_from_inspect(self): # podman - out = """204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1,["docker.io/ceph/ceph@sha256:1cc9b824e1b076cdff52a9aa3f0cc8557d879fb2fbbba0cafed970aca59a3992"]""" + out = """204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1,[docker.io/ceph/ceph@sha256:1cc9b824e1b076cdff52a9aa3f0cc8557d879fb2fbbba0cafed970aca59a3992]""" r = cd.get_image_info_from_inspect(out, 'registry/ceph/ceph:latest') assert r == { 'image_id': '204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1', @@ -249,7 +249,7 @@ default via fe80::2480:28ec:5097:3fe2 dev wlp2s0 proto ra metric 20600 pref medi } # docker - out = """sha256:16f4549cf7a8f112bbebf7946749e961fbbd1b0838627fe619aab16bc17ce552,["quay.ceph.io/ceph-ci/ceph@sha256:4e13da36c1bd6780b312a985410ae678984c37e6a9493a74c87e4a50b9bda41f"]""" + out = """sha256:16f4549cf7a8f112bbebf7946749e961fbbd1b0838627fe619aab16bc17ce552,[quay.ceph.io/ceph-ci/ceph@sha256:4e13da36c1bd6780b312a985410ae678984c37e6a9493a74c87e4a50b9bda41f]""" r = cd.get_image_info_from_inspect(out, 'registry/ceph/ceph:latest') assert r == { 'image_id': '16f4549cf7a8f112bbebf7946749e961fbbd1b0838627fe619aab16bc17ce552', diff --git a/ceph/src/common/legacy_config_opts.h b/ceph/src/common/legacy_config_opts.h index 68364bbb9..6aa45b7e4 100644 --- a/ceph/src/common/legacy_config_opts.h +++ 
b/ceph/src/common/legacy_config_opts.h @@ -918,6 +918,7 @@ OPTION(bluefs_allocator, OPT_STR) // stupid | bitmap OPTION(bluefs_log_replay_check_allocations, OPT_BOOL) OPTION(bluefs_replay_recovery, OPT_BOOL) OPTION(bluefs_replay_recovery_disable_compact, OPT_BOOL) +OPTION(bluefs_check_for_zeros, OPT_BOOL) OPTION(bluestore_bluefs, OPT_BOOL) OPTION(bluestore_bluefs_env_mirror, OPT_BOOL) // mirror to normal Env for debug @@ -1014,6 +1015,7 @@ OPTION(bluestore_bitmapallocator_span_size, OPT_INT) // must be power of 2 align OPTION(bluestore_max_deferred_txc, OPT_U64) OPTION(bluestore_max_defer_interval, OPT_U64) OPTION(bluestore_rocksdb_options, OPT_STR) +OPTION(bluestore_rocksdb_options_annex, OPT_STR) OPTION(bluestore_fsck_on_mount, OPT_BOOL) OPTION(bluestore_fsck_on_mount_deep, OPT_BOOL) OPTION(bluestore_fsck_quick_fix_on_mount, OPT_BOOL) diff --git a/ceph/src/common/options.cc b/ceph/src/common/options.cc index 693ed4c5f..f74aa788a 100644 --- a/ceph/src/common/options.cc +++ b/ceph/src/common/options.cc @@ -4051,6 +4051,14 @@ std::vector