From 91327a770de101c8d859649fa82e887fff521e13 Mon Sep 17 00:00:00 2001
From: Alwin Antreich
Date: Wed, 5 Dec 2018 15:13:22 +0100
Subject: [PATCH] update sources to 12.2.10

Signed-off-by: Alwin Antreich
---
 Makefile | 2 +-
 ceph/CMakeLists.txt | 4 +-
 ceph/PendingReleaseNotes | 11 +
 ceph/alpine/APKBUILD | 6 +-
 ceph/ceph.spec | 33 +-
 ceph/ceph.spec.in | 27 +-
 ceph/debian/changelog | 12 +
 ceph/debian/control | 5 +-
 ceph/debian/rules | 2 +
 ceph/doc/architecture.rst | 14 +-
 ceph/doc/ceph-volume/index.rst | 7 +
 ceph/doc/ceph-volume/inventory.rst | 17 +
 ceph/doc/ceph-volume/lvm/batch.rst | 4 +
 ceph/doc/ceph-volume/lvm/prepare.rst | 27 +
 ceph/doc/cephfs/eviction.rst | 6 +-
 ceph/doc/cephfs/fstab.rst | 12 +-
 ceph/doc/cephfs/fuse.rst | 6 +-
 ceph/doc/cephfs/index.rst | 2 +
 ceph/doc/cephfs/kernel.rst | 6 +-
 ceph/doc/glossary.rst | 4 +-
 ceph/doc/man/8/ceph-fuse.rst | 9 +-
 ceph/doc/man/8/ceph-volume.rst | 33 +-
 ceph/doc/man/8/ceph.rst | 16 +-
 ceph/doc/rados/operations/monitoring.rst | 2 +-
 ceph/doc/radosgw/index.rst | 2 +
 ceph/doc/radosgw/ldap-auth.rst | 2 +-
 ceph/doc/rbd/index.rst | 16 +-
 ceph/doc/start/quick-cephfs.rst | 20 +-
 .../cephfs/clusters/1a3s-mds-1c-client.yaml | 12 +
 .../cephfs/clusters/1a3s-mds-2c-client.yaml | 12 +
 .../qa/{suites/knfs/basic/% => cephfs/conf/+} | 0
 .../debug.yaml => conf/client.yaml} | 4 +-
 ceph/qa/cephfs/conf/mds.yaml | 11 +
 ceph/qa/cephfs/conf/mon.yaml | 5 +
 ceph/qa/cephfs/conf/osd.yaml | 5 +
 .../whitelist_wrongly_marked_down.yaml | 6 -
 ceph/qa/packages/packages.yaml | 4 +
 .../erasure-code/test-erasure-eio.sh | 31 +
 .../qa/standalone/osd/ec-error-rollforward.sh | 7 +-
 ceph/qa/standalone/osd/osd-backfill-stats.sh | 108 ++--
 ceph/qa/standalone/osd/osd-recovery-stats.sh | 177 ++++--
 ceph/qa/standalone/scrub/osd-scrub-repair.sh | 115 ++++
 ceph/qa/standalone/scrub/osd-scrub-snaps.sh | 60 +-
 ceph/qa/suites/.qa | 1 +
 ceph/qa/suites/big/.qa | 1 +
 ceph/qa/suites/big/rados-thrash/.qa | 1 +
 ceph/qa/suites/big/rados-thrash/ceph/.qa | 1 +
 ceph/qa/suites/big/rados-thrash/clusters/.qa | 1 +
 ceph/qa/suites/big/rados-thrash/thrashers/.qa | 1 +
 ceph/qa/suites/big/rados-thrash/workloads/.qa | 1 +
 ceph/qa/suites/buildpackages/.qa | 1 +
 ceph/qa/suites/buildpackages/any/.qa | 1 +
 ceph/qa/suites/buildpackages/any/tasks/.qa | 1 +
 ceph/qa/suites/buildpackages/tests/.qa | 1 +
 ceph/qa/suites/buildpackages/tests/tasks/.qa | 1 +
 ceph/qa/suites/ceph-ansible/.qa | 1 +
 ceph/qa/suites/ceph-ansible/smoke/.qa | 1 +
 ceph/qa/suites/ceph-ansible/smoke/basic/.qa | 1 +
 .../ceph-ansible/smoke/basic/0-clusters/.qa | 1 +
 ...s to use both cases: mon.a and installer.0 | 12 -
 .../ceph-ansible/smoke/basic/1-distros/.qa | 1 +
 .../ceph-ansible/smoke/basic/2-ceph/.qa | 1 +
 .../smoke/basic/2-ceph/ceph_ansible.yaml | 1 +
 .../ceph-ansible/smoke/basic/3-config/.qa | 1 +
 .../3-config/bluestore_with_dmcrypt.yaml | 1 +
 .../smoke/basic/3-config/dmcrypt_off.yaml | 1 +
 .../smoke/basic/3-config/dmcrypt_on.yaml | 1 +
 .../ceph-ansible/smoke/basic/4-tasks/.qa | 1 +
 ceph/qa/suites/ceph-deploy/.qa | 1 +
 ceph/qa/suites/ceph-deploy/basic/.qa | 1 +
 .../basic/ceph-deploy-overrides/.qa | 1 +
 .../ceph-deploy/basic/config_options/.qa | 1 +
 .../suites/ceph-deploy/basic/objectstore/.qa | 1 +
 .../ceph-deploy/basic/python_versions/.qa | 1 +
 ceph/qa/suites/ceph-deploy/basic/tasks/.qa | 1 +
 ceph/qa/suites/ceph-deploy/ceph-volume/.qa | 1 +
 .../ceph-deploy/ceph-volume/cluster/.qa | 1 +
 .../suites/ceph-deploy/ceph-volume/config/.qa | 1 +
 .../ceph-deploy/ceph-volume/distros/.qa | 1 +
 .../suites/ceph-deploy/ceph-volume/tasks/.qa | 1 +
ceph/qa/suites/ceph-disk/.qa | 1 + ceph/qa/suites/ceph-disk/basic/.qa | 1 + ceph/qa/suites/ceph-disk/basic/tasks/.qa | 1 + ceph/qa/suites/dummy/.qa | 1 + ceph/qa/suites/dummy/all/.qa | 1 + ceph/qa/suites/experimental/.qa | 1 + ceph/qa/suites/experimental/multimds/.qa | 1 + .../suites/experimental/multimds/clusters/.qa | 1 + .../qa/suites/experimental/multimds/tasks/.qa | 1 + ceph/qa/suites/fs/.qa | 1 + ceph/qa/suites/fs/32bits/.qa | 1 + ceph/qa/suites/fs/32bits/clusters/.qa | 1 + ceph/qa/suites/fs/32bits/conf | 1 + ceph/qa/suites/fs/32bits/mount/.qa | 1 + ceph/qa/suites/fs/32bits/overrides/.qa | 1 + ceph/qa/suites/fs/32bits/overrides/debug.yaml | 1 - ceph/qa/suites/fs/32bits/tasks/.qa | 1 + ceph/qa/suites/fs/basic_functional/.qa | 1 + .../suites/fs/basic_functional/clusters/.qa | 1 + ceph/qa/suites/fs/basic_functional/conf | 1 + ceph/qa/suites/fs/basic_functional/mount/.qa | 1 + .../fs/basic_functional/objectstore/.qa | 1 + .../suites/fs/basic_functional/overrides/.qa | 1 + .../fs/basic_functional/overrides/debug.yaml | 1 - ceph/qa/suites/fs/basic_functional/tasks/.qa | 1 + .../tasks/cfuse_workunit_quota.yaml | 1 - .../fs/basic_functional/tasks/volume-client/% | 0 .../tasks/volume-client/py/2.yaml | 2 + .../tasks/volume-client/py/3.yaml | 2 + .../test.yaml} | 0 ceph/qa/suites/fs/basic_workload/.qa | 1 + ceph/qa/suites/fs/basic_workload/clusters/.qa | 1 + ceph/qa/suites/fs/basic_workload/conf | 1 + ceph/qa/suites/fs/basic_workload/inline/.qa | 1 + ceph/qa/suites/fs/basic_workload/mount/.qa | 1 + .../suites/fs/basic_workload/omap_limit/.qa | 1 + .../qa/suites/fs/basic_workload/overrides/.qa | 1 + .../fs/basic_workload/overrides/debug.yaml | 1 - ceph/qa/suites/fs/basic_workload/tasks/.qa | 1 + .../tasks/cfuse_workunit_misc.yaml | 1 - .../tasks/cfuse_workunit_norstats.yaml | 1 - .../tasks/cfuse_workunit_suites_pjd.yaml | 5 - ceph/qa/suites/fs/bugs/.qa | 1 + ceph/qa/suites/fs/bugs/client_trim_caps/.qa | 1 + .../fs/bugs/client_trim_caps/clusters/.qa | 1 + .../fs/bugs/client_trim_caps/objectstore/.qa | 1 + .../fs/bugs/client_trim_caps/overrides/.qa | 1 + .../client_trim_caps/overrides/debug.yaml | 1 - .../suites/fs/bugs/client_trim_caps/tasks/.qa | 1 + ceph/qa/suites/fs/bugs/conf | 1 + ceph/qa/suites/fs/multiclient/.qa | 1 + ceph/qa/suites/fs/multiclient/clusters/.qa | 1 + ceph/qa/suites/fs/multiclient/conf | 1 + ceph/qa/suites/fs/multiclient/mount/.qa | 1 + ceph/qa/suites/fs/multiclient/overrides/.qa | 1 + .../fs/multiclient/overrides/debug.yaml | 1 - ceph/qa/suites/fs/multiclient/tasks/.qa | 1 + .../multiclient/tasks/cephfs_misc_tests.yaml | 1 + ceph/qa/suites/fs/multifs/.qa | 1 + ceph/qa/suites/fs/multifs/clusters/.qa | 1 + .../multifs/clusters/1a3s-mds-2c-client.yaml | 1 + ceph/qa/suites/fs/multifs/conf | 1 + ceph/qa/suites/fs/multifs/mount/.qa | 1 + ceph/qa/suites/fs/multifs/overrides/.qa | 1 + .../qa/suites/fs/multifs/overrides/debug.yaml | 1 - ceph/qa/suites/fs/multifs/tasks/.qa | 1 + ceph/qa/suites/fs/permission/.qa | 1 + ceph/qa/suites/fs/permission/clusters/.qa | 1 + ceph/qa/suites/fs/permission/conf | 1 + ceph/qa/suites/fs/permission/mount/.qa | 1 + ceph/qa/suites/fs/permission/overrides/.qa | 1 + .../suites/fs/permission/overrides/debug.yaml | 1 - ceph/qa/suites/fs/permission/tasks/.qa | 1 + ceph/qa/suites/fs/snaps/.qa | 1 + ceph/qa/suites/fs/snaps/clusters/.qa | 1 + ceph/qa/suites/fs/snaps/conf | 1 + ceph/qa/suites/fs/snaps/mount/.qa | 1 + ceph/qa/suites/fs/snaps/overrides/.qa | 1 + ceph/qa/suites/fs/snaps/overrides/debug.yaml | 1 - ceph/qa/suites/fs/snaps/tasks/.qa | 1 + 
ceph/qa/suites/fs/thrash/.qa | 1 + ceph/qa/suites/fs/thrash/ceph-thrash/.qa | 1 + ceph/qa/suites/fs/thrash/clusters/.qa | 1 + ceph/qa/suites/fs/thrash/conf | 1 + ceph/qa/suites/fs/thrash/mount/.qa | 1 + ceph/qa/suites/fs/thrash/msgr-failures/.qa | 1 + ceph/qa/suites/fs/thrash/overrides/.qa | 1 + ceph/qa/suites/fs/thrash/overrides/debug.yaml | 1 - ceph/qa/suites/fs/thrash/tasks/.qa | 1 + ceph/qa/suites/fs/traceless/.qa | 1 + ceph/qa/suites/fs/traceless/clusters/.qa | 1 + ceph/qa/suites/fs/traceless/conf | 1 + ceph/qa/suites/fs/traceless/mount/.qa | 1 + ceph/qa/suites/fs/traceless/overrides/.qa | 1 + .../suites/fs/traceless/overrides/debug.yaml | 1 - ceph/qa/suites/fs/traceless/tasks/.qa | 1 + ceph/qa/suites/fs/traceless/traceless/.qa | 1 + ceph/qa/suites/fs/verify/.qa | 1 + ceph/qa/suites/fs/verify/clusters/.qa | 1 + ceph/qa/suites/fs/verify/conf | 1 + ceph/qa/suites/fs/verify/mount/.qa | 1 + ceph/qa/suites/fs/verify/overrides/.qa | 1 + ceph/qa/suites/fs/verify/overrides/debug.yaml | 1 - ceph/qa/suites/fs/verify/tasks/.qa | 1 + ceph/qa/suites/fs/verify/validater/.qa | 1 + ceph/qa/suites/hadoop/.qa | 1 + ceph/qa/suites/hadoop/basic/.qa | 1 + ceph/qa/suites/hadoop/basic/clusters/.qa | 1 + ceph/qa/suites/hadoop/basic/tasks/.qa | 1 + ceph/qa/suites/kcephfs/.qa | 1 + ceph/qa/suites/kcephfs/cephfs/.qa | 1 + ceph/qa/suites/kcephfs/cephfs/clusters/.qa | 1 + ceph/qa/suites/kcephfs/cephfs/conf | 1 + ceph/qa/suites/kcephfs/cephfs/inline/.qa | 1 + ceph/qa/suites/kcephfs/cephfs/overrides/.qa | 1 + .../kcephfs/cephfs/overrides/debug.yaml | 1 - .../overrides/ms-die-on-skipped.yaml} | 2 - ceph/qa/suites/kcephfs/cephfs/tasks/.qa | 1 + ceph/qa/suites/kcephfs/mixed-clients/.qa | 1 + .../suites/kcephfs/mixed-clients/clusters/.qa | 1 + ceph/qa/suites/kcephfs/mixed-clients/conf | 1 + .../kcephfs/mixed-clients/overrides/.qa | 1 + .../mixed-clients/overrides/debug.yaml | 1 - .../overrides/ms-die-on-skipped.yaml} | 2 - .../qa/suites/kcephfs/mixed-clients/tasks/.qa | 1 + ceph/qa/suites/kcephfs/recovery/.qa | 1 + ceph/qa/suites/kcephfs/recovery/clusters/.qa | 1 + ceph/qa/suites/kcephfs/recovery/conf | 1 + .../kcephfs/recovery/debug/mds_client.yaml | 12 - .../kcephfs/recovery/dirfrag/frag_enable.yaml | 11 - ceph/qa/suites/kcephfs/recovery/mounts/.qa | 1 + ceph/qa/suites/kcephfs/recovery/overrides/.qa | 1 + .../kcephfs/recovery/overrides/debug.yaml | 1 - ceph/qa/suites/kcephfs/recovery/tasks/.qa | 1 + ceph/qa/suites/kcephfs/thrash/.qa | 1 + ceph/qa/suites/kcephfs/thrash/clusters/.qa | 1 + ceph/qa/suites/kcephfs/thrash/conf | 1 + ceph/qa/suites/kcephfs/thrash/overrides/.qa | 1 + .../kcephfs/thrash/overrides/debug.yaml | 1 - .../overrides/ms-die-on-skipped.yaml} | 2 - ceph/qa/suites/kcephfs/thrash/thrashers/.qa | 1 + ceph/qa/suites/kcephfs/thrash/workloads/.qa | 1 + ceph/qa/suites/knfs/basic/ceph/base.yaml | 14 - .../knfs/basic/clusters/extra-client.yaml | 1 - ceph/qa/suites/knfs/basic/mount/v3.yaml | 5 - ceph/qa/suites/knfs/basic/mount/v4.yaml | 5 - .../nfs-workunit-kernel-untar-build.yaml | 6 - .../knfs/basic/tasks/nfs_workunit_misc.yaml | 11 - .../tasks/nfs_workunit_suites_blogbench.yaml | 5 - .../tasks/nfs_workunit_suites_dbench.yaml | 5 - .../basic/tasks/nfs_workunit_suites_ffsb.yaml | 10 - .../tasks/nfs_workunit_suites_fsstress.yaml | 5 - .../tasks/nfs_workunit_suites_iozone.yaml | 5 - ceph/qa/suites/krbd/.qa | 1 + ceph/qa/suites/krbd/rbd-nomount/.qa | 1 + ceph/qa/suites/krbd/rbd-nomount/clusters/.qa | 1 + ceph/qa/suites/krbd/rbd-nomount/install/.qa | 1 + .../suites/krbd/rbd-nomount/msgr-failures/.qa | 1 + 
ceph/qa/suites/krbd/rbd-nomount/tasks/.qa | 1 + ceph/qa/suites/krbd/rbd/.qa | 1 + ceph/qa/suites/krbd/rbd/clusters/.qa | 1 + ceph/qa/suites/krbd/rbd/msgr-failures/.qa | 1 + ceph/qa/suites/krbd/rbd/tasks/.qa | 1 + ceph/qa/suites/krbd/singleton/.qa | 1 + .../suites/krbd/singleton/msgr-failures/.qa | 1 + ceph/qa/suites/krbd/singleton/tasks/.qa | 1 + ceph/qa/suites/krbd/thrash/.qa | 1 + ceph/qa/suites/krbd/thrash/ceph/.qa | 1 + ceph/qa/suites/krbd/thrash/clusters/.qa | 1 + ceph/qa/suites/krbd/thrash/thrashers/.qa | 1 + ceph/qa/suites/krbd/thrash/workloads/.qa | 1 + ceph/qa/suites/krbd/unmap/.qa | 1 + ceph/qa/suites/krbd/unmap/ceph/.qa | 1 + ceph/qa/suites/krbd/unmap/clusters/.qa | 1 + ceph/qa/suites/krbd/unmap/kernels/.qa | 1 + ceph/qa/suites/krbd/unmap/tasks/.qa | 1 + ceph/qa/suites/krbd/unmap/tasks/unmap.yaml | 2 +- ceph/qa/suites/krbd/wac/.qa | 1 + ceph/qa/suites/krbd/wac/sysfs/.qa | 1 + ceph/qa/suites/krbd/wac/sysfs/ceph/.qa | 1 + ceph/qa/suites/krbd/wac/sysfs/clusters/.qa | 1 + ceph/qa/suites/krbd/wac/sysfs/tasks/.qa | 1 + ceph/qa/suites/krbd/wac/wac/.qa | 1 + ceph/qa/suites/krbd/wac/wac/ceph/.qa | 1 + ceph/qa/suites/krbd/wac/wac/clusters/.qa | 1 + ceph/qa/suites/krbd/wac/wac/tasks/.qa | 1 + ceph/qa/suites/krbd/wac/wac/verify/.qa | 1 + ceph/qa/suites/marginal/.qa | 1 + ceph/qa/suites/marginal/basic/.qa | 1 + ceph/qa/suites/marginal/basic/clusters/.qa | 1 + ceph/qa/suites/marginal/basic/tasks/.qa | 1 + ceph/qa/suites/marginal/fs-misc/.qa | 1 + ceph/qa/suites/marginal/fs-misc/clusters/.qa | 1 + ceph/qa/suites/marginal/fs-misc/tasks/.qa | 1 + ceph/qa/suites/marginal/mds_restart/.qa | 1 + .../suites/marginal/mds_restart/clusters/.qa | 1 + ceph/qa/suites/marginal/mds_restart/tasks/.qa | 1 + ceph/qa/suites/marginal/multimds/.qa | 1 + ceph/qa/suites/marginal/multimds/clusters/.qa | 1 + ceph/qa/suites/marginal/multimds/mounts/.qa | 1 + ceph/qa/suites/marginal/multimds/tasks/.qa | 1 + ceph/qa/suites/marginal/multimds/thrash/.qa | 1 + ceph/qa/suites/mixed-clients/.qa | 1 + ceph/qa/suites/mixed-clients/basic/.qa | 1 + .../suites/mixed-clients/basic/clusters/.qa | 1 + ceph/qa/suites/mixed-clients/basic/tasks/.qa | 1 + ceph/qa/suites/multimds/.qa | 1 + ceph/qa/suites/multimds/basic/.qa | 1 + ceph/qa/suites/multimds/basic/clusters/.qa | 1 + ceph/qa/suites/multimds/basic/conf | 1 + ceph/qa/suites/multimds/basic/mount/.qa | 1 + ceph/qa/suites/multimds/basic/overrides/.qa | 1 + .../suites/multimds/basic/q_check_counter/.qa | 1 + ceph/qa/suites/multimds/basic/tasks/.qa | 1 + .../basic/tasks/cfuse_workunit_misc.yaml | 1 - .../basic/tasks/cfuse_workunit_norstats.yaml | 1 - .../tasks/cfuse_workunit_suites_pjd.yaml | 5 - ceph/qa/suites/multimds/thrash/.qa | 1 + ceph/qa/suites/multimds/thrash/clusters/.qa | 1 + ceph/qa/suites/multimds/thrash/conf | 1 + ceph/qa/suites/multimds/thrash/mount/.qa | 1 + ceph/qa/suites/multimds/thrash/overrides/.qa | 1 + ceph/qa/suites/multimds/thrash/tasks/.qa | 1 + ceph/qa/suites/multimds/verify/.qa | 1 + ceph/qa/suites/multimds/verify/clusters/.qa | 1 + ceph/qa/suites/multimds/verify/conf | 1 + ceph/qa/suites/multimds/verify/mount/.qa | 1 + ceph/qa/suites/multimds/verify/overrides/.qa | 1 + ceph/qa/suites/powercycle/.qa | 1 + ceph/qa/suites/powercycle/osd/.qa | 1 + ceph/qa/suites/powercycle/osd/clusters/.qa | 1 + ceph/qa/suites/powercycle/osd/powercycle/.qa | 1 + ceph/qa/suites/powercycle/osd/tasks/.qa | 1 + ceph/qa/suites/rados/.qa | 1 + ceph/qa/suites/rados/basic-luminous/.qa | 1 + .../rados/basic-luminous/scrub_test.yaml | 3 +- ceph/qa/suites/rados/basic/.qa | 1 + 
ceph/qa/suites/rados/basic/clusters/.qa | 1 + .../suites/rados/basic/d-require-luminous/.qa | 1 + ceph/qa/suites/rados/basic/msgr-failures/.qa | 1 + ceph/qa/suites/rados/basic/msgr/.qa | 1 + ceph/qa/suites/rados/basic/tasks/.qa | 1 + .../suites/rados/basic/tasks/repair_test.yaml | 1 + ceph/qa/suites/rados/mgr/.qa | 1 + ceph/qa/suites/rados/mgr/clusters/.qa | 1 + ceph/qa/suites/rados/mgr/debug/.qa | 1 + ceph/qa/suites/rados/mgr/tasks/.qa | 1 + ceph/qa/suites/rados/monthrash/.qa | 1 + ceph/qa/suites/rados/monthrash/clusters/.qa | 1 + .../suites/rados/monthrash/msgr-failures/.qa | 1 + ceph/qa/suites/rados/monthrash/thrashers/.qa | 1 + ceph/qa/suites/rados/monthrash/workloads/.qa | 1 + ceph/qa/suites/rados/multimon/.qa | 1 + ceph/qa/suites/rados/multimon/clusters/.qa | 1 + .../suites/rados/multimon/msgr-failures/.qa | 1 + ceph/qa/suites/rados/multimon/tasks/.qa | 1 + ceph/qa/suites/rados/objectstore/.qa | 1 + ceph/qa/suites/rados/rest/.qa | 1 + ceph/qa/suites/rados/singleton-bluestore/.qa | 1 + .../suites/rados/singleton-bluestore/all/.qa | 1 + .../singleton-bluestore/msgr-failures/.qa | 1 + .../rados/singleton-bluestore/objectstore/.qa | 1 + ceph/qa/suites/rados/singleton-nomsgr/.qa | 1 + ceph/qa/suites/rados/singleton-nomsgr/all/.qa | 1 + .../all/librados_hello_world.yaml | 20 + ceph/qa/suites/rados/singleton/.qa | 1 + ceph/qa/suites/rados/singleton/all/.qa | 1 + .../rados/singleton/all/thrash-rados/.qa | 1 + .../suites/rados/singleton/msgr-failures/.qa | 1 + ceph/qa/suites/rados/standalone/.qa | 1 + .../suites/rados/thrash-erasure-code-big/.qa | 1 + .../rados/thrash-erasure-code-big/cluster/.qa | 1 + .../thrash-erasure-code-big/thrashers/.qa | 1 + .../thrash-erasure-code-big/workloads/.qa | 1 + .../suites/rados/thrash-erasure-code-isa/.qa | 1 + .../rados/thrash-erasure-code-isa/arch/.qa | 1 + .../thrash-erasure-code-isa/workloads/.qa | 1 + .../rados/thrash-erasure-code-overwrites/.qa | 1 + .../workloads/.qa | 1 + .../suites/rados/thrash-erasure-code-shec/.qa | 1 + .../thrash-erasure-code-shec/clusters/.qa | 1 + .../thrash-erasure-code-shec/thrashers/.qa | 1 + .../thrash-erasure-code-shec/workloads/.qa | 1 + ceph/qa/suites/rados/thrash-erasure-code/.qa | 1 + .../suites/rados/thrash-erasure-code/fast/.qa | 1 + .../rados/thrash-erasure-code/thrashers/.qa | 1 + .../rados/thrash-erasure-code/workloads/.qa | 1 + ceph/qa/suites/rados/thrash-luminous/.qa | 1 + .../rados/thrash-luminous/workloads/.qa | 1 + ceph/qa/suites/rados/thrash/.qa | 1 + .../thrash/0-size-min-size-overrides/.qa | 1 + .../rados/thrash/1-pg-log-overrides/.qa | 1 + .../rados/thrash/2-recovery-overrides/.qa | 1 + ceph/qa/suites/rados/thrash/backoff/.qa | 1 + ceph/qa/suites/rados/thrash/clusters/.qa | 1 + .../rados/thrash/d-require-luminous/.qa | 1 + ceph/qa/suites/rados/thrash/msgr-failures/.qa | 1 + ceph/qa/suites/rados/thrash/thrashers/.qa | 1 + ceph/qa/suites/rados/thrash/workloads/.qa | 1 + ceph/qa/suites/rados/upgrade/.qa | 1 + .../rados/upgrade/jewel-x-singleton/.qa | 1 + .../upgrade/jewel-x-singleton/0-cluster/.qa | 1 + .../jewel-x-singleton/1-jewel-install/.qa | 1 + .../1-jewel-install/jewel.yaml | 2 +- .../jewel-x-singleton/2-partial-upgrade/.qa | 1 + .../upgrade/jewel-x-singleton/3-thrash/.qa | 1 + .../upgrade/jewel-x-singleton/4-workload/.qa | 1 + .../upgrade/jewel-x-singleton/5-workload/.qa | 1 + .../upgrade/jewel-x-singleton/8-workload/.qa | 1 + ceph/qa/suites/rados/verify/.qa | 1 + ceph/qa/suites/rados/verify/clusters/.qa | 1 + ceph/qa/suites/rados/verify/d-thrash/.qa | 1 + 
.../suites/rados/verify/d-thrash/default/.qa | 1 + ceph/qa/suites/rados/verify/msgr-failures/.qa | 1 + ceph/qa/suites/rados/verify/tasks/.qa | 1 + ceph/qa/suites/rados/verify/validater/.qa | 1 + ceph/qa/suites/rbd/.qa | 1 + ceph/qa/suites/rbd/basic/.qa | 1 + ceph/qa/suites/rbd/basic/base/.qa | 1 + ceph/qa/suites/rbd/basic/cachepool/.qa | 1 + ceph/qa/suites/rbd/basic/clusters/.qa | 1 + ceph/qa/suites/rbd/basic/msgr-failures/.qa | 1 + ceph/qa/suites/rbd/basic/tasks/.qa | 1 + ceph/qa/suites/rbd/cli/.qa | 1 + ceph/qa/suites/rbd/cli/base/.qa | 1 + ceph/qa/suites/rbd/cli/features/.qa | 1 + ceph/qa/suites/rbd/cli/msgr-failures/.qa | 1 + ceph/qa/suites/rbd/cli/pool/.qa | 1 + ceph/qa/suites/rbd/cli/workloads/.qa | 1 + ceph/qa/suites/rbd/librbd/.qa | 1 + ceph/qa/suites/rbd/librbd/cache/.qa | 1 + ceph/qa/suites/rbd/librbd/clusters/.qa | 1 + ceph/qa/suites/rbd/librbd/config/.qa | 1 + ceph/qa/suites/rbd/librbd/msgr-failures/.qa | 1 + ceph/qa/suites/rbd/librbd/pool/.qa | 1 + ceph/qa/suites/rbd/librbd/workloads/.qa | 1 + ceph/qa/suites/rbd/maintenance/.qa | 1 + ceph/qa/suites/rbd/maintenance/base/.qa | 1 + ceph/qa/suites/rbd/maintenance/clusters/.qa | 1 + ceph/qa/suites/rbd/maintenance/qemu/.qa | 1 + .../suites/rbd/maintenance/qemu/xfstests.yaml | 2 +- ceph/qa/suites/rbd/maintenance/workloads/.qa | 1 + ceph/qa/suites/rbd/mirror-ha/.qa | 1 + ceph/qa/suites/rbd/mirror-ha/workloads/.qa | 1 + ceph/qa/suites/rbd/mirror/.qa | 1 + ceph/qa/suites/rbd/mirror/base/.qa | 1 + ceph/qa/suites/rbd/mirror/cluster/.qa | 1 + ceph/qa/suites/rbd/mirror/rbd-mirror/.qa | 1 + ceph/qa/suites/rbd/mirror/workloads/.qa | 1 + ceph/qa/suites/rbd/nbd/.qa | 1 + ceph/qa/suites/rbd/nbd/cluster/.qa | 1 + ceph/qa/suites/rbd/nbd/workloads/.qa | 1 + ceph/qa/suites/rbd/openstack/.qa | 1 + ceph/qa/suites/rbd/openstack/base/.qa | 1 + ceph/qa/suites/rbd/openstack/clusters/.qa | 1 + ceph/qa/suites/rbd/openstack/features/.qa | 1 + ceph/qa/suites/rbd/openstack/workloads/.qa | 1 + .../workloads/devstack-tempest-gate.yaml | 2 +- ceph/qa/suites/rbd/qemu/.qa | 1 + ceph/qa/suites/rbd/qemu/cache/.qa | 1 + ceph/qa/suites/rbd/qemu/clusters/.qa | 1 + ceph/qa/suites/rbd/qemu/features/.qa | 1 + ceph/qa/suites/rbd/qemu/msgr-failures/.qa | 1 + ceph/qa/suites/rbd/qemu/pool/.qa | 1 + ceph/qa/suites/rbd/qemu/workloads/.qa | 1 + .../rbd/qemu/workloads/qemu_bonnie.yaml | 2 +- .../rbd/qemu/workloads/qemu_fsstress.yaml | 2 +- .../qemu/workloads/qemu_iozone.yaml.disabled | 2 +- .../rbd/qemu/workloads/qemu_xfstests.yaml | 2 +- ceph/qa/suites/rbd/singleton-bluestore/.qa | 1 + .../qa/suites/rbd/singleton-bluestore/all/.qa | 1 + .../rbd/singleton-bluestore/objectstore/.qa | 1 + ceph/qa/suites/rbd/singleton/.qa | 1 + ceph/qa/suites/rbd/singleton/all/.qa | 1 + .../rbd/singleton/all/formatted-output.yaml | 2 +- ceph/qa/suites/rbd/thrash/.qa | 1 + ceph/qa/suites/rbd/thrash/base/.qa | 1 + ceph/qa/suites/rbd/thrash/clusters/.qa | 1 + ceph/qa/suites/rbd/thrash/msgr-failures/.qa | 1 + ceph/qa/suites/rbd/thrash/thrashers/.qa | 1 + ceph/qa/suites/rbd/thrash/workloads/.qa | 1 + ceph/qa/suites/rbd/valgrind/.qa | 1 + ceph/qa/suites/rbd/valgrind/base/.qa | 1 + ceph/qa/suites/rbd/valgrind/validator/.qa | 1 + ceph/qa/suites/rbd/valgrind/workloads/.qa | 1 + ceph/qa/suites/rgw/.qa | 1 + ceph/qa/suites/rgw/hadoop-s3a/.qa | 1 + ceph/qa/suites/rgw/hadoop-s3a/hadoop/.qa | 1 + ceph/qa/suites/rgw/multifs/.qa | 1 + ceph/qa/suites/rgw/multifs/clusters/.qa | 1 + ceph/qa/suites/rgw/multifs/frontend/.qa | 1 + ceph/qa/suites/rgw/multifs/tasks/.qa | 1 + ceph/qa/suites/rgw/multisite/.qa | 1 + 
ceph/qa/suites/rgw/multisite/realms/.qa | 1 + ceph/qa/suites/rgw/multisite/tasks/.qa | 1 + .../rgw/multisite/tasks/test_multi.yaml | 8 +- ceph/qa/suites/rgw/singleton/.qa | 1 + ceph/qa/suites/rgw/singleton/all/.qa | 1 + ceph/qa/suites/rgw/singleton/frontend/.qa | 1 + ceph/qa/suites/rgw/tempest/.qa | 1 + ceph/qa/suites/rgw/thrash/.qa | 1 + ceph/qa/suites/rgw/thrash/clusters/.qa | 1 + ceph/qa/suites/rgw/thrash/thrasher/.qa | 1 + ceph/qa/suites/rgw/thrash/workload/.qa | 1 + ceph/qa/suites/rgw/verify/.qa | 1 + ceph/qa/suites/rgw/verify/clusters/.qa | 1 + ceph/qa/suites/rgw/verify/msgr-failures/.qa | 1 + ceph/qa/suites/rgw/verify/tasks/+ | 0 ceph/qa/suites/rgw/verify/tasks/.qa | 1 + .../{rgw_s3tests.yaml => 0-install.yaml} | 7 +- ceph/qa/suites/rgw/verify/tasks/cls_rgw.yaml | 5 + .../qa/suites/rgw/verify/tasks/rgw_swift.yaml | 13 - ceph/qa/suites/rgw/verify/tasks/s3tests.yaml | 5 + ceph/qa/suites/rgw/verify/tasks/swift.yaml | 4 + ceph/qa/suites/rgw/verify/validater/.qa | 1 + ceph/qa/suites/samba/.qa | 1 + ceph/qa/suites/samba/clusters/.qa | 1 + ceph/qa/suites/samba/install/.qa | 1 + ceph/qa/suites/samba/mount/.qa | 1 + ceph/qa/suites/samba/workload/.qa | 1 + ceph/qa/suites/smoke/.qa | 1 + ceph/qa/suites/smoke/1node/.qa | 1 + ceph/qa/suites/smoke/1node/clusters/.qa | 1 + ceph/qa/suites/smoke/1node/distros/.qa | 1 + ceph/qa/suites/smoke/1node/objectstore/.qa | 1 + ceph/qa/suites/smoke/1node/tasks/.qa | 1 + ceph/qa/suites/smoke/basic/.qa | 1 + ceph/qa/suites/smoke/basic/clusters/.qa | 1 + ceph/qa/suites/smoke/basic/objectstore/.qa | 1 + ceph/qa/suites/smoke/basic/tasks/.qa | 1 + ceph/qa/suites/smoke/systemd/.qa | 1 + ceph/qa/suites/smoke/systemd/clusters/.qa | 1 + ceph/qa/suites/smoke/systemd/distros/.qa | 1 + ceph/qa/suites/smoke/systemd/objectstore/.qa | 1 + ceph/qa/suites/smoke/systemd/tasks/.qa | 1 + ceph/qa/suites/stress/.qa | 1 + ceph/qa/suites/stress/bench/.qa | 1 + ceph/qa/suites/stress/bench/clusters/.qa | 1 + ceph/qa/suites/stress/bench/tasks/.qa | 1 + ceph/qa/suites/stress/thrash/.qa | 1 + ceph/qa/suites/stress/thrash/clusters/.qa | 1 + ceph/qa/suites/stress/thrash/thrashers/.qa | 1 + ceph/qa/suites/stress/thrash/workloads/.qa | 1 + ceph/qa/suites/teuthology/.qa | 1 + ceph/qa/suites/teuthology/buildpackages/.qa | 1 + .../suites/teuthology/buildpackages/tasks/.qa | 1 + ceph/qa/suites/teuthology/ceph/.qa | 1 + ceph/qa/suites/teuthology/ceph/clusters/.qa | 1 + ceph/qa/suites/teuthology/ceph/tasks/.qa | 1 + ceph/qa/suites/teuthology/multi-cluster/.qa | 1 + .../suites/teuthology/multi-cluster/all/.qa | 1 + ceph/qa/suites/teuthology/no-ceph/.qa | 1 + .../qa/suites/teuthology/no-ceph/clusters/.qa | 1 + ceph/qa/suites/teuthology/no-ceph/tasks/.qa | 1 + ceph/qa/suites/teuthology/nop/.qa | 1 + ceph/qa/suites/teuthology/nop/all/.qa | 1 + ceph/qa/suites/teuthology/rgw/.qa | 1 + ceph/qa/suites/teuthology/rgw/tasks/.qa | 1 + ceph/qa/suites/teuthology/workunits/.qa | 1 + ceph/qa/suites/tgt/.qa | 1 + ceph/qa/suites/tgt/basic/.qa | 1 + ceph/qa/suites/tgt/basic/clusters/.qa | 1 + ceph/qa/suites/tgt/basic/msgr-failures/.qa | 1 + ceph/qa/suites/tgt/basic/tasks/.qa | 1 + ceph/qa/suites/upgrade/.qa | 1 + .../upgrade/client-upgrade-luminous/.qa | 1 + .../luminous-client-x/.qa | 1 + .../luminous-client-x/basic/.qa | 1 + .../luminous-client-x/basic/0-cluster/.qa | 1 + .../luminous-client-x/basic/1-install/.qa | 1 + .../luminous-client-x/basic/2-workload/.qa | 1 + .../luminous-client-x/basic/supported/.qa | 1 + .../luminous-client-x/rbd/.qa | 1 + .../luminous-client-x/rbd/0-cluster/.qa | 1 + 
.../luminous-client-x/rbd/1-install/.qa | 1 + .../luminous-client-x/rbd/2-features/.qa | 1 + .../luminous-client-x/rbd/3-workload/.qa | 1 + .../luminous-client-x/rbd/supported/.qa | 1 + ceph/qa/suites/upgrade/client-upgrade/.qa | 1 + .../client-upgrade/hammer-client-x/.qa | 1 + .../client-upgrade/hammer-client-x/basic/.qa | 1 + .../hammer-client-x/basic/0-cluster/.qa | 1 + .../hammer-client-x/basic/1-install/.qa | 1 + .../hammer-client-x/basic/2-workload/.qa | 1 + .../client-upgrade/hammer-client-x/rbd/.qa | 1 + .../hammer-client-x/rbd/0-cluster/.qa | 1 + .../hammer-client-x/rbd/1-install/.qa | 1 + .../hammer-client-x/rbd/2-workload/.qa | 1 + .../upgrade/client-upgrade/jewel-client-x/.qa | 1 + .../client-upgrade/jewel-client-x/basic/.qa | 1 + .../jewel-client-x/basic/0-cluster/.qa | 1 + .../jewel-client-x/basic/1-install/.qa | 1 + .../jewel-client-x/basic/2-workload/.qa | 1 + .../client-upgrade/jewel-client-x/rbd/.qa | 1 + .../jewel-client-x/rbd/0-cluster/.qa | 1 + .../jewel-client-x/rbd/1-install/.qa | 1 + .../jewel-client-x/rbd/2-features/.qa | 1 + .../jewel-client-x/rbd/3-workload/.qa | 1 + ceph/qa/suites/upgrade/hammer-jewel-x/.qa | 1 + .../upgrade/hammer-jewel-x/parallel/.qa | 1 + .../hammer-jewel-x/parallel/0-cluster/.qa | 1 + .../parallel/1-hammer-jewel-install/.qa | 1 + .../hammer-jewel-x/parallel/2-workload/.qa | 1 + .../parallel/3-upgrade-sequence/.qa | 1 + .../parallel/5-hammer-jewel-x-upgrade/.qa | 1 + .../hammer-jewel-x/parallel/6-workload/.qa | 1 + .../parallel/7-upgrade-sequence/.qa | 1 + .../parallel/9-final-workload/.qa | 1 + .../upgrade/hammer-jewel-x/stress-split/.qa | 1 + .../1-hammer-install-and-upgrade-to-jewel/.qa | 1 + .../suites/upgrade/hammer-jewel-x/tiering/.qa | 1 + .../hammer-jewel-x/tiering/0-cluster/.qa | 1 + .../1-install-hammer-and-upgrade-to-jewel/.qa | 1 + .../tiering/2-setup-cache-tiering/.qa | 1 + .../0-create-base-tier/.qa | 1 + ceph/qa/suites/upgrade/jewel-x/.qa | 1 + .../qa/suites/upgrade/jewel-x/ceph-deploy/.qa | 1 + .../upgrade/jewel-x/ceph-deploy/distros/.qa | 1 + ceph/qa/suites/upgrade/jewel-x/parallel/.qa | 1 + .../upgrade/jewel-x/parallel/0-cluster/.qa | 1 + .../jewel-x/parallel/1-jewel-install/.qa | 1 + .../parallel/1-jewel-install/jewel.yaml | 2 +- .../upgrade/jewel-x/parallel/2-workload/.qa | 1 + .../jewel-x/parallel/3-upgrade-sequence/.qa | 1 + .../jewel-x/parallel/7-final-workload/.qa | 1 + .../jewel-x/stress-split-erasure-code/.qa | 1 + .../stress-split-erasure-code/3-thrash/.qa | 1 + .../stress-split-erasure-code/4-workload/.qa | 1 + .../7-final-workload/.qa | 1 + .../suites/upgrade/jewel-x/stress-split/.qa | 1 + .../jewel-x/stress-split/0-cluster/.qa | 1 + .../jewel-x/stress-split/1-jewel-install/.qa | 1 + .../stress-split/1-jewel-install/jewel.yaml | 2 +- .../stress-split/2-partial-upgrade/.qa | 1 + .../upgrade/jewel-x/stress-split/3-thrash/.qa | 1 + .../jewel-x/stress-split/4-workload/.qa | 1 + .../jewel-x/stress-split/7-final-workload/.qa | 1 + ceph/qa/suites/upgrade/kraken-x/.qa | 1 + .../suites/upgrade/kraken-x/ceph-deploy/.qa | 1 + ceph/qa/suites/upgrade/kraken-x/parallel/.qa | 1 + .../upgrade/kraken-x/parallel/0-cluster/.qa | 1 + .../kraken-x/parallel/1-kraken-install/.qa | 1 + .../upgrade/kraken-x/parallel/2-workload/.qa | 1 + .../kraken-x/parallel/3-upgrade-sequence/.qa | 1 + .../kraken-x/parallel/7-final-workload/.qa | 1 + .../kraken-x/stress-split-erasure-code/.qa | 1 + .../stress-split-erasure-code/3-thrash/.qa | 1 + .../suites/upgrade/kraken-x/stress-split/.qa | 1 + .../kraken-x/stress-split/0-cluster/.qa | 1 + 
.../stress-split/1-kraken-install/.qa | 1 + .../stress-split/2-partial-upgrade/.qa | 1 + .../kraken-x/stress-split/3-thrash/.qa | 1 + .../kraken-x/stress-split/4-workload/.qa | 1 + .../stress-split/7-final-workload/.qa | 1 + .../kraken-x/stress-split/objectstore/.qa | 1 + ceph/qa/suites/upgrade/luminous-p2p/.qa | 1 + .../luminous-p2p/point-to-point-upgrade.yaml | 15 + ceph/qa/tasks/cephfs/filesystem.py | 7 + ceph/qa/tasks/cephfs/kernel_mount.py | 2 +- ceph/qa/tasks/cephfs/mount.py | 15 +- ceph/qa/tasks/cephfs/test_misc.py | 57 +- ceph/qa/tasks/cephfs/test_volume_client.py | 66 +- ceph/qa/tasks/cram.py | 39 +- ceph/qa/tasks/qemu.py | 34 +- ceph/qa/tasks/repair_test.py | 1 + ceph/qa/tasks/s3a_hadoop.py | 2 +- ceph/qa/tasks/scrub_test.py | 3 +- ceph/qa/tasks/util/workunit.py | 78 +++ ceph/qa/tasks/vstart_runner.py | 8 +- ceph/qa/tasks/workunit.py | 115 +--- .../qa/workunits/rados/test_librados_build.sh | 64 ++ ceph/qa/workunits/rbd/verify_pool.sh | 4 +- ceph/qa/workunits/suites/fsstress.sh | 27 +- ceph/selinux/ceph.fc | 1 + ceph/src/.git_version | 4 +- ceph/src/CMakeLists.txt | 16 +- ceph/src/auth/Crypto.cc | 5 +- ceph/src/ceph-disk/ceph_disk/main.py | 4 +- ceph/src/ceph-volume/bin/ceph-volume | 6 - ceph/src/ceph-volume/bin/ceph-volume-systemd | 6 - ceph/src/ceph-volume/ceph_volume/api/lvm.py | 52 +- .../ceph-volume/ceph_volume/configuration.py | 12 +- .../ceph_volume/devices/lvm/activate.py | 9 +- .../ceph_volume/devices/lvm/batch.py | 75 ++- .../ceph_volume/devices/lvm/common.py | 7 +- .../ceph_volume/devices/lvm/create.py | 13 +- .../ceph_volume/devices/lvm/prepare.py | 107 ++-- .../devices/lvm/strategies/bluestore.py | 311 +++++++--- .../devices/lvm/strategies/filestore.py | 244 +++++--- .../devices/lvm/strategies/validators.py | 22 +- .../ceph_volume/devices/lvm/zap.py | 20 +- .../ceph_volume/devices/simple/activate.py | 66 +- .../ceph_volume/devices/simple/scan.py | 11 +- .../ceph_volume/devices/simple/trigger.py | 2 +- .../ceph_volume/inventory/__init__.py | 1 + .../ceph-volume/ceph_volume/inventory/main.py | 46 ++ ceph/src/ceph-volume/ceph_volume/main.py | 18 +- .../ceph_volume/systemd/__init__.py | 1 + .../ceph_volume/tests/api/test_lvm.py | 52 +- .../ceph-volume/ceph_volume/tests/conftest.py | 57 +- .../tests/devices/lvm/strategies/__init__.py | 0 .../devices/lvm/strategies/test_bluestore.py | 139 +++++ .../devices/lvm/strategies/test_filestore.py | 210 +++++++ .../devices/lvm/strategies/test_validate.py | 52 ++ .../tests/devices/lvm/test_activate.py | 103 +++- .../tests/devices/lvm/test_batch.py | 61 ++ .../tests/devices/lvm/test_create.py | 9 +- .../tests/devices/lvm/test_prepare.py | 32 +- .../tests/devices/simple/test_activate.py | 83 +++ .../bluestore/mixed-type-dmcrypt/Vagrantfile | 1 + .../mixed-type-dmcrypt/group_vars/all | 32 + .../bluestore/mixed-type-dmcrypt/hosts | 8 + .../bluestore/mixed-type-dmcrypt/setup.yml | 1 + .../bluestore/mixed-type-dmcrypt/test.yml | 1 + .../mixed-type-dmcrypt/vagrant_variables.yml | 56 ++ .../centos7/bluestore/mixed-type/Vagrantfile | 1 + .../bluestore/mixed-type/group_vars/all | 31 + .../batch/centos7/bluestore/mixed-type/hosts | 8 + .../centos7/bluestore/mixed-type/setup.yml | 1 + .../centos7/bluestore/mixed-type/test.yml | 1 + .../mixed-type/vagrant_variables.yml | 56 ++ .../single-type-dmcrypt/group_vars/all | 9 +- .../bluestore/single-type-dmcrypt/setup.yml | 1 + .../bluestore/single-type-dmcrypt/test.yml | 2 +- .../bluestore/single-type/group_vars/all | 9 +- .../centos7/bluestore/single-type/setup.yml | 1 + 
.../centos7/bluestore/single-type/test.yml | 2 +- .../filestore/mixed-type-dmcrypt/Vagrantfile | 1 + .../mixed-type-dmcrypt/group_vars/all | 32 + .../filestore/mixed-type-dmcrypt/hosts | 8 + .../filestore/mixed-type-dmcrypt/setup.yml | 1 + .../filestore/mixed-type-dmcrypt/test.yml | 1 + .../mixed-type-dmcrypt/vagrant_variables.yml | 56 ++ .../centos7/filestore/mixed-type/Vagrantfile | 1 + .../filestore/mixed-type/group_vars/all | 31 + .../batch/centos7/filestore/mixed-type/hosts | 8 + .../centos7/filestore/mixed-type/setup.yml | 1 + .../centos7/filestore/mixed-type/test.yml | 1 + .../mixed-type/vagrant_variables.yml | 56 ++ .../single-type-dmcrypt/group_vars/all | 9 +- .../filestore/single-type-dmcrypt/setup.yml | 1 + .../filestore/single-type-dmcrypt/test.yml | 2 +- .../filestore/single-type/group_vars/all | 9 +- .../centos7/filestore/single-type/setup.yml | 1 + .../centos7/filestore/single-type/test.yml | 2 +- .../tests/functional/batch/playbooks/noop.yml | 12 + .../batch/playbooks/setup_mixed_type.yml | 140 +++++ .../tests/functional/batch/playbooks/test.yml | 63 ++ .../batch/playbooks/test_bluestore.yml | 46 -- .../playbooks/test_bluestore_dmcrypt.yml | 46 -- .../batch/playbooks/test_filestore.yml | 46 -- .../playbooks/test_filestore_dmcrypt.yml | 46 -- .../tests/functional/batch/tox.ini | 15 +- .../single-type-dmcrypt/group_vars/all | 9 +- .../bluestore/single-type-dmcrypt/setup.yml | 1 + .../bluestore/single-type-dmcrypt/test.yml | 2 +- .../bluestore/single-type/group_vars/all | 9 +- .../xenial/bluestore/single-type/setup.yml | 1 + .../xenial/bluestore/single-type/test.yml | 2 +- .../single-type-dmcrypt/group_vars/all | 9 +- .../filestore/single-type-dmcrypt/setup.yml | 1 + .../filestore/single-type-dmcrypt/test.yml | 2 +- .../filestore/single-type/group_vars/all | 9 +- .../xenial/filestore/single-type/setup.yml | 1 + .../xenial/filestore/single-type/test.yml | 2 +- .../centos7/bluestore/create/group_vars/all | 2 +- .../centos7/bluestore/dmcrypt/group_vars/all | 2 +- .../lvm/centos7/bluestore/dmcrypt/test.yml | 18 +- .../centos7/filestore/create/group_vars/all | 2 +- .../centos7/filestore/dmcrypt/group_vars/all | 2 +- .../lvm/centos7/filestore/dmcrypt/test.yml | 16 +- .../lvm/playbooks/test_bluestore.yml | 53 +- .../lvm/playbooks/test_filestore.yml | 59 +- .../ceph_volume/tests/functional/lvm/tox.ini | 9 +- .../xenial/bluestore/create/group_vars/all | 2 +- .../xenial/bluestore/dmcrypt/group_vars/all | 2 +- .../lvm/xenial/bluestore/dmcrypt/test.yml | 18 +- .../xenial/filestore/create/group_vars/all | 2 +- .../xenial/filestore/dmcrypt/group_vars/all | 2 +- .../lvm/xenial/filestore/dmcrypt/test.yml | 16 +- .../tests/functional/playbooks/deploy.yml | 60 +- .../tests/functional/simple/tox.ini | 9 +- .../ceph_volume/tests/systemd/test_main.py | 14 +- .../ceph_volume/tests/test_inventory.py | 108 ++++ .../ceph_volume/tests/test_main.py | 9 +- .../tests/util/test_arg_validators.py | 25 - .../ceph_volume/tests/util/test_device.py | 183 +++++- .../ceph_volume/tests/util/test_disk.py | 62 +- .../ceph_volume/util/arg_validators.py | 50 +- .../ceph-volume/ceph_volume/util/device.py | 272 ++++++++- ceph/src/ceph-volume/ceph_volume/util/disk.py | 118 +++- .../ceph_volume/util/encryption.py | 12 +- .../ceph-volume/ceph_volume/util/prepare.py | 35 ++ .../ceph-volume/ceph_volume/util/templates.py | 16 + ceph/src/ceph-volume/setup.py | 8 +- ceph/src/ceph_osd.cc | 2 +- ceph/src/client/Client.cc | 81 ++- ceph/src/client/Client.h | 6 +- ceph/src/client/Dir.h | 2 + ceph/src/client/fuse_ll.cc | 2 +- 
ceph/src/cls/rgw/cls_rgw.cc | 47 +- ceph/src/cls/rgw/cls_rgw_types.cc | 49 ++ ceph/src/cls/rgw/cls_rgw_types.h | 4 + ceph/src/cls/user/cls_user.cc | 21 +- ceph/src/common/HeartbeatMap.cc | 2 +- ceph/src/common/OutputDataSocket.cc | 22 +- ceph/src/common/Preforker.h | 17 +- ceph/src/common/PriorityCache.cc | 29 + ceph/src/common/PriorityCache.h | 69 +++ ceph/src/common/admin_socket.cc | 23 +- ceph/src/common/admin_socket_client.cc | 10 +- ceph/src/common/autovector.h | 334 +++++++++++ ceph/src/common/buffer.cc | 4 +- ceph/src/common/ceph_time.h | 8 + ceph/src/common/compat.cc | 118 +++- ceph/src/common/config.cc | 2 +- ceph/src/common/hostname.cc | 6 + ceph/src/common/legacy_config_opts.h | 8 +- ceph/src/common/options.cc | 96 ++- ceph/src/common/pipe.c | 58 -- ceph/src/common/pipe.h | 33 - ceph/src/common/util.cc | 12 + ceph/src/crush/CrushWrapper.cc | 12 + ceph/src/global/global_init.cc | 3 +- ceph/src/global/pidfile.cc | 2 +- ceph/src/global/signal_handler.cc | 4 +- ceph/src/include/compat.h | 2 + ceph/src/include/denc.h | 2 +- ceph/src/include/encoding.h | 4 +- ceph/src/include/filepath.h | 13 +- ceph/src/include/rados.h | 1 + ceph/src/include/sock_compat.h | 16 + ceph/src/include/utime.h | 4 + ceph/src/include/uuid.h | 7 +- ceph/src/kv/CMakeLists.txt | 4 +- ceph/src/kv/KeyValueDB.h | 63 +- ceph/src/kv/MemDB.cc | 4 +- ceph/src/kv/RocksDBStore.cc | 111 +++- ceph/src/kv/RocksDBStore.h | 19 +- ceph/src/kv/rocksdb_cache/BinnedLRUCache.cc | 565 ++++++++++++++++++ ceph/src/kv/rocksdb_cache/BinnedLRUCache.h | 319 ++++++++++ ceph/src/kv/rocksdb_cache/ShardedCache.cc | 159 +++++ ceph/src/kv/rocksdb_cache/ShardedCache.h | 111 ++++ ceph/src/librbd/ExclusiveLock.cc | 11 +- ceph/src/librbd/ExclusiveLock.h | 3 +- ceph/src/librbd/ImageCtx.cc | 10 +- ceph/src/librbd/ImageCtx.h | 6 +- ceph/src/librbd/ImageWatcher.cc | 2 +- ceph/src/librbd/ManagedLock.cc | 109 ++-- ceph/src/librbd/ManagedLock.h | 5 +- ceph/src/librbd/ObjectMap.cc | 12 +- ceph/src/librbd/ObjectMap.h | 21 +- ceph/src/librbd/Operations.cc | 62 +- ceph/src/librbd/Operations.h | 15 +- ceph/src/librbd/Watcher.cc | 20 +- ceph/src/librbd/Watcher.h | 7 + ceph/src/librbd/image/CreateRequest.cc | 123 +--- ceph/src/librbd/image/CreateRequest.h | 12 +- ceph/src/librbd/internal.cc | 13 +- ceph/src/librbd/io/CopyupRequest.cc | 6 +- ceph/src/librbd/io/ImageRequest.cc | 98 ++- ceph/src/librbd/io/ImageRequest.h | 5 +- ceph/src/librbd/io/ImageRequestWQ.cc | 4 +- ceph/src/librbd/io/ObjectRequest.cc | 5 +- ceph/src/librbd/mirror/DemoteRequest.cc | 5 +- ceph/src/librbd/mirror/DisableRequest.cc | 7 +- ceph/src/librbd/object_map/Request.cc | 3 +- .../object_map/SnapshotCreateRequest.cc | 3 +- .../object_map/SnapshotRemoveRequest.cc | 212 +++---- .../librbd/object_map/SnapshotRemoveRequest.h | 34 +- ceph/src/librbd/object_map/UpdateRequest.cc | 9 +- ceph/src/librbd/object_map/UpdateRequest.h | 13 +- .../operation/DisableFeaturesRequest.cc | 12 +- ceph/src/librbd/operation/FlattenRequest.cc | 2 +- .../librbd/operation/SnapshotCreateRequest.cc | 12 +- ceph/src/librbd/operation/TrimRequest.cc | 4 +- ceph/src/librbd/watcher/RewatchRequest.cc | 29 +- ceph/src/log/Log.cc | 10 +- ceph/src/mds/Beacon.cc | 250 ++++---- ceph/src/mds/Beacon.h | 54 +- ceph/src/mds/CDentry.cc | 4 +- ceph/src/mds/CDentry.h | 2 +- ceph/src/mds/CDir.cc | 66 +- ceph/src/mds/CDir.h | 23 +- ceph/src/mds/CInode.cc | 54 +- ceph/src/mds/CInode.h | 31 +- ceph/src/mds/Locker.cc | 46 +- ceph/src/mds/Locker.h | 6 +- ceph/src/mds/MDBalancer.cc | 110 ++-- ceph/src/mds/MDBalancer.h | 12 +- 
ceph/src/mds/MDCache.cc | 133 +++-- ceph/src/mds/MDCache.h | 41 +- ceph/src/mds/MDLog.cc | 35 +- ceph/src/mds/MDSCacheObject.h | 9 +- ceph/src/mds/MDSContext.cc | 52 ++ ceph/src/mds/MDSContext.h | 31 +- ceph/src/mds/MDSDaemon.cc | 18 +- ceph/src/mds/MDSRank.cc | 137 +++-- ceph/src/mds/MDSRank.h | 21 +- ceph/src/mds/MDSTable.cc | 6 + ceph/src/mds/MDSTable.h | 3 + ceph/src/mds/Migrator.cc | 540 ++++++++++++----- ceph/src/mds/Migrator.h | 53 +- ceph/src/mds/Mutation.cc | 69 ++- ceph/src/mds/Mutation.h | 54 +- ceph/src/mds/PurgeQueue.cc | 10 +- ceph/src/mds/RecoveryQueue.cc | 6 +- ceph/src/mds/ScatterLock.h | 7 +- ceph/src/mds/Server.cc | 550 ++++++++++------- ceph/src/mds/Server.h | 68 ++- ceph/src/mds/SessionMap.cc | 117 +++- ceph/src/mds/SessionMap.h | 103 +++- ceph/src/mds/StrayManager.cc | 4 + ceph/src/messages/MMDSBeacon.h | 12 +- ceph/src/mgr/MgrClient.cc | 30 +- ceph/src/mgr/MgrClient.h | 8 +- ceph/src/mon/LogMonitor.cc | 2 +- ceph/src/mon/MDSMonitor.cc | 63 +- ceph/src/mon/Monitor.cc | 2 +- ceph/src/mon/MonitorDBStore.h | 2 +- ceph/src/mon/OSDMonitor.cc | 21 + ceph/src/mon/OSDMonitor.h | 3 +- ceph/src/msg/Message.cc | 2 +- ceph/src/msg/async/AsyncConnection.cc | 8 +- ceph/src/msg/async/AsyncMessenger.cc | 14 +- ceph/src/msg/async/Event.cc | 8 +- ceph/src/msg/async/EventEpoll.cc | 9 + ceph/src/msg/async/PosixStack.cc | 5 +- ceph/src/msg/async/net_handler.cc | 22 +- ceph/src/msg/async/net_handler.h | 1 - .../msg/async/rdma/RDMAConnectedSocketImpl.cc | 1 - .../msg/async/rdma/RDMAServerSocketImpl.cc | 7 +- ceph/src/msg/simple/Accepter.cc | 62 +- ceph/src/msg/simple/Pipe.cc | 10 +- ceph/src/os/CMakeLists.txt | 5 +- .../src/os/bluestore/BitmapFreelistManager.cc | 2 +- ceph/src/os/bluestore/BlueStore.cc | 452 ++++++++++---- ceph/src/os/bluestore/BlueStore.h | 149 ++++- ceph/src/os/bluestore/KernelDevice.cc | 4 +- ceph/src/os/bluestore/NVMEDevice.cc | 2 +- ceph/src/os/bluestore/PMEMDevice.cc | 2 +- ceph/src/os/bluestore/bluestore_tool.cc | 4 +- .../src/os/filestore/BtrfsFileStoreBackend.cc | 6 +- ceph/src/os/filestore/FileJournal.cc | 2 +- ceph/src/os/filestore/FileStore.cc | 52 +- .../os/filestore/GenericFileStoreBackend.cc | 13 +- ceph/src/os/filestore/LFNIndex.cc | 4 +- ceph/src/os/kstore/KStore.cc | 2 +- ceph/src/osd/ECBackend.cc | 38 +- ceph/src/osd/OSD.cc | 50 +- ceph/src/osd/OSD.h | 3 +- ceph/src/osd/OSDMap.cc | 15 +- ceph/src/osd/PG.cc | 284 +++++++-- ceph/src/osd/PG.h | 108 +++- ceph/src/osd/PGBackend.cc | 193 ++++-- ceph/src/osd/PGBackend.h | 8 +- ceph/src/osd/PrimaryLogPG.cc | 91 +-- ceph/src/osd/ReplicatedBackend.cc | 3 +- ceph/src/osd/osd_types.cc | 15 + ceph/src/osd/osd_types.h | 1 + ceph/src/osdc/Objecter.cc | 9 +- ceph/src/pybind/ceph_volume_client.py | 92 ++- ceph/src/pybind/cephfs/cephfs.pyx | 4 +- ceph/src/pybind/mgr/balancer/module.py | 4 +- ceph/src/pybind/mgr/prometheus/module.py | 412 ++++++------- ceph/src/pybind/rados/rados.pyx | 29 +- ceph/src/rgw/rgw_admin.cc | 37 +- ceph/src/rgw/rgw_asio_frontend.cc | 27 +- ceph/src/rgw/rgw_bucket.cc | 4 + ceph/src/rgw/rgw_bucket.h | 9 + ceph/src/rgw/rgw_client_io_filters.h | 10 +- ceph/src/rgw/rgw_cr_rados.cc | 53 +- ceph/src/rgw/rgw_cr_rados.h | 124 ++-- ceph/src/rgw/rgw_data_sync.cc | 503 +++++++++------- ceph/src/rgw/rgw_data_sync.h | 25 +- ceph/src/rgw/rgw_file.h | 3 + ceph/src/rgw/rgw_http_client.cc | 23 +- ceph/src/rgw/rgw_lc.cc | 19 +- ceph/src/rgw/rgw_lc.h | 2 +- ceph/src/rgw/rgw_lc_s3.cc | 5 + ceph/src/rgw/rgw_main.cc | 9 +- ceph/src/rgw/rgw_multi.cc | 2 +- ceph/src/rgw/rgw_op.cc | 2 + ceph/src/rgw/rgw_rados.cc | 50 +- 
ceph/src/rgw/rgw_rados.h | 16 +- ceph/src/rgw/rgw_reshard.cc | 1 + ceph/src/rgw/rgw_sync.cc | 19 + ceph/src/rgw/rgw_sync.h | 58 +- ceph/src/rgw/rgw_sync_module.h | 2 +- ceph/src/rgw/rgw_sync_module_es.cc | 8 +- ceph/src/rgw/rgw_sync_module_log.cc | 4 +- ceph/src/rgw/rgw_user.cc | 3 +- ceph/src/test/cls_rbd/test_cls_rbd.cc | 14 +- ceph/src/test/common/CMakeLists.txt | 2 +- ceph/src/test/common/test_config.cc | 3 +- ceph/src/test/common/test_hostname.cc | 20 +- ceph/src/test/common/test_mutex.cc | 16 +- ceph/src/test/encoding/types.h | 1 + ceph/src/test/libcephfs/test.cc | 38 ++ ceph/src/test/librbd/CMakeLists.txt | 1 + .../test/librbd/io/test_mock_ImageRequest.cc | 4 + .../librbd/io/test_mock_ImageRequestWQ.cc | 42 ++ .../test/librbd/io/test_mock_ObjectRequest.cc | 4 +- .../librbd/mirror/test_mock_DisableRequest.cc | 2 +- ceph/src/test/librbd/mock/MockExclusiveLock.h | 5 +- ceph/src/test/librbd/mock/MockImageWatcher.h | 2 + ceph/src/test/librbd/mock/MockObjectMap.h | 15 +- .../object_map/test_mock_InvalidateRequest.cc | 6 +- .../test_mock_SnapshotRemoveRequest.cc | 13 + .../test_mock_SnapshotRollbackRequest.cc | 6 +- .../object_map/test_mock_UpdateRequest.cc | 39 +- .../test_mock_SnapshotCreateRequest.cc | 4 +- .../librbd/operation/test_mock_TrimRequest.cc | 4 +- ceph/src/test/librbd/test_ObjectMap.cc | 29 +- ceph/src/test/librbd/test_librbd.cc | 73 +++ ceph/src/test/librbd/test_mock_ManagedLock.cc | 192 +++++- ceph/src/test/librbd/test_mock_ObjectMap.cc | 41 +- ..._ObjectWatcher.cc => test_mock_Watcher.cc} | 214 +++---- .../watcher/test_mock_RewatchRequest.cc | 35 +- ceph/src/test/objectstore/test_bluefs.cc | 3 + ceph/src/test/objectstore/test_kv.cc | 2 +- ceph/src/test/osd/TestOSDMap.cc | 31 + ceph/src/test/pybind/test_rados.py | 4 +- .../image_sync/test_mock_ObjectCopyRequest.cc | 10 +- .../test_mock_SnapshotCopyRequest.cc | 2 +- .../test_mock_SnapshotCreateRequest.cc | 2 +- ceph/src/test/rbd_mirror/test_ImageSync.cc | 3 +- .../test/rbd_mirror/test_mock_ImageSync.cc | 2 +- .../rbd_mirror/test_mock_LeaderWatcher.cc | 5 + ceph/src/test/rgw/rgw_multi/tests.py | 12 + ceph/src/tools/ceph_objectstore_tool.cc | 16 +- ceph/src/tools/cephfs/DataScan.cc | 28 +- ceph/src/tools/cephfs/Dumper.cc | 86 ++- ceph/src/tools/cephfs/Dumper.h | 2 +- ceph/src/tools/cephfs/JournalTool.cc | 17 +- ceph/src/tools/cephfs/JournalTool.h | 2 +- ceph/src/tools/rbd/action/Import.cc | 8 +- ceph/src/tools/rbd_mirror/ImageReplayer.cc | 5 +- ceph/src/tools/rbd_mirror/ImageSync.cc | 5 +- ceph/src/tools/rbd_mirror/LeaderWatcher.cc | 7 + ceph/src/tools/rbd_mirror/LeaderWatcher.h | 2 + .../image_sync/ObjectCopyRequest.cc | 21 +- .../rbd_mirror/image_sync/ObjectCopyRequest.h | 2 +- .../image_sync/SnapshotCopyRequest.cc | 25 +- .../image_sync/SnapshotCopyRequest.h | 2 +- .../image_sync/SnapshotCreateRequest.cc | 47 +- .../image_sync/SnapshotCreateRequest.h | 2 +- 1027 files changed, 12476 insertions(+), 4301 deletions(-) create mode 100644 ceph/doc/ceph-volume/inventory.rst create mode 100644 ceph/qa/cephfs/clusters/1a3s-mds-1c-client.yaml create mode 100644 ceph/qa/cephfs/clusters/1a3s-mds-2c-client.yaml rename ceph/qa/{suites/knfs/basic/% => cephfs/conf/+} (100%) rename ceph/qa/cephfs/{overrides/debug.yaml => conf/client.yaml} (62%) create mode 100644 ceph/qa/cephfs/conf/mds.yaml create mode 100644 ceph/qa/cephfs/conf/mon.yaml create mode 100644 ceph/qa/cephfs/conf/osd.yaml create mode 120000 ceph/qa/suites/.qa create mode 120000 ceph/qa/suites/big/.qa create mode 120000 ceph/qa/suites/big/rados-thrash/.qa create mode 120000 
ceph/qa/suites/big/rados-thrash/ceph/.qa create mode 120000 ceph/qa/suites/big/rados-thrash/clusters/.qa create mode 120000 ceph/qa/suites/big/rados-thrash/thrashers/.qa create mode 120000 ceph/qa/suites/big/rados-thrash/workloads/.qa create mode 120000 ceph/qa/suites/buildpackages/.qa create mode 120000 ceph/qa/suites/buildpackages/any/.qa create mode 120000 ceph/qa/suites/buildpackages/any/tasks/.qa create mode 120000 ceph/qa/suites/buildpackages/tests/.qa create mode 120000 ceph/qa/suites/buildpackages/tests/tasks/.qa create mode 120000 ceph/qa/suites/ceph-ansible/.qa create mode 120000 ceph/qa/suites/ceph-ansible/smoke/.qa create mode 120000 ceph/qa/suites/ceph-ansible/smoke/basic/.qa create mode 120000 ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/.qa delete mode 100644 ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml~10fc85089c... qa_tests - Added options to use both cases: mon.a and installer.0 create mode 120000 ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/.qa create mode 120000 ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/.qa create mode 120000 ceph/qa/suites/ceph-ansible/smoke/basic/3-config/.qa create mode 120000 ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/.qa create mode 120000 ceph/qa/suites/ceph-deploy/.qa create mode 120000 ceph/qa/suites/ceph-deploy/basic/.qa create mode 120000 ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/.qa create mode 120000 ceph/qa/suites/ceph-deploy/basic/config_options/.qa create mode 120000 ceph/qa/suites/ceph-deploy/basic/objectstore/.qa create mode 120000 ceph/qa/suites/ceph-deploy/basic/python_versions/.qa create mode 120000 ceph/qa/suites/ceph-deploy/basic/tasks/.qa create mode 120000 ceph/qa/suites/ceph-deploy/ceph-volume/.qa create mode 120000 ceph/qa/suites/ceph-deploy/ceph-volume/cluster/.qa create mode 120000 ceph/qa/suites/ceph-deploy/ceph-volume/config/.qa create mode 120000 ceph/qa/suites/ceph-deploy/ceph-volume/distros/.qa create mode 120000 ceph/qa/suites/ceph-deploy/ceph-volume/tasks/.qa create mode 120000 ceph/qa/suites/ceph-disk/.qa create mode 120000 ceph/qa/suites/ceph-disk/basic/.qa create mode 120000 ceph/qa/suites/ceph-disk/basic/tasks/.qa create mode 120000 ceph/qa/suites/dummy/.qa create mode 120000 ceph/qa/suites/dummy/all/.qa create mode 120000 ceph/qa/suites/experimental/.qa create mode 120000 ceph/qa/suites/experimental/multimds/.qa create mode 120000 ceph/qa/suites/experimental/multimds/clusters/.qa create mode 120000 ceph/qa/suites/experimental/multimds/tasks/.qa create mode 120000 ceph/qa/suites/fs/.qa create mode 120000 ceph/qa/suites/fs/32bits/.qa create mode 120000 ceph/qa/suites/fs/32bits/clusters/.qa create mode 120000 ceph/qa/suites/fs/32bits/conf create mode 120000 ceph/qa/suites/fs/32bits/mount/.qa create mode 120000 ceph/qa/suites/fs/32bits/overrides/.qa delete mode 120000 ceph/qa/suites/fs/32bits/overrides/debug.yaml create mode 120000 ceph/qa/suites/fs/32bits/tasks/.qa create mode 120000 ceph/qa/suites/fs/basic_functional/.qa create mode 120000 ceph/qa/suites/fs/basic_functional/clusters/.qa create mode 120000 ceph/qa/suites/fs/basic_functional/conf create mode 120000 ceph/qa/suites/fs/basic_functional/mount/.qa create mode 120000 ceph/qa/suites/fs/basic_functional/objectstore/.qa create mode 120000 ceph/qa/suites/fs/basic_functional/overrides/.qa delete mode 120000 ceph/qa/suites/fs/basic_functional/overrides/debug.yaml create mode 120000 ceph/qa/suites/fs/basic_functional/tasks/.qa create mode 100644 ceph/qa/suites/fs/basic_functional/tasks/volume-client/% create mode 
100644 ceph/qa/suites/fs/basic_functional/tasks/volume-client/py/2.yaml create mode 100644 ceph/qa/suites/fs/basic_functional/tasks/volume-client/py/3.yaml rename ceph/qa/suites/fs/basic_functional/tasks/{volume-client.yaml => volume-client/test.yaml} (100%) create mode 120000 ceph/qa/suites/fs/basic_workload/.qa create mode 120000 ceph/qa/suites/fs/basic_workload/clusters/.qa create mode 120000 ceph/qa/suites/fs/basic_workload/conf create mode 120000 ceph/qa/suites/fs/basic_workload/inline/.qa create mode 120000 ceph/qa/suites/fs/basic_workload/mount/.qa create mode 120000 ceph/qa/suites/fs/basic_workload/omap_limit/.qa create mode 120000 ceph/qa/suites/fs/basic_workload/overrides/.qa delete mode 120000 ceph/qa/suites/fs/basic_workload/overrides/debug.yaml create mode 120000 ceph/qa/suites/fs/basic_workload/tasks/.qa create mode 120000 ceph/qa/suites/fs/bugs/.qa create mode 120000 ceph/qa/suites/fs/bugs/client_trim_caps/.qa create mode 120000 ceph/qa/suites/fs/bugs/client_trim_caps/clusters/.qa create mode 120000 ceph/qa/suites/fs/bugs/client_trim_caps/objectstore/.qa create mode 120000 ceph/qa/suites/fs/bugs/client_trim_caps/overrides/.qa delete mode 120000 ceph/qa/suites/fs/bugs/client_trim_caps/overrides/debug.yaml create mode 120000 ceph/qa/suites/fs/bugs/client_trim_caps/tasks/.qa create mode 120000 ceph/qa/suites/fs/bugs/conf create mode 120000 ceph/qa/suites/fs/multiclient/.qa create mode 120000 ceph/qa/suites/fs/multiclient/clusters/.qa create mode 120000 ceph/qa/suites/fs/multiclient/conf create mode 120000 ceph/qa/suites/fs/multiclient/mount/.qa create mode 120000 ceph/qa/suites/fs/multiclient/overrides/.qa delete mode 120000 ceph/qa/suites/fs/multiclient/overrides/debug.yaml create mode 120000 ceph/qa/suites/fs/multiclient/tasks/.qa create mode 120000 ceph/qa/suites/fs/multifs/.qa create mode 120000 ceph/qa/suites/fs/multifs/clusters/.qa create mode 120000 ceph/qa/suites/fs/multifs/clusters/1a3s-mds-2c-client.yaml create mode 120000 ceph/qa/suites/fs/multifs/conf create mode 120000 ceph/qa/suites/fs/multifs/mount/.qa create mode 120000 ceph/qa/suites/fs/multifs/overrides/.qa delete mode 120000 ceph/qa/suites/fs/multifs/overrides/debug.yaml create mode 120000 ceph/qa/suites/fs/multifs/tasks/.qa create mode 120000 ceph/qa/suites/fs/permission/.qa create mode 120000 ceph/qa/suites/fs/permission/clusters/.qa create mode 120000 ceph/qa/suites/fs/permission/conf create mode 120000 ceph/qa/suites/fs/permission/mount/.qa create mode 120000 ceph/qa/suites/fs/permission/overrides/.qa delete mode 120000 ceph/qa/suites/fs/permission/overrides/debug.yaml create mode 120000 ceph/qa/suites/fs/permission/tasks/.qa create mode 120000 ceph/qa/suites/fs/snaps/.qa create mode 120000 ceph/qa/suites/fs/snaps/clusters/.qa create mode 120000 ceph/qa/suites/fs/snaps/conf create mode 120000 ceph/qa/suites/fs/snaps/mount/.qa create mode 120000 ceph/qa/suites/fs/snaps/overrides/.qa delete mode 120000 ceph/qa/suites/fs/snaps/overrides/debug.yaml create mode 120000 ceph/qa/suites/fs/snaps/tasks/.qa create mode 120000 ceph/qa/suites/fs/thrash/.qa create mode 120000 ceph/qa/suites/fs/thrash/ceph-thrash/.qa create mode 120000 ceph/qa/suites/fs/thrash/clusters/.qa create mode 120000 ceph/qa/suites/fs/thrash/conf create mode 120000 ceph/qa/suites/fs/thrash/mount/.qa create mode 120000 ceph/qa/suites/fs/thrash/msgr-failures/.qa create mode 120000 ceph/qa/suites/fs/thrash/overrides/.qa delete mode 120000 ceph/qa/suites/fs/thrash/overrides/debug.yaml create mode 120000 ceph/qa/suites/fs/thrash/tasks/.qa create mode 
120000 ceph/qa/suites/fs/traceless/.qa create mode 120000 ceph/qa/suites/fs/traceless/clusters/.qa create mode 120000 ceph/qa/suites/fs/traceless/conf create mode 120000 ceph/qa/suites/fs/traceless/mount/.qa create mode 120000 ceph/qa/suites/fs/traceless/overrides/.qa delete mode 120000 ceph/qa/suites/fs/traceless/overrides/debug.yaml create mode 120000 ceph/qa/suites/fs/traceless/tasks/.qa create mode 120000 ceph/qa/suites/fs/traceless/traceless/.qa create mode 120000 ceph/qa/suites/fs/verify/.qa create mode 120000 ceph/qa/suites/fs/verify/clusters/.qa create mode 120000 ceph/qa/suites/fs/verify/conf create mode 120000 ceph/qa/suites/fs/verify/mount/.qa create mode 120000 ceph/qa/suites/fs/verify/overrides/.qa delete mode 120000 ceph/qa/suites/fs/verify/overrides/debug.yaml create mode 120000 ceph/qa/suites/fs/verify/tasks/.qa create mode 120000 ceph/qa/suites/fs/verify/validater/.qa create mode 120000 ceph/qa/suites/hadoop/.qa create mode 120000 ceph/qa/suites/hadoop/basic/.qa create mode 120000 ceph/qa/suites/hadoop/basic/clusters/.qa create mode 120000 ceph/qa/suites/hadoop/basic/tasks/.qa create mode 120000 ceph/qa/suites/kcephfs/.qa create mode 120000 ceph/qa/suites/kcephfs/cephfs/.qa create mode 120000 ceph/qa/suites/kcephfs/cephfs/clusters/.qa create mode 120000 ceph/qa/suites/kcephfs/cephfs/conf create mode 120000 ceph/qa/suites/kcephfs/cephfs/inline/.qa create mode 120000 ceph/qa/suites/kcephfs/cephfs/overrides/.qa delete mode 120000 ceph/qa/suites/kcephfs/cephfs/overrides/debug.yaml rename ceph/qa/suites/kcephfs/{mixed-clients/conf.yaml => cephfs/overrides/ms-die-on-skipped.yaml} (72%) create mode 120000 ceph/qa/suites/kcephfs/cephfs/tasks/.qa create mode 120000 ceph/qa/suites/kcephfs/mixed-clients/.qa create mode 120000 ceph/qa/suites/kcephfs/mixed-clients/clusters/.qa create mode 120000 ceph/qa/suites/kcephfs/mixed-clients/conf create mode 120000 ceph/qa/suites/kcephfs/mixed-clients/overrides/.qa delete mode 120000 ceph/qa/suites/kcephfs/mixed-clients/overrides/debug.yaml rename ceph/qa/suites/kcephfs/{thrash/conf.yaml => mixed-clients/overrides/ms-die-on-skipped.yaml} (72%) create mode 120000 ceph/qa/suites/kcephfs/mixed-clients/tasks/.qa create mode 120000 ceph/qa/suites/kcephfs/recovery/.qa create mode 120000 ceph/qa/suites/kcephfs/recovery/clusters/.qa create mode 120000 ceph/qa/suites/kcephfs/recovery/conf delete mode 100644 ceph/qa/suites/kcephfs/recovery/debug/mds_client.yaml delete mode 100644 ceph/qa/suites/kcephfs/recovery/dirfrag/frag_enable.yaml create mode 120000 ceph/qa/suites/kcephfs/recovery/mounts/.qa create mode 120000 ceph/qa/suites/kcephfs/recovery/overrides/.qa delete mode 120000 ceph/qa/suites/kcephfs/recovery/overrides/debug.yaml create mode 120000 ceph/qa/suites/kcephfs/recovery/tasks/.qa create mode 120000 ceph/qa/suites/kcephfs/thrash/.qa create mode 120000 ceph/qa/suites/kcephfs/thrash/clusters/.qa create mode 120000 ceph/qa/suites/kcephfs/thrash/conf create mode 120000 ceph/qa/suites/kcephfs/thrash/overrides/.qa delete mode 120000 ceph/qa/suites/kcephfs/thrash/overrides/debug.yaml rename ceph/qa/suites/kcephfs/{cephfs/conf.yaml => thrash/overrides/ms-die-on-skipped.yaml} (71%) create mode 120000 ceph/qa/suites/kcephfs/thrash/thrashers/.qa create mode 120000 ceph/qa/suites/kcephfs/thrash/workloads/.qa delete mode 100644 ceph/qa/suites/knfs/basic/ceph/base.yaml delete mode 120000 ceph/qa/suites/knfs/basic/clusters/extra-client.yaml delete mode 100644 ceph/qa/suites/knfs/basic/mount/v3.yaml delete mode 100644 ceph/qa/suites/knfs/basic/mount/v4.yaml 
delete mode 100644 ceph/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml delete mode 100644 ceph/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml delete mode 100644 ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml delete mode 100644 ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml delete mode 100644 ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml delete mode 100644 ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml delete mode 100644 ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml create mode 120000 ceph/qa/suites/krbd/.qa create mode 120000 ceph/qa/suites/krbd/rbd-nomount/.qa create mode 120000 ceph/qa/suites/krbd/rbd-nomount/clusters/.qa create mode 120000 ceph/qa/suites/krbd/rbd-nomount/install/.qa create mode 120000 ceph/qa/suites/krbd/rbd-nomount/msgr-failures/.qa create mode 120000 ceph/qa/suites/krbd/rbd-nomount/tasks/.qa create mode 120000 ceph/qa/suites/krbd/rbd/.qa create mode 120000 ceph/qa/suites/krbd/rbd/clusters/.qa create mode 120000 ceph/qa/suites/krbd/rbd/msgr-failures/.qa create mode 120000 ceph/qa/suites/krbd/rbd/tasks/.qa create mode 120000 ceph/qa/suites/krbd/singleton/.qa create mode 120000 ceph/qa/suites/krbd/singleton/msgr-failures/.qa create mode 120000 ceph/qa/suites/krbd/singleton/tasks/.qa create mode 120000 ceph/qa/suites/krbd/thrash/.qa create mode 120000 ceph/qa/suites/krbd/thrash/ceph/.qa create mode 120000 ceph/qa/suites/krbd/thrash/clusters/.qa create mode 120000 ceph/qa/suites/krbd/thrash/thrashers/.qa create mode 120000 ceph/qa/suites/krbd/thrash/workloads/.qa create mode 120000 ceph/qa/suites/krbd/unmap/.qa create mode 120000 ceph/qa/suites/krbd/unmap/ceph/.qa create mode 120000 ceph/qa/suites/krbd/unmap/clusters/.qa create mode 120000 ceph/qa/suites/krbd/unmap/kernels/.qa create mode 120000 ceph/qa/suites/krbd/unmap/tasks/.qa create mode 120000 ceph/qa/suites/krbd/wac/.qa create mode 120000 ceph/qa/suites/krbd/wac/sysfs/.qa create mode 120000 ceph/qa/suites/krbd/wac/sysfs/ceph/.qa create mode 120000 ceph/qa/suites/krbd/wac/sysfs/clusters/.qa create mode 120000 ceph/qa/suites/krbd/wac/sysfs/tasks/.qa create mode 120000 ceph/qa/suites/krbd/wac/wac/.qa create mode 120000 ceph/qa/suites/krbd/wac/wac/ceph/.qa create mode 120000 ceph/qa/suites/krbd/wac/wac/clusters/.qa create mode 120000 ceph/qa/suites/krbd/wac/wac/tasks/.qa create mode 120000 ceph/qa/suites/krbd/wac/wac/verify/.qa create mode 120000 ceph/qa/suites/marginal/.qa create mode 120000 ceph/qa/suites/marginal/basic/.qa create mode 120000 ceph/qa/suites/marginal/basic/clusters/.qa create mode 120000 ceph/qa/suites/marginal/basic/tasks/.qa create mode 120000 ceph/qa/suites/marginal/fs-misc/.qa create mode 120000 ceph/qa/suites/marginal/fs-misc/clusters/.qa create mode 120000 ceph/qa/suites/marginal/fs-misc/tasks/.qa create mode 120000 ceph/qa/suites/marginal/mds_restart/.qa create mode 120000 ceph/qa/suites/marginal/mds_restart/clusters/.qa create mode 120000 ceph/qa/suites/marginal/mds_restart/tasks/.qa create mode 120000 ceph/qa/suites/marginal/multimds/.qa create mode 120000 ceph/qa/suites/marginal/multimds/clusters/.qa create mode 120000 ceph/qa/suites/marginal/multimds/mounts/.qa create mode 120000 ceph/qa/suites/marginal/multimds/tasks/.qa create mode 120000 ceph/qa/suites/marginal/multimds/thrash/.qa create mode 120000 ceph/qa/suites/mixed-clients/.qa create mode 120000 ceph/qa/suites/mixed-clients/basic/.qa create mode 120000 ceph/qa/suites/mixed-clients/basic/clusters/.qa create mode 120000 
ceph/qa/suites/mixed-clients/basic/tasks/.qa create mode 120000 ceph/qa/suites/multimds/.qa create mode 120000 ceph/qa/suites/multimds/basic/.qa create mode 120000 ceph/qa/suites/multimds/basic/clusters/.qa create mode 120000 ceph/qa/suites/multimds/basic/conf create mode 120000 ceph/qa/suites/multimds/basic/mount/.qa create mode 120000 ceph/qa/suites/multimds/basic/overrides/.qa create mode 120000 ceph/qa/suites/multimds/basic/q_check_counter/.qa create mode 120000 ceph/qa/suites/multimds/basic/tasks/.qa create mode 120000 ceph/qa/suites/multimds/thrash/.qa create mode 120000 ceph/qa/suites/multimds/thrash/clusters/.qa create mode 120000 ceph/qa/suites/multimds/thrash/conf create mode 120000 ceph/qa/suites/multimds/thrash/mount/.qa create mode 120000 ceph/qa/suites/multimds/thrash/overrides/.qa create mode 120000 ceph/qa/suites/multimds/thrash/tasks/.qa create mode 120000 ceph/qa/suites/multimds/verify/.qa create mode 120000 ceph/qa/suites/multimds/verify/clusters/.qa create mode 120000 ceph/qa/suites/multimds/verify/conf create mode 120000 ceph/qa/suites/multimds/verify/mount/.qa create mode 120000 ceph/qa/suites/multimds/verify/overrides/.qa create mode 120000 ceph/qa/suites/powercycle/.qa create mode 120000 ceph/qa/suites/powercycle/osd/.qa create mode 120000 ceph/qa/suites/powercycle/osd/clusters/.qa create mode 120000 ceph/qa/suites/powercycle/osd/powercycle/.qa create mode 120000 ceph/qa/suites/powercycle/osd/tasks/.qa create mode 120000 ceph/qa/suites/rados/.qa create mode 120000 ceph/qa/suites/rados/basic-luminous/.qa create mode 120000 ceph/qa/suites/rados/basic/.qa create mode 120000 ceph/qa/suites/rados/basic/clusters/.qa create mode 120000 ceph/qa/suites/rados/basic/d-require-luminous/.qa create mode 120000 ceph/qa/suites/rados/basic/msgr-failures/.qa create mode 120000 ceph/qa/suites/rados/basic/msgr/.qa create mode 120000 ceph/qa/suites/rados/basic/tasks/.qa create mode 120000 ceph/qa/suites/rados/mgr/.qa create mode 120000 ceph/qa/suites/rados/mgr/clusters/.qa create mode 120000 ceph/qa/suites/rados/mgr/debug/.qa create mode 120000 ceph/qa/suites/rados/mgr/tasks/.qa create mode 120000 ceph/qa/suites/rados/monthrash/.qa create mode 120000 ceph/qa/suites/rados/monthrash/clusters/.qa create mode 120000 ceph/qa/suites/rados/monthrash/msgr-failures/.qa create mode 120000 ceph/qa/suites/rados/monthrash/thrashers/.qa create mode 120000 ceph/qa/suites/rados/monthrash/workloads/.qa create mode 120000 ceph/qa/suites/rados/multimon/.qa create mode 120000 ceph/qa/suites/rados/multimon/clusters/.qa create mode 120000 ceph/qa/suites/rados/multimon/msgr-failures/.qa create mode 120000 ceph/qa/suites/rados/multimon/tasks/.qa create mode 120000 ceph/qa/suites/rados/objectstore/.qa create mode 120000 ceph/qa/suites/rados/rest/.qa create mode 120000 ceph/qa/suites/rados/singleton-bluestore/.qa create mode 120000 ceph/qa/suites/rados/singleton-bluestore/all/.qa create mode 120000 ceph/qa/suites/rados/singleton-bluestore/msgr-failures/.qa create mode 120000 ceph/qa/suites/rados/singleton-bluestore/objectstore/.qa create mode 120000 ceph/qa/suites/rados/singleton-nomsgr/.qa create mode 120000 ceph/qa/suites/rados/singleton-nomsgr/all/.qa create mode 100644 ceph/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml create mode 120000 ceph/qa/suites/rados/singleton/.qa create mode 120000 ceph/qa/suites/rados/singleton/all/.qa create mode 120000 ceph/qa/suites/rados/singleton/all/thrash-rados/.qa create mode 120000 ceph/qa/suites/rados/singleton/msgr-failures/.qa create mode 120000 
ceph/qa/suites/rados/standalone/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-big/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-big/cluster/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-big/workloads/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-isa/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-isa/arch/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-overwrites/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-shec/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code/fast/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code/thrashers/.qa create mode 120000 ceph/qa/suites/rados/thrash-erasure-code/workloads/.qa create mode 120000 ceph/qa/suites/rados/thrash-luminous/.qa create mode 120000 ceph/qa/suites/rados/thrash-luminous/workloads/.qa create mode 120000 ceph/qa/suites/rados/thrash/.qa create mode 120000 ceph/qa/suites/rados/thrash/0-size-min-size-overrides/.qa create mode 120000 ceph/qa/suites/rados/thrash/1-pg-log-overrides/.qa create mode 120000 ceph/qa/suites/rados/thrash/2-recovery-overrides/.qa create mode 120000 ceph/qa/suites/rados/thrash/backoff/.qa create mode 120000 ceph/qa/suites/rados/thrash/clusters/.qa create mode 120000 ceph/qa/suites/rados/thrash/d-require-luminous/.qa create mode 120000 ceph/qa/suites/rados/thrash/msgr-failures/.qa create mode 120000 ceph/qa/suites/rados/thrash/thrashers/.qa create mode 120000 ceph/qa/suites/rados/thrash/workloads/.qa create mode 120000 ceph/qa/suites/rados/upgrade/.qa create mode 120000 ceph/qa/suites/rados/upgrade/jewel-x-singleton/.qa create mode 120000 ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/.qa create mode 120000 ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/.qa create mode 120000 ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/.qa create mode 120000 ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/.qa create mode 120000 ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/.qa create mode 120000 ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/.qa create mode 120000 ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/.qa create mode 120000 ceph/qa/suites/rados/verify/.qa create mode 120000 ceph/qa/suites/rados/verify/clusters/.qa create mode 120000 ceph/qa/suites/rados/verify/d-thrash/.qa create mode 120000 ceph/qa/suites/rados/verify/d-thrash/default/.qa create mode 120000 ceph/qa/suites/rados/verify/msgr-failures/.qa create mode 120000 ceph/qa/suites/rados/verify/tasks/.qa create mode 120000 ceph/qa/suites/rados/verify/validater/.qa create mode 120000 ceph/qa/suites/rbd/.qa create mode 120000 ceph/qa/suites/rbd/basic/.qa create mode 120000 ceph/qa/suites/rbd/basic/base/.qa create mode 120000 ceph/qa/suites/rbd/basic/cachepool/.qa create mode 120000 ceph/qa/suites/rbd/basic/clusters/.qa create mode 120000 ceph/qa/suites/rbd/basic/msgr-failures/.qa create mode 120000 
ceph/qa/suites/rbd/basic/tasks/.qa create mode 120000 ceph/qa/suites/rbd/cli/.qa create mode 120000 ceph/qa/suites/rbd/cli/base/.qa create mode 120000 ceph/qa/suites/rbd/cli/features/.qa create mode 120000 ceph/qa/suites/rbd/cli/msgr-failures/.qa create mode 120000 ceph/qa/suites/rbd/cli/pool/.qa create mode 120000 ceph/qa/suites/rbd/cli/workloads/.qa create mode 120000 ceph/qa/suites/rbd/librbd/.qa create mode 120000 ceph/qa/suites/rbd/librbd/cache/.qa create mode 120000 ceph/qa/suites/rbd/librbd/clusters/.qa create mode 120000 ceph/qa/suites/rbd/librbd/config/.qa create mode 120000 ceph/qa/suites/rbd/librbd/msgr-failures/.qa create mode 120000 ceph/qa/suites/rbd/librbd/pool/.qa create mode 120000 ceph/qa/suites/rbd/librbd/workloads/.qa create mode 120000 ceph/qa/suites/rbd/maintenance/.qa create mode 120000 ceph/qa/suites/rbd/maintenance/base/.qa create mode 120000 ceph/qa/suites/rbd/maintenance/clusters/.qa create mode 120000 ceph/qa/suites/rbd/maintenance/qemu/.qa create mode 120000 ceph/qa/suites/rbd/maintenance/workloads/.qa create mode 120000 ceph/qa/suites/rbd/mirror-ha/.qa create mode 120000 ceph/qa/suites/rbd/mirror-ha/workloads/.qa create mode 120000 ceph/qa/suites/rbd/mirror/.qa create mode 120000 ceph/qa/suites/rbd/mirror/base/.qa create mode 120000 ceph/qa/suites/rbd/mirror/cluster/.qa create mode 120000 ceph/qa/suites/rbd/mirror/rbd-mirror/.qa create mode 120000 ceph/qa/suites/rbd/mirror/workloads/.qa create mode 120000 ceph/qa/suites/rbd/nbd/.qa create mode 120000 ceph/qa/suites/rbd/nbd/cluster/.qa create mode 120000 ceph/qa/suites/rbd/nbd/workloads/.qa create mode 120000 ceph/qa/suites/rbd/openstack/.qa create mode 120000 ceph/qa/suites/rbd/openstack/base/.qa create mode 120000 ceph/qa/suites/rbd/openstack/clusters/.qa create mode 120000 ceph/qa/suites/rbd/openstack/features/.qa create mode 120000 ceph/qa/suites/rbd/openstack/workloads/.qa create mode 120000 ceph/qa/suites/rbd/qemu/.qa create mode 120000 ceph/qa/suites/rbd/qemu/cache/.qa create mode 120000 ceph/qa/suites/rbd/qemu/clusters/.qa create mode 120000 ceph/qa/suites/rbd/qemu/features/.qa create mode 120000 ceph/qa/suites/rbd/qemu/msgr-failures/.qa create mode 120000 ceph/qa/suites/rbd/qemu/pool/.qa create mode 120000 ceph/qa/suites/rbd/qemu/workloads/.qa create mode 120000 ceph/qa/suites/rbd/singleton-bluestore/.qa create mode 120000 ceph/qa/suites/rbd/singleton-bluestore/all/.qa create mode 120000 ceph/qa/suites/rbd/singleton-bluestore/objectstore/.qa create mode 120000 ceph/qa/suites/rbd/singleton/.qa create mode 120000 ceph/qa/suites/rbd/singleton/all/.qa create mode 120000 ceph/qa/suites/rbd/thrash/.qa create mode 120000 ceph/qa/suites/rbd/thrash/base/.qa create mode 120000 ceph/qa/suites/rbd/thrash/clusters/.qa create mode 120000 ceph/qa/suites/rbd/thrash/msgr-failures/.qa create mode 120000 ceph/qa/suites/rbd/thrash/thrashers/.qa create mode 120000 ceph/qa/suites/rbd/thrash/workloads/.qa create mode 120000 ceph/qa/suites/rbd/valgrind/.qa create mode 120000 ceph/qa/suites/rbd/valgrind/base/.qa create mode 120000 ceph/qa/suites/rbd/valgrind/validator/.qa create mode 120000 ceph/qa/suites/rbd/valgrind/workloads/.qa create mode 120000 ceph/qa/suites/rgw/.qa create mode 120000 ceph/qa/suites/rgw/hadoop-s3a/.qa create mode 120000 ceph/qa/suites/rgw/hadoop-s3a/hadoop/.qa create mode 120000 ceph/qa/suites/rgw/multifs/.qa create mode 120000 ceph/qa/suites/rgw/multifs/clusters/.qa create mode 120000 ceph/qa/suites/rgw/multifs/frontend/.qa create mode 120000 ceph/qa/suites/rgw/multifs/tasks/.qa create mode 120000 
ceph/qa/suites/rgw/multisite/.qa create mode 120000 ceph/qa/suites/rgw/multisite/realms/.qa create mode 120000 ceph/qa/suites/rgw/multisite/tasks/.qa create mode 120000 ceph/qa/suites/rgw/singleton/.qa create mode 120000 ceph/qa/suites/rgw/singleton/all/.qa create mode 120000 ceph/qa/suites/rgw/singleton/frontend/.qa create mode 120000 ceph/qa/suites/rgw/tempest/.qa create mode 120000 ceph/qa/suites/rgw/thrash/.qa create mode 120000 ceph/qa/suites/rgw/thrash/clusters/.qa create mode 120000 ceph/qa/suites/rgw/thrash/thrasher/.qa create mode 120000 ceph/qa/suites/rgw/thrash/workload/.qa create mode 120000 ceph/qa/suites/rgw/verify/.qa create mode 120000 ceph/qa/suites/rgw/verify/clusters/.qa create mode 120000 ceph/qa/suites/rgw/verify/msgr-failures/.qa create mode 100644 ceph/qa/suites/rgw/verify/tasks/+ create mode 120000 ceph/qa/suites/rgw/verify/tasks/.qa rename ceph/qa/suites/rgw/verify/tasks/{rgw_s3tests.yaml => 0-install.yaml} (73%) create mode 100644 ceph/qa/suites/rgw/verify/tasks/cls_rgw.yaml delete mode 100644 ceph/qa/suites/rgw/verify/tasks/rgw_swift.yaml create mode 100644 ceph/qa/suites/rgw/verify/tasks/s3tests.yaml create mode 100644 ceph/qa/suites/rgw/verify/tasks/swift.yaml create mode 120000 ceph/qa/suites/rgw/verify/validater/.qa create mode 120000 ceph/qa/suites/samba/.qa create mode 120000 ceph/qa/suites/samba/clusters/.qa create mode 120000 ceph/qa/suites/samba/install/.qa create mode 120000 ceph/qa/suites/samba/mount/.qa create mode 120000 ceph/qa/suites/samba/workload/.qa create mode 120000 ceph/qa/suites/smoke/.qa create mode 120000 ceph/qa/suites/smoke/1node/.qa create mode 120000 ceph/qa/suites/smoke/1node/clusters/.qa create mode 120000 ceph/qa/suites/smoke/1node/distros/.qa create mode 120000 ceph/qa/suites/smoke/1node/objectstore/.qa create mode 120000 ceph/qa/suites/smoke/1node/tasks/.qa create mode 120000 ceph/qa/suites/smoke/basic/.qa create mode 120000 ceph/qa/suites/smoke/basic/clusters/.qa create mode 120000 ceph/qa/suites/smoke/basic/objectstore/.qa create mode 120000 ceph/qa/suites/smoke/basic/tasks/.qa create mode 120000 ceph/qa/suites/smoke/systemd/.qa create mode 120000 ceph/qa/suites/smoke/systemd/clusters/.qa create mode 120000 ceph/qa/suites/smoke/systemd/distros/.qa create mode 120000 ceph/qa/suites/smoke/systemd/objectstore/.qa create mode 120000 ceph/qa/suites/smoke/systemd/tasks/.qa create mode 120000 ceph/qa/suites/stress/.qa create mode 120000 ceph/qa/suites/stress/bench/.qa create mode 120000 ceph/qa/suites/stress/bench/clusters/.qa create mode 120000 ceph/qa/suites/stress/bench/tasks/.qa create mode 120000 ceph/qa/suites/stress/thrash/.qa create mode 120000 ceph/qa/suites/stress/thrash/clusters/.qa create mode 120000 ceph/qa/suites/stress/thrash/thrashers/.qa create mode 120000 ceph/qa/suites/stress/thrash/workloads/.qa create mode 120000 ceph/qa/suites/teuthology/.qa create mode 120000 ceph/qa/suites/teuthology/buildpackages/.qa create mode 120000 ceph/qa/suites/teuthology/buildpackages/tasks/.qa create mode 120000 ceph/qa/suites/teuthology/ceph/.qa create mode 120000 ceph/qa/suites/teuthology/ceph/clusters/.qa create mode 120000 ceph/qa/suites/teuthology/ceph/tasks/.qa create mode 120000 ceph/qa/suites/teuthology/multi-cluster/.qa create mode 120000 ceph/qa/suites/teuthology/multi-cluster/all/.qa create mode 120000 ceph/qa/suites/teuthology/no-ceph/.qa create mode 120000 ceph/qa/suites/teuthology/no-ceph/clusters/.qa create mode 120000 ceph/qa/suites/teuthology/no-ceph/tasks/.qa create mode 120000 ceph/qa/suites/teuthology/nop/.qa create 
mode 120000 ceph/qa/suites/teuthology/nop/all/.qa create mode 120000 ceph/qa/suites/teuthology/rgw/.qa create mode 120000 ceph/qa/suites/teuthology/rgw/tasks/.qa create mode 120000 ceph/qa/suites/teuthology/workunits/.qa create mode 120000 ceph/qa/suites/tgt/.qa create mode 120000 ceph/qa/suites/tgt/basic/.qa create mode 120000 ceph/qa/suites/tgt/basic/clusters/.qa create mode 120000 ceph/qa/suites/tgt/basic/msgr-failures/.qa create mode 120000 ceph/qa/suites/tgt/basic/tasks/.qa create mode 120000 ceph/qa/suites/upgrade/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/1-install/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/2-workload/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/supported/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/1-install/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/2-features/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/3-workload/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/supported/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/.qa create mode 120000 ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/.qa create mode 120000 
ceph/qa/suites/upgrade/hammer-jewel-x/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/parallel/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/tiering/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/.qa create mode 120000 ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/ceph-deploy/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/parallel/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/stress-split/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/.qa create mode 120000 ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/ceph-deploy/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/parallel/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/.qa create mode 120000 
ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/stress-split/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/.qa create mode 120000 ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/.qa create mode 120000 ceph/qa/suites/upgrade/luminous-p2p/.qa create mode 100644 ceph/qa/tasks/util/workunit.py create mode 100755 ceph/qa/workunits/rados/test_librados_build.sh delete mode 100755 ceph/src/ceph-volume/bin/ceph-volume delete mode 100755 ceph/src/ceph-volume/bin/ceph-volume-systemd create mode 100644 ceph/src/ceph-volume/ceph_volume/inventory/__init__.py create mode 100644 ceph/src/ceph-volume/ceph_volume/inventory/main.py create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/__init__.py create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_filestore.py create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_validate.py create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/Vagrantfile create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/group_vars/all create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/hosts create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/setup.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test.yml create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/vagrant_variables.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/Vagrantfile create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/group_vars/all create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/hosts create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/setup.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test.yml create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/vagrant_variables.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/setup.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/setup.yml create mode 120000 
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/Vagrantfile create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/group_vars/all create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/hosts create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/setup.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test.yml create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/vagrant_variables.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/Vagrantfile create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/group_vars/all create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/hosts create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/setup.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test.yml create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/vagrant_variables.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/setup.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/setup.yml create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml delete mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore.yml delete mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore_dmcrypt.yml delete mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore.yml delete mode 100644 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore_dmcrypt.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/setup.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/setup.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/setup.yml create mode 120000 ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/setup.yml create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/test_inventory.py create mode 100644 ceph/src/common/PriorityCache.cc create mode 100644 ceph/src/common/PriorityCache.h create mode 100644 ceph/src/common/autovector.h delete mode 100644 ceph/src/common/pipe.c delete mode 100644 ceph/src/common/pipe.h create mode 100644 ceph/src/kv/rocksdb_cache/BinnedLRUCache.cc create mode 100644 ceph/src/kv/rocksdb_cache/BinnedLRUCache.h create mode 100644 ceph/src/kv/rocksdb_cache/ShardedCache.cc create mode 100644 ceph/src/kv/rocksdb_cache/ShardedCache.h rename ceph/src/test/librbd/{test_mock_ObjectWatcher.cc => test_mock_Watcher.cc} (61%) diff --git a/Makefile b/Makefile index 
a3a2f68e9..8bb42e96f 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ RELEASE=5.2 PACKAGE=ceph -VER=12.2.8 +VER=12.2.10 DEBREL=pve1 SRCDIR=ceph diff --git a/ceph/CMakeLists.txt b/ceph/CMakeLists.txt index c507dfaa2..35c193936 100644 --- a/ceph/CMakeLists.txt +++ b/ceph/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required(VERSION 2.8.11) project(ceph) -set(VERSION 12.2.8) +set(VERSION 12.2.10) if(POLICY CMP0046) # Tweak policies (this one disables "missing" dependency warning) @@ -107,6 +107,8 @@ CHECK_FUNCTION_EXISTS(fdatasync HAVE_FDATASYNC) CHECK_FUNCTION_EXISTS(strerror_r HAVE_STRERROR_R) CHECK_FUNCTION_EXISTS(name_to_handle_at HAVE_NAME_TO_HANDLE_AT) CHECK_FUNCTION_EXISTS(pipe2 HAVE_PIPE2) +CHECK_FUNCTION_EXISTS(accept4 HAVE_ACCEPT4) + set(CMAKE_REQUIRED_LIBRARIES pthread) CHECK_FUNCTION_EXISTS(pthread_spin_init HAVE_PTHREAD_SPINLOCK) CHECK_FUNCTION_EXISTS(pthread_set_name_np HAVE_PTHREAD_SET_NAME_NP) diff --git a/ceph/PendingReleaseNotes b/ceph/PendingReleaseNotes index 139ca0fc7..00ee957e0 100644 --- a/ceph/PendingReleaseNotes +++ b/ceph/PendingReleaseNotes @@ -111,3 +111,14 @@ has an error. This head object is there to show the snapset that was used in determining errors. + +12.2.9 +------ +* 12.2.9 contains the pg hard hard limit patches(https://tracker.ceph.com/issues/23979). + A partial upgrade during recovery/backfill, can cause the osds on the previous version, + to fail with assert(trim_to <= info.last_complete). The workaround for users is to + upgrade and restart all OSDs to a version with the pg hard limit, or only upgrade + when all PGs are active+clean. This patch will be reverted in 12.2.10, until + a clean upgrade path is added to the pg log hard limit patches. + + See also: http://tracker.ceph.com/issues/36686 diff --git a/ceph/alpine/APKBUILD b/ceph/alpine/APKBUILD index bfa3a349b..26f824c7a 100644 --- a/ceph/alpine/APKBUILD +++ b/ceph/alpine/APKBUILD @@ -1,7 +1,7 @@ # Contributor: John Coyle # Maintainer: John Coyle pkgname=ceph -pkgver=12.2.8 +pkgver=12.2.10 pkgrel=0 pkgdesc="Ceph is a distributed object store and file system" pkgusers="ceph" @@ -63,7 +63,7 @@ makedepends=" xmlstarlet yasm " -source="ceph-12.2.8.tar.bz2" +source="ceph-12.2.10.tar.bz2" subpackages=" $pkgname-base $pkgname-common @@ -116,7 +116,7 @@ _sysconfdir=/etc _udevrulesdir=/etc/udev/rules.d _python_sitelib=/usr/lib/python2.7/site-packages -builddir=$srcdir/ceph-12.2.8 +builddir=$srcdir/ceph-12.2.10 build() { export CEPH_BUILD_VIRTUALENV=$builddir diff --git a/ceph/ceph.spec b/ceph/ceph.spec index 9309442d3..94d44b690 100644 --- a/ceph/ceph.spec +++ b/ceph/ceph.spec @@ -44,7 +44,7 @@ %if %{with selinux} # get selinux policy version -%{!?_selinux_policy_version: %global _selinux_policy_version %(sed -e 's,.*selinux-policy-\\([^/]*\\)/.*,\\1,' /usr/share/selinux/devel/policyhelp 2>/dev/null || echo 0.0.0)} +%{!?_selinux_policy_version: %global _selinux_policy_version 0.0.0} %endif %{!?_udevrulesdir: %global _udevrulesdir /lib/udev/rules.d} @@ -61,7 +61,7 @@ # main package definition ################################################################################# Name: ceph -Version: 12.2.8 +Version: 12.2.10 Release: 0%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 @@ -77,7 +77,7 @@ License: LGPL-2.1 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-3-Clause and Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: http://ceph.com/download/ceph-12.2.8.tar.bz2 +Source0: http://ceph.com/download/ceph-12.2.10.tar.bz2 %if 0%{?suse_version} %if 0%{?is_opensuse} ExclusiveArch: x86_64 
aarch64 ppc64 ppc64le @@ -100,7 +100,6 @@ BuildRequires: sharutils %if 0%{with selinux} BuildRequires: checkpolicy BuildRequires: selinux-policy-devel -BuildRequires: /usr/share/selinux/devel/policyhelp %endif %if 0%{with make_check} %if 0%{?fedora} || 0%{?rhel} @@ -124,8 +123,13 @@ BuildRequires: fuse-devel BuildRequires: gcc-c++ BuildRequires: gdbm %if 0%{with tcmalloc} +%if 0%{?fedora} || 0%{?rhel} +BuildRequires: gperftools-devel >= 2.6.1 +%endif +%if 0%{?suse_version} BuildRequires: gperftools-devel >= 2.4 %endif +%endif BuildRequires: jq BuildRequires: leveldb-devel > 1.2 BuildRequires: libaio-devel @@ -248,14 +252,25 @@ Requires: python-requests Requires: python-setuptools Requires: grep Requires: xfsprogs +Requires: e2fsprogs Requires: logrotate +Requires: parted Requires: util-linux Requires: cryptsetup Requires: findutils Requires: psmisc Requires: which +%if 0%{?fedora} || 0%{?rhel} +Requires: gdisk +# The following is necessary due to tracker 36508 and can be removed once the +# associated upstream bugs are resolved. +%if 0%{with tcmalloc} +Requires: gperftools-libs >= 2.6.1 +%endif +%endif %if 0%{?suse_version} Recommends: ntp-daemon +Requires: gptfdisk %endif %description base Base is the package that includes all the files shared amongst ceph servers @@ -423,14 +438,6 @@ Summary: Ceph Object Storage Daemon Group: System/Filesystems %endif Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} -# for sgdisk, used by ceph-disk -%if 0%{?fedora} || 0%{?rhel} -Requires: gdisk -%endif -%if 0%{?suse_version} -Requires: gptfdisk -%endif -Requires: parted Requires: lvm2 %description osd ceph-osd is the object storage daemon for the Ceph distributed file @@ -781,7 +788,7 @@ python-rbd, python-rgw or python-cephfs instead. # common ################################################################################# %prep -%autosetup -p1 -n ceph-12.2.8 +%autosetup -p1 -n ceph-12.2.10 %build %if 0%{with cephfs_java} diff --git a/ceph/ceph.spec.in b/ceph/ceph.spec.in index bb394f06b..d708aea33 100644 --- a/ceph/ceph.spec.in +++ b/ceph/ceph.spec.in @@ -44,7 +44,7 @@ %if %{with selinux} # get selinux policy version -%{!?_selinux_policy_version: %global _selinux_policy_version %(sed -e 's,.*selinux-policy-\\([^/]*\\)/.*,\\1,' /usr/share/selinux/devel/policyhelp 2>/dev/null || echo 0.0.0)} +%{!?_selinux_policy_version: %global _selinux_policy_version 0.0.0} %endif %{!?_udevrulesdir: %global _udevrulesdir /lib/udev/rules.d} @@ -100,7 +100,6 @@ BuildRequires: sharutils %if 0%{with selinux} BuildRequires: checkpolicy BuildRequires: selinux-policy-devel -BuildRequires: /usr/share/selinux/devel/policyhelp %endif %if 0%{with make_check} %if 0%{?fedora} || 0%{?rhel} @@ -124,8 +123,13 @@ BuildRequires: fuse-devel BuildRequires: gcc-c++ BuildRequires: gdbm %if 0%{with tcmalloc} +%if 0%{?fedora} || 0%{?rhel} +BuildRequires: gperftools-devel >= 2.6.1 +%endif +%if 0%{?suse_version} BuildRequires: gperftools-devel >= 2.4 %endif +%endif BuildRequires: jq BuildRequires: leveldb-devel > 1.2 BuildRequires: libaio-devel @@ -248,14 +252,25 @@ Requires: python-requests Requires: python-setuptools Requires: grep Requires: xfsprogs +Requires: e2fsprogs Requires: logrotate +Requires: parted Requires: util-linux Requires: cryptsetup Requires: findutils Requires: psmisc Requires: which +%if 0%{?fedora} || 0%{?rhel} +Requires: gdisk +# The following is necessary due to tracker 36508 and can be removed once the +# associated upstream bugs are resolved. 
+%if 0%{with tcmalloc} +Requires: gperftools-libs >= 2.6.1 +%endif +%endif %if 0%{?suse_version} Recommends: ntp-daemon +Requires: gptfdisk %endif %description base Base is the package that includes all the files shared amongst ceph servers @@ -423,14 +438,6 @@ Summary: Ceph Object Storage Daemon Group: System/Filesystems %endif Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} -# for sgdisk, used by ceph-disk -%if 0%{?fedora} || 0%{?rhel} -Requires: gdisk -%endif -%if 0%{?suse_version} -Requires: gptfdisk -%endif -Requires: parted Requires: lvm2 %description osd ceph-osd is the object storage daemon for the Ceph distributed file diff --git a/ceph/debian/changelog b/ceph/debian/changelog index 642fc450f..2b61ec882 100644 --- a/ceph/debian/changelog +++ b/ceph/debian/changelog @@ -1,3 +1,15 @@ +ceph (12.2.10-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Mon, 26 Nov 2018 19:35:56 +0000 + +ceph (12.2.9-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Wed, 24 Oct 2018 21:04:18 +0000 + ceph (12.2.8-1) stable; urgency=medium * New upstream release diff --git a/ceph/debian/control b/ceph/debian/control index d5bc16e9e..6d01e3115 100644 --- a/ceph/debian/control +++ b/ceph/debian/control @@ -88,10 +88,12 @@ Depends: binutils, ceph-common (= ${binary:Version}), cryptsetup-bin | cryptsetup, debianutils, + e2fsprogs, findutils, gdisk, grep, logrotate, + parted, psmisc, xfsprogs, ${misc:Depends}, @@ -235,7 +237,6 @@ Description: debugging symbols for ceph-mon Package: ceph-osd Architecture: linux-any Depends: ceph-base (= ${binary:Version}), - parted, lvm2, ${misc:Depends}, ${python:Depends}, @@ -269,7 +270,7 @@ Package: ceph-fuse Architecture: linux-any Depends: ${misc:Depends}, ${shlibs:Depends}, -Recommends: fuse, + fuse, Description: FUSE-based client for the Ceph distributed file system Ceph is a massively scalable, open-source, distributed storage system that runs on commodity hardware and delivers object, diff --git a/ceph/debian/rules b/ceph/debian/rules index 99a3e8e98..b5ce42c6f 100755 --- a/ceph/debian/rules +++ b/ceph/debian/rules @@ -183,6 +183,8 @@ override_dh_python2: dh_python2 -p ceph-base dh_python2 -p ceph-osd dh_python2 -p ceph-mgr + # batch-compile, and set up for delete, all the module files + dh_python2 -p ceph-mgr usr/lib/ceph/mgr override_dh_python3: for binding in rados cephfs rbd rgw; do \ diff --git a/ceph/doc/architecture.rst b/ceph/doc/architecture.rst index 2d5d849c1..933bd8500 100644 --- a/ceph/doc/architecture.rst +++ b/ceph/doc/architecture.rst @@ -1444,7 +1444,7 @@ architecture. .. ditaa:: +--------------+ +----------------+ +-------------+ - | Block Device | | Object Storage | | Ceph FS | + | Block Device | | Object Storage | | CephFS | +--------------+ +----------------+ +-------------+ +--------------+ +----------------+ +-------------+ @@ -1513,14 +1513,16 @@ client. Other virtualization technologies such as Xen can access the Ceph Block Device kernel object(s). This is done with the command-line tool ``rbd``. -.. index:: Ceph FS; Ceph Filesystem; libcephfs; MDS; metadata server; ceph-mds +.. index:: CephFS; Ceph Filesystem; libcephfs; MDS; metadata server; ceph-mds + +.. _arch-cephfs: Ceph Filesystem --------------- -The Ceph Filesystem (Ceph FS) provides a POSIX-compliant filesystem as a +The Ceph Filesystem (CephFS) provides a POSIX-compliant filesystem as a service that is layered on top of the object-based Ceph Storage Cluster. 
-Ceph FS files get mapped to objects that Ceph stores in the Ceph Storage +CephFS files get mapped to objects that Ceph stores in the Ceph Storage Cluster. Ceph Clients mount a CephFS filesystem as a kernel object or as a Filesystem in User Space (FUSE). @@ -1530,7 +1532,7 @@ a Filesystem in User Space (FUSE). +-----------------------+ +------------------------+ +---------------------------------------------------+ - | Ceph FS Library (libcephfs) | + | CephFS Library (libcephfs) | +---------------------------------------------------+ +---------------------------------------------------+ @@ -1552,7 +1554,7 @@ would tax the Ceph OSD Daemons unnecessarily. So separating the metadata from the data means that the Ceph Filesystem can provide high performance services without taxing the Ceph Storage Cluster. -Ceph FS separates the metadata from the data, storing the metadata in the MDS, +CephFS separates the metadata from the data, storing the metadata in the MDS, and storing the file data in one or more objects in the Ceph Storage Cluster. The Ceph filesystem aims for POSIX compatibility. ``ceph-mds`` can run as a single process, or it can be distributed out to multiple physical machines, diff --git a/ceph/doc/ceph-volume/index.rst b/ceph/doc/ceph-volume/index.rst index 34094b733..1e8efe5d4 100644 --- a/ceph/doc/ceph-volume/index.rst +++ b/ceph/doc/ceph-volume/index.rst @@ -11,12 +11,18 @@ follow a predictable, and robust way of preparing, activating, and starting OSDs **Command Line Subcommands** + There is currently support for ``lvm``, and plain disks (with GPT partitions) that may have been deployed with ``ceph-disk``. * :ref:`ceph-volume-lvm` * :ref:`ceph-volume-simple` +**Node inventory** + +The :ref:`ceph-volume-inventory` subcommand provides information and metadata +about a nodes physical disk inventory. + Migrating --------- @@ -51,6 +57,7 @@ and ``ceph-disk`` is fully disabled. Encryption is fully supported. intro systemd + inventory lvm/index lvm/activate lvm/batch diff --git a/ceph/doc/ceph-volume/inventory.rst b/ceph/doc/ceph-volume/inventory.rst new file mode 100644 index 000000000..edb1fd205 --- /dev/null +++ b/ceph/doc/ceph-volume/inventory.rst @@ -0,0 +1,17 @@ +.. _ceph-volume-inventory: + +``inventory`` +============= +The ``inventory`` subcommand queries a host's disc inventory and provides +hardware information and metadata on every physical device. + +By default the command returns a short, human-readable report of all physical disks. + +For programmatic consumption of this report pass ``--format json`` to generate a +JSON formatted report. This report includes extensive information on the +physical drives such as disk metadata (like model and size), logical volumes +and whether they are used by ceph, and if the disk is usable by ceph and +reasons why not. + +A device path can be specified to report extensive information on a device in +both plain and json format. diff --git a/ceph/doc/ceph-volume/lvm/batch.rst b/ceph/doc/ceph-volume/lvm/batch.rst index bf484b017..b55ae8db3 100644 --- a/ceph/doc/ceph-volume/lvm/batch.rst +++ b/ceph/doc/ceph-volume/lvm/batch.rst @@ -12,6 +12,8 @@ devices in an efficient way? The process is similar to :ref:`ceph-volume-lvm-create`, and will do the preparation and activation at once, following the same workflow for each OSD. +However, If the ``--prepare`` flag is passed then only the prepare step is taken +and the OSDs are not activated. 
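[Editor's note] For illustration only, a minimal invocation of the ``--prepare`` flag documented just above might look like the following; the device names are placeholders, and activation can be triggered separately afterwards::

    # prepare-only run: OSDs are created on the logical volumes but not started
    ceph-volume lvm batch --bluestore --prepare /dev/sdb /dev/sdc

    # later, activate everything that was prepared on this host
    ceph-volume lvm activate --all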
All the features that ``ceph-volume lvm create`` supports, like ``dmcrypt``, avoiding ``systemd`` units from starting, defining bluestore or filestore, @@ -19,6 +21,8 @@ are supported. Any fine-grained option that may affect a single OSD is not supported, for example: specifying where journals should be placed. + + .. _ceph-volume-lvm-batch_bluestore: ``bluestore`` diff --git a/ceph/doc/ceph-volume/lvm/prepare.rst b/ceph/doc/ceph-volume/lvm/prepare.rst index 700d1216a..6d62b17f5 100644 --- a/ceph/doc/ceph-volume/lvm/prepare.rst +++ b/ceph/doc/ceph-volume/lvm/prepare.rst @@ -201,6 +201,33 @@ work for both bluestore and filestore OSDs:: ceph-volume lvm prepare --bluestore --data vg/lv --crush-device-class foo +.. _ceph-volume-lvm-multipath: + +``multipath`` support +--------------------- +Devices that come from ``multipath`` are not supported as-is. The tool will +refuse to consume a raw multipath device and will report a message like:: + + --> RuntimeError: Cannot use device (/dev/mapper/). A vg/lv path or an existing device is needed + +The reason for not supporting multipath is that depending on the type of the +multipath setup, if using an active/passive array as the underlying physical +devices, filters are required in ``lvm.conf`` to exclude the disks that are part of +those underlying devices. + +It is unfeasible for ceph-volume to understand what type of configuration is +needed for LVM to be able to work in various different multipath scenarios. The +functionality to create the LV for you is merely a (naive) convenience, +anything that involves different settings or configuration must be provided by +a config management system which can then provide VGs and LVs for ceph-volume +to consume. + +This situation will only arise when trying to use the ceph-volume functionality +that creates a volume group and logical volume from a device. If a multipath +device is already a logical volume it *should* work, given that the LVM +configuration is done correctly to avoid issues. + + Storing metadata ---------------- The following tags will get applied as part of the preparation process diff --git a/ceph/doc/cephfs/eviction.rst b/ceph/doc/cephfs/eviction.rst index 1bab7a127..8f0f20b84 100644 --- a/ceph/doc/cephfs/eviction.rst +++ b/ceph/doc/cephfs/eviction.rst @@ -21,12 +21,16 @@ libcephfs. Automatic client eviction ========================= -There are two situations in which a client may be evicted automatically: +There are three situations in which a client may be evicted automatically: On an active MDS daemon, if a client has not communicated with the MDS for over ``mds_session_autoclose`` seconds (300 seconds by default), then it will be evicted automatically. +On an active MDS daemon, if a client has not responded to cap revoke messages +for over ``mds_cap_revoke_eviction_timeout`` (configuration option) seconds. +This is disabled by default. + During MDS startup (including on failover), the MDS passes through a state called ``reconnect``. During this state, it waits for all the clients to connect to the new MDS daemon. 
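[Editor's note] As a sketch of the cap-revoke eviction behaviour described above, the named option could be enabled in ``ceph.conf`` on the MDS hosts; the 300-second value is an arbitrary example, not a recommended default::

    [mds]
        # evict clients that ignore cap revoke messages for five minutes
        mds cap revoke eviction timeout = 300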
If any clients fail to do diff --git a/ceph/doc/cephfs/fstab.rst b/ceph/doc/cephfs/fstab.rst index dc3871549..785208d7f 100644 --- a/ceph/doc/cephfs/fstab.rst +++ b/ceph/doc/cephfs/fstab.rst @@ -1,14 +1,14 @@ -========================================== - Mount Ceph FS in your File Systems Table -========================================== +======================================== + Mount CephFS in your File Systems Table +======================================== -If you mount Ceph FS in your file systems table, the Ceph file system will mount +If you mount CephFS in your file systems table, the Ceph file system will mount automatically on startup. Kernel Driver ============= -To mount Ceph FS in your file systems table as a kernel driver, add the +To mount CephFS in your file systems table as a kernel driver, add the following to ``/etc/fstab``:: {ipaddress}:{port}:/ {mount}/{mountpoint} {filesystem-name} [name=username,secret=secretkey|secretfile=/path/to/secretfile],[{mount.options}] @@ -26,7 +26,7 @@ See `User Management`_ for details. FUSE ==== -To mount Ceph FS in your file systems table as a filesystem in user space, add the +To mount CephFS in your file systems table as a filesystem in user space, add the following to ``/etc/fstab``:: #DEVICE PATH TYPE OPTIONS diff --git a/ceph/doc/cephfs/fuse.rst b/ceph/doc/cephfs/fuse.rst index d8c6cdf46..02a4d485c 100644 --- a/ceph/doc/cephfs/fuse.rst +++ b/ceph/doc/cephfs/fuse.rst @@ -1,6 +1,6 @@ -========================= -Mount Ceph FS using FUSE -========================= +======================= +Mount CephFS using FUSE +======================= Before mounting a Ceph File System in User Space (FUSE), ensure that the client host has a copy of the Ceph configuration file and a keyring with CAPS for the diff --git a/ceph/doc/cephfs/index.rst b/ceph/doc/cephfs/index.rst index f7055bcef..3c69a4ce7 100644 --- a/ceph/doc/cephfs/index.rst +++ b/ceph/doc/cephfs/index.rst @@ -1,3 +1,5 @@ +.. _ceph-filesystem: + ================= Ceph Filesystem ================= diff --git a/ceph/doc/cephfs/kernel.rst b/ceph/doc/cephfs/kernel.rst index eaaacef37..3b5a75086 100644 --- a/ceph/doc/cephfs/kernel.rst +++ b/ceph/doc/cephfs/kernel.rst @@ -1,6 +1,6 @@ -====================================== - Mount Ceph FS with the Kernel Driver -====================================== +==================================== + Mount CephFS with the Kernel Driver +==================================== To mount the Ceph file system you may use the ``mount`` command if you know the monitor host IP address(es), or use the ``mount.ceph`` utility to resolve the diff --git a/ceph/doc/glossary.rst b/ceph/doc/glossary.rst index 235d59338..0a23faa8a 100644 --- a/ceph/doc/glossary.rst +++ b/ceph/doc/glossary.rst @@ -69,7 +69,9 @@ reflect either technical terms or legacy ways of referring to Ceph systems. Ceph Filesystem CephFS Ceph FS - The POSIX filesystem components of Ceph. + The POSIX filesystem components of Ceph. Refer + :ref:`CephFS Architecture ` and :ref:`ceph-filesystem` for + more details. Cloud Platforms Cloud Stacks diff --git a/ceph/doc/man/8/ceph-fuse.rst b/ceph/doc/man/8/ceph-fuse.rst index 6c88a5a8f..c156d8e46 100644 --- a/ceph/doc/man/8/ceph-fuse.rst +++ b/ceph/doc/man/8/ceph-fuse.rst @@ -16,9 +16,10 @@ Description =========== **ceph-fuse** is a FUSE (File system in USErspace) client for Ceph -distributed file system. It will mount a ceph file system (specified -via the -m option for described by ceph.conf (see below) at the -specific mount point. +distributed file system. 
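[Editor's note] To make the fstab template above concrete, a hypothetical kernel-driver entry (reusing the monitor address and admin credentials that appear in the quick-start examples later in this patch) could look like::

    # /etc/fstab entry (illustrative values only)
    192.168.0.1:6789:/    /mnt/mycephfs    ceph    name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev    0 2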
It will mount a ceph file system specified +via the -m option or described by ceph.conf (see below) at the +specific mount point. See `Mount CephFS using FUSE`_ for detailed +information. The file system can be unmounted with:: @@ -73,3 +74,5 @@ See also fusermount(8), :doc:`ceph `\(8) + +.. _Mount CephFS using FUSE: ../../../cephfs/fuse/ diff --git a/ceph/doc/man/8/ceph-volume.rst b/ceph/doc/man/8/ceph-volume.rst index 197bea1ae..af5775997 100644 --- a/ceph/doc/man/8/ceph-volume.rst +++ b/ceph/doc/man/8/ceph-volume.rst @@ -1,8 +1,8 @@ :orphan: -======================================== - ceph-volume -- Ceph OSD deployment tool -======================================== +======================================================= + ceph-volume -- Ceph OSD deployment and inspection tool +======================================================= .. program:: ceph-volume @@ -12,6 +12,8 @@ Synopsis | **ceph-volume** [-h] [--cluster CLUSTER] [--log-level LOG_LEVEL] | [--log-path LOG_PATH] +| **ceph-volume** **inventory** + | **ceph-volume** **lvm** [ *trigger* | *create* | *activate* | *prepare* | *zap* | *list* | *batch*] @@ -34,6 +36,27 @@ them. Commands ======== +inventory +--------- + +This subcommand provides information about a host's physical disc inventory and +reports metadata about these discs. Among this metadata one can find disc +specific data items (like model, size, rotational or solid state) as well as +data items specific to ceph using a device, such as if it is available for +use with ceph or if logical volumes are present. + +Examples:: + + ceph-volume inventory + ceph-volume inventory /dev/sda + ceph-volume inventory --format json-pretty + +Optional arguments: + +* [-h, --help] show the help message and exit +* [--format] report format, valid values are ``plain`` (default), + ``json`` and ``json-pretty`` + lvm --- @@ -58,6 +81,7 @@ Optional arguments: * [--bluestore] Use the bluestore objectstore (default) * [--filestore] Use the filestore objectstore * [--yes] Skip the report and prompt to continue provisioning +* [--prepare] Only prepare OSDs, do not activate * [--dmcrypt] Enable encryption for the underlying OSD devices * [--crush-device-class] Define a CRUSH device class to assign the OSD to * [--no-systemd] Do not enable or create any systemd units @@ -65,6 +89,9 @@ Optional arguments: current input (requires devices to be passed in) * [--format] Output format when reporting (used along with --report), can be one of 'pretty' (default) or 'json' +* [--block-db-size] Set (or override) the "bluestore_block_db_size" value, + in bytes +* [--journal-size] Override the "osd_journal_size" value, in megabytes Required positional arguments: diff --git a/ceph/doc/man/8/ceph.rst b/ceph/doc/man/8/ceph.rst index c4ca9d822..32482a7d2 100644 --- a/ceph/doc/man/8/ceph.rst +++ b/ceph/doc/man/8/ceph.rst @@ -1263,11 +1263,11 @@ Subcommand ``ls`` lists pg with specific pool, osd, state Usage:: ceph pg ls {} {active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| + scrubbing|degraded|inconsistent|peering|repair| recovery|backfill_wait|incomplete|stale| remapped| deep_scrub|backfill|backfill_toofull|recovery_wait| undersized [active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| + scrubbing|degraded|inconsistent|peering|repair| recovery|backfill_wait|incomplete|stale|remapped| deep_scrub|backfill|backfill_toofull|recovery_wait| undersized...]} @@ -1278,11 +1278,11 @@ Usage:: ceph pg ls-by-osd {} 
{active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| + scrubbing|degraded|inconsistent|peering|repair| recovery|backfill_wait|incomplete|stale| remapped| deep_scrub|backfill|backfill_toofull|recovery_wait| undersized [active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| + scrubbing|degraded|inconsistent|peering|repair| recovery|backfill_wait|incomplete|stale|remapped| deep_scrub|backfill|backfill_toofull|recovery_wait| undersized...]} @@ -1293,11 +1293,11 @@ Usage:: ceph pg ls-by-pool {} {active| clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| + scrubbing|degraded|inconsistent|peering|repair| recovery|backfill_wait|incomplete|stale| remapped| deep_scrub|backfill|backfill_toofull|recovery_wait| undersized [active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| + scrubbing|degraded|inconsistent|peering|repair| recovery|backfill_wait|incomplete|stale|remapped| deep_scrub|backfill|backfill_toofull|recovery_wait| undersized...]} @@ -1308,11 +1308,11 @@ Usage:: ceph pg ls-by-primary {} {active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| + scrubbing|degraded|inconsistent|peering|repair| recovery|backfill_wait|incomplete|stale| remapped| deep_scrub|backfill|backfill_toofull|recovery_wait| undersized [active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| + scrubbing|degraded|inconsistent|peering|repair| recovery|backfill_wait|incomplete|stale|remapped| deep_scrub|backfill|backfill_toofull|recovery_wait| undersized...]} diff --git a/ceph/doc/rados/operations/monitoring.rst b/ceph/doc/rados/operations/monitoring.rst index c291440b7..603e8905e 100644 --- a/ceph/doc/rados/operations/monitoring.rst +++ b/ceph/doc/rados/operations/monitoring.rst @@ -297,7 +297,7 @@ three monitors may return the following: Checking MDS Status =================== -Metadata servers provide metadata services for Ceph FS. Metadata servers have +Metadata servers provide metadata services for CephFS. Metadata servers have two sets of states: ``up | down`` and ``active | inactive``. To ensure your metadata servers are ``up`` and ``active``, execute the following:: diff --git a/ceph/doc/radosgw/index.rst b/ceph/doc/radosgw/index.rst index b28f76e91..2e25fdbf1 100644 --- a/ceph/doc/radosgw/index.rst +++ b/ceph/doc/radosgw/index.rst @@ -1,3 +1,5 @@ +.. _object-gateway: + ===================== Ceph Object Gateway ===================== diff --git a/ceph/doc/radosgw/ldap-auth.rst b/ceph/doc/radosgw/ldap-auth.rst index c67da0447..be854c1e5 100644 --- a/ceph/doc/radosgw/ldap-auth.rst +++ b/ceph/doc/radosgw/ldap-auth.rst @@ -65,7 +65,7 @@ authentication: - ``rgw_ldap_dnattr``: The attribute being used in the constructed search filter to match a username. Depending on your Directory Information Tree (DIT) this would probably be ``uid`` or ``cn``. -- ``rgw_search_filter``: If not specified, the Ceph Object Gateway +- ``rgw_ldap_searchfilter``: If not specified, the Ceph Object Gateway automatically constructs the search filter with the ``rgw_ldap_dnattr`` setting. Use this parameter to narrow the list of allowed users in very flexible ways. 
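[Editor's note] As an illustrative ``ceph.conf`` fragment (the gateway section name and the group DN are placeholders, not values taken from this patch), the corrected ``rgw_ldap_searchfilter`` option could be combined with ``rgw_ldap_dnattr`` like so::

    [client.rgw.gateway-node1]
        rgw ldap dnattr = uid
        # restrict authentication to members of this LDAP group
        rgw ldap searchfilter = memberOf=cn=ceph-users,ou=groups,dc=example,dc=com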
Consult the *Using a custom search filter to limit user access diff --git a/ceph/doc/rbd/index.rst b/ceph/doc/rbd/index.rst index 3cdb7b5be..5bde3378a 100644 --- a/ceph/doc/rbd/index.rst +++ b/ceph/doc/rbd/index.rst @@ -32,8 +32,8 @@ Ceph's block devices deliver high performance with infinite scalability to `kernel modules`_, or to :abbr:`KVMs (kernel virtual machines)` such as `QEMU`_, and cloud-based computing systems like `OpenStack`_ and `CloudStack`_ that rely on libvirt and QEMU to integrate with Ceph block devices. You can use the same cluster -to operate the `Ceph RADOS Gateway`_, the `Ceph FS filesystem`_, and Ceph block -devices simultaneously. +to operate the :ref:`Ceph RADOS Gateway `, the +:ref:`CephFS filesystem `, and Ceph block devices simultaneously. .. important:: To use Ceph Block Devices, you must have access to a running Ceph cluster. @@ -63,10 +63,8 @@ devices simultaneously. APIs -.. _RBD Caching: ../rbd-config-ref/ -.. _kernel modules: ../rbd-ko/ -.. _QEMU: ../qemu-rbd/ -.. _OpenStack: ../rbd-openstack -.. _CloudStack: ../rbd-cloudstack -.. _Ceph RADOS Gateway: ../../radosgw/ -.. _Ceph FS filesystem: ../../cephfs/ +.. _RBD Caching: ./rbd-config-ref/ +.. _kernel modules: ./rbd-ko/ +.. _QEMU: ./qemu-rbd/ +.. _OpenStack: ./rbd-openstack +.. _CloudStack: ./rbd-cloudstack diff --git a/ceph/doc/start/quick-cephfs.rst b/ceph/doc/start/quick-cephfs.rst index da4d84efa..fda0919a4 100644 --- a/ceph/doc/start/quick-cephfs.rst +++ b/ceph/doc/start/quick-cephfs.rst @@ -1,8 +1,8 @@ -===================== - Ceph FS Quick Start -===================== +=================== + CephFS Quick Start +=================== -To use the :term:`Ceph FS` Quick Start guide, you must have executed the +To use the :term:`CephFS` Quick Start guide, you must have executed the procedures in the `Storage Cluster Quick Start`_ guide first. Execute this quick start on the Admin Host. @@ -53,7 +53,7 @@ following procedure: cat ceph.client.admin.keyring -#. Copy the key of the user who will be using the mounted Ceph FS filesystem. +#. Copy the key of the user who will be using the mounted CephFS filesystem. It should look something like this:: [client.admin] @@ -75,7 +75,7 @@ following procedure: Kernel Driver ============= -Mount Ceph FS as a kernel driver. :: +Mount CephFS as a kernel driver. :: sudo mkdir /mnt/mycephfs sudo mount -t ceph {ip-address-of-monitor}:6789:/ /mnt/mycephfs @@ -87,14 +87,14 @@ example:: sudo mount -t ceph 192.168.0.1:6789:/ /mnt/mycephfs -o name=admin,secretfile=admin.secret -.. note:: Mount the Ceph FS filesystem on the admin node, +.. note:: Mount the CephFS filesystem on the admin node, not the server node. See `FAQ`_ for details. Filesystem in User Space (FUSE) =============================== -Mount Ceph FS as a Filesystem in User Space (FUSE). :: +Mount CephFS as a Filesystem in User Space (FUSE). :: sudo mkdir ~/mycephfs sudo ceph-fuse -m {ip-address-of-monitor}:6789 ~/mycephfs @@ -108,12 +108,12 @@ is not in the default location (i.e., ``/etc/ceph``):: Additional Information ====================== -See `Ceph FS`_ for additional information. Ceph FS is not quite as stable +See `CephFS`_ for additional information. CephFS is not quite as stable as the Ceph Block Device and Ceph Object Storage. See `Troubleshooting`_ if you encounter trouble. .. _Storage Cluster Quick Start: ../quick-ceph-deploy -.. _Ceph FS: ../../cephfs/ +.. _CephFS: ../../cephfs/ .. _FAQ: http://wiki.ceph.com/How_Can_I_Give_Ceph_a_Try .. _Troubleshooting: ../../cephfs/troubleshooting .. 
_OS Recommendations: ../os-recommendations diff --git a/ceph/qa/cephfs/clusters/1a3s-mds-1c-client.yaml b/ceph/qa/cephfs/clusters/1a3s-mds-1c-client.yaml new file mode 100644 index 000000000..0b479ed71 --- /dev/null +++ b/ceph/qa/cephfs/clusters/1a3s-mds-1c-client.yaml @@ -0,0 +1,12 @@ +roles: +- [mon.a, mgr.y, mds.a, mds.w-s, osd.0, osd.1, osd.2, osd.3, client.0] +- [mon.b, mon.c, mgr.x, mds.x-s, mds.y-s, osd.4, osd.5, osd.6, osd.7] +openstack: +- volumes: # attached to each instance + count: 4 + size: 20 # GB +- machine: + disk: 200 # GB +log-rotate: + ceph-mds: 10G + ceph-osd: 10G diff --git a/ceph/qa/cephfs/clusters/1a3s-mds-2c-client.yaml b/ceph/qa/cephfs/clusters/1a3s-mds-2c-client.yaml new file mode 100644 index 000000000..01388ae54 --- /dev/null +++ b/ceph/qa/cephfs/clusters/1a3s-mds-2c-client.yaml @@ -0,0 +1,12 @@ +roles: +- [mon.a, mgr.y, mds.a, mds.w-s, osd.0, osd.1, osd.2, osd.3, client.0] +- [mon.b, mon.c, mgr.x, mds.x-s, mds.y-s, osd.4, osd.5, osd.6, osd.7, client.1] +openstack: +- volumes: # attached to each instance + count: 4 + size: 20 # GB +- machine: + disk: 200 # GB +log-rotate: + ceph-mds: 10G + ceph-osd: 10G diff --git a/ceph/qa/suites/knfs/basic/% b/ceph/qa/cephfs/conf/+ similarity index 100% rename from ceph/qa/suites/knfs/basic/% rename to ceph/qa/cephfs/conf/+ diff --git a/ceph/qa/cephfs/overrides/debug.yaml b/ceph/qa/cephfs/conf/client.yaml similarity index 62% rename from ceph/qa/cephfs/overrides/debug.yaml rename to ceph/qa/cephfs/conf/client.yaml index cf5995fdd..96026f91a 100644 --- a/ceph/qa/cephfs/overrides/debug.yaml +++ b/ceph/qa/cephfs/conf/client.yaml @@ -1,9 +1,7 @@ overrides: ceph: conf: - mds: - debug ms: 1 - debug mds: 20 client: + client mount timeout: 600 debug ms: 1 debug client: 20 diff --git a/ceph/qa/cephfs/conf/mds.yaml b/ceph/qa/cephfs/conf/mds.yaml new file mode 100644 index 000000000..0c2f83805 --- /dev/null +++ b/ceph/qa/cephfs/conf/mds.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + conf: + mds: + debug mds: 20 + debug ms: 1 + mds debug frag: true + mds debug scatterstat: true + mds op complaint time: 180 + mds verify scatter: true + osd op complaint time: 180 diff --git a/ceph/qa/cephfs/conf/mon.yaml b/ceph/qa/cephfs/conf/mon.yaml new file mode 100644 index 000000000..eea56004a --- /dev/null +++ b/ceph/qa/cephfs/conf/mon.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + mon: + mon op complaint time: 120 diff --git a/ceph/qa/cephfs/conf/osd.yaml b/ceph/qa/cephfs/conf/osd.yaml new file mode 100644 index 000000000..1087202f9 --- /dev/null +++ b/ceph/qa/cephfs/conf/osd.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + osd: + osd op complaint time: 180 diff --git a/ceph/qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml b/ceph/qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml index 9e090d7de..48c1b837d 100644 --- a/ceph/qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml +++ b/ceph/qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml @@ -7,9 +7,3 @@ overrides: - but it is still running # MDS daemon 'b' is not responding, replacing it as rank 0 with standby 'a' - is not responding - conf: - mds: - debug mds: 20 - debug ms: 1 - client: - debug client: 10 diff --git a/ceph/qa/packages/packages.yaml b/ceph/qa/packages/packages.yaml index e2120d3a3..398656450 100644 --- a/ceph/qa/packages/packages.yaml +++ b/ceph/qa/packages/packages.yaml @@ -32,6 +32,8 @@ ceph: - rbd-fuse-dbg - rbd-mirror-dbg - rbd-nbd-dbg + - python3-cephfs + - python3-rados rpm: - ceph-radosgw - ceph-test @@ -47,3 +49,5 @@ ceph: - python-ceph - rbd-fuse - 
ceph-debuginfo + - python34-cephfs + - python34-rados diff --git a/ceph/qa/standalone/erasure-code/test-erasure-eio.sh b/ceph/qa/standalone/erasure-code/test-erasure-eio.sh index 32bef54ef..ce037aaca 100755 --- a/ceph/qa/standalone/erasure-code/test-erasure-eio.sh +++ b/ceph/qa/standalone/erasure-code/test-erasure-eio.sh @@ -353,6 +353,37 @@ function TEST_rados_get_with_subreadall_eio_shard_1() { delete_erasure_coded_pool $poolname } +# Test recovery the object attr read error +function TEST_ec_object_attr_read_error() { + local dir=$1 + local objname=myobject + + setup_osds 7 || return 1 + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 3 2 || return 1 + + local primary_osd=$(get_primary $poolname $objname) + # Kill primary OSD + kill_daemons $dir TERM osd.${primary_osd} >&2 < /dev/null || return 1 + + # Write data + rados_put $dir $poolname $objname || return 1 + + # Inject eio, shard 1 is the one read attr + inject_eio ec mdata $poolname $objname $dir 1 || return 1 + + # Restart OSD + run_osd $dir ${primary_osd} || return 1 + + # Cluster should recover this object + wait_for_clean || return 1 + + rados_get $dir $poolname myobject || return 1 + + delete_erasure_coded_pool $poolname +} + # Test recovery the first k copies aren't all available function TEST_ec_single_recovery_error() { local dir=$1 diff --git a/ceph/qa/standalone/osd/ec-error-rollforward.sh b/ceph/qa/standalone/osd/ec-error-rollforward.sh index e3a6480a5..cfbf28719 100755 --- a/ceph/qa/standalone/osd/ec-error-rollforward.sh +++ b/ceph/qa/standalone/osd/ec-error-rollforward.sh @@ -40,13 +40,18 @@ function TEST_ec_error_rollforward() { kill -STOP `cat $dir/osd.2.pid` rados -p ec rm foo & + pids="$!" sleep 1 rados -p ec rm a & + pids+=" $!" rados -p ec rm b & + pids+=" $!" rados -p ec rm c & + pids+=" $!" sleep 1 kill -9 `cat $dir/osd.?.pid` - kill %1 %2 %3 %4 + kill $pids + wait run_osd $dir 0 || return 1 run_osd $dir 1 || return 1 diff --git a/ceph/qa/standalone/osd/osd-backfill-stats.sh b/ceph/qa/standalone/osd/osd-backfill-stats.sh index f1fed4bc9..d57305f4e 100755 --- a/ceph/qa/standalone/osd/osd-backfill-stats.sh +++ b/ceph/qa/standalone/osd/osd-backfill-stats.sh @@ -55,14 +55,24 @@ function above_margin() { return $(( $check >= $target && $check <= $target + $margin ? 
0 : 1 )) } +FIND_UPACT='grep "pg[[]${PG}.*backfilling.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/"' +FIND_FIRST='grep "pg[[]${PG}.*backfilling.*_update_calc_stats $which " $log | grep -F " ${UPACT}${addp}" | grep -v est | head -1 | sed "s/.* \([0-9]*\)$/\1/"' +FIND_LAST='grep "pg[[]${PG}.*backfilling.*_update_calc_stats $which " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/"' + function check() { - local PG=$1 - local log=$2 - local degraded_start=$3 - local degraded_end=$4 - local misplaced_start=$5 - local misplaced_end=$6 - local type=$7 + local dir=$1 + local PG=$2 + local primary=$3 + local type=$4 + local degraded_start=$5 + local degraded_end=$6 + local misplaced_start=$7 + local misplaced_end=$8 + local primary_start=${9:-} + local primary_end=${10:-} + + local log=$(grep -l +backfilling $dir/osd.*.log) + test -n "$log" || return 1 local addp=" " if [ "$type" = "erasure" ]; @@ -70,19 +80,38 @@ function check() { addp="p" fi - UPACT=$(grep "pg[[]${PG}.*backfilling.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/") + UPACT=$(eval $FIND_UPACT) + [ -n "$UPACT" ] || return 1 - # Check 3rd line at start because of false backfill starts - FIRST=$(grep "pg[[]${PG}.*backfilling.*_update_calc_stats degraded " $log | grep -F " ${UPACT}${addp}" | head -1 | sed "s/.* \([0-9]*\)$/\1/") + # Check 3rd line at start because of false recovery starts + local which="degraded" + FIRST=$(eval $FIND_FIRST) + [ -n "$FIRST" ] || return 1 below_margin $FIRST $degraded_start || return 1 - LAST=$(grep "pg[[]${PG}.*backfilling.*_update_calc_stats degraded " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/") + LAST=$(eval $FIND_LAST) + [ -n "$LAST" ] || return 1 above_margin $LAST $degraded_end || return 1 - # Check 3rd line at start because of false backfill starts - FIRST=$(grep "pg[[]${PG}.*backfilling.*_update_calc_stats misplaced " $log | grep -F " ${UPACT}${addp}" | head -1 | sed "s/.* \([0-9]*\)$/\1/") + # Check 3rd line at start because of false recovery starts + which="misplaced" + FIRST=$(eval $FIND_FIRST) + [ -n "$FIRST" ] || return 1 below_margin $FIRST $misplaced_start || return 1 - LAST=$(grep "pg[[]${PG}.*backfilling.*_update_calc_stats misplaced " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/") + LAST=$(eval $FIND_LAST) + [ -n "$LAST" ] || return 1 above_margin $LAST $misplaced_end || return 1 + + # This is the value of set into MISSING_ON_PRIMARY + if [ -n "$primary_start" ]; + then + which="shard $primary" + FIRST=$(eval $FIND_FIRST) + [ -n "$FIRST" ] || return 1 + below_margin $FIRST $primary_start || return 1 + LAST=$(eval $FIND_LAST) + [ -n "$LAST" ] || return 1 + above_margin $LAST $primary_end || return 1 + fi } # [1] -> [1, 0, 2] @@ -122,11 +151,8 @@ function TEST_backfill_sizeup() { local primary=$(get_primary $poolname obj1) local PG=$(get_pg $poolname obj1) - local log=$(grep -l +backfilling $dir/osd.*.log) - test -n "$log" || return 1 - local degraded=$(expr $objects \* 2) - check $PG $log $degraded 0 0 0 || return 1 + check $dir $PG $primary replicated $degraded 0 0 0 || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -173,10 +199,8 @@ function TEST_backfill_sizeup_out() { wait_for_clean || return 1 - local log=$(grep -l +backfilling $dir/osd.*.log) - test -n "$log" || return 1 local degraded=$(expr $objects \* 2) - check $PG $log $degraded 0 $objects 0 || return 1 + check $dir $PG $primary replicated $degraded 0 $objects 0 || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -221,10 +245,7 @@ function 
TEST_backfill_out() { wait_for_clean || return 1 - local log=$(grep -l +backfilling $dir/osd.*.log) - test -n "$log" || return 1 - - check $PG $log 0 0 $objects 0 || return 1 + check $dir $PG $primary replicated 0 0 $objects 0 || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -273,10 +294,7 @@ function TEST_backfill_down_out() { wait_for_clean || return 1 - local log=$(grep -l +backfilling $dir/osd.*.log) - test -n "$log" || return 1 - - check $PG $log $objects 0 0 0 || return 1 + check $dir $PG $primary replicated $objects 0 0 0 || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -328,11 +346,9 @@ function TEST_backfill_out2() { wait_for_clean || return 1 - local log=$(grep -l +backfilling $dir/osd.*.log) - test -n "$log" || return 1 local misplaced=$(expr $objects \* 2) - check $PG $log $objects 0 $misplaced 0 || return 1 + check $dir $PG $primary replicated $objects 0 $misplaced 0 || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -384,10 +400,8 @@ function TEST_backfill_sizeup4_allout() { wait_for_clean || return 1 - local log=$(grep -l +backfilling $dir/osd.*.log) - test -n "$log" || return 1 local misdeg=$(expr $objects \* 2) - check $PG $log $misdeg 0 $misdeg $objects || return 1 + check $dir $PG $primary replicated $misdeg 0 $misdeg $objects || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -445,11 +459,9 @@ function TEST_backfill_remapped() { wait_for_clean || return 1 - local log=$(grep -l +backfilling $dir/osd.*.log) - test -n "$log" || return 1 local misplaced=$(expr $objects \* 2) - check $PG $log 0 0 $misplaced $objects || return 1 + check $dir $PG $primary replicated 0 0 $misplaced $objects || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -503,11 +515,8 @@ function TEST_backfill_ec_all_out() { wait_for_clean || return 1 - local log=$(grep -l +backfilling $dir/osd.*.log) - test -n "$log" || return 1 - local misplaced=$(expr $objects \* 3) - check $PG $log 0 0 $misplaced $objects erasure || return 1 + check $dir $PG $primary erasure 0 0 $misplaced $objects || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -553,11 +562,8 @@ function TEST_backfill_ec_prim_out() { wait_for_clean || return 1 - local log=$(grep -l +backfilling $dir/osd.*.log) - test -n "$log" || return 1 - local misplaced=$(expr $objects \* 3) - check $PG $log 0 0 $objects 0 erasure || return 1 + check $dir $PG $primary erasure 0 0 $objects 0 || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -640,11 +646,8 @@ function TEST_backfill_ec_down_all_out() { ceph pg dump pgs - local log=$(grep -l +backfilling $dir/osd.*.log) - test -n "$log" || return 1 - local misplaced=$(expr $objects \* 2) - check $PG $log $objects 0 $misplaced 0 erasure || return 1 + check $dir $PG $primary erasure $objects 0 $misplaced 0 || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -695,11 +698,8 @@ function TEST_backfill_ec_down_out() { wait_for_clean || return 1 - local log=$(grep -l +backfilling $dir/osd.*.log) - test -n "$log" || return 1 - local misplaced=$(expr $objects \* 2) - check $PG $log $objects 0 0 0 erasure || return 1 + check $dir $PG $primary erasure $objects 0 0 0 || return 1 delete_pool $poolname kill_daemons $dir || return 1 diff --git a/ceph/qa/standalone/osd/osd-recovery-stats.sh b/ceph/qa/standalone/osd/osd-recovery-stats.sh index 46f51adb8..cf08a86ef 100755 --- a/ceph/qa/standalone/osd/osd-recovery-stats.sh +++ b/ceph/qa/standalone/osd/osd-recovery-stats.sh @@ -54,14 +54,23 @@ function 
above_margin() { return $(( $check >= $target && $check <= $target + $margin ? 0 : 1 )) } +FIND_UPACT='grep "pg[[]${PG}.*recovering.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/"' +FIND_FIRST='grep "pg[[]${PG}.*recovering.*_update_calc_stats $which " $log | grep -F " ${UPACT}${addp}" | grep -v est | head -1 | sed "s/.* \([0-9]*\)$/\1/"' +FIND_LAST='grep "pg[[]${PG}.*recovering.*_update_calc_stats $which " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/"' + function check() { - local PG=$1 - local log=$2 - local degraded_start=$3 - local degraded_end=$4 - local misplaced_start=$5 - local misplaced_end=$6 - local type=$7 + local dir=$1 + local PG=$2 + local primary=$3 + local type=$4 + local degraded_start=$5 + local degraded_end=$6 + local misplaced_start=$7 + local misplaced_end=$8 + local primary_start=${9:-} + local primary_end=${10:-} + + local log=$dir/osd.${primary}.log local addp=" " if [ "$type" = "erasure" ]; @@ -69,19 +78,31 @@ function check() { addp="p" fi - UPACT=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/") + UPACT=$(eval $FIND_UPACT) # Check 3rd line at start because of false recovery starts - FIRST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats degraded " $log | grep -F " ${UPACT}${addp}" | head -1 | sed "s/.* \([0-9]*\)$/\1/") + local which="degraded" + FIRST=$(eval $FIND_FIRST) below_margin $FIRST $degraded_start || return 1 - LAST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats degraded " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/") + LAST=$(eval $FIND_LAST) above_margin $LAST $degraded_end || return 1 # Check 3rd line at start because of false recovery starts - FIRST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats misplaced " $log | grep -F " ${UPACT}${addp}" | head -1 | sed "s/.* \([0-9]*\)$/\1/") + which="misplaced" + FIRST=$(eval $FIND_FIRST) below_margin $FIRST $misplaced_start || return 1 - LAST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats misplaced " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/") + LAST=$(eval $FIND_LAST) above_margin $LAST $misplaced_end || return 1 + + # This is the value of set into MISSING_ON_PRIMARY + if [ -n "$primary_start" ]; + then + which="shard $primary" + FIRST=$(eval $FIND_FIRST) + below_margin $FIRST $primary_start || return 1 + LAST=$(eval $FIND_LAST) + above_margin $LAST $primary_end || return 1 + fi } # [1,0,?] 
-> [1,2,4] @@ -134,8 +155,7 @@ function do_recovery_out1() { wait_for_clean || return 1 - local log=$dir/osd.${primary}.log - check $PG $log $objects 0 0 0 $type || return 1 + check $dir $PG $primary $type $objects 0 0 0 || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -154,11 +174,12 @@ function TEST_recovery_erasure_out1() { } # [0, 1] -> [2,3,4,5] -# degraded 2000 -> 0 +# degraded 1000 -> 0 +# misplaced 1000 -> 0 # missing on primary 500 -> 0 # PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP -# 1.0 500 500 2000 0 0 0 500 500 active+recovering+degraded 2017-10-27 09:38:37.453438 22'500 25:394 [2,4,3,5] 2 [2,4,3,5] 2 0'0 2017-10-27 09:37:58.046748 0'0 2017-10-27 09:37:58.046748 +# 1.0 500 500 1000 1000 0 0 500 500 active+recovering+degraded 2017-10-27 09:38:37.453438 22'500 25:394 [2,4,3,5] 2 [2,4,3,5] 2 0'0 2017-10-27 09:37:58.046748 0'0 2017-10-27 09:37:58.046748 function TEST_recovery_sizeup() { local dir=$1 @@ -198,29 +219,22 @@ function TEST_recovery_sizeup() { # Get new primary primary=$(get_primary $poolname obj1) - local degraded=$(expr $objects \* 4) + local degraded=$(expr $objects \* 2) + local misplaced=$(expr $objects \* 2) local log=$dir/osd.${primary}.log - check $PG $log $degraded 0 0 0 || return 1 - - UPACT=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/") - - # This is the value of set into MISSING_ON_PRIMARY - FIRST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats missing shard $primary " $log | grep -F " $UPACT " | head -1 | sed "s/.* \([0-9]*\)$/\1/") - below_margin $FIRST $objects || return 1 - LAST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats missing shard $primary " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/") - above_margin $LAST 0 || return 1 + check $dir $PG $primary replicated $degraded 0 $misplaced 0 $objects 0 || return 1 delete_pool $poolname kill_daemons $dir || return 1 } # [0, 1, 2, 4] -> [3, 5] -# degraded 1000 -> 0 +# misplaced 1000 -> 0 # missing on primary 500 -> 0 # active+recovering+degraded # PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP -# 1.0 500 500 1000 0 0 0 500 500 active+recovering+degraded 2017-10-27 09:34:50.012261 22'500 27:118 [3,5] 3 [3,5] 3 0'0 2017-10-27 09:34:08.617248 0'0 2017-10-27 09:34:08.617248 +# 1.0 500 500 0 1000 0 0 500 500 active+recovering+degraded 2017-10-27 09:34:50.012261 22'500 27:118 [3,5] 3 [3,5] 3 0'0 2017-10-27 09:34:08.617248 0'0 2017-10-27 09:34:08.617248 function TEST_recovery_sizedown() { local dir=$1 @@ -264,16 +278,16 @@ function TEST_recovery_sizedown() { # Get new primary primary=$(get_primary $poolname obj1) - local degraded=$(expr $objects \* 2) + local misplaced=$(expr $objects \* 2) local log=$dir/osd.${primary}.log - check $PG $log $degraded 0 0 0 || return 1 + check $dir $PG $primary replicated 0 0 $misplaced 0 || return 1 UPACT=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/") # This is the value of set into MISSING_ON_PRIMARY - FIRST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats missing shard $primary " $log | grep -F " $UPACT " | head -1 | sed "s/.* \([0-9]*\)$/\1/") + FIRST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats shard $primary " 
$log | grep -F " $UPACT " | head -1 | sed "s/.* \([0-9]*\)$/\1/") below_margin $FIRST $objects || return 1 - LAST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats missing shard $primary " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/") + LAST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats shard $primary " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/") above_margin $LAST 0 || return 1 delete_pool $poolname @@ -281,19 +295,21 @@ function TEST_recovery_sizedown() { } # [1] -> [1,2] -# degraded 200 -> 100 +# degraded 300 -> 200 # active+recovering+undersized+degraded # PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP -# 1.0 100 0 200 0 0 0 100 100 active+recovering+undersized+degraded 2017-11-17 17:16:15.302943 13'500 16:643 [1,2] 1 [1,2] 1 0'0 2017-11-17 17:15:34.985563 0'0 2017-11-17 17:15:34.985563 +# 1.0 100 0 300 0 0 0 100 100 active+recovering+undersized+degraded 2017-11-17 17:16:15.302943 13'500 16:643 [1,2] 1 [1,2] 1 0'0 2017-11-17 17:15:34.985563 0'0 2017-11-17 17:15:34.985563 function TEST_recovery_undersized() { local dir=$1 + local osds=3 run_mon $dir a || return 1 run_mgr $dir x || return 1 - run_osd $dir 0 || return 1 - run_osd $dir 1 || return 1 - run_osd $dir 2 || return 1 + for i in $(seq 0 $(expr $osds - 1)) + do + run_osd $dir $i || return 1 + done create_pool $poolname 1 1 ceph osd pool set $poolname size 1 @@ -310,7 +326,7 @@ function TEST_recovery_undersized() { ceph osd set norecover # Mark any osd not the primary (only 1 replica so also has no replica) - for i in 0 1 2 + for i in $(seq 0 $(expr $osds - 1)) do if [ $i = $primary ]; then @@ -319,12 +335,12 @@ function TEST_recovery_undersized() { ceph osd out osd.$i break done - ceph osd pool set test size 3 + ceph osd pool set test size 4 ceph osd unset norecover ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0 # Give extra sleep time because code below doesn't have the sophistication of wait_for_clean() sleep 10 - flush_pg_stats + flush_pg_stats || return 1 # Wait for recovery to finish # Can't use wait_for_clean() because state goes from active+recovering+undersized+degraded @@ -347,13 +363,9 @@ function TEST_recovery_undersized() { primary=$(get_primary $poolname obj1) local log=$dir/osd.${primary}.log - UPACT=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/") - - local degraded=$(expr $objects \* 2) - FIRST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats degraded " $log | grep -F " $UPACT " | head -1 | sed "s/.* \([0-9]*\)$/\1/") - below_margin $FIRST $degraded || return 1 - LAST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats degraded " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/") - above_margin $LAST $objects || return 1 + local first_degraded=$(expr $objects \* 3) + local last_degraded=$(expr $objects \* 2) + check $dir $PG $primary replicated $first_degraded $last_degraded 0 0 || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -417,7 +429,7 @@ function TEST_recovery_erasure_remapped() { wait_for_clean || return 1 local log=$dir/osd.${primary}.log - check $PG $log $objects 0 $objects $objects erasure || return 1 + check $dir $PG $primary erasure $objects 0 $objects $objects || return 1 delete_pool $poolname kill_daemons $dir || return 1 @@ -425,6 +437,75 @@ function TEST_recovery_erasure_remapped() { main osd-recovery-stats "$@" +function TEST_recovery_multi() { 
+ local dir=$1 + + local osds=6 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for i in $(seq 0 $(expr $osds - 1)) + do + run_osd $dir $i || return 1 + done + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 3 + ceph osd pool set $poolname min_size 1 + + wait_for_clean || return 1 + + rados -p $poolname put obj1 /dev/null + + local primary=$(get_primary $poolname obj1) + local otherosd=$(get_not_primary $poolname obj1) + + ceph osd set noout + ceph osd set norecover + kill $(cat $dir/osd.${otherosd}.pid) + ceph osd down osd.${otherosd} + + local half=$(expr $objects / 2) + for i in $(seq 2 $half) + do + rados -p $poolname put obj$i /dev/null + done + + kill $(cat $dir/osd.${primary}.pid) + ceph osd down osd.${primary} + run_osd $dir ${otherosd} + sleep 3 + + for i in $(seq $(expr $half + 1) $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local PG=$(get_pg $poolname obj1) + local otherosd=$(get_not_primary $poolname obj$objects) + + ceph osd unset noout + ceph osd out osd.$primary osd.$otherosd + run_osd $dir ${primary} + sleep 3 + + ceph osd pool set test size 4 + ceph osd unset norecover + ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0 + sleep 2 + + wait_for_clean || return 1 + + # Get new primary + primary=$(get_primary $poolname obj1) + + local log=$dir/osd.${primary}.log + check $dir $PG $primary replicated 399 0 300 0 99 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + + # Local Variables: # compile-command: "make -j4 && ../qa/run-standalone.sh osd-recovery-stats.sh" # End: diff --git a/ceph/qa/standalone/scrub/osd-scrub-repair.sh b/ceph/qa/standalone/scrub/osd-scrub-repair.sh index 257b9dbaf..a266aed90 100755 --- a/ceph/qa/standalone/scrub/osd-scrub-repair.sh +++ b/ceph/qa/standalone/scrub/osd-scrub-repair.sh @@ -513,6 +513,7 @@ function TEST_corrupt_scrub_replicated() { done local pg=$(get_pg $poolname ROBJ0) + local primary=$(get_primary $poolname ROBJ0) # Compute an old omap digest and save oi CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) \ @@ -643,6 +644,39 @@ function TEST_corrupt_scrub_replicated() { pg_scrub $pg + ERRORS=0 + declare -a s_err_strings + err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:30259878:::ROBJ15:head : candidate had a missing info key" + err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:33aca486:::ROBJ18:head : object info inconsistent " + err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:5c7b2c47:::ROBJ16:head : candidate had a corrupt snapset" + err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:5c7b2c47:::ROBJ16:head : candidate had a missing snapset key" + err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:5c7b2c47:::ROBJ16:head : failed to pick suitable object info" + err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:86586531:::ROBJ8:head : attr value mismatch '_key1-ROBJ8', attr name mismatch '_key3-ROBJ8', attr name mismatch '_key2-ROBJ8'" + err_strings[6]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:bc819597:::ROBJ12:head : candidate had a stat error" + err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:c0c86b1d:::ROBJ14:head : candidate had a missing info key" + err_strings[8]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:c0c86b1d:::ROBJ14:head : candidate had a corrupt info" + 
err_strings[9]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:c0c86b1d:::ROBJ14:head : failed to pick suitable object info" + err_strings[10]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : candidate size 9 info size 7 mismatch" + err_strings[11]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : size 9 != size 7 from auth oi 3:ce3f1d6a:::ROBJ1:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 3 dd 2ddbf8f5 od f5fba2c6 alloc_hint [[]0 0 0[]][)], size 9 != size 7 from shard 0" + err_strings[12]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:d60617f9:::ROBJ13:head : candidate had a stat error" + err_strings[13]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 3:f2a5b2a4:::ROBJ3:head : missing" + err_strings[14]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ffdb2004:::ROBJ9:head : candidate size 1 info size 7 mismatch" + err_strings[15]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ffdb2004:::ROBJ9:head : object info inconsistent " + err_strings[16]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 3:c0c86b1d:::ROBJ14:head : no '_' attr" + err_strings[17]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 3:5c7b2c47:::ROBJ16:head : can't decode 'snapset' attr buffer::malformed_input: .* no longer understand old encoding version 3 < 97" + err_strings[18]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub : stat mismatch, got 18/18 objects, 0/0 clones, 17/18 dirty, 17/18 omap, 0/0 pinned, 0/0 hit_set_archive, 0/0 whiteouts, 113/120 bytes, 0/0 hit_set_archive bytes." + err_strings[19]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 1 missing, 7 inconsistent objects" + err_strings[20]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 17 errors" + + for err_string in "${err_strings[@]}" + do + if ! grep -q "$err_string" $dir/osd.${primary}.log + then + echo "Missing log message '$err_string'" + ERRORS=$(expr $ERRORS + 1) + fi + done + rados list-inconsistent-pg $poolname > $dir/json || return 1 # Check pg count test $(jq '. 
| length' $dir/json) = "1" || return 1 @@ -1504,6 +1538,56 @@ EOF inject_eio rep data $poolname ROBJ13 $dir 0 || return 1 # shard 0 of [1, 0], osd.1 pg_deep_scrub $pg + err_strings=() + err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:30259878:::ROBJ15:head : candidate had a missing info key" + err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:33aca486:::ROBJ18:head : data_digest 0xbd89c912 != data_digest 0x2ddbf8f5 from auth oi 3:33aca486:::ROBJ18:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 54 dd 2ddbf8f5 od ddc3680f alloc_hint [[]0 0 255[]][)], object info inconsistent " + err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:33aca486:::ROBJ18:head : data_digest 0xbd89c912 != data_digest 0x2ddbf8f5 from auth oi 3:33aca486:::ROBJ18:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 54 dd 2ddbf8f5 od ddc3680f alloc_hint [[]0 0 255[]][)]" + err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:33aca486:::ROBJ18:head : failed to pick suitable auth object" + err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:5c7b2c47:::ROBJ16:head : candidate had a corrupt snapset" + err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:5c7b2c47:::ROBJ16:head : candidate had a missing snapset key" + err_strings[6]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:5c7b2c47:::ROBJ16:head : failed to pick suitable object info" + err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:86586531:::ROBJ8:head : attr value mismatch '_key1-ROBJ8', attr name mismatch '_key3-ROBJ8', attr name mismatch '_key2-ROBJ8'" + err_strings[8]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:87abbf36:::ROBJ11:head : candidate had a read error" + err_strings[9]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:8aa5320e:::ROBJ17:head : data_digest 0x5af0c3ef != data_digest 0x2ddbf8f5 from auth oi 3:8aa5320e:::ROBJ17:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 51 dd 2ddbf8f5 od e9572720 alloc_hint [[]0 0 0[]][)]" + err_strings[10]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:8aa5320e:::ROBJ17:head : data_digest 0x5af0c3ef != data_digest 0x2ddbf8f5 from auth oi 3:8aa5320e:::ROBJ17:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 51 dd 2ddbf8f5 od e9572720 alloc_hint [[]0 0 0[]][)]" + err_strings[11]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:8aa5320e:::ROBJ17:head : failed to pick suitable auth object" + err_strings[12]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:8b55fa4b:::ROBJ7:head : omap_digest 0xefced57a != omap_digest 0x6a73cc07 from shard 1" + err_strings[13]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:8b55fa4b:::ROBJ7:head : omap_digest 0x6a73cc07 != omap_digest 0xefced57a from auth oi 3:8b55fa4b:::ROBJ7:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 21 dd 2ddbf8f5 od efced57a alloc_hint [[]0 0 0[]][)]" + err_strings[14]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:a53c12e8:::ROBJ6:head : omap_digest 0x689ee887 != omap_digest 0x179c919f from shard 1, omap_digest 0x689ee887 != omap_digest 0x179c919f from auth oi 3:a53c12e8:::ROBJ6:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 18 dd 2ddbf8f5 od 179c919f 
alloc_hint [[]0 0 0[]][)]" + err_strings[15]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:b1f19cbd:::ROBJ10:head : omap_digest 0xa8dd5adc != omap_digest 0xc2025a24 from auth oi 3:b1f19cbd:::ROBJ10:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 30 dd 2ddbf8f5 od c2025a24 alloc_hint [[]0 0 0[]][)]" + err_strings[16]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:b1f19cbd:::ROBJ10:head : omap_digest 0xa8dd5adc != omap_digest 0xc2025a24 from auth oi 3:b1f19cbd:::ROBJ10:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 30 dd 2ddbf8f5 od c2025a24 alloc_hint [[]0 0 0[]][)]" + err_strings[17]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:b1f19cbd:::ROBJ10:head : failed to pick suitable auth object" + err_strings[18]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:bc819597:::ROBJ12:head : candidate had a stat error" + err_strings[19]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:c0c86b1d:::ROBJ14:head : candidate had a missing info key" + err_strings[20]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:c0c86b1d:::ROBJ14:head : candidate had a corrupt info" + err_strings[21]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:c0c86b1d:::ROBJ14:head : failed to pick suitable object info" + err_strings[22]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : candidate size 9 info size 7 mismatch" + err_strings[23]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : data_digest 0x2d4a11c2 != data_digest 0x2ddbf8f5 from shard 0, data_digest 0x2d4a11c2 != data_digest 0x2ddbf8f5 from auth oi 3:ce3f1d6a:::ROBJ1:head[(][0-9]*'[0-9]* osd.1.0:65 dirty|omap|data_digest|omap_digest s 7 uv 3 dd 2ddbf8f5 od f5fba2c6 alloc_hint [[]0 0 0[]][)], size 9 != size 7 from auth oi 3:ce3f1d6a:::ROBJ1:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 3 dd 2ddbf8f5 od f5fba2c6 alloc_hint [[]0 0 0[]][)], size 9 != size 7 from shard 0" + err_strings[24]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:d60617f9:::ROBJ13:head : candidate had a read error" + err_strings[25]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:d60617f9:::ROBJ13:head : candidate had a stat error" + err_strings[26]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:d60617f9:::ROBJ13:head : failed to pick suitable object info" + err_strings[27]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:e97ce31e:::ROBJ2:head : data_digest 0x578a4830 != data_digest 0x2ddbf8f5 from shard 1, data_digest 0x578a4830 != data_digest 0x2ddbf8f5 from auth oi 3:e97ce31e:::ROBJ2:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 6 dd 2ddbf8f5 od f8e11918 alloc_hint [[]0 0 0[]][)]" + err_strings[28]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 3:f2a5b2a4:::ROBJ3:head : missing" + err_strings[29]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:f4981d31:::ROBJ4:head : omap_digest 0xd7178dfe != omap_digest 0xe2d46ea4 from shard 1, omap_digest 0xd7178dfe != omap_digest 0xe2d46ea4 from auth oi 3:f4981d31:::ROBJ4:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 12 dd 2ddbf8f5 od e2d46ea4 alloc_hint [[]0 0 0[]][)]" + err_strings[30]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:f4bfd4d1:::ROBJ5:head : omap_digest 0x1a862a41 != 
omap_digest 0x6cac8f6 from shard 1" + err_strings[31]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:f4bfd4d1:::ROBJ5:head : omap_digest 0x6cac8f6 != omap_digest 0x1a862a41 from auth oi 3:f4bfd4d1:::ROBJ5:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 15 dd 2ddbf8f5 od 1a862a41 alloc_hint [[]0 0 0[]][)]" + err_strings[32]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:ffdb2004:::ROBJ9:head : candidate size 3 info size 7 mismatch" + err_strings[33]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:ffdb2004:::ROBJ9:head : object info inconsistent " + err_strings[34]="log_channel[(]cluster[)] log [[]ERR[]] : deep-scrub [0-9]*[.]0 3:c0c86b1d:::ROBJ14:head : no '_' attr" + err_strings[35]="log_channel[(]cluster[)] log [[]ERR[]] : deep-scrub [0-9]*[.]0 3:5c7b2c47:::ROBJ16:head : can't decode 'snapset' attr buffer::malformed_input: .* no longer understand old encoding version 3 < 97" + err_strings[36]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 deep-scrub : stat mismatch, got 18/18 objects, 0/0 clones, 17/18 dirty, 17/18 omap, 0/0 pinned, 0/0 hit_set_archive, 0/0 whiteouts, 115/116 bytes, 0/0 hit_set_archive bytes." + err_strings[37]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 deep-scrub 1 missing, 11 inconsistent objects" + err_strings[38]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 deep-scrub 35 errors" + + for err_string in "${err_strings[@]}" + do + if ! grep -q "$err_string" $dir/osd.${primary}.log + then + echo "Missing log message '$err_string'" + ERRORS=$(expr $ERRORS + 1) + fi + done + rados list-inconsistent-pg $poolname > $dir/json || return 1 # Check pg count test $(jq '. | length' $dir/json) = "1" || return 1 @@ -2922,6 +3006,12 @@ EOF diff -q $dir/new.ROBJ18 $dir/robj18.out || return 1 rm -f $dir/new.ROBJ18 $dir/robj18.out || return 1 + if [ $ERRORS != "0" ]; + then + echo "TEST FAILED WITH $ERRORS ERRORS" + return 1 + fi + rados rmpool $poolname $poolname --yes-i-really-really-mean-it teardown $dir || return 1 } @@ -5213,6 +5303,7 @@ function TEST_corrupt_snapset_scrub_rep() { done local pg=$(get_pg $poolname ROBJ0) + local primary=$(get_primary $poolname ROBJ0) for i in $(seq 1 $total_objs) ; do objname=ROBJ${i} @@ -5446,6 +5537,30 @@ EOF jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1 fi + ERRORS=0 + declare -a err_strings + err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid [0-9]*:.*:::ROBJ1:head : snapset inconsistent" + err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid [0-9]*:.*:::ROBJ2:head : snapset inconsistent" + err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 [0-9]*:.*:::ROBJ1:1 : is an unexpected clone" + err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub : stat mismatch, got 3/4 objects, 1/2 clones, 3/4 dirty, 3/4 omap, 0/0 pinned, 0/0 hit_set_archive, 0/0 whiteouts, 49/56 bytes, 0/0 hit_set_archive bytes." + err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 0 missing, 2 inconsistent objects" + err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 5 errors" + err_strings[6]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 [0-9]*:.*:::ROBJ1:head : snapset.head_exists=false, but head exists" + for err_string in "${err_strings[@]}" + do + if ! 
grep -q "$err_string" $dir/osd.${primary}.log + then + echo "Missing log message '$err_string'" + ERRORS=$(expr $ERRORS + 1) + fi + done + + if [ $ERRORS != "0" ]; + then + echo "TEST FAILED WITH $ERRORS ERRORS" + return 1 + fi + rados rmpool $poolname $poolname --yes-i-really-really-mean-it teardown $dir || return 1 } diff --git a/ceph/qa/standalone/scrub/osd-scrub-snaps.sh b/ceph/qa/standalone/scrub/osd-scrub-snaps.sh index 3370332de..c46eae292 100755 --- a/ceph/qa/standalone/scrub/osd-scrub-snaps.sh +++ b/ceph/qa/standalone/scrub/osd-scrub-snaps.sh @@ -728,30 +728,30 @@ EOF kill_daemons $dir || return 1 declare -a err_strings - err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj10:.* is missing in clone_overlap" - err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:7 no '_' attr" - err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:7 is an unexpected clone" - err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:4 on disk size [(]4608[)] does not match object info size [(]512[)] adjusted for ondisk to [(]512[)]" - err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head expected clone .*:::obj5:2" - err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head expected clone .*:::obj5:1" - err_strings[6]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj5:head 2 missing clone[(]s[)]" - err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj12:head snapset.head_exists=false, but head exists" - err_strings[8]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj8:head snaps.seq not set" - err_strings[9]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj7:head snapset.head_exists=false, but head exists" - err_strings[10]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj7:1 is an unexpected clone" - err_strings[11]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj3:head on disk size [(]3840[)] does not match object info size [(]768[)] adjusted for ondisk to [(]768[)]" - err_strings[12]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj6:1 is an unexpected clone" - err_strings[13]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:head no 'snapset' attr" - err_strings[14]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:7 clone ignored due to missing snapset" - err_strings[15]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:4 clone ignored due to missing snapset" - err_strings[16]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj4:head expected clone .*:::obj4:7" - err_strings[17]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj4:head 1 missing clone[(]s[)]" - err_strings[18]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj1:1 is an unexpected clone" - err_strings[19]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj9:1 is missing in clone_size" - err_strings[20]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj11:1 is an unexpected clone" - err_strings[21]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj14:1 size 1032 != clone_size 1033" + err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj10:.* : is missing in clone_overlap" + err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : scrub 
[0-9]*[.]0 .*::obj5:7 : no '_' attr" + err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:7 : is an unexpected clone" + err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:4 : on disk size [(]4608[)] does not match object info size [(]512[)] adjusted for ondisk to [(]512[)]" + err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head : expected clone .*:::obj5:2" + err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head : expected clone .*:::obj5:1" + err_strings[6]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj5:head : 2 missing clone[(]s[)]" + err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj12:head : snapset.head_exists=false, but head exists" + err_strings[8]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj8:head : snaps.seq not set" + err_strings[9]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj7:head : snapset.head_exists=false, but head exists" + err_strings[10]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj7:1 : is an unexpected clone" + err_strings[11]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj3:head : on disk size [(]3840[)] does not match object info size [(]768[)] adjusted for ondisk to [(]768[)]" + err_strings[12]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj6:1 : is an unexpected clone" + err_strings[13]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:head : no 'snapset' attr" + err_strings[14]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:7 : clone ignored due to missing snapset" + err_strings[15]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:4 : clone ignored due to missing snapset" + err_strings[16]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj4:head : expected clone .*:::obj4:7" + err_strings[17]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj4:head : 1 missing clone[(]s[)]" + err_strings[18]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj1:1 : is an unexpected clone" + err_strings[19]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj9:1 : is missing in clone_size" + err_strings[20]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj11:1 : is an unexpected clone" + err_strings[21]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj14:1 : size 1032 != clone_size 1033" err_strings[22]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 22 errors" - err_strings[23]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj15:head can't decode 'snapset' attr buffer" + err_strings[23]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj15:head : can't decode 'snapset' attr buffer" for err_string in "${err_strings[@]}" do @@ -1220,13 +1220,13 @@ fi kill_daemons $dir || return 1 declare -a err_strings - err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] missing .*:::obj4:7" - err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1]: soid .*:::obj3:head size 3840 != size 768 from auth oi" - err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] missing .*:::obj5:1" - err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] missing .*:::obj5:2" - err_strings[4]="log_channel[(]cluster[)] log 
[[]ERR[]] : [0-9]*[.]0 shard [0-1]: soid .*:::obj5:4 size 4608 != size 512 from auth oi" - err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid .*:::obj5:7: failed to pick suitable object info" - err_strings[6]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] missing .*:::obj1:head" + err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj4:7 : missing" + err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] soid .*:::obj3:head : size 3840 != size 768 from auth oi" + err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj5:1 : missing" + err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj5:2 : missing" + err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] soid .*:::obj5:4 : size 4608 != size 512 from auth oi" + err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid .*:::obj5:7 : failed to pick suitable object info" + err_strings[6]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj1:head : missing" err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub ${scruberrors} errors" for err_string in "${err_strings[@]}" diff --git a/ceph/qa/suites/.qa b/ceph/qa/suites/.qa new file mode 120000 index 000000000..b870225aa --- /dev/null +++ b/ceph/qa/suites/.qa @@ -0,0 +1 @@ +../ \ No newline at end of file diff --git a/ceph/qa/suites/big/.qa b/ceph/qa/suites/big/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/big/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/big/rados-thrash/.qa b/ceph/qa/suites/big/rados-thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/big/rados-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/big/rados-thrash/ceph/.qa b/ceph/qa/suites/big/rados-thrash/ceph/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/big/rados-thrash/ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/big/rados-thrash/clusters/.qa b/ceph/qa/suites/big/rados-thrash/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/big/rados-thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/big/rados-thrash/thrashers/.qa b/ceph/qa/suites/big/rados-thrash/thrashers/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/big/rados-thrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/big/rados-thrash/workloads/.qa b/ceph/qa/suites/big/rados-thrash/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/big/rados-thrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/buildpackages/.qa b/ceph/qa/suites/buildpackages/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/buildpackages/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/buildpackages/any/.qa b/ceph/qa/suites/buildpackages/any/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/buildpackages/any/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/buildpackages/any/tasks/.qa 
b/ceph/qa/suites/buildpackages/any/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/buildpackages/any/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/buildpackages/tests/.qa b/ceph/qa/suites/buildpackages/tests/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/buildpackages/tests/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/buildpackages/tests/tasks/.qa b/ceph/qa/suites/buildpackages/tests/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/buildpackages/tests/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/ceph-ansible/.qa b/ceph/qa/suites/ceph-ansible/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/ceph-ansible/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/ceph-ansible/smoke/.qa b/ceph/qa/suites/ceph-ansible/smoke/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/ceph-ansible/smoke/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/ceph-ansible/smoke/basic/.qa b/ceph/qa/suites/ceph-ansible/smoke/basic/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/ceph-ansible/smoke/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/.qa b/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml~10fc85089c... qa_tests - Added options to use both cases: mon.a and installer.0 b/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml~10fc85089c... qa_tests - Added options to use both cases: mon.a and installer.0 deleted file mode 100644 index 86dd366b9..000000000 --- a/ceph/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml~10fc85089c... 
qa_tests - Added options to use both cases: mon.a and installer.0 +++ /dev/null @@ -1,12 +0,0 @@ -meta: -- desc: | - 3-node cluster - install and run ceph-ansible on a mon.a node alone with ceph -roles: -- [mon.a, mds.a, osd.0, osd.1, osd.2] -- [mon.b, mgr.x, osd.3, osd.4, osd.5] -- [mon.c, mgr.y, osd.6, osd.7, osd.8, client.0] -openstack: -- volumes: # attached to each instance - count: 3 - size: 10 # GB diff --git a/ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/.qa b/ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/ceph-ansible/smoke/basic/1-distros/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/.qa b/ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml b/ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml index 36d0a07d9..5ca4bd609 100644 --- a/ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml +++ b/ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml @@ -4,6 +4,7 @@ meta: overrides: ceph_ansible: vars: + branch: stable-3.2 ceph_conf_overrides: global: osd default pool size: 2 diff --git a/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/.qa b/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml b/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml index 604e757ad..b26c73929 100644 --- a/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml +++ b/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml @@ -3,6 +3,7 @@ meta: overrides: ceph_ansible: + branch: stable-3.2 vars: osd_objectstore: bluestore dmcrypt: True diff --git a/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml b/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml index 4bbd1c7c5..94bdf5fcb 100644 --- a/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml +++ b/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml @@ -3,5 +3,6 @@ meta: overrides: ceph_ansible: + branch: stable-3.2 vars: dmcrypt: False diff --git a/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml b/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml index 12d63d325..d60c4480b 100644 --- a/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml +++ b/ceph/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml @@ -3,5 +3,6 @@ meta: overrides: ceph_ansible: + branch: stable-3.2 vars: dmcrypt: True diff --git a/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/.qa b/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/ceph-ansible/smoke/basic/4-tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/ceph-deploy/.qa b/ceph/qa/suites/ceph-deploy/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/ceph-deploy/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of 
file
diff --git a/ceph/qa/suites/ceph-deploy/basic/.qa b/ceph/qa/suites/ceph-deploy/basic/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-deploy/basic/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/.qa b/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-deploy/basic/ceph-deploy-overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-deploy/basic/config_options/.qa b/ceph/qa/suites/ceph-deploy/basic/config_options/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-deploy/basic/config_options/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-deploy/basic/objectstore/.qa b/ceph/qa/suites/ceph-deploy/basic/objectstore/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-deploy/basic/objectstore/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-deploy/basic/python_versions/.qa b/ceph/qa/suites/ceph-deploy/basic/python_versions/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-deploy/basic/python_versions/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-deploy/basic/tasks/.qa b/ceph/qa/suites/ceph-deploy/basic/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-deploy/basic/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-deploy/ceph-volume/.qa b/ceph/qa/suites/ceph-deploy/ceph-volume/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-deploy/ceph-volume/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-deploy/ceph-volume/cluster/.qa b/ceph/qa/suites/ceph-deploy/ceph-volume/cluster/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-deploy/ceph-volume/cluster/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-deploy/ceph-volume/config/.qa b/ceph/qa/suites/ceph-deploy/ceph-volume/config/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-deploy/ceph-volume/config/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-deploy/ceph-volume/distros/.qa b/ceph/qa/suites/ceph-deploy/ceph-volume/distros/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-deploy/ceph-volume/distros/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-deploy/ceph-volume/tasks/.qa b/ceph/qa/suites/ceph-deploy/ceph-volume/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-deploy/ceph-volume/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-disk/.qa b/ceph/qa/suites/ceph-disk/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-disk/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-disk/basic/.qa b/ceph/qa/suites/ceph-disk/basic/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-disk/basic/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/ceph-disk/basic/tasks/.qa b/ceph/qa/suites/ceph-disk/basic/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/ceph-disk/basic/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/dummy/.qa b/ceph/qa/suites/dummy/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/dummy/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/dummy/all/.qa b/ceph/qa/suites/dummy/all/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/dummy/all/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/experimental/.qa b/ceph/qa/suites/experimental/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/experimental/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/experimental/multimds/.qa b/ceph/qa/suites/experimental/multimds/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/experimental/multimds/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/experimental/multimds/clusters/.qa b/ceph/qa/suites/experimental/multimds/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/experimental/multimds/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/experimental/multimds/tasks/.qa b/ceph/qa/suites/experimental/multimds/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/experimental/multimds/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/.qa b/ceph/qa/suites/fs/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/32bits/.qa b/ceph/qa/suites/fs/32bits/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/32bits/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/32bits/clusters/.qa b/ceph/qa/suites/fs/32bits/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/32bits/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/32bits/conf b/ceph/qa/suites/fs/32bits/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/fs/32bits/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/32bits/mount/.qa b/ceph/qa/suites/fs/32bits/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/32bits/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/32bits/overrides/.qa b/ceph/qa/suites/fs/32bits/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/32bits/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/32bits/overrides/debug.yaml b/ceph/qa/suites/fs/32bits/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/fs/32bits/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/32bits/tasks/.qa b/ceph/qa/suites/fs/32bits/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/32bits/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_functional/.qa b/ceph/qa/suites/fs/basic_functional/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_functional/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_functional/clusters/.qa b/ceph/qa/suites/fs/basic_functional/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_functional/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_functional/conf b/ceph/qa/suites/fs/basic_functional/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_functional/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_functional/mount/.qa b/ceph/qa/suites/fs/basic_functional/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_functional/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_functional/objectstore/.qa b/ceph/qa/suites/fs/basic_functional/objectstore/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_functional/objectstore/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_functional/overrides/.qa b/ceph/qa/suites/fs/basic_functional/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_functional/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_functional/overrides/debug.yaml b/ceph/qa/suites/fs/basic_functional/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/fs/basic_functional/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_functional/tasks/.qa b/ceph/qa/suites/fs/basic_functional/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_functional/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml b/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml
index 8801454db..7ac8714c5 100644
--- a/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml
+++ b/ceph/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml
@@ -1,6 +1,5 @@
 tasks:
 - workunit:
-    timeout: 6h
     clients:
       all:
         - fs/quota
diff --git a/ceph/qa/suites/fs/basic_functional/tasks/volume-client/% b/ceph/qa/suites/fs/basic_functional/tasks/volume-client/%
new file mode 100644
index 000000000..e69de29bb
diff --git a/ceph/qa/suites/fs/basic_functional/tasks/volume-client/py/2.yaml b/ceph/qa/suites/fs/basic_functional/tasks/volume-client/py/2.yaml
new file mode 100644
index 000000000..e3924dd6b
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_functional/tasks/volume-client/py/2.yaml
@@ -0,0 +1,2 @@
+overrides:
+  python: python2
diff --git a/ceph/qa/suites/fs/basic_functional/tasks/volume-client/py/3.yaml b/ceph/qa/suites/fs/basic_functional/tasks/volume-client/py/3.yaml
new file mode 100644
index 000000000..9bd4a9226
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_functional/tasks/volume-client/py/3.yaml
@@ -0,0 +1,2 @@
+overrides:
+  python: python3
diff --git a/ceph/qa/suites/fs/basic_functional/tasks/volume-client.yaml b/ceph/qa/suites/fs/basic_functional/tasks/volume-client/test.yaml
similarity index 100%
rename from ceph/qa/suites/fs/basic_functional/tasks/volume-client.yaml
rename to ceph/qa/suites/fs/basic_functional/tasks/volume-client/test.yaml
diff --git a/ceph/qa/suites/fs/basic_workload/.qa b/ceph/qa/suites/fs/basic_workload/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_workload/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_workload/clusters/.qa b/ceph/qa/suites/fs/basic_workload/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_workload/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_workload/conf b/ceph/qa/suites/fs/basic_workload/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_workload/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_workload/inline/.qa b/ceph/qa/suites/fs/basic_workload/inline/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_workload/inline/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_workload/mount/.qa b/ceph/qa/suites/fs/basic_workload/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_workload/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_workload/omap_limit/.qa b/ceph/qa/suites/fs/basic_workload/omap_limit/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_workload/omap_limit/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_workload/overrides/.qa b/ceph/qa/suites/fs/basic_workload/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_workload/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_workload/overrides/debug.yaml b/ceph/qa/suites/fs/basic_workload/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/fs/basic_workload/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_workload/tasks/.qa b/ceph/qa/suites/fs/basic_workload/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/basic_workload/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml b/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml
index fac769ed5..d6c8140a4 100644
--- a/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml
+++ b/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml
@@ -4,7 +4,6 @@ tasks:
       mds:
         - "mds.dir_split"
 - workunit:
-    timeout: 6h
     clients:
       all:
         - fs/misc
diff --git a/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml b/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml
index bfed71c1b..ea018c990 100644
--- a/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml
+++ b/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml
@@ -4,7 +4,6 @@ tasks:
       mds:
         - "mds.dir_split"
 - workunit:
-    timeout: 6h
     clients:
       all:
         - fs/norstats
diff --git a/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml b/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml
index 7cb0b0f6e..a1e2ada19 100644
--- a/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml
+++ b/ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml
@@ -2,13 +2,8 @@ overrides:
   ceph:
     conf:
       client:
-        debug ms: 1
-        debug client: 20
         fuse set user groups: true
         fuse default permissions: false
-      mds:
-        debug ms: 1
-        debug mds: 20
 tasks:
 - workunit:
     clients:
diff --git a/ceph/qa/suites/fs/bugs/.qa b/ceph/qa/suites/fs/bugs/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/bugs/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/bugs/client_trim_caps/.qa b/ceph/qa/suites/fs/bugs/client_trim_caps/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/bugs/client_trim_caps/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/bugs/client_trim_caps/clusters/.qa b/ceph/qa/suites/fs/bugs/client_trim_caps/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/bugs/client_trim_caps/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/bugs/client_trim_caps/objectstore/.qa b/ceph/qa/suites/fs/bugs/client_trim_caps/objectstore/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/bugs/client_trim_caps/objectstore/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/bugs/client_trim_caps/overrides/.qa b/ceph/qa/suites/fs/bugs/client_trim_caps/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/bugs/client_trim_caps/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/bugs/client_trim_caps/overrides/debug.yaml b/ceph/qa/suites/fs/bugs/client_trim_caps/overrides/debug.yaml
deleted file mode 120000
index 4fdb9dd12..000000000
--- a/ceph/qa/suites/fs/bugs/client_trim_caps/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/bugs/client_trim_caps/tasks/.qa b/ceph/qa/suites/fs/bugs/client_trim_caps/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/bugs/client_trim_caps/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/bugs/conf b/ceph/qa/suites/fs/bugs/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/fs/bugs/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multiclient/.qa b/ceph/qa/suites/fs/multiclient/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/multiclient/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multiclient/clusters/.qa b/ceph/qa/suites/fs/multiclient/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/multiclient/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multiclient/conf b/ceph/qa/suites/fs/multiclient/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/fs/multiclient/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multiclient/mount/.qa b/ceph/qa/suites/fs/multiclient/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/multiclient/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multiclient/overrides/.qa b/ceph/qa/suites/fs/multiclient/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/multiclient/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multiclient/overrides/debug.yaml b/ceph/qa/suites/fs/multiclient/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/fs/multiclient/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multiclient/tasks/.qa b/ceph/qa/suites/fs/multiclient/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/multiclient/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml b/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml
index cb84e6483..90ae38589 100644
--- a/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml
+++ b/ceph/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml
@@ -8,3 +8,4 @@ overrides:
     log-whitelist:
       - evicting unresponsive client
       - POOL_APP_NOT_ENABLED
+      - has not responded to cap revoke by MDS for over
diff --git a/ceph/qa/suites/fs/multifs/.qa b/ceph/qa/suites/fs/multifs/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/multifs/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multifs/clusters/.qa b/ceph/qa/suites/fs/multifs/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/multifs/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multifs/clusters/1a3s-mds-2c-client.yaml b/ceph/qa/suites/fs/multifs/clusters/1a3s-mds-2c-client.yaml
new file mode 120000
index 000000000..c190ea92f
--- /dev/null
+++ b/ceph/qa/suites/fs/multifs/clusters/1a3s-mds-2c-client.yaml
@@ -0,0 +1 @@
+.qa/cephfs/clusters/1a3s-mds-2c-client.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multifs/conf b/ceph/qa/suites/fs/multifs/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/fs/multifs/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multifs/mount/.qa b/ceph/qa/suites/fs/multifs/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/multifs/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multifs/overrides/.qa b/ceph/qa/suites/fs/multifs/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/multifs/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multifs/overrides/debug.yaml b/ceph/qa/suites/fs/multifs/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/fs/multifs/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/multifs/tasks/.qa b/ceph/qa/suites/fs/multifs/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/multifs/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/permission/.qa b/ceph/qa/suites/fs/permission/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/permission/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/permission/clusters/.qa b/ceph/qa/suites/fs/permission/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/permission/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/permission/conf b/ceph/qa/suites/fs/permission/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/fs/permission/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/permission/mount/.qa b/ceph/qa/suites/fs/permission/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/permission/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/permission/overrides/.qa b/ceph/qa/suites/fs/permission/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/permission/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/permission/overrides/debug.yaml b/ceph/qa/suites/fs/permission/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/fs/permission/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/permission/tasks/.qa b/ceph/qa/suites/fs/permission/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/permission/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/snaps/.qa b/ceph/qa/suites/fs/snaps/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/snaps/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/snaps/clusters/.qa b/ceph/qa/suites/fs/snaps/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/snaps/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/snaps/conf b/ceph/qa/suites/fs/snaps/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/fs/snaps/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/snaps/mount/.qa b/ceph/qa/suites/fs/snaps/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/snaps/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/snaps/overrides/.qa b/ceph/qa/suites/fs/snaps/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/snaps/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/snaps/overrides/debug.yaml b/ceph/qa/suites/fs/snaps/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/fs/snaps/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/snaps/tasks/.qa b/ceph/qa/suites/fs/snaps/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/snaps/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/thrash/.qa b/ceph/qa/suites/fs/thrash/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/thrash/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/thrash/ceph-thrash/.qa b/ceph/qa/suites/fs/thrash/ceph-thrash/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/thrash/ceph-thrash/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/thrash/clusters/.qa b/ceph/qa/suites/fs/thrash/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/thrash/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/thrash/conf b/ceph/qa/suites/fs/thrash/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/fs/thrash/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/thrash/mount/.qa b/ceph/qa/suites/fs/thrash/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/thrash/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/thrash/msgr-failures/.qa b/ceph/qa/suites/fs/thrash/msgr-failures/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/thrash/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/thrash/overrides/.qa b/ceph/qa/suites/fs/thrash/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/thrash/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/thrash/overrides/debug.yaml b/ceph/qa/suites/fs/thrash/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/fs/thrash/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/thrash/tasks/.qa b/ceph/qa/suites/fs/thrash/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/thrash/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/traceless/.qa b/ceph/qa/suites/fs/traceless/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/traceless/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/traceless/clusters/.qa b/ceph/qa/suites/fs/traceless/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/traceless/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/traceless/conf b/ceph/qa/suites/fs/traceless/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/fs/traceless/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/traceless/mount/.qa b/ceph/qa/suites/fs/traceless/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/traceless/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/traceless/overrides/.qa b/ceph/qa/suites/fs/traceless/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/traceless/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/traceless/overrides/debug.yaml b/ceph/qa/suites/fs/traceless/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/fs/traceless/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/traceless/tasks/.qa b/ceph/qa/suites/fs/traceless/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/traceless/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/traceless/traceless/.qa b/ceph/qa/suites/fs/traceless/traceless/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/traceless/traceless/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/verify/.qa b/ceph/qa/suites/fs/verify/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/verify/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/verify/clusters/.qa b/ceph/qa/suites/fs/verify/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/verify/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/verify/conf b/ceph/qa/suites/fs/verify/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/fs/verify/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/verify/mount/.qa b/ceph/qa/suites/fs/verify/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/verify/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/verify/overrides/.qa b/ceph/qa/suites/fs/verify/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/verify/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/verify/overrides/debug.yaml b/ceph/qa/suites/fs/verify/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/fs/verify/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/verify/tasks/.qa b/ceph/qa/suites/fs/verify/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/verify/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/fs/verify/validater/.qa b/ceph/qa/suites/fs/verify/validater/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/fs/verify/validater/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/hadoop/.qa b/ceph/qa/suites/hadoop/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/hadoop/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/hadoop/basic/.qa b/ceph/qa/suites/hadoop/basic/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/hadoop/basic/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/hadoop/basic/clusters/.qa b/ceph/qa/suites/hadoop/basic/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/hadoop/basic/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/hadoop/basic/tasks/.qa b/ceph/qa/suites/hadoop/basic/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/hadoop/basic/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/.qa b/ceph/qa/suites/kcephfs/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/cephfs/.qa b/ceph/qa/suites/kcephfs/cephfs/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/cephfs/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/cephfs/clusters/.qa b/ceph/qa/suites/kcephfs/cephfs/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/cephfs/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/cephfs/conf b/ceph/qa/suites/kcephfs/cephfs/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/cephfs/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/cephfs/inline/.qa b/ceph/qa/suites/kcephfs/cephfs/inline/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/cephfs/inline/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/cephfs/overrides/.qa b/ceph/qa/suites/kcephfs/cephfs/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/cephfs/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/cephfs/overrides/debug.yaml b/ceph/qa/suites/kcephfs/cephfs/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/kcephfs/cephfs/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/mixed-clients/conf.yaml b/ceph/qa/suites/kcephfs/cephfs/overrides/ms-die-on-skipped.yaml
similarity index 72%
rename from ceph/qa/suites/kcephfs/mixed-clients/conf.yaml
rename to ceph/qa/suites/kcephfs/cephfs/overrides/ms-die-on-skipped.yaml
index 75b855838..30da870b2 100644
--- a/ceph/qa/suites/kcephfs/mixed-clients/conf.yaml
+++ b/ceph/qa/suites/kcephfs/cephfs/overrides/ms-die-on-skipped.yaml
@@ -3,5 +3,3 @@ overrides:
     conf:
       global:
         ms die on skipped message: false
-      mds:
-        debug mds: 20
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/cephfs/tasks/.qa b/ceph/qa/suites/kcephfs/cephfs/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/cephfs/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/mixed-clients/.qa b/ceph/qa/suites/kcephfs/mixed-clients/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/mixed-clients/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/mixed-clients/clusters/.qa b/ceph/qa/suites/kcephfs/mixed-clients/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/mixed-clients/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/mixed-clients/conf b/ceph/qa/suites/kcephfs/mixed-clients/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/mixed-clients/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/mixed-clients/overrides/.qa b/ceph/qa/suites/kcephfs/mixed-clients/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/mixed-clients/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/mixed-clients/overrides/debug.yaml b/ceph/qa/suites/kcephfs/mixed-clients/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/kcephfs/mixed-clients/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/thrash/conf.yaml b/ceph/qa/suites/kcephfs/mixed-clients/overrides/ms-die-on-skipped.yaml
similarity index 72%
rename from ceph/qa/suites/kcephfs/thrash/conf.yaml
rename to ceph/qa/suites/kcephfs/mixed-clients/overrides/ms-die-on-skipped.yaml
index 75b855838..30da870b2 100644
--- a/ceph/qa/suites/kcephfs/thrash/conf.yaml
+++ b/ceph/qa/suites/kcephfs/mixed-clients/overrides/ms-die-on-skipped.yaml
@@ -3,5 +3,3 @@ overrides:
     conf:
       global:
         ms die on skipped message: false
-      mds:
-        debug mds: 20
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/mixed-clients/tasks/.qa b/ceph/qa/suites/kcephfs/mixed-clients/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/mixed-clients/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/recovery/.qa b/ceph/qa/suites/kcephfs/recovery/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/recovery/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/recovery/clusters/.qa b/ceph/qa/suites/kcephfs/recovery/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/recovery/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/recovery/conf b/ceph/qa/suites/kcephfs/recovery/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/recovery/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/recovery/debug/mds_client.yaml b/ceph/qa/suites/kcephfs/recovery/debug/mds_client.yaml
deleted file mode 100644
index 76cc4d868..000000000
--- a/ceph/qa/suites/kcephfs/recovery/debug/mds_client.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-overrides:
-  ceph:
-    conf:
-      mds:
-        debug ms: 1
-        debug mds: 20
-      client.0:
-        debug ms: 1
-        debug client: 20
-      client.1:
-        debug ms: 1
-        debug client: 20
diff --git a/ceph/qa/suites/kcephfs/recovery/dirfrag/frag_enable.yaml b/ceph/qa/suites/kcephfs/recovery/dirfrag/frag_enable.yaml
deleted file mode 100644
index 9913fa1df..000000000
--- a/ceph/qa/suites/kcephfs/recovery/dirfrag/frag_enable.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-
-overrides:
-  ceph:
-    conf:
-      mds:
-        mds bal frag: true
-        mds bal fragment size max: 10000
-        mds bal split size: 100
-        mds bal merge size: 5
-        mds bal split bits: 3
-
diff --git a/ceph/qa/suites/kcephfs/recovery/mounts/.qa b/ceph/qa/suites/kcephfs/recovery/mounts/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/recovery/mounts/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/recovery/overrides/.qa b/ceph/qa/suites/kcephfs/recovery/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/recovery/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/recovery/overrides/debug.yaml b/ceph/qa/suites/kcephfs/recovery/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/kcephfs/recovery/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/recovery/tasks/.qa b/ceph/qa/suites/kcephfs/recovery/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/recovery/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/thrash/.qa b/ceph/qa/suites/kcephfs/thrash/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/thrash/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/thrash/clusters/.qa b/ceph/qa/suites/kcephfs/thrash/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/thrash/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/thrash/conf b/ceph/qa/suites/kcephfs/thrash/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/thrash/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/thrash/overrides/.qa b/ceph/qa/suites/kcephfs/thrash/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/thrash/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/thrash/overrides/debug.yaml b/ceph/qa/suites/kcephfs/thrash/overrides/debug.yaml
deleted file mode 120000
index 9bc8eb1e7..000000000
--- a/ceph/qa/suites/kcephfs/thrash/overrides/debug.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../cephfs/overrides/debug.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/cephfs/conf.yaml b/ceph/qa/suites/kcephfs/thrash/overrides/ms-die-on-skipped.yaml
similarity index 71%
rename from ceph/qa/suites/kcephfs/cephfs/conf.yaml
rename to ceph/qa/suites/kcephfs/thrash/overrides/ms-die-on-skipped.yaml
index b3ef40468..30da870b2 100644
--- a/ceph/qa/suites/kcephfs/cephfs/conf.yaml
+++ b/ceph/qa/suites/kcephfs/thrash/overrides/ms-die-on-skipped.yaml
@@ -3,5 +3,3 @@ overrides:
     conf:
       global:
         ms die on skipped message: false
-      mds:
-        debug mds: 20
diff --git a/ceph/qa/suites/kcephfs/thrash/thrashers/.qa b/ceph/qa/suites/kcephfs/thrash/thrashers/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/thrash/thrashers/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/kcephfs/thrash/workloads/.qa b/ceph/qa/suites/kcephfs/thrash/workloads/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/kcephfs/thrash/workloads/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/knfs/basic/ceph/base.yaml b/ceph/qa/suites/knfs/basic/ceph/base.yaml
deleted file mode 100644
index 7c2f0fcd5..000000000
--- a/ceph/qa/suites/knfs/basic/ceph/base.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-
-overrides:
-  ceph:
-    conf:
-      global:
-        ms die on skipped message: false
-
-tasks:
-- install:
-- ceph:
-- kclient: [client.0]
-- knfsd:
-    client.0:
-      options: [rw,no_root_squash,async]
diff --git a/ceph/qa/suites/knfs/basic/clusters/extra-client.yaml b/ceph/qa/suites/knfs/basic/clusters/extra-client.yaml
deleted file mode 120000
index 1582e3089..000000000
--- a/ceph/qa/suites/knfs/basic/clusters/extra-client.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../clusters/extra-client.yaml
\ No newline at end of file
diff --git a/ceph/qa/suites/knfs/basic/mount/v3.yaml b/ceph/qa/suites/knfs/basic/mount/v3.yaml
deleted file mode 100644
index 1b6111924..000000000
--- a/ceph/qa/suites/knfs/basic/mount/v3.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-tasks:
-- nfs:
-    client.1:
-        server: client.0
-        options: [rw,hard,intr,nfsvers=3]
diff --git a/ceph/qa/suites/knfs/basic/mount/v4.yaml b/ceph/qa/suites/knfs/basic/mount/v4.yaml
deleted file mode 100644
index 88405666b..000000000
--- a/ceph/qa/suites/knfs/basic/mount/v4.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-tasks:
-- nfs:
-    client.1:
-        server: client.0
-        options: [rw,hard,intr,nfsvers=4]
diff --git a/ceph/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml b/ceph/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml
deleted file mode 100644
index b9c0a5e05..000000000
--- a/ceph/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-tasks:
-- workunit:
-    timeout: 6h
-    clients:
-        client.1:
-            - kernel_untar_build.sh
diff --git a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml b/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml
deleted file mode 100644
index 135c4a740..000000000
--- a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-tasks:
-- workunit:
-    clients:
-        client.1:
-            - fs/misc/chmod.sh
-            - fs/misc/i_complete_vs_rename.sh
-            - fs/misc/trivial_sync.sh
-            #- fs/misc/multiple_rsync.sh
-            #- fs/misc/xattrs.sh
-# Once we can run multiple_rsync.sh and xattrs.sh we can change to this
-#            - misc
diff --git a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml b/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml
deleted file mode 100644
index e554a3d9a..000000000
--- a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-tasks:
-- workunit:
-    clients:
-        client.1:
-            - suites/blogbench.sh
diff --git a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml b/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml
deleted file mode 100644
index 1da1b768d..000000000
--- a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-tasks:
-- workunit:
-    clients:
-        client.1:
-            - suites/dbench-short.sh
diff --git a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml b/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml
deleted file mode 100644
index 3090f91ea..000000000
--- a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-overrides:
-  ceph:
-    conf:
-      osd:
-        filestore flush min: 0
-tasks:
-- workunit:
-    clients:
-        client.1:
-            - suites/ffsb.sh
diff --git a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml b/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml
deleted file mode 100644
index bbe7b7a40..000000000
--- a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-tasks:
-- workunit:
-    clients:
-        client.1:
-            - suites/fsstress.sh
diff --git a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml b/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml
deleted file mode 100644
index 7c3eec2ff..000000000
--- a/ceph/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-tasks:
-- workunit:
-    clients:
-        client.1:
-            - suites/iozone.sh
diff --git a/ceph/qa/suites/krbd/.qa b/ceph/qa/suites/krbd/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/rbd-nomount/.qa b/ceph/qa/suites/krbd/rbd-nomount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/rbd-nomount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/rbd-nomount/clusters/.qa b/ceph/qa/suites/krbd/rbd-nomount/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/rbd-nomount/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/rbd-nomount/install/.qa b/ceph/qa/suites/krbd/rbd-nomount/install/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/rbd-nomount/install/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/.qa b/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/rbd-nomount/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/rbd-nomount/tasks/.qa b/ceph/qa/suites/krbd/rbd-nomount/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/rbd-nomount/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/rbd/.qa b/ceph/qa/suites/krbd/rbd/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/rbd/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/rbd/clusters/.qa b/ceph/qa/suites/krbd/rbd/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/rbd/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/rbd/msgr-failures/.qa b/ceph/qa/suites/krbd/rbd/msgr-failures/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/rbd/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/rbd/tasks/.qa b/ceph/qa/suites/krbd/rbd/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/rbd/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/singleton/.qa b/ceph/qa/suites/krbd/singleton/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/singleton/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/singleton/msgr-failures/.qa b/ceph/qa/suites/krbd/singleton/msgr-failures/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/singleton/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/singleton/tasks/.qa b/ceph/qa/suites/krbd/singleton/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/singleton/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/thrash/.qa b/ceph/qa/suites/krbd/thrash/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/thrash/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/thrash/ceph/.qa b/ceph/qa/suites/krbd/thrash/ceph/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/thrash/ceph/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/thrash/clusters/.qa b/ceph/qa/suites/krbd/thrash/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/thrash/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/thrash/thrashers/.qa b/ceph/qa/suites/krbd/thrash/thrashers/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/thrash/thrashers/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/thrash/workloads/.qa b/ceph/qa/suites/krbd/thrash/workloads/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/thrash/workloads/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/unmap/.qa b/ceph/qa/suites/krbd/unmap/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/unmap/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/unmap/ceph/.qa b/ceph/qa/suites/krbd/unmap/ceph/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/unmap/ceph/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/unmap/clusters/.qa b/ceph/qa/suites/krbd/unmap/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/unmap/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/unmap/kernels/.qa b/ceph/qa/suites/krbd/unmap/kernels/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/unmap/kernels/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/unmap/tasks/.qa b/ceph/qa/suites/krbd/unmap/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/unmap/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/unmap/tasks/unmap.yaml b/ceph/qa/suites/krbd/unmap/tasks/unmap.yaml
index 05cc5f3ae..435061b45 100644
--- a/ceph/qa/suites/krbd/unmap/tasks/unmap.yaml
+++ b/ceph/qa/suites/krbd/unmap/tasks/unmap.yaml
@@ -2,4 +2,4 @@ tasks:
 - cram:
     clients:
       client.0:
-        - http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=src/test/cli-integration/rbd/unmap.t
+        - src/test/cli-integration/rbd/unmap.t
diff --git a/ceph/qa/suites/krbd/wac/.qa b/ceph/qa/suites/krbd/wac/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/wac/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/wac/sysfs/.qa b/ceph/qa/suites/krbd/wac/sysfs/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/wac/sysfs/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/wac/sysfs/ceph/.qa b/ceph/qa/suites/krbd/wac/sysfs/ceph/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/wac/sysfs/ceph/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/wac/sysfs/clusters/.qa b/ceph/qa/suites/krbd/wac/sysfs/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/wac/sysfs/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/wac/sysfs/tasks/.qa b/ceph/qa/suites/krbd/wac/sysfs/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/wac/sysfs/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/wac/wac/.qa b/ceph/qa/suites/krbd/wac/wac/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/wac/wac/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/wac/wac/ceph/.qa b/ceph/qa/suites/krbd/wac/wac/ceph/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/wac/wac/ceph/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/wac/wac/clusters/.qa b/ceph/qa/suites/krbd/wac/wac/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/wac/wac/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/wac/wac/tasks/.qa b/ceph/qa/suites/krbd/wac/wac/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/wac/wac/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/krbd/wac/wac/verify/.qa b/ceph/qa/suites/krbd/wac/wac/verify/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/krbd/wac/wac/verify/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/.qa b/ceph/qa/suites/marginal/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/basic/.qa b/ceph/qa/suites/marginal/basic/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/basic/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/basic/clusters/.qa b/ceph/qa/suites/marginal/basic/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/basic/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/basic/tasks/.qa b/ceph/qa/suites/marginal/basic/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/basic/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/fs-misc/.qa b/ceph/qa/suites/marginal/fs-misc/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/fs-misc/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/fs-misc/clusters/.qa b/ceph/qa/suites/marginal/fs-misc/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/fs-misc/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/fs-misc/tasks/.qa b/ceph/qa/suites/marginal/fs-misc/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/fs-misc/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/mds_restart/.qa b/ceph/qa/suites/marginal/mds_restart/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/mds_restart/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/mds_restart/clusters/.qa b/ceph/qa/suites/marginal/mds_restart/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/mds_restart/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/mds_restart/tasks/.qa b/ceph/qa/suites/marginal/mds_restart/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/mds_restart/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/multimds/.qa b/ceph/qa/suites/marginal/multimds/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/multimds/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/multimds/clusters/.qa b/ceph/qa/suites/marginal/multimds/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/multimds/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/multimds/mounts/.qa b/ceph/qa/suites/marginal/multimds/mounts/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/multimds/mounts/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/multimds/tasks/.qa b/ceph/qa/suites/marginal/multimds/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/multimds/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/marginal/multimds/thrash/.qa b/ceph/qa/suites/marginal/multimds/thrash/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/marginal/multimds/thrash/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/mixed-clients/.qa b/ceph/qa/suites/mixed-clients/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/mixed-clients/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/mixed-clients/basic/.qa b/ceph/qa/suites/mixed-clients/basic/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/mixed-clients/basic/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/mixed-clients/basic/clusters/.qa b/ceph/qa/suites/mixed-clients/basic/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/mixed-clients/basic/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/mixed-clients/basic/tasks/.qa b/ceph/qa/suites/mixed-clients/basic/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/mixed-clients/basic/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/.qa b/ceph/qa/suites/multimds/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/basic/.qa b/ceph/qa/suites/multimds/basic/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/basic/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/basic/clusters/.qa b/ceph/qa/suites/multimds/basic/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/basic/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/basic/conf b/ceph/qa/suites/multimds/basic/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/multimds/basic/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/basic/mount/.qa b/ceph/qa/suites/multimds/basic/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/basic/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/basic/overrides/.qa b/ceph/qa/suites/multimds/basic/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/basic/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/basic/q_check_counter/.qa b/ceph/qa/suites/multimds/basic/q_check_counter/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/basic/q_check_counter/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/basic/tasks/.qa b/ceph/qa/suites/multimds/basic/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/basic/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml b/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml
index 5d54f3da0..58866a271 100644
--- a/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml
+++ b/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml
@@ -1,6 +1,5 @@
 tasks:
 - workunit:
-    timeout: 6h
     clients:
       all:
         - fs/misc
diff --git a/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml b/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml
index 4833371df..3776131bd 100644
--- a/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml
+++ b/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml
@@ -1,6 +1,5 @@
 tasks:
 - workunit:
-    timeout: 6h
     clients:
       all:
         - fs/norstats
diff --git a/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml b/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml
index 7cb0b0f6e..a1e2ada19 100644
--- a/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml
+++ b/ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml
@@ -2,13 +2,8 @@ overrides:
   ceph:
     conf:
       client:
-        debug ms: 1
-        debug client: 20
         fuse set user groups: true
         fuse default permissions: false
-      mds:
-        debug ms: 1
-        debug mds: 20
 tasks:
 - workunit:
     clients:
diff --git a/ceph/qa/suites/multimds/thrash/.qa b/ceph/qa/suites/multimds/thrash/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/thrash/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/thrash/clusters/.qa b/ceph/qa/suites/multimds/thrash/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/thrash/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/thrash/conf b/ceph/qa/suites/multimds/thrash/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/multimds/thrash/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/thrash/mount/.qa b/ceph/qa/suites/multimds/thrash/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/thrash/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/thrash/overrides/.qa b/ceph/qa/suites/multimds/thrash/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/thrash/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/thrash/tasks/.qa b/ceph/qa/suites/multimds/thrash/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/thrash/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/verify/.qa b/ceph/qa/suites/multimds/verify/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/verify/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/verify/clusters/.qa b/ceph/qa/suites/multimds/verify/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/verify/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/verify/conf b/ceph/qa/suites/multimds/verify/conf
new file mode 120000
index 000000000..16e8cc44b
--- /dev/null
+++ b/ceph/qa/suites/multimds/verify/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/verify/mount/.qa b/ceph/qa/suites/multimds/verify/mount/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/verify/mount/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/multimds/verify/overrides/.qa b/ceph/qa/suites/multimds/verify/overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/multimds/verify/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/powercycle/.qa b/ceph/qa/suites/powercycle/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/powercycle/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/powercycle/osd/.qa b/ceph/qa/suites/powercycle/osd/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/powercycle/osd/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/powercycle/osd/clusters/.qa b/ceph/qa/suites/powercycle/osd/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/powercycle/osd/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/powercycle/osd/powercycle/.qa b/ceph/qa/suites/powercycle/osd/powercycle/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/powercycle/osd/powercycle/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/powercycle/osd/tasks/.qa b/ceph/qa/suites/powercycle/osd/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/powercycle/osd/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/rados/.qa b/ceph/qa/suites/rados/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/rados/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/rados/basic-luminous/.qa b/ceph/qa/suites/rados/basic-luminous/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/rados/basic-luminous/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml b/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml
index 39821ad86..00e85f9e5 100644
--- a/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml
+++ b/ceph/qa/suites/rados/basic-luminous/scrub_test.yaml
@@ -9,12 +9,13 @@ overrides:
       - 'deep-scrub [0-9]+ errors'
       - 'repair 0 missing, 1 inconsistent objects'
       - 'repair [0-9]+ errors, [0-9]+ fixed'
-      - 'shard [0-9]+ missing'
+      - 'shard [0-9]+ .* : missing'
       - 'deep-scrub 1 missing, 1 inconsistent objects'
       - 'does not match object info size'
      - 'attr name mistmatch'
       - 'deep-scrub 1 missing, 0 inconsistent objects'
       - 'failed to pick suitable auth object'
+      - 'candidate size [0-9]+ info size [0-9]+ mismatch'
       - overall HEALTH_
       - \(OSDMAP_FLAGS\)
       - \(OSD_
diff --git a/ceph/qa/suites/rados/basic/.qa b/ceph/qa/suites/rados/basic/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/rados/basic/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/rados/basic/clusters/.qa b/ceph/qa/suites/rados/basic/clusters/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/rados/basic/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/rados/basic/d-require-luminous/.qa b/ceph/qa/suites/rados/basic/d-require-luminous/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/rados/basic/d-require-luminous/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/rados/basic/msgr-failures/.qa b/ceph/qa/suites/rados/basic/msgr-failures/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/rados/basic/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/rados/basic/msgr/.qa b/ceph/qa/suites/rados/basic/msgr/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/rados/basic/msgr/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/rados/basic/tasks/.qa b/ceph/qa/suites/rados/basic/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/rados/basic/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/rados/basic/tasks/repair_test.yaml b/ceph/qa/suites/rados/basic/tasks/repair_test.yaml
index da765b018..f3a7868d1 100644
--- a/ceph/qa/suites/rados/basic/tasks/repair_test.yaml
+++ b/ceph/qa/suites/rados/basic/tasks/repair_test.yaml
@@ -17,6 +17,7 @@ overrides:
       - 'size 1 != size'
       - attr name mismatch
       - Regular scrub request, deep-scrub details will be lost
+      - candidate size [0-9]+ info size [0-9]+ mismatch
       - overall HEALTH_
       - \(OSDMAP_FLAGS\)
       - \(OSD_
diff --git a/ceph/qa/suites/rados/mgr/.qa b/ceph/qa/suites/rados/mgr/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/ceph/qa/suites/rados/mgr/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/ceph/qa/suites/rados/mgr/clusters/.qa b/ceph/qa/suites/rados/mgr/clusters/.qa
new file mode 120000
index
000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/mgr/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/mgr/debug/.qa b/ceph/qa/suites/rados/mgr/debug/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/mgr/debug/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/mgr/tasks/.qa b/ceph/qa/suites/rados/mgr/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/mgr/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/monthrash/.qa b/ceph/qa/suites/rados/monthrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/monthrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/monthrash/clusters/.qa b/ceph/qa/suites/rados/monthrash/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/monthrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/monthrash/msgr-failures/.qa b/ceph/qa/suites/rados/monthrash/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/monthrash/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/monthrash/thrashers/.qa b/ceph/qa/suites/rados/monthrash/thrashers/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/monthrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/monthrash/workloads/.qa b/ceph/qa/suites/rados/monthrash/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/monthrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/multimon/.qa b/ceph/qa/suites/rados/multimon/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/multimon/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/multimon/clusters/.qa b/ceph/qa/suites/rados/multimon/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/multimon/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/multimon/msgr-failures/.qa b/ceph/qa/suites/rados/multimon/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/multimon/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/multimon/tasks/.qa b/ceph/qa/suites/rados/multimon/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/multimon/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/objectstore/.qa b/ceph/qa/suites/rados/objectstore/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/rest/.qa b/ceph/qa/suites/rados/rest/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/rest/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/singleton-bluestore/.qa b/ceph/qa/suites/rados/singleton-bluestore/.qa new file mode 
120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/singleton-bluestore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/singleton-bluestore/all/.qa b/ceph/qa/suites/rados/singleton-bluestore/all/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/singleton-bluestore/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/.qa b/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/singleton-bluestore/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/singleton-bluestore/objectstore/.qa b/ceph/qa/suites/rados/singleton-bluestore/objectstore/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/singleton-bluestore/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/singleton-nomsgr/.qa b/ceph/qa/suites/rados/singleton-nomsgr/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/singleton-nomsgr/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/.qa b/ceph/qa/suites/rados/singleton-nomsgr/all/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml new file mode 100644 index 000000000..f77f5bbf4 --- /dev/null +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml @@ -0,0 +1,20 @@ +roles: +- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0] +overrides: + ceph: + log-whitelist: + - \(POOL_APP_NOT_ENABLED\) +tasks: +- install: + extra_packages: + deb: + - libradosstriper-dev + - librados-dev + rpm: + - libradosstriper-devel + - librados-devel +- ceph: +- workunit: + clients: + all: + - rados/test_librados_build.sh diff --git a/ceph/qa/suites/rados/singleton/.qa b/ceph/qa/suites/rados/singleton/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/singleton/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/singleton/all/.qa b/ceph/qa/suites/rados/singleton/all/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/singleton/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/singleton/all/thrash-rados/.qa b/ceph/qa/suites/rados/singleton/all/thrash-rados/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/singleton/all/thrash-rados/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/singleton/msgr-failures/.qa b/ceph/qa/suites/rados/singleton/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/singleton/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/standalone/.qa b/ceph/qa/suites/rados/standalone/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/standalone/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
a/ceph/qa/suites/rados/thrash-erasure-code-big/.qa b/ceph/qa/suites/rados/thrash-erasure-code-big/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/.qa b/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/.qa b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/.qa b/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-isa/.qa b/ceph/qa/suites/rados/thrash-erasure-code-isa/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-isa/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-isa/arch/.qa b/ceph/qa/suites/rados/thrash-erasure-code-isa/arch/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-isa/arch/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/.qa b/ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-isa/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-overwrites/.qa b/ceph/qa/suites/rados/thrash-erasure-code-overwrites/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-overwrites/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa b/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-shec/.qa b/ceph/qa/suites/rados/thrash-erasure-code-shec/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-shec/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/.qa b/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-shec/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa b/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ 
b/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/.qa b/ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code-shec/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code/.qa b/ceph/qa/suites/rados/thrash-erasure-code/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code/fast/.qa b/ceph/qa/suites/rados/thrash-erasure-code/fast/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code/fast/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/.qa b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-erasure-code/workloads/.qa b/ceph/qa/suites/rados/thrash-erasure-code/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-erasure-code/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-luminous/.qa b/ceph/qa/suites/rados/thrash-luminous/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-luminous/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash-luminous/workloads/.qa b/ceph/qa/suites/rados/thrash-luminous/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash-luminous/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash/.qa b/ceph/qa/suites/rados/thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/.qa b/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/0-size-min-size-overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash/1-pg-log-overrides/.qa b/ceph/qa/suites/rados/thrash/1-pg-log-overrides/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/1-pg-log-overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash/2-recovery-overrides/.qa b/ceph/qa/suites/rados/thrash/2-recovery-overrides/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/2-recovery-overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash/backoff/.qa b/ceph/qa/suites/rados/thrash/backoff/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/backoff/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash/clusters/.qa 
b/ceph/qa/suites/rados/thrash/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash/d-require-luminous/.qa b/ceph/qa/suites/rados/thrash/d-require-luminous/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/d-require-luminous/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash/msgr-failures/.qa b/ceph/qa/suites/rados/thrash/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash/thrashers/.qa b/ceph/qa/suites/rados/thrash/thrashers/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/thrash/workloads/.qa b/ceph/qa/suites/rados/thrash/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/thrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/upgrade/.qa b/ceph/qa/suites/rados/upgrade/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/upgrade/jewel-x-singleton/.qa b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/.qa b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/.qa b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml index 31ca3e502..908028298 100644 --- a/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml +++ b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/1-jewel-install/jewel.yaml @@ -3,7 +3,7 @@ meta: tasks: - install: branch: jewel - exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev'] + exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'python3-rados', 'python3-cephfs'] - print: "**** done install jewel" - ceph: skip_mgr_daemons: true diff --git a/ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/.qa b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/2-partial-upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/.qa 
b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/.qa b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/4-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/.qa b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/5-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/.qa b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/upgrade/jewel-x-singleton/8-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/verify/.qa b/ceph/qa/suites/rados/verify/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/verify/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/verify/clusters/.qa b/ceph/qa/suites/rados/verify/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/verify/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/verify/d-thrash/.qa b/ceph/qa/suites/rados/verify/d-thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/verify/d-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/verify/d-thrash/default/.qa b/ceph/qa/suites/rados/verify/d-thrash/default/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/verify/d-thrash/default/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/verify/msgr-failures/.qa b/ceph/qa/suites/rados/verify/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/verify/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/verify/tasks/.qa b/ceph/qa/suites/rados/verify/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/verify/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rados/verify/validater/.qa b/ceph/qa/suites/rados/verify/validater/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rados/verify/validater/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/.qa b/ceph/qa/suites/rbd/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/basic/.qa b/ceph/qa/suites/rbd/basic/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/basic/base/.qa b/ceph/qa/suites/rbd/basic/base/.qa new file mode 120000 index 
000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/basic/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/basic/cachepool/.qa b/ceph/qa/suites/rbd/basic/cachepool/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/basic/cachepool/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/basic/clusters/.qa b/ceph/qa/suites/rbd/basic/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/basic/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/basic/msgr-failures/.qa b/ceph/qa/suites/rbd/basic/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/basic/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/basic/tasks/.qa b/ceph/qa/suites/rbd/basic/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/cli/.qa b/ceph/qa/suites/rbd/cli/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/cli/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/cli/base/.qa b/ceph/qa/suites/rbd/cli/base/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/cli/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/cli/features/.qa b/ceph/qa/suites/rbd/cli/features/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/cli/features/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/cli/msgr-failures/.qa b/ceph/qa/suites/rbd/cli/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/cli/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/cli/pool/.qa b/ceph/qa/suites/rbd/cli/pool/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/cli/pool/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/cli/workloads/.qa b/ceph/qa/suites/rbd/cli/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/cli/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/librbd/.qa b/ceph/qa/suites/rbd/librbd/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/librbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/librbd/cache/.qa b/ceph/qa/suites/rbd/librbd/cache/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/librbd/cache/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/librbd/clusters/.qa b/ceph/qa/suites/rbd/librbd/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/librbd/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/librbd/config/.qa b/ceph/qa/suites/rbd/librbd/config/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/librbd/config/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
a/ceph/qa/suites/rbd/librbd/msgr-failures/.qa b/ceph/qa/suites/rbd/librbd/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/librbd/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/librbd/pool/.qa b/ceph/qa/suites/rbd/librbd/pool/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/librbd/pool/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/librbd/workloads/.qa b/ceph/qa/suites/rbd/librbd/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/librbd/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/maintenance/.qa b/ceph/qa/suites/rbd/maintenance/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/maintenance/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/maintenance/base/.qa b/ceph/qa/suites/rbd/maintenance/base/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/maintenance/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/maintenance/clusters/.qa b/ceph/qa/suites/rbd/maintenance/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/maintenance/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/maintenance/qemu/.qa b/ceph/qa/suites/rbd/maintenance/qemu/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/maintenance/qemu/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/maintenance/qemu/xfstests.yaml b/ceph/qa/suites/rbd/maintenance/qemu/xfstests.yaml index 38022f6b9..135103b34 100644 --- a/ceph/qa/suites/rbd/maintenance/qemu/xfstests.yaml +++ b/ceph/qa/suites/rbd/maintenance/qemu/xfstests.yaml @@ -10,5 +10,5 @@ io_workload: type: block disks: 3 time_wait: 120 - test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/run_xfstests_qemu.sh + test: qa/run_xfstests_qemu.sh exclude_arch: armv7l diff --git a/ceph/qa/suites/rbd/maintenance/workloads/.qa b/ceph/qa/suites/rbd/maintenance/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/maintenance/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/mirror-ha/.qa b/ceph/qa/suites/rbd/mirror-ha/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/mirror-ha/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/mirror-ha/workloads/.qa b/ceph/qa/suites/rbd/mirror-ha/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/mirror-ha/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/mirror/.qa b/ceph/qa/suites/rbd/mirror/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/mirror/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/mirror/base/.qa b/ceph/qa/suites/rbd/mirror/base/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/mirror/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/mirror/cluster/.qa b/ceph/qa/suites/rbd/mirror/cluster/.qa new 
file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/mirror/cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/mirror/rbd-mirror/.qa b/ceph/qa/suites/rbd/mirror/rbd-mirror/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/mirror/rbd-mirror/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/mirror/workloads/.qa b/ceph/qa/suites/rbd/mirror/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/mirror/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/nbd/.qa b/ceph/qa/suites/rbd/nbd/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/nbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/nbd/cluster/.qa b/ceph/qa/suites/rbd/nbd/cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/nbd/cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/nbd/workloads/.qa b/ceph/qa/suites/rbd/nbd/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/nbd/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/openstack/.qa b/ceph/qa/suites/rbd/openstack/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/openstack/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/openstack/base/.qa b/ceph/qa/suites/rbd/openstack/base/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/openstack/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/openstack/clusters/.qa b/ceph/qa/suites/rbd/openstack/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/openstack/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/openstack/features/.qa b/ceph/qa/suites/rbd/openstack/features/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/openstack/features/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/openstack/workloads/.qa b/ceph/qa/suites/rbd/openstack/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/openstack/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/openstack/workloads/devstack-tempest-gate.yaml b/ceph/qa/suites/rbd/openstack/workloads/devstack-tempest-gate.yaml index 26ddda9ed..5e168462a 100644 --- a/ceph/qa/suites/rbd/openstack/workloads/devstack-tempest-gate.yaml +++ b/ceph/qa/suites/rbd/openstack/workloads/devstack-tempest-gate.yaml @@ -7,7 +7,7 @@ tasks: disks: - image_size: 30720 - image_size: 30720 - test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/workunits/rbd/run_devstack_tempest.sh + test: qa/workunits/rbd/run_devstack_tempest.sh image_url: https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img cloud_config_archive: - type: text/cloud-config diff --git a/ceph/qa/suites/rbd/qemu/.qa b/ceph/qa/suites/rbd/qemu/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/qemu/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file 
diff --git a/ceph/qa/suites/rbd/qemu/cache/.qa b/ceph/qa/suites/rbd/qemu/cache/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/qemu/cache/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/qemu/clusters/.qa b/ceph/qa/suites/rbd/qemu/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/qemu/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/qemu/features/.qa b/ceph/qa/suites/rbd/qemu/features/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/qemu/features/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/qemu/msgr-failures/.qa b/ceph/qa/suites/rbd/qemu/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/qemu/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/qemu/pool/.qa b/ceph/qa/suites/rbd/qemu/pool/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/qemu/pool/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/qemu/workloads/.qa b/ceph/qa/suites/rbd/qemu/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/qemu/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml b/ceph/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml index e06a5870e..0ef9ebb65 100644 --- a/ceph/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml +++ b/ceph/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml @@ -2,5 +2,5 @@ tasks: - qemu: all: clone: true - test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/workunits/suites/bonnie.sh + test: qa/workunits/suites/bonnie.sh exclude_arch: armv7l diff --git a/ceph/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml b/ceph/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml index a78801df2..95f514805 100644 --- a/ceph/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml +++ b/ceph/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml @@ -2,5 +2,5 @@ tasks: - qemu: all: clone: true - test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/workunits/suites/fsstress.sh + test: qa/workunits/suites/fsstress.sh exclude_arch: armv7l diff --git a/ceph/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled b/ceph/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled index c436ba1f6..e159e208e 100644 --- a/ceph/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled +++ b/ceph/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled @@ -1,6 +1,6 @@ tasks: - qemu: all: - test: http://git.ceph.com/?p={repo};a=blob_plain;h={branch};f=qa/workunits/suites/iozone.sh + test: qa/workunits/suites/iozone.sh image_size: 20480 exclude_arch: armv7l diff --git a/ceph/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml b/ceph/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml index 2fc6fb695..198f798d4 100644 --- a/ceph/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml +++ b/ceph/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml @@ -4,5 +4,5 @@ tasks: clone: true type: block disks: 3 - test: http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=qa/run_xfstests_qemu.sh + test: qa/run_xfstests_qemu.sh exclude_arch: armv7l diff --git a/ceph/qa/suites/rbd/singleton-bluestore/.qa b/ceph/qa/suites/rbd/singleton-bluestore/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null 
+++ b/ceph/qa/suites/rbd/singleton-bluestore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/singleton-bluestore/all/.qa b/ceph/qa/suites/rbd/singleton-bluestore/all/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/singleton-bluestore/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/singleton-bluestore/objectstore/.qa b/ceph/qa/suites/rbd/singleton-bluestore/objectstore/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/singleton-bluestore/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/singleton/.qa b/ceph/qa/suites/rbd/singleton/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/singleton/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/singleton/all/.qa b/ceph/qa/suites/rbd/singleton/all/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/singleton/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/singleton/all/formatted-output.yaml b/ceph/qa/suites/rbd/singleton/all/formatted-output.yaml index f6a19910c..7be94ef23 100644 --- a/ceph/qa/suites/rbd/singleton/all/formatted-output.yaml +++ b/ceph/qa/suites/rbd/singleton/all/formatted-output.yaml @@ -7,4 +7,4 @@ tasks: - cram: clients: client.0: - - http://git.ceph.com/?p={repo};a=blob_plain;hb={branch};f=src/test/cli-integration/rbd/formatted-output.t + - src/test/cli-integration/rbd/formatted-output.t diff --git a/ceph/qa/suites/rbd/thrash/.qa b/ceph/qa/suites/rbd/thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/thrash/base/.qa b/ceph/qa/suites/rbd/thrash/base/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/thrash/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/thrash/clusters/.qa b/ceph/qa/suites/rbd/thrash/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/thrash/msgr-failures/.qa b/ceph/qa/suites/rbd/thrash/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/thrash/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/thrash/thrashers/.qa b/ceph/qa/suites/rbd/thrash/thrashers/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/thrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/thrash/workloads/.qa b/ceph/qa/suites/rbd/thrash/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/thrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/valgrind/.qa b/ceph/qa/suites/rbd/valgrind/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/valgrind/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/valgrind/base/.qa b/ceph/qa/suites/rbd/valgrind/base/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ 
b/ceph/qa/suites/rbd/valgrind/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/valgrind/validator/.qa b/ceph/qa/suites/rbd/valgrind/validator/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/valgrind/validator/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/valgrind/workloads/.qa b/ceph/qa/suites/rbd/valgrind/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/valgrind/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/.qa b/ceph/qa/suites/rgw/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/hadoop-s3a/.qa b/ceph/qa/suites/rgw/hadoop-s3a/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/hadoop-s3a/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/hadoop-s3a/hadoop/.qa b/ceph/qa/suites/rgw/hadoop-s3a/hadoop/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/hadoop-s3a/hadoop/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/multifs/.qa b/ceph/qa/suites/rgw/multifs/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/multifs/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/multifs/clusters/.qa b/ceph/qa/suites/rgw/multifs/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/multifs/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/multifs/frontend/.qa b/ceph/qa/suites/rgw/multifs/frontend/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/multifs/frontend/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/multifs/tasks/.qa b/ceph/qa/suites/rgw/multifs/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/multifs/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/multisite/.qa b/ceph/qa/suites/rgw/multisite/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/multisite/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/multisite/realms/.qa b/ceph/qa/suites/rgw/multisite/realms/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/multisite/realms/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/multisite/tasks/.qa b/ceph/qa/suites/rgw/multisite/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/multisite/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/multisite/tasks/test_multi.yaml b/ceph/qa/suites/rgw/multisite/tasks/test_multi.yaml index a8f897873..6c7ba3d10 100644 --- a/ceph/qa/suites/rgw/multisite/tasks/test_multi.yaml +++ b/ceph/qa/suites/rgw/multisite/tasks/test_multi.yaml @@ -7,13 +7,13 @@ tasks: - ceph: {cluster: c2} - rgw: c1.client.0: - valgrind: [--tool=memcheck] + valgrind: [--tool=memcheck, --max-threads=1024] # http://tracker.ceph.com/issues/25214 c1.client.1: - valgrind: [--tool=memcheck] + valgrind: [--tool=memcheck, 
--max-threads=1024] c2.client.0: - valgrind: [--tool=memcheck] + valgrind: [--tool=memcheck, --max-threads=1024] c2.client.1: - valgrind: [--tool=memcheck] + valgrind: [--tool=memcheck, --max-threads=1024] - rgw-multisite: - rgw-multisite-tests: config: diff --git a/ceph/qa/suites/rgw/singleton/.qa b/ceph/qa/suites/rgw/singleton/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/singleton/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/singleton/all/.qa b/ceph/qa/suites/rgw/singleton/all/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/singleton/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/singleton/frontend/.qa b/ceph/qa/suites/rgw/singleton/frontend/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/singleton/frontend/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/tempest/.qa b/ceph/qa/suites/rgw/tempest/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/tempest/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/thrash/.qa b/ceph/qa/suites/rgw/thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/thrash/clusters/.qa b/ceph/qa/suites/rgw/thrash/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/thrash/thrasher/.qa b/ceph/qa/suites/rgw/thrash/thrasher/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/thrash/thrasher/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/thrash/workload/.qa b/ceph/qa/suites/rgw/thrash/workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/thrash/workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/verify/.qa b/ceph/qa/suites/rgw/verify/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/verify/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/verify/clusters/.qa b/ceph/qa/suites/rgw/verify/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/verify/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/verify/msgr-failures/.qa b/ceph/qa/suites/rgw/verify/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/verify/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/verify/tasks/+ b/ceph/qa/suites/rgw/verify/tasks/+ new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/rgw/verify/tasks/.qa b/ceph/qa/suites/rgw/verify/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/verify/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml b/ceph/qa/suites/rgw/verify/tasks/0-install.yaml similarity index 73% rename from ceph/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml rename to 
ceph/qa/suites/rgw/verify/tasks/0-install.yaml index cf413389b..0b5dce445 100644 --- a/ceph/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml +++ b/ceph/qa/suites/rgw/verify/tasks/0-install.yaml @@ -7,11 +7,8 @@ tasks: - ceph: - rgw: client.0: - valgrind: [--tool=memcheck] -- s3tests: - client.0: - force-branch: ceph-luminous - rgw_server: client.0 + valgrind: [--tool=memcheck, --max-threads=1024] # http://tracker.ceph.com/issues/25214 + overrides: ceph: conf: diff --git a/ceph/qa/suites/rgw/verify/tasks/cls_rgw.yaml b/ceph/qa/suites/rgw/verify/tasks/cls_rgw.yaml new file mode 100644 index 000000000..aa497d9b0 --- /dev/null +++ b/ceph/qa/suites/rgw/verify/tasks/cls_rgw.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - cls/test_cls_rgw.sh diff --git a/ceph/qa/suites/rgw/verify/tasks/rgw_swift.yaml b/ceph/qa/suites/rgw/verify/tasks/rgw_swift.yaml deleted file mode 100644 index 9b3aa6feb..000000000 --- a/ceph/qa/suites/rgw/verify/tasks/rgw_swift.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126 -os_type: centos - -tasks: -- install: - flavor: notcmalloc -- ceph: -- rgw: - client.0: - valgrind: [--tool=memcheck] -- swift: - client.0: - rgw_server: client.0 diff --git a/ceph/qa/suites/rgw/verify/tasks/s3tests.yaml b/ceph/qa/suites/rgw/verify/tasks/s3tests.yaml new file mode 100644 index 000000000..5900e0ccf --- /dev/null +++ b/ceph/qa/suites/rgw/verify/tasks/s3tests.yaml @@ -0,0 +1,5 @@ +tasks: +- s3tests: + client.0: + force-branch: ceph-luminous + rgw_server: client.0 diff --git a/ceph/qa/suites/rgw/verify/tasks/swift.yaml b/ceph/qa/suites/rgw/verify/tasks/swift.yaml new file mode 100644 index 000000000..45e2fc9cc --- /dev/null +++ b/ceph/qa/suites/rgw/verify/tasks/swift.yaml @@ -0,0 +1,4 @@ +tasks: +- swift: + client.0: + rgw_server: client.0 diff --git a/ceph/qa/suites/rgw/verify/validater/.qa b/ceph/qa/suites/rgw/verify/validater/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rgw/verify/validater/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/samba/.qa b/ceph/qa/suites/samba/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/samba/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/samba/clusters/.qa b/ceph/qa/suites/samba/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/samba/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/samba/install/.qa b/ceph/qa/suites/samba/install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/samba/install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/samba/mount/.qa b/ceph/qa/suites/samba/mount/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/samba/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/samba/workload/.qa b/ceph/qa/suites/samba/workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/samba/workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/.qa b/ceph/qa/suites/smoke/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/1node/.qa b/ceph/qa/suites/smoke/1node/.qa new file mode 
120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/1node/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/1node/clusters/.qa b/ceph/qa/suites/smoke/1node/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/1node/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/1node/distros/.qa b/ceph/qa/suites/smoke/1node/distros/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/1node/distros/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/1node/objectstore/.qa b/ceph/qa/suites/smoke/1node/objectstore/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/1node/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/1node/tasks/.qa b/ceph/qa/suites/smoke/1node/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/1node/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/basic/.qa b/ceph/qa/suites/smoke/basic/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/basic/clusters/.qa b/ceph/qa/suites/smoke/basic/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/basic/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/basic/objectstore/.qa b/ceph/qa/suites/smoke/basic/objectstore/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/basic/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/basic/tasks/.qa b/ceph/qa/suites/smoke/basic/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/systemd/.qa b/ceph/qa/suites/smoke/systemd/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/systemd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/systemd/clusters/.qa b/ceph/qa/suites/smoke/systemd/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/systemd/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/systemd/distros/.qa b/ceph/qa/suites/smoke/systemd/distros/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/systemd/distros/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/systemd/objectstore/.qa b/ceph/qa/suites/smoke/systemd/objectstore/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/systemd/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/systemd/tasks/.qa b/ceph/qa/suites/smoke/systemd/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/smoke/systemd/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/stress/.qa b/ceph/qa/suites/stress/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ 
b/ceph/qa/suites/stress/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/stress/bench/.qa b/ceph/qa/suites/stress/bench/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/stress/bench/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/stress/bench/clusters/.qa b/ceph/qa/suites/stress/bench/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/stress/bench/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/stress/bench/tasks/.qa b/ceph/qa/suites/stress/bench/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/stress/bench/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/stress/thrash/.qa b/ceph/qa/suites/stress/thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/stress/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/stress/thrash/clusters/.qa b/ceph/qa/suites/stress/thrash/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/stress/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/stress/thrash/thrashers/.qa b/ceph/qa/suites/stress/thrash/thrashers/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/stress/thrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/stress/thrash/workloads/.qa b/ceph/qa/suites/stress/thrash/workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/stress/thrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/.qa b/ceph/qa/suites/teuthology/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/buildpackages/.qa b/ceph/qa/suites/teuthology/buildpackages/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/buildpackages/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/buildpackages/tasks/.qa b/ceph/qa/suites/teuthology/buildpackages/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/buildpackages/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/ceph/.qa b/ceph/qa/suites/teuthology/ceph/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/ceph/clusters/.qa b/ceph/qa/suites/teuthology/ceph/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/ceph/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/ceph/tasks/.qa b/ceph/qa/suites/teuthology/ceph/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/ceph/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/multi-cluster/.qa b/ceph/qa/suites/teuthology/multi-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ 
b/ceph/qa/suites/teuthology/multi-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/multi-cluster/all/.qa b/ceph/qa/suites/teuthology/multi-cluster/all/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/multi-cluster/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/no-ceph/.qa b/ceph/qa/suites/teuthology/no-ceph/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/no-ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/no-ceph/clusters/.qa b/ceph/qa/suites/teuthology/no-ceph/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/no-ceph/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/no-ceph/tasks/.qa b/ceph/qa/suites/teuthology/no-ceph/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/no-ceph/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/nop/.qa b/ceph/qa/suites/teuthology/nop/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/nop/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/nop/all/.qa b/ceph/qa/suites/teuthology/nop/all/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/nop/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/rgw/.qa b/ceph/qa/suites/teuthology/rgw/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/rgw/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/rgw/tasks/.qa b/ceph/qa/suites/teuthology/rgw/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/rgw/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/teuthology/workunits/.qa b/ceph/qa/suites/teuthology/workunits/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/teuthology/workunits/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/tgt/.qa b/ceph/qa/suites/tgt/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/tgt/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/tgt/basic/.qa b/ceph/qa/suites/tgt/basic/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/tgt/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/tgt/basic/clusters/.qa b/ceph/qa/suites/tgt/basic/clusters/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/tgt/basic/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/tgt/basic/msgr-failures/.qa b/ceph/qa/suites/tgt/basic/msgr-failures/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/tgt/basic/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/tgt/basic/tasks/.qa b/ceph/qa/suites/tgt/basic/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/tgt/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at 
end of file diff --git a/ceph/qa/suites/upgrade/.qa b/ceph/qa/suites/upgrade/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/0-cluster/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/1-install/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/1-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/2-workload/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/2-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/2-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/supported/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/supported/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/basic/supported/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/0-cluster/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/1-install/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/1-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/2-features/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/2-features/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/2-features/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/3-workload/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/3-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/3-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/supported/.qa b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/supported/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade-luminous/luminous-client-x/rbd/supported/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/.qa b/ceph/qa/suites/upgrade/client-upgrade/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/.qa b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/.qa b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/.qa b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/.qa b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/.qa b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/basic/2-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
a/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/.qa b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/.qa b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/.qa b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/.qa b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/hammer-client-x/rbd/2-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/.qa b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/.qa b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/.qa b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/.qa b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/.qa b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/basic/2-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/.qa b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/.qa b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/.qa new file mode 120000 index 
000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/.qa b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/.qa b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/2-features/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/.qa b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/client-upgrade/jewel-client-x/rbd/3-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/1-hammer-jewel-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/2-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/3-upgrade-sequence/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/5-hammer-jewel-x-upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/.qa 
b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/6-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/7-upgrade-sequence/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/parallel/9-final-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/.qa b/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/.qa b/ceph/qa/suites/upgrade/jewel-x/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ 
b/ceph/qa/suites/upgrade/jewel-x/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/.qa b/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/.qa b/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/ceph-deploy/distros/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/parallel/.qa b/ceph/qa/suites/upgrade/jewel-x/parallel/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/parallel/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/.qa b/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/parallel/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/.qa b/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml b/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml index 48f1ce328..0471509b2 100644 --- a/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml +++ b/ceph/qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml @@ -25,7 +25,7 @@ meta: tasks: - install: branch: jewel - exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev'] + exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev','python34-cephfs','python34-rados','python34-rbd','python34-rgw','python34-ceph-argparse','python3-cephfs','python3-rados'] - print: "**** done installing jewel" - ceph: skip_mgr_daemons: true diff --git a/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/.qa b/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/parallel/2-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/.qa b/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/parallel/3-upgrade-sequence/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/.qa b/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/parallel/7-final-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/.qa b/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
a/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/.qa b/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/.qa b/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/4-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/.qa b/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/7-final-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/stress-split/.qa b/ceph/qa/suites/upgrade/jewel-x/stress-split/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/stress-split/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/.qa b/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/stress-split/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/.qa b/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml b/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml index 31ca3e502..87bebb445 100644 --- a/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml +++ b/ceph/qa/suites/upgrade/jewel-x/stress-split/1-jewel-install/jewel.yaml @@ -3,7 +3,7 @@ meta: tasks: - install: branch: jewel - exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev'] + exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev','python34-cephfs','python34-rados','python34-rbd','python34-rgw','python34-ceph-argparse','python3-cephfs','python3-rados'] - print: "**** done install jewel" - ceph: skip_mgr_daemons: true diff --git a/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/.qa b/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/.qa b/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/stress-split/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/.qa b/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ 
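Both jewel install fragments (parallel and stress-split) now also exclude the python34-*/python3-* binding packages, presumably because the jewel repositories do not ship them. A small illustrative helper (not the real teuthology install task) showing the effect of exclude_packages on the requested package set:

EXCLUDE_PACKAGES = [
    'ceph-mgr', 'libcephfs2', 'libcephfs-devel', 'libcephfs-dev',
    'python34-cephfs', 'python34-rados', 'python34-rbd', 'python34-rgw',
    'python34-ceph-argparse', 'python3-cephfs', 'python3-rados',
]

def packages_to_install(all_packages, exclude=EXCLUDE_PACKAGES):
    # The install task simply skips anything listed in exclude_packages.
    return [pkg for pkg in all_packages if pkg not in exclude]

# packages_to_install(['ceph', 'ceph-mgr', 'python34-rados']) -> ['ceph']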
b/ceph/qa/suites/upgrade/jewel-x/stress-split/4-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/.qa b/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/jewel-x/stress-split/7-final-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/.qa b/ceph/qa/suites/upgrade/kraken-x/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/ceph-deploy/.qa b/ceph/qa/suites/upgrade/kraken-x/ceph-deploy/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/ceph-deploy/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/parallel/.qa b/ceph/qa/suites/upgrade/kraken-x/parallel/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/parallel/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/.qa b/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/parallel/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/.qa b/ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/parallel/1-kraken-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/.qa b/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/parallel/2-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/.qa b/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/parallel/3-upgrade-sequence/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/.qa b/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/parallel/7-final-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/.qa b/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/.qa b/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/stress-split-erasure-code/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/stress-split/.qa 
b/ceph/qa/suites/upgrade/kraken-x/stress-split/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/stress-split/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/.qa b/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/stress-split/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/.qa b/ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/stress-split/1-kraken-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/.qa b/ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/stress-split/2-partial-upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/.qa b/ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/stress-split/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/.qa b/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/stress-split/4-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/.qa b/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/stress-split/7-final-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/.qa b/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/kraken-x/stress-split/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/luminous-p2p/.qa b/ceph/qa/suites/upgrade/luminous-p2p/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/upgrade/luminous-p2p/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/upgrade/luminous-p2p/point-to-point-upgrade.yaml b/ceph/qa/suites/upgrade/luminous-p2p/point-to-point-upgrade.yaml index ffa21378c..9deeb4c49 100644 --- a/ceph/qa/suites/upgrade/luminous-p2p/point-to-point-upgrade.yaml +++ b/ceph/qa/suites/upgrade/luminous-p2p/point-to-point-upgrade.yaml @@ -9,6 +9,8 @@ meta: run workload and upgrade-sequence in parallel install ceph/luminous v12.2.7 point version run workload and upgrade-sequence in parallel + install ceph/luminous v12.2.8 point version + run workload and upgrade-sequence in parallel install ceph/luminous latest version run workload and upgrade-sequence in parallel overrides: @@ -104,6 +106,19 @@ tasks: - upgrade-sequence_luminous - print: "**** done parallel luminous v12.2.7" +#### upgrade to v12.2.8 +- install.upgrade: + #exclude_packages: 
['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev'] + mon.a: + tag: v12.2.8 + mon.b: + tag: v12.2.8 + # Note that client.a IS NOT upgraded at this point +- parallel: + - workload_luminous + - upgrade-sequence_luminous +- print: "**** done parallel luminous v12.2.8" + #### upgrade to latest luminous - install.upgrade: #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev'] diff --git a/ceph/qa/tasks/cephfs/filesystem.py b/ceph/qa/tasks/cephfs/filesystem.py index 393d69e53..7f9253aab 100644 --- a/ceph/qa/tasks/cephfs/filesystem.py +++ b/ceph/qa/tasks/cephfs/filesystem.py @@ -439,6 +439,10 @@ class Filesystem(MDSCluster): raise RuntimeError("cannot deactivate rank 0") self.mon_manager.raw_cluster_cmd("mds", "deactivate", "%d:%d" % (self.id, rank)) + def set_var(self, var, *args): + a = map(str, args) + self.mon_manager.raw_cluster_cmd("fs", "set", self.name, var, *a) + def set_max_mds(self, max_mds): self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "max_mds", "%d" % max_mds) @@ -558,6 +562,9 @@ class Filesystem(MDSCluster): def get_mds_map(self): return self.status().get_fsmap(self.id)['mdsmap'] + def get_var(self, var): + return self.status().get_fsmap(self.id)['mdsmap'][var] + def add_data_pool(self, name): self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.get_pgs_per_fs_pool().__str__()) self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name) diff --git a/ceph/qa/tasks/cephfs/kernel_mount.py b/ceph/qa/tasks/cephfs/kernel_mount.py index d237f65fb..80271a6eb 100644 --- a/ceph/qa/tasks/cephfs/kernel_mount.py +++ b/ceph/qa/tasks/cephfs/kernel_mount.py @@ -99,7 +99,7 @@ class KernelMount(CephFSMount): cmd.append('-f') try: - self.client_remote.run(args=cmd) + self.client_remote.run(args=cmd, timeout=(5*60)) except Exception as e: self.client_remote.run(args=[ 'sudo', diff --git a/ceph/qa/tasks/cephfs/mount.py b/ceph/qa/tasks/cephfs/mount.py index 4f96e6cdc..8a76d09fa 100644 --- a/ceph/qa/tasks/cephfs/mount.py +++ b/ceph/qa/tasks/cephfs/mount.py @@ -124,13 +124,14 @@ class CephFSMount(object): 'sudo', 'rm', '-f', os.path.join(self.mountpoint, filename) ]) - def _run_python(self, pyscript): - return self.client_remote.run(args=[ - 'sudo', 'adjust-ulimits', 'daemon-helper', 'kill', 'python', '-c', pyscript - ], wait=False, stdin=run.PIPE, stdout=StringIO()) - - def run_python(self, pyscript): - p = self._run_python(pyscript) + def _run_python(self, pyscript, py_version='python'): + return self.client_remote.run( + args=['sudo', 'adjust-ulimits', 'daemon-helper', 'kill', + py_version, '-c', pyscript], wait=False, stdin=run.PIPE, + stdout=StringIO()) + + def run_python(self, pyscript, py_version='python'): + p = self._run_python(pyscript, py_version) p.wait() return p.stdout.getvalue().strip() diff --git a/ceph/qa/tasks/cephfs/test_misc.py b/ceph/qa/tasks/cephfs/test_misc.py index d857cfddf..4158538fd 100644 --- a/ceph/qa/tasks/cephfs/test_misc.py +++ b/ceph/qa/tasks/cephfs/test_misc.py @@ -2,11 +2,13 @@ from unittest import SkipTest from tasks.cephfs.fuse_mount import FuseMount from tasks.cephfs.cephfs_test_case import CephFSTestCase -from teuthology.orchestra.run import CommandFailedError +from teuthology.orchestra.run import CommandFailedError, ConnectionLostError import errno import time import json +import logging +log = logging.getLogger(__name__) class TestMisc(CephFSTestCase): CLIENTS_REQUIRED = 2 @@ -130,6 +132,59 @@ class TestMisc(CephFSTestCase): ls_data = self.fs.mds_asok(['session', 'ls']) 
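filesystem.py gains generic set_var()/get_var() helpers so tests can tweak or read any MDS map field instead of growing one wrapper per setting: set_var() stringifies its arguments and runs "ceph fs set <fsname> <var> ...", and get_var() reads the field back out of the mdsmap. A usage sketch (assumes fs is a tasks.cephfs.filesystem.Filesystem instance inside a running test; not runnable outside that context):

def check_session_timeout(fs):
    # Read a value straight from the MDS map.
    before = fs.get_var("session_timeout")

    # Generic setter, equivalent to "ceph fs set <fsname> session_timeout 120".
    fs.set_var("session_timeout", 120)
    assert fs.get_var("session_timeout") == 120

    fs.set_var("session_timeout", before)   # restore the original value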
self.assert_session_count(1, ls_data) + def test_cap_revoke_nonresponder(self): + """ + Check that a client is evicted if it has not responded to cap revoke + request for configured number of seconds. + """ + session_timeout = self.fs.get_var("session_timeout") + eviction_timeout = session_timeout / 2.0 + + self.fs.mds_asok(['config', 'set', 'mds_cap_revoke_eviction_timeout', + str(eviction_timeout)]) + + cap_holder = self.mount_a.open_background() + + # Wait for the file to be visible from another client, indicating + # that mount_a has completed its network ops + self.mount_b.wait_for_visible() + + # Simulate client death + self.mount_a.kill() + + try: + # The waiter should get stuck waiting for the capability + # held on the MDS by the now-dead client A + cap_waiter = self.mount_b.write_background() + + a = time.time() + time.sleep(eviction_timeout) + cap_waiter.wait() + b = time.time() + cap_waited = b - a + log.info("cap_waiter waited {0}s".format(cap_waited)) + + # check if the cap is transferred before session timeout kicked in. + # this is a good enough check to ensure that the client got evicted + # by the cap auto evicter rather than transitioning to stale state + # and then getting evicted. + self.assertLess(cap_waited, session_timeout, + "Capability handover took {0}, expected less than {1}".format( + cap_waited, session_timeout + )) + + cap_holder.stdin.close() + try: + cap_holder.wait() + except (CommandFailedError, ConnectionLostError): + # We killed it (and possibly its node), so it raises an error + pass + finally: + self.mount_a.kill_cleanup() + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + def test_filtered_df(self): pool_name = self.fs.get_data_pool_name() raw_df = self.fs.get_pool_df(pool_name) diff --git a/ceph/qa/tasks/cephfs/test_volume_client.py b/ceph/qa/tasks/cephfs/test_volume_client.py index 9be7fc2ff..06094dd6f 100644 --- a/ceph/qa/tasks/cephfs/test_volume_client.py +++ b/ceph/qa/tasks/cephfs/test_volume_client.py @@ -14,6 +14,12 @@ class TestVolumeClient(CephFSTestCase): # One for looking at the global filesystem, one for being # the VolumeClient, two for mounting the created shares CLIENTS_REQUIRED = 4 + py_version = 'python' + + def setUp(self): + CephFSTestCase.setUp(self) + self.py_version = self.ctx.config.get('overrides', {}).get('python', 'python') + log.info("using python version: %s".format(self.py_version)) def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None): # Can't dedent this *and* the script we pass in, because they might have different @@ -23,6 +29,7 @@ class TestVolumeClient(CephFSTestCase): if ns_prefix: ns_prefix = "\"" + ns_prefix + "\"" return client.run_python(""" +from __future__ import print_function from ceph_volume_client import CephFSVolumeClient, VolumePath import logging log = logging.getLogger("ceph_volume_client") @@ -32,7 +39,9 @@ vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefi vc.connect() {payload} vc.disconnect() - """.format(payload=script, conf_path=client.config_path, vol_prefix=vol_prefix, ns_prefix=ns_prefix)) + """.format(payload=script, conf_path=client.config_path, + vol_prefix=vol_prefix, ns_prefix=ns_prefix), + self.py_version) def _sudo_write_file(self, remote, path, data): """ @@ -98,7 +107,7 @@ vc.disconnect() vp = VolumePath("{group_id}", "{volume_id}") auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly}, tenant_id="{tenant_id}") - print auth_result['auth_key'] + print(auth_result['auth_key']) """.format( 
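The new test_cap_revoke_nonresponder hinges on a wall-clock argument: mds_cap_revoke_eviction_timeout is set to half the session timeout, so if the blocked writer gets the capability in under session_timeout seconds, the dead client must have been evicted by the cap-revoke path rather than by ordinary session staleness. A stripped-down restatement of that check (cap_waiter stands in for the background write started in the test):

import time

def assert_evicted_by_cap_revoke(cap_waiter, session_timeout):
    # cap_waiter blocks until the dead client's capability is released.
    start = time.time()
    cap_waiter.wait()
    cap_waited = time.time() - start
    # Under session_timeout means the cap-revoke auto-evicter fired first.
    assert cap_waited < session_timeout, (
        "handover took %.1fs, expected < %.1fs" % (cap_waited, session_timeout))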
group_id=group_id, volume_id=volume_id, @@ -195,7 +204,7 @@ vc.disconnect() mount_path = self._volume_client_python(self.mount_b, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") create_result = vc.create_volume(vp, 1024*1024*{volume_size}) - print create_result['mount_path'] + print(create_result['mount_path']) """.format( group_id=group_id, volume_id=volume_id, @@ -476,7 +485,7 @@ vc.disconnect() self._volume_client_python(volumeclient_mount, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") create_result = vc.create_volume(vp, 10 * 1024 * 1024) - print create_result['mount_path'] + print(create_result['mount_path']) """.format( group_id=group_id, volume_id=volume_ids[i] @@ -559,7 +568,7 @@ vc.disconnect() mount_path = self._volume_client_python(self.mount_b, dedent(""" vp = VolumePath("{group_id}", u"{volume_id}") create_result = vc.create_volume(vp, 10) - print create_result['mount_path'] + print(create_result['mount_path']) """.format( group_id=group_id, volume_id=volume_id @@ -609,7 +618,7 @@ vc.disconnect() mount_path = self._volume_client_python(volumeclient_mount, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") create_result = vc.create_volume(vp, 1024*1024*10) - print create_result['mount_path'] + print(create_result['mount_path']) """.format( group_id=group_id, volume_id=volume_id, @@ -664,14 +673,14 @@ vc.disconnect() guest_entity_1 = "guest1" guest_entity_2 = "guest2" - log.info("print group ID: {0}".format(group_id)) + log.info("print(group ID: {0})".format(group_id)) # Create a volume. auths = self._volume_client_python(volumeclient_mount, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") vc.create_volume(vp, 1024*1024*10) auths = vc.get_authorized_ids(vp) - print auths + print(auths) """.format( group_id=group_id, volume_id=volume_id, @@ -686,7 +695,7 @@ vc.disconnect() vc.authorize(vp, "{guest_entity_1}", readonly=False) vc.authorize(vp, "{guest_entity_2}", readonly=True) auths = vc.get_authorized_ids(vp) - print auths + print(auths) """.format( group_id=group_id, volume_id=volume_id, @@ -694,7 +703,11 @@ vc.disconnect() guest_entity_2=guest_entity_2, ))) # Check the list of authorized IDs and their access levels. - expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')] + if self.py_version == 'python3': + expected_result = [('guest1', 'rw'), ('guest2', 'r')] + else: + expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')] + self.assertItemsEqual(str(expected_result), auths) # Disallow both the auth IDs' access to the volume. 
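The volume-client payloads are now generated for either interpreter: from __future__ import print_function plus print(...) keeps the embedded scripts valid under Python 2 and 3, and the expected get_authorized_ids() output differs only in its repr, because Python 2 displays text strings with a u'' prefix. A short standalone illustration of the distinction the test branches on (not part of the patch itself):

from __future__ import print_function
import sys

auths = [(u'guest1', u'rw'), (u'guest2', u'r')]
if sys.version_info[0] >= 3:
    expected = "[('guest1', 'rw'), ('guest2', 'r')]"     # no u'' prefix on py3
else:
    expected = "[(u'guest1', u'rw'), (u'guest2', u'r')]"
print(repr(auths) == expected)   # True on the matching interpreter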
@@ -703,7 +716,7 @@ vc.disconnect() vc.deauthorize(vp, "{guest_entity_1}") vc.deauthorize(vp, "{guest_entity_2}") auths = vc.get_authorized_ids(vp) - print auths + print(auths) """.format( group_id=group_id, volume_id=volume_id, @@ -780,11 +793,11 @@ vc.disconnect() "version": 2, "compat_version": 1, "dirty": False, - "tenant_id": u"tenant1", + "tenant_id": "tenant1", "volumes": { "groupid/volumeid": { "dirty": False, - "access_level": u"rw", + "access_level": "rw" } } } @@ -814,7 +827,7 @@ vc.disconnect() "auths": { "guest": { "dirty": False, - "access_level": u"rw" + "access_level": "rw" } } } @@ -970,6 +983,27 @@ vc.disconnect() obj_data = obj_data ))) + def test_put_object_versioned(self): + vc_mount = self.mounts[1] + vc_mount.umount_wait() + self._configure_vc_auth(vc_mount, "manila") + + obj_data = 'test_data' + obj_name = 'test_vc_ob_2' + pool_name = self.fs.get_data_pool_names()[0] + self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data) + + # Test if put_object_versioned() crosschecks the version of the + # given object. Being a negative test, an exception is expected. + with self.assertRaises(CommandFailedError): + self._volume_client_python(vc_mount, dedent(""" + data, version = vc.get_object_and_version("{pool_name}", "{obj_name}") + data += 'm1' + vc.put_object("{pool_name}", "{obj_name}", data) + data += 'm2' + vc.put_object_versioned("{pool_name}", "{obj_name}", data, version) + """).format(pool_name=pool_name, obj_name=obj_name)) + def test_delete_object(self): vc_mount = self.mounts[1] vc_mount.umount_wait() @@ -1018,7 +1052,7 @@ vc.disconnect() mount_path = self._volume_client_python(vc_mount, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") create_result = vc.create_volume(vp, 1024*1024*10) - print create_result['mount_path'] + print(create_result['mount_path']) """.format( group_id=group_id, volume_id=volume_id @@ -1057,7 +1091,7 @@ vc.disconnect() mount_path = self._volume_client_python(vc_mount, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False) - print create_result['mount_path'] + print(create_result['mount_path']) """.format( group_id=group_id, volume_id=volume_id diff --git a/ceph/qa/tasks/cram.py b/ceph/qa/tasks/cram.py index 02c6667eb..9fe4fb809 100644 --- a/ceph/qa/tasks/cram.py +++ b/ceph/qa/tasks/cram.py @@ -4,6 +4,8 @@ Cram tests import logging import os +from util.workunit import get_refspec_after_overrides + from teuthology import misc as teuthology from teuthology.parallel import parallel from teuthology.orchestra import run @@ -13,7 +15,7 @@ log = logging.getLogger(__name__) def task(ctx, config): """ - Run all cram tests from the specified urls on the specified + Run all cram tests from the specified paths on the specified clients. Each client runs tests in parallel. 
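test_put_object_versioned() checks that put_object_versioned() refuses to overwrite an object whose version moved on after it was read: the script bumps the object with a plain put_object() and then expects the stale versioned write to raise. This is ordinary optimistic concurrency control; a hedged sketch against a hypothetical in-memory store (the real check is done by ceph_volume_client against RADOS object versions):

class VersionMismatch(Exception):
    pass

class TinyStore(object):
    # Hypothetical stand-in for the versioned-object semantics exercised above.
    def __init__(self):
        self._data, self._version = {}, {}

    def get_object_and_version(self, name):
        return self._data[name], self._version[name]

    def put_object(self, name, data):             # unconditional write
        self._data[name] = data
        self._version[name] = self._version.get(name, 0) + 1

    def put_object_versioned(self, name, data, version):
        # Write only if nobody changed the object since `version` was read.
        if self._version.get(name, 0) != version:
            raise VersionMismatch(name)
        self.put_object(name, data)

store = TinyStore()
store.put_object("obj", "test_data")
data, version = store.get_object_and_version("obj")
store.put_object("obj", data + "m1")              # concurrent writer bumps version
try:
    store.put_object_versioned("obj", data + "m2", version)
except VersionMismatch:
    pass                                          # expected, as in the test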
Limitations: @@ -27,9 +29,9 @@ def task(ctx, config): - cram: clients: client.0: - - http://download.ceph.com/qa/test.t - - http://download.ceph.com/qa/test2.t] - client.1: [http://download.ceph.com/qa/test.t] + - qa/test.t + - qa/test2.t] + client.1: [qa/test.t] branch: foo You can also run a list of cram tests on all clients:: @@ -38,7 +40,7 @@ def task(ctx, config): - ceph: - cram: clients: - all: [http://download.ceph.com/qa/test.t] + all: [qa/test.t] :param ctx: Context :param config: Configuration @@ -52,21 +54,10 @@ def task(ctx, config): testdir = teuthology.get_testdir(ctx) overrides = ctx.config.get('overrides', {}) - teuthology.deep_merge(config, overrides.get('workunit', {})) - - refspec = config.get('branch') - if refspec is None: - refspec = config.get('tag') - if refspec is None: - refspec = config.get('sha1') - if refspec is None: - refspec = 'HEAD' - - # hack: the git_url is always ceph-ci or ceph - git_url = teuth_config.get_ceph_git_url() - repo_name = 'ceph.git' - if git_url.count('ceph-ci'): - repo_name = 'ceph-ci.git' + refspec = get_refspec_after_overrides(config, overrides) + + git_url = teuth_config.get_ceph_qa_suite_git_url() + log.info('Pulling tests from %s ref %s', git_url, refspec) try: for client, tests in clients.iteritems(): @@ -82,13 +73,14 @@ def task(ctx, config): 'install', 'cram==0.6', ], ) + clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client) + remote.run(args=refspec.clone(git_url, clone_dir)) + for test in tests: - url = test.format(repo=repo_name, branch=refspec) - log.info('fetching test %s for %s', url, client) assert test.endswith('.t'), 'tests must end in .t' remote.run( args=[ - 'wget', '-nc', '-nv', '-P', client_dir, '--', url, + 'cp', '--', os.path.join(clone_dir, test), client_dir, ], ) @@ -119,6 +111,7 @@ def task(ctx, config): args=[ 'rm', '-rf', '--', '{tdir}/virtualenv'.format(tdir=testdir), + clone_dir, run.Raw(';'), 'rmdir', '--ignore-fail-on-non-empty', client_dir, ], diff --git a/ceph/qa/tasks/qemu.py b/ceph/qa/tasks/qemu.py index 7a1abe8f5..f597c08d6 100644 --- a/ceph/qa/tasks/qemu.py +++ b/ceph/qa/tasks/qemu.py @@ -15,6 +15,8 @@ from tasks import rbd from teuthology.orchestra import run from teuthology.config import config as teuth_config +from util.workunit import get_refspec_after_overrides + log = logging.getLogger(__name__) DEFAULT_NUM_DISKS = 2 @@ -102,24 +104,20 @@ def generate_iso(ctx, config): # use ctx.config instead of config, because config has been # through teuthology.replace_all_with_clients() - refspec = ctx.config.get('branch') - if refspec is None: - refspec = ctx.config.get('tag') - if refspec is None: - refspec = ctx.config.get('sha1') - if refspec is None: - refspec = 'HEAD' - - # hack: the git_url is always ceph-ci or ceph - git_url = teuth_config.get_ceph_git_url() - repo_name = 'ceph.git' - if git_url.count('ceph-ci'): - repo_name = 'ceph-ci.git' + refspec = get_refspec_after_overrides(ctx.config, {}) + + git_url = teuth_config.get_ceph_qa_suite_git_url() + log.info('Pulling tests from %s ref %s', git_url, refspec) for client, client_config in config.iteritems(): assert 'test' in client_config, 'You must specify a test to run' - test_url = client_config['test'].format(repo=repo_name, branch=refspec) + test = client_config['test'] + (remote,) = ctx.cluster.only(client).remotes.keys() + + clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client) + remote.run(args=refspec.clone(git_url, clone_dir)) + src_dir = os.path.dirname(__file__) userdata_path = os.path.join(testdir, 'qemu', 
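cram (and qemu.py just below) stops fetching test scripts over HTTP from download.ceph.com: each client clones the qa suite once at the resolved refspec and copies files out of the working tree, so the tests always match the branch under test, and the clone is removed again during cleanup. A condensed sketch of the per-client flow (remote and refspec behave as in the patch; the virtualenv and cleanup plumbing is elided):

import os

def fetch_cram_tests(remote, refspec, git_url, testdir, role, tests, client_dir):
    clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=role)
    # One clone per client instead of one wget per test file.
    remote.run(args=refspec.clone(git_url, clone_dir))
    for test in tests:
        assert test.endswith('.t'), 'tests must end in .t'
        remote.run(args=['cp', '--', os.path.join(clone_dir, test), client_dir])
    return clone_dir    # later removed with 'rm -rf' alongside the virtualenv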
'userdata.' + client) metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client) @@ -181,11 +179,10 @@ def generate_iso(ctx, config): test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client) - log.info('fetching test %s for %s', test_url, client) + log.info('fetching test %s for %s', test, client) remote.run( args=[ - 'wget', '-nv', '-O', test_file, - test_url, + 'cp', '--', os.path.join(clone_dir, test), test_file, run.Raw('&&'), 'chmod', '755', test_file, ], @@ -210,11 +207,12 @@ def generate_iso(ctx, config): (remote,) = ctx.cluster.only(client).remotes.keys() remote.run( args=[ - 'rm', '-f', + 'rm', '-rf', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), os.path.join(testdir, 'qemu', 'userdata.' + client), os.path.join(testdir, 'qemu', 'metadata.' + client), '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client), + '{tdir}/clone.{client}'.format(tdir=testdir, client=client), ], ) diff --git a/ceph/qa/tasks/repair_test.py b/ceph/qa/tasks/repair_test.py index 5a63bd636..8ad4d02be 100644 --- a/ceph/qa/tasks/repair_test.py +++ b/ceph/qa/tasks/repair_test.py @@ -276,6 +276,7 @@ def task(ctx, config): - 'size 1 != size' - 'attr name mismatch' - 'Regular scrub request, deep-scrub details will be lost' + - 'candidate size [0-9]+ info size [0-9]+ mismatch' conf: osd: filestore debug inject read err: true diff --git a/ceph/qa/tasks/s3a_hadoop.py b/ceph/qa/tasks/s3a_hadoop.py index c01fe1dda..9f8bf299c 100644 --- a/ceph/qa/tasks/s3a_hadoop.py +++ b/ceph/qa/tasks/s3a_hadoop.py @@ -46,7 +46,7 @@ def task(ctx, config): # set versions for cloning the repo apache_maven = 'apache-maven-{maven_version}-bin.tar.gz'.format( maven_version=maven_version) - maven_link = 'http://mirror.jax.hugeserver.com/apache/maven/' + \ + maven_link = 'http://apache.mirrors.lucidnetworks.net/maven/' + \ '{maven_major}/{maven_version}/binaries/'.format(maven_major=maven_major, maven_version=maven_version) + apache_maven hadoop_git = 'https://github.com/apache/hadoop' hadoop_rel = 'hadoop-{ver} rel/release-{ver}'.format(ver=hadoop_ver) diff --git a/ceph/qa/tasks/scrub_test.py b/ceph/qa/tasks/scrub_test.py index a545c9b89..377158b7e 100644 --- a/ceph/qa/tasks/scrub_test.py +++ b/ceph/qa/tasks/scrub_test.py @@ -342,12 +342,13 @@ def task(ctx, config): - deep-scrub [0-9]+ errors - repair 0 missing, 1 inconsistent objects - repair [0-9]+ errors, [0-9]+ fixed - - shard [0-9]+ missing + - shard [0-9]+ .* : missing - deep-scrub 1 missing, 1 inconsistent objects - does not match object info size - attr name mistmatch - deep-scrub 1 missing, 0 inconsistent objects - failed to pick suitable auth object + - candidate size [0-9]+ info size [0-9]+ mismatch conf: osd: osd deep scrub update digest min age: 0 diff --git a/ceph/qa/tasks/util/workunit.py b/ceph/qa/tasks/util/workunit.py new file mode 100644 index 000000000..c5314330f --- /dev/null +++ b/ceph/qa/tasks/util/workunit.py @@ -0,0 +1,78 @@ +import copy + +from teuthology import misc +from teuthology.orchestra import run + +class Refspec: + def __init__(self, refspec): + self.refspec = refspec + + def __str__(self): + return self.refspec + + def _clone(self, git_url, clonedir, opts=None): + if opts is None: + opts = [] + return (['rm', '-rf', clonedir] + + [run.Raw('&&')] + + ['git', 'clone'] + opts + + [git_url, clonedir]) + + def _cd(self, clonedir): + return ['cd', clonedir] + + def _checkout(self): + return ['git', 'checkout', self.refspec] + + def clone(self, git_url, clonedir): + return (self._clone(git_url, clonedir) + 
+ [run.Raw('&&')] + + self._cd(clonedir) + + [run.Raw('&&')] + + self._checkout()) + + +class Branch(Refspec): + def __init__(self, tag): + Refspec.__init__(self, tag) + + def clone(self, git_url, clonedir): + opts = ['--depth', '1', + '--branch', self.refspec] + return (self._clone(git_url, clonedir, opts) + + [run.Raw('&&')] + + self._cd(clonedir)) + + +class Head(Refspec): + def __init__(self): + Refspec.__init__(self, 'HEAD') + + def clone(self, git_url, clonedir): + opts = ['--depth', '1'] + return (self._clone(git_url, clonedir, opts) + + [run.Raw('&&')] + + self._cd(clonedir)) + + +def get_refspec_after_overrides(config, overrides): + # mimic the behavior of the "install" task, where the "overrides" are + # actually the defaults of that task. in other words, if none of "sha1", + # "tag", or "branch" is specified by a "workunit" tasks, we will update + # it with the information in the "workunit" sub-task nested in "overrides". + overrides = copy.deepcopy(overrides.get('workunit', {})) + refspecs = {'suite_sha1': Refspec, 'suite_branch': Branch, + 'sha1': Refspec, 'tag': Refspec, 'branch': Branch} + if any(map(lambda i: i in config, refspecs.iterkeys())): + for i in refspecs.iterkeys(): + overrides.pop(i, None) + misc.deep_merge(config, overrides) + + for spec, cls in refspecs.iteritems(): + refspec = config.get(spec) + if refspec: + refspec = cls(refspec) + break + if refspec is None: + refspec = Head() + return refspec diff --git a/ceph/qa/tasks/vstart_runner.py b/ceph/qa/tasks/vstart_runner.py index e7f7f68f3..91afb7d0f 100644 --- a/ceph/qa/tasks/vstart_runner.py +++ b/ceph/qa/tasks/vstart_runner.py @@ -516,15 +516,13 @@ class LocalFuseMount(FuseMount): else: self._fuse_conn = new_conns[0] - def _run_python(self, pyscript): + def _run_python(self, pyscript, py_version='python'): """ Override this to remove the daemon-helper prefix that is used otherwise to make the process killable. 
""" - return self.client_remote.run(args=[ - 'python', '-c', pyscript - ], wait=False) - + return self.client_remote.run(args=[py_version, '-c', pyscript], + wait=False) class LocalCephManager(CephManager): def __init__(self): diff --git a/ceph/qa/tasks/workunit.py b/ceph/qa/tasks/workunit.py index f69b3960a..0a46ade76 100644 --- a/ceph/qa/tasks/workunit.py +++ b/ceph/qa/tasks/workunit.py @@ -6,8 +6,8 @@ import pipes import os import re -from copy import deepcopy from util import get_remote_for_role +from util.workunit import get_refspec_after_overrides from teuthology import misc from teuthology.config import config as teuth_config @@ -17,59 +17,6 @@ from teuthology.orchestra import run log = logging.getLogger(__name__) - -class Refspec: - def __init__(self, refspec): - self.refspec = refspec - - def __str__(self): - return self.refspec - - def _clone(self, git_url, clonedir, opts=None): - if opts is None: - opts = [] - return (['rm', '-rf', clonedir] + - [run.Raw('&&')] + - ['git', 'clone'] + opts + - [git_url, clonedir]) - - def _cd(self, clonedir): - return ['cd', clonedir] - - def _checkout(self): - return ['git', 'checkout', self.refspec] - - def clone(self, git_url, clonedir): - return (self._clone(git_url, clonedir) + - [run.Raw('&&')] + - self._cd(clonedir) + - [run.Raw('&&')] + - self._checkout()) - - -class Branch(Refspec): - def __init__(self, tag): - Refspec.__init__(self, tag) - - def clone(self, git_url, clonedir): - opts = ['--depth', '1', - '--branch', self.refspec] - return (self._clone(git_url, clonedir, opts) + - [run.Raw('&&')] + - self._cd(clonedir)) - - -class Head(Refspec): - def __init__(self): - Refspec.__init__(self, 'HEAD') - - def clone(self, git_url, clonedir): - opts = ['--depth', '1'] - return (self._clone(git_url, clonedir, opts) + - [run.Raw('&&')] + - self._cd(clonedir)) - - def task(ctx, config): """ Run ceph on all workunits found under the specified path. @@ -140,26 +87,10 @@ def task(ctx, config): assert isinstance(config.get('clients'), dict), \ 'configuration must contain a dictionary of clients' - # mimic the behavior of the "install" task, where the "overrides" are - # actually the defaults of that task. in other words, if none of "sha1", - # "tag", or "branch" is specified by a "workunit" tasks, we will update - # it with the information in the "workunit" sub-task nested in "overrides". 
- overrides = deepcopy(ctx.config.get('overrides', {}).get('workunit', {})) - refspecs = {'branch': Branch, 'tag': Refspec, 'sha1': Refspec} - if any(map(lambda i: i in config, refspecs.iterkeys())): - for i in refspecs.iterkeys(): - overrides.pop(i, None) - misc.deep_merge(config, overrides) - - for spec, cls in refspecs.iteritems(): - refspec = config.get(spec) - if refspec: - refspec = cls(refspec) - break - if refspec is None: - refspec = Head() - + overrides = ctx.config.get('overrides', {}) + refspec = get_refspec_after_overrides(config, overrides) timeout = config.get('timeout', '3h') + cleanup = config.get('cleanup', True) log.info('Pulling workunits from ref %s', refspec) @@ -181,24 +112,28 @@ def task(ctx, config): created_mountpoint[role] = created_mnt_dir # Execute any non-all workunits + log.info("timeout={}".format(timeout)) + log.info("cleanup={}".format(cleanup)) with parallel() as p: for role, tests in clients.iteritems(): if role != "all": p.spawn(_run_tests, ctx, refspec, role, tests, config.get('env'), basedir=config.get('basedir','qa/workunits'), - timeout=timeout) + timeout=timeout, cleanup=cleanup) - # Clean up dirs from any non-all workunits - for role, created in created_mountpoint.items(): - _delete_dir(ctx, role, created) + if cleanup: + # Clean up dirs from any non-all workunits + for role, created in created_mountpoint.items(): + _delete_dir(ctx, role, created) # Execute any 'all' workunits if 'all' in clients: all_tasks = clients["all"] _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'), config.get('basedir', 'qa/workunits'), - config.get('subdir'), timeout=timeout) + config.get('subdir'), timeout=timeout, + cleanup=cleanup) def _client_mountpoint(ctx, cluster, id_): @@ -326,7 +261,7 @@ def _make_scratch_dir(ctx, role, subdir): return created_mountpoint -def _spawn_on_all_clients(ctx, refspec, tests, env, basedir, subdir, timeout=None): +def _spawn_on_all_clients(ctx, refspec, tests, env, basedir, subdir, timeout=None, cleanup=True): """ Make a scratch directory for each client in the cluster, and then for each test spawn _run_tests() for each role. @@ -350,13 +285,14 @@ def _spawn_on_all_clients(ctx, refspec, tests, env, basedir, subdir, timeout=Non subdir, timeout=timeout) - # cleanup the generated client directories - for role, _ in client_remotes.items(): - _delete_dir(ctx, role, created_mountpoint[role]) + if cleanup: + # cleanup the generated client directories + for role, _ in client_remotes.items(): + _delete_dir(ctx, role, created_mountpoint[role]) def _run_tests(ctx, refspec, role, tests, env, basedir, - subdir=None, timeout=None): + subdir=None, timeout=None, cleanup=True): """ Run the individual test. Create a scratch directory and then extract the workunits from git. Make the executables, and then run the tests. @@ -472,15 +408,14 @@ def _run_tests(ctx, refspec, role, tests, env, basedir, args=args, label="workunit test {workunit}".format(workunit=workunit) ) - remote.run( - logger=log.getChild(role), - args=['sudo', 'rm', '-rf', '--', scratch_tmp], - ) + if cleanup: + args=['sudo', 'rm', '-rf', '--', scratch_tmp] + remote.run(logger=log.getChild(role), args=args, timeout=(15*60)) finally: log.info('Stopping %s on %s...', tests, role) + args=['sudo', 'rm', '-rf', '--', workunits_file, clonedir] + # N.B. don't cleanup scratch_tmp! If the mount is broken then rm will hang. 
remote.run( logger=log.getChild(role), - args=[ - 'rm', '-rf', '--', workunits_file, clonedir, - ], + args=args, ) diff --git a/ceph/qa/workunits/rados/test_librados_build.sh b/ceph/qa/workunits/rados/test_librados_build.sh new file mode 100755 index 000000000..43ded25b2 --- /dev/null +++ b/ceph/qa/workunits/rados/test_librados_build.sh @@ -0,0 +1,64 @@ +#!/bin/bash -ex +# +# Compile and run a librados application outside of the ceph build system, so +# that we can be sure librados.h[pp] is still usable and hasn't accidentally +# started depending on internal headers. +# +# The script assumes all dependencies - e.g. curl, make, gcc, librados headers, +# libradosstriper headers, boost headers, etc. - are already installed. +# + +trap cleanup EXIT + +SOURCES="hello_radosstriper.cc +hello_world_c.c +hello_world.cc +Makefile +" +BINARIES_TO_RUN="hello_world_c +hello_world_cpp +" +BINARIES="${BINARIES_TO_RUN}hello_radosstriper_cpp +" +DL_PREFIX="http://git.ceph.com/?p=ceph.git;a=blob_plain;f=examples/librados/" +#DL_PREFIX="https://raw.githubusercontent.com/ceph/ceph/master/examples/librados/" +DESTDIR=$(pwd) + +function cleanup () { + for f in $BINARIES$SOURCES ; do + rm -f "${DESTDIR}/$f" + done +} + +function get_sources () { + for s in $SOURCES ; do + curl --progress-bar --output $s ${DL_PREFIX}$s + done +} + +function check_sources () { + for s in $SOURCES ; do + test -f $s + done +} + +function check_binaries () { + for b in $BINARIES ; do + file $b + test -f $b + done +} + +function run_binaries () { + for b in $BINARIES_TO_RUN ; do + ./$b -c /etc/ceph/ceph.conf + done +} + +pushd $DESTDIR +get_sources +check_sources +make all-system +check_binaries +run_binaries +popd diff --git a/ceph/qa/workunits/rbd/verify_pool.sh b/ceph/qa/workunits/rbd/verify_pool.sh index f008fb6b3..65a61199b 100755 --- a/ceph/qa/workunits/rbd/verify_pool.sh +++ b/ceph/qa/workunits/rbd/verify_pool.sh @@ -20,8 +20,8 @@ set_up # creating an image in a pool-managed snapshot pool should fail rbd create --pool $POOL_NAME --size 1 foo && exit 1 || true -# should succeed if images already exist in the pool -rados --pool $POOL_NAME create rbd_directory +# should succeed if the pool already marked as validated +printf "overwrite validated" | rados --pool $POOL_NAME put rbd_info - rbd create --pool $POOL_NAME --size 1 foo echo OK diff --git a/ceph/qa/workunits/suites/fsstress.sh b/ceph/qa/workunits/suites/fsstress.sh index 92e123b99..e5da5b439 100755 --- a/ceph/qa/workunits/suites/fsstress.sh +++ b/ceph/qa/workunits/suites/fsstress.sh @@ -1,20 +1,17 @@ #!/bin/bash -BIN_PATH=${TESTDIR}/fsstress/ltp-full-20091231/testcases/kernel/fs/fsstress/fsstress +set -ex -path=`pwd` -trap "rm -rf ${TESTDIR}/fsstress" EXIT -mkdir -p ${TESTDIR}/fsstress -cd ${TESTDIR}/fsstress -wget -q -O ${TESTDIR}/fsstress/ltp-full.tgz http://download.ceph.com/qa/ltp-full-20091231.tgz -tar xzf ${TESTDIR}/fsstress/ltp-full.tgz -rm ${TESTDIR}/fsstress/ltp-full.tgz -cd ${TESTDIR}/fsstress/ltp-full-20091231/testcases/kernel/fs/fsstress +mkdir -p fsstress +pushd fsstress +wget -q -O ltp-full.tgz http://download.ceph.com/qa/ltp-full-20091231.tgz +tar xzf ltp-full.tgz +pushd ltp-full-20091231/testcases/kernel/fs/fsstress make -cd $path +BIN=$(readlink -f fsstress) +popd +popd -command="${BIN_PATH} -d fsstress-`hostname`$$ -l 1 -n 1000 -p 10 -v" - -echo "Starting fsstress $command" -mkdir fsstress`hostname`-$$ -$command +T=$(mktemp -d -p .) 
+"$BIN" -d "$T" -l 1 -n 1000 -p 10 -v +rm -rf -- "$T" diff --git a/ceph/selinux/ceph.fc b/ceph/selinux/ceph.fc index df47fe10b..b942dd704 100644 --- a/ceph/selinux/ceph.fc +++ b/ceph/selinux/ceph.fc @@ -4,6 +4,7 @@ /usr/bin/ceph-mgr -- gen_context(system_u:object_r:ceph_exec_t,s0) /usr/bin/ceph-mon -- gen_context(system_u:object_r:ceph_exec_t,s0) /usr/bin/ceph-mds -- gen_context(system_u:object_r:ceph_exec_t,s0) +/usr/bin/ceph-fuse -- gen_context(system_u:object_r:ceph_exec_t,s0) /usr/bin/ceph-osd -- gen_context(system_u:object_r:ceph_exec_t,s0) /usr/bin/radosgw -- gen_context(system_u:object_r:ceph_exec_t,s0) diff --git a/ceph/src/.git_version b/ceph/src/.git_version index 9704f6797..fc407817f 100644 --- a/ceph/src/.git_version +++ b/ceph/src/.git_version @@ -1,2 +1,2 @@ -ae699615bac534ea496ee965ac6192cb7e0e07c0 -v12.2.8 +177915764b752804194937482a39e95e0ca3de94 +v12.2.10 diff --git a/ceph/src/CMakeLists.txt b/ceph/src/CMakeLists.txt index 80d4b351f..b0837ab1d 100644 --- a/ceph/src/CMakeLists.txt +++ b/ceph/src/CMakeLists.txt @@ -493,7 +493,6 @@ set(libcommon_files osd/OpRequest.cc common/blkdev.cc common/common_init.cc - common/pipe.c common/ceph_argparse.cc common/ceph_context.cc common/code_environment.cc @@ -537,6 +536,7 @@ set(libcommon_files common/dns_resolve.cc common/hostname.cc common/util.cc + common/PriorityCache.cc arch/probe.cc ${auth_files} ${mds_files}) @@ -663,6 +663,20 @@ target_link_libraries(ceph-common ${ceph_common_deps}) set_target_properties(ceph-common PROPERTIES SOVERSION 0 INSTALL_RPATH "") +if(NOT APPLE) + # Apple uses Mach-O, not ELF. so this option does not apply to APPLE. + # + # prefer the local symbol definitions when binding references to global + # symbols. otherwise we could reference the symbols defined by the application + # with the same name, instead of using the one defined in libceph-common. + # in other words, we require libceph-common to use local symbols, even if redefined + # in application". + set_property( + TARGET ceph-common + APPEND APPEND_STRING + PROPERTY LINK_FLAGS "-Wl,-Bsymbolic -Wl,-Bsymbolic-functions") +endif() + install(TARGETS ceph-common DESTINATION ${CMAKE_INSTALL_PKGLIBDIR}) add_library(common_utf8 STATIC common/utf8.c) diff --git a/ceph/src/auth/Crypto.cc b/ceph/src/auth/Crypto.cc index 0186b7b22..150052bfe 100644 --- a/ceph/src/auth/Crypto.cc +++ b/ceph/src/auth/Crypto.cc @@ -12,6 +12,9 @@ */ #include +#include +#include + #include "Crypto.h" #ifdef USE_CRYPTOPP # include @@ -37,7 +40,7 @@ int get_random_bytes(char *buf, int len) { - int fd = TEMP_FAILURE_RETRY(::open("/dev/urandom", O_RDONLY)); + int fd = TEMP_FAILURE_RETRY(::open("/dev/urandom", O_RDONLY|O_CLOEXEC)); if (fd < 0) return -errno; int ret = safe_read_exact(fd, buf, len); diff --git a/ceph/src/ceph-disk/ceph_disk/main.py b/ceph/src/ceph-disk/ceph_disk/main.py index 0058f1ac2..73f26ce17 100644 --- a/ceph/src/ceph-disk/ceph_disk/main.py +++ b/ceph/src/ceph-disk/ceph_disk/main.py @@ -898,8 +898,8 @@ def is_mounted(dev): """ Check if the given device is mounted. 
""" - dev = os.path.realpath(dev) - with open(PROCDIR + '/mounts', 'rb') as proc_mounts: + dev = os.path.realpath(_bytes2str(dev)) + with open(PROCDIR + '/mounts', 'r') as proc_mounts: for line in proc_mounts: fields = line.split() if len(fields) < 3: diff --git a/ceph/src/ceph-volume/bin/ceph-volume b/ceph/src/ceph-volume/bin/ceph-volume deleted file mode 100755 index 5905cfccc..000000000 --- a/ceph/src/ceph-volume/bin/ceph-volume +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python - -from ceph_volume import main - -if __name__ == '__main__': - main.Volume() diff --git a/ceph/src/ceph-volume/bin/ceph-volume-systemd b/ceph/src/ceph-volume/bin/ceph-volume-systemd deleted file mode 100755 index 7da8ec6b1..000000000 --- a/ceph/src/ceph-volume/bin/ceph-volume-systemd +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python - -from ceph_volume.systemd import main - -if __name__ == '__main__': - main.main() diff --git a/ceph/src/ceph-volume/ceph_volume/api/lvm.py b/ceph/src/ceph-volume/ceph_volume/api/lvm.py index e766671b3..aed4a8f64 100644 --- a/ceph/src/ceph-volume/ceph_volume/api/lvm.py +++ b/ceph/src/ceph-volume/ceph_volume/api/lvm.py @@ -293,7 +293,8 @@ def get_api_vgs(): """ fields = 'vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free,vg_free_count' stdout, stderr, returncode = process.call( - ['vgs', '--noheadings', '--readonly', '--units=g', '--separator=";"', '-o', fields] + ['vgs', '--noheadings', '--readonly', '--units=g', '--separator=";"', '-o', fields], + verbose_on_failure=False ) return _output_parser(stdout, fields) @@ -312,7 +313,8 @@ def get_api_lvs(): """ fields = 'lv_tags,lv_path,lv_name,vg_name,lv_uuid,lv_size' stdout, stderr, returncode = process.call( - ['lvs', '--noheadings', '--readonly', '--separator=";"', '-o', fields] + ['lvs', '--noheadings', '--readonly', '--separator=";"', '-o', fields], + verbose_on_failure=False ) return _output_parser(stdout, fields) @@ -334,7 +336,8 @@ def get_api_pvs(): fields = 'pv_name,pv_tags,pv_uuid,vg_name,lv_uuid' stdout, stderr, returncode = process.call( - ['pvs', '--no-heading', '--readonly', '--separator=";"', '-o', fields] + ['pvs', '--no-heading', '--readonly', '--separator=";"', '-o', fields], + verbose_on_failure=False ) return _output_parser(stdout, fields) @@ -477,7 +480,17 @@ def remove_vg(vg_name): def remove_pv(pv_name): """ - Removes a physical volume. + Removes a physical volume using a double `-f` to prevent prompts and fully + remove anything related to LVM. This is tremendously destructive, but so is all other actions + when zapping a device. + + In the case where multiple PVs are found, it will ignore that fact and + continue with the removal, specifically in the case of messages like:: + + WARNING: PV $UUID /dev/DEV-1 was already found on /dev/DEV-2 + + These situations can be avoided with custom filtering rules, which this API + cannot handle while accommodating custom user filters. """ fail_msg = "Unable to remove vg %s" % pv_name process.run( @@ -485,19 +498,27 @@ def remove_pv(pv_name): 'pvremove', '-v', # verbose '-f', # force it + '-f', # force it pv_name ], fail_msg=fail_msg, ) -def remove_lv(path): +def remove_lv(lv): """ Removes a logical volume given it's absolute path. Will return True if the lv is successfully removed or raises a RuntimeError if the removal fails. 
+ + :param lv: A ``Volume`` object or the path for an LV """ + if isinstance(lv, Volume): + path = lv.lv_path + else: + path = lv + stdout, stderr, returncode = process.call( [ 'lvremove', @@ -1067,6 +1088,7 @@ class Volume(object): self.name = kw['lv_name'] self.tags = parse_tags(kw['lv_tags']) self.encrypted = self.tags.get('ceph.encrypted', '0') == '1' + self.used_by_ceph = 'ceph.osd_id' in self.tags def __str__(self): return '<%s>' % self.lv_api['lv_path'] @@ -1083,6 +1105,26 @@ class Volume(object): obj['path'] = self.lv_path return obj + def report(self): + if not self.used_by_ceph: + return { + 'name': self.lv_name, + 'comment': 'not used by ceph' + } + else: + type_ = self.tags['ceph.type'] + report = { + 'name': self.lv_name, + 'osd_id': self.tags['ceph.osd_id'], + 'cluster_name': self.tags['ceph.cluster_name'], + 'type': type_, + 'osd_fsid': self.tags['ceph.osd_fsid'], + 'cluster_fsid': self.tags['ceph.cluster_fsid'], + } + type_uuid = '{}_uuid'.format(type_) + report[type_uuid] = self.tags['ceph.{}'.format(type_uuid)] + return report + def clear_tags(self): """ Removes all tags from the Logical Volume. diff --git a/ceph/src/ceph-volume/ceph_volume/configuration.py b/ceph/src/ceph-volume/ceph_volume/configuration.py index 2b9cd9fdc..6379ef67a 100644 --- a/ceph/src/ceph-volume/ceph_volume/configuration.py +++ b/ceph/src/ceph-volume/ceph_volume/configuration.py @@ -6,7 +6,7 @@ import contextlib import logging import os import re -from ceph_volume import terminal +from ceph_volume import terminal, conf from ceph_volume import exceptions @@ -31,7 +31,16 @@ class _TrimIndentFile(object): return iter(self.readline, '') +def load_ceph_conf_path(cluster_name='ceph'): + abspath = '/etc/ceph/%s.conf' % cluster_name + conf.path = os.getenv('CEPH_CONF', abspath) + conf.cluster = cluster_name + + def load(abspath=None): + if abspath is None: + abspath = conf.path + if not os.path.exists(abspath): raise exceptions.ConfigurationError(abspath=abspath) @@ -42,6 +51,7 @@ def load(abspath=None): trimmed_conf = _TrimIndentFile(ceph_file) with contextlib.closing(ceph_file): parser.readfp(trimmed_conf) + conf.ceph = parser return parser except configparser.ParsingError as error: logger.exception('Unable to parse INI-style file: %s' % abspath) diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/activate.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/activate.py index 782a6d169..852c314c2 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/activate.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/activate.py @@ -3,7 +3,7 @@ import argparse import logging import os from textwrap import dedent -from ceph_volume import process, conf, decorators, terminal, __release__ +from ceph_volume import process, conf, decorators, terminal, __release__, configuration from ceph_volume.util import system, disk from ceph_volume.util import prepare as prepare_utils from ceph_volume.util import encryption as encryption_utils @@ -24,7 +24,8 @@ def activate_filestore(lvs, no_systemd=False): is_vdo = osd_lv.tags.get('ceph.vdo', '0') osd_id = osd_lv.tags['ceph.osd_id'] - conf.cluster = osd_lv.tags['ceph.cluster_name'] + configuration.load_ceph_conf_path(osd_lv.tags['ceph.cluster_name']) + configuration.load() # it may have a volume with a journal osd_journal_lv = lvs.get(lv_tags={'ceph.type': 'journal'}) # TODO: add sensible error reporting if this is ever the case @@ -249,9 +250,9 @@ class Activate(object): has_journal = lv.tags.get('ceph.journal_uuid') if has_journal: logger.info('found a journal 
associated with the OSD, assuming filestore') - return activate_filestore(lvs) + return activate_filestore(lvs, no_systemd=args.no_systemd) logger.info('unable to find a journal associated with the OSD, assuming bluestore') - return activate_bluestore(lvs) + return activate_bluestore(lvs, no_systemd=args.no_systemd) if args.bluestore: activate_bluestore(lvs, no_systemd=args.no_systemd) elif args.filestore: diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py index 4086064f5..cce58b166 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py @@ -1,10 +1,14 @@ import argparse +import logging from textwrap import dedent from ceph_volume import terminal, decorators from ceph_volume.util import disk, prompt_bool from ceph_volume.util import arg_validators from . import strategies +mlogger = terminal.MultiLogger(__name__) +logger = logging.getLogger(__name__) + device_list_template = """ * {path: <25} {size: <10} {state}""" @@ -62,7 +66,7 @@ def filestore_mixed_type(device_facts): return strategies.filestore.MixedType -def get_strategy(args): +def get_strategy(args, devices): """ Given a set of devices as input, go through the different detection mechanisms to narrow down on a strategy to use. The strategies are 4 in @@ -85,9 +89,31 @@ def get_strategy(args): strategies = filestore_strategies for strategy in strategies: - backend = strategy(args.devices) + backend = strategy(devices) if backend: - return backend(args.devices, args) + return backend + + +def filter_devices(args): + unused_devices = [device for device in args.devices if not device.used_by_ceph] + # only data devices, journals can be reused + used_devices = [device.abspath for device in args.devices if device.used_by_ceph] + args.filtered_devices = {} + if used_devices: + for device in used_devices: + args.filtered_devices[device] = {"reasons": ["Used by ceph as a data device already"]} + logger.info("Ignoring devices already used by ceph: %s" % ", ".join(used_devices)) + if len(unused_devices) == 1: + last_device = unused_devices[0] + if not last_device.rotational and last_device.is_lvm_member: + reason = "Used by ceph as a %s already and there are no devices left for data/block" % ( + last_device.lvs[0].tags.get("ceph.type"), + ) + args.filtered_devices[last_device.abspath] = {"reasons": [reason]} + logger.info(reason + ": %s" % last_device.abspath) + unused_devices = [] + + return unused_devices class Batch(object): @@ -128,7 +154,7 @@ class Batch(object): ) def report(self, args): - strategy = get_strategy(args) + strategy = self._get_strategy(args) if args.format == 'pretty': strategy.report_pretty() elif args.format == 'json': @@ -137,16 +163,32 @@ class Batch(object): raise RuntimeError('report format must be "pretty" or "json"') def execute(self, args): - strategy = get_strategy(args) + strategy = self._get_strategy(args) if not args.yes: strategy.report_pretty() terminal.info('The above OSDs would be created if the operation continues') if not prompt_bool('do you want to proceed? 
(yes/no)'): - terminal.error('aborting OSD provisioning for %s' % ','.join(args.devices)) + devices = ','.join([device.abspath for device in args.devices]) + terminal.error('aborting OSD provisioning for %s' % devices) raise SystemExit(0) strategy.execute() + def _get_strategy(self, args): + strategy = get_strategy(args, args.devices) + unused_devices = filter_devices(args) + if not unused_devices and not args.format == 'json': + # report nothing changed + mlogger.info("All devices are already used by ceph. No OSDs will be created.") + raise SystemExit(0) + else: + new_strategy = get_strategy(args, unused_devices) + if new_strategy and strategy != new_strategy: + mlogger.error("Aborting because strategy changed from %s to %s after filtering" % (strategy.type(), new_strategy.type())) + raise SystemExit(1) + + return strategy(unused_devices, args) + @decorators.needs_root def main(self): parser = argparse.ArgumentParser( @@ -205,6 +247,27 @@ class Batch(object): action='store_true', help='Skip creating and enabling systemd units and starting OSD services', ) + parser.add_argument( + '--osds-per-device', + type=int, + default=1, + help='Provision more than 1 (the default) OSD per device', + ) + parser.add_argument( + '--block-db-size', + type=int, + help='Set (or override) the "bluestore_block_db_size" value, in bytes' + ) + parser.add_argument( + '--journal-size', + type=int, + help='Override the "osd_journal_size" value, in megabytes' + ) + parser.add_argument( + '--prepare', + action='store_true', + help='Only prepare all OSDs, do not activate', + ) args = parser.parse_args(self.argv) if not args.devices: diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/common.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/common.py index 332398972..082222cb7 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/common.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/common.py @@ -56,7 +56,7 @@ def common_parser(prog, description): required_group.add_argument( '--data', required=True, - type=arg_validators.LVPath(), + type=arg_validators.ValidDevice(as_string=True), help='OSD data path. A physical device or logical volume', ) @@ -99,6 +99,11 @@ def common_parser(prog, description): help='Reuse an existing OSD fsid', ) + parser.add_argument( + '--cluster-fsid', + help='Specify the cluster fsid, useful when no ceph.conf is available', + ) + parser.add_argument( '--crush-device-class', dest='crush_device_class', diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/create.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/create.py index 0f972f9d8..a406cbd9a 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/create.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/create.py @@ -44,18 +44,13 @@ class Create(object): all the metadata to the logical volumes using LVM tags, and starting the OSD daemon. 
- Example calls for supported scenarios: + Existing logical volume (lv) or device: - Filestore - --------- + ceph-volume lvm create --data {vg name/lv name} --journal /path/to/device - Existing logical volume (lv) or device: + Or: - ceph-volume lvm create --filestore --data {vg name/lv name} --journal /path/to/device - - Or: - - ceph-volume lvm create --filestore --data {vg name/lv name} --journal {vg name/lv name} + ceph-volume lvm create --data {vg name/lv name} --journal {vg name/lv name} """) parser = create_parser( diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py index aedb71ed5..d2cd3547d 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py @@ -205,42 +205,58 @@ class Prepare(object): raise RuntimeError('no data logical volume found with: %s' % arg) - def safe_prepare(self, args): + def safe_prepare(self, args=None): """ An intermediate step between `main()` and `prepare()` so that we can capture the `self.osd_id` in case we need to rollback + + :param args: Injected args, usually from `lvm create` which compounds + both `prepare` and `create` """ + if args is not None: + self.args = args try: - self.prepare(args) + self.prepare() except Exception: logger.exception('lvm prepare was unable to complete') logger.info('will rollback OSD ID creation') - rollback_osd(args, self.osd_id) + rollback_osd(self.args, self.osd_id) raise - terminal.success("ceph-volume lvm prepare successful for: %s" % args.data) + terminal.success("ceph-volume lvm prepare successful for: %s" % self.args.data) + + def get_cluster_fsid(self): + """ + Allows using --cluster-fsid as an argument, but can fallback to reading + from ceph.conf if that is unset (the default behavior). + """ + if self.args.cluster_fsid: + return self.args.cluster_fsid + else: + return conf.ceph.get('global', 'fsid') @decorators.needs_root - def prepare(self, args): + def prepare(self): # FIXME we don't allow re-using a keyring, we always generate one for the # OSD, this needs to be fixed. This could either be a file (!) or a string # (!!) or some flags that we would need to compound into a dict so that we # can convert to JSON (!!!) 
secrets = {'cephx_secret': prepare_utils.create_key()} cephx_lockbox_secret = '' - encrypted = 1 if args.dmcrypt else 0 + encrypted = 1 if self.args.dmcrypt else 0 cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key() if encrypted: secrets['dmcrypt_key'] = encryption_utils.create_dmcrypt_key() secrets['cephx_lockbox_secret'] = cephx_lockbox_secret - cluster_fsid = conf.ceph.get('global', 'fsid') - osd_fsid = args.osd_fsid or system.generate_uuid() - crush_device_class = args.crush_device_class + cluster_fsid = self.get_cluster_fsid() + + osd_fsid = self.args.osd_fsid or system.generate_uuid() + crush_device_class = self.args.crush_device_class if crush_device_class: secrets['crush_device_class'] = crush_device_class # reuse a given ID if it exists, otherwise create a new ID - self.osd_id = prepare_utils.create_id(osd_fsid, json.dumps(secrets), osd_id=args.osd_id) + self.osd_id = prepare_utils.create_id(osd_fsid, json.dumps(secrets), osd_id=self.args.osd_id) tags = { 'ceph.osd_fsid': osd_fsid, 'ceph.osd_id': self.osd_id, @@ -248,13 +264,13 @@ class Prepare(object): 'ceph.cluster_name': conf.cluster, 'ceph.crush_device_class': crush_device_class, } - if args.filestore: - if not args.journal: + if self.args.filestore: + if not self.args.journal: raise RuntimeError('--journal is required when using --filestore') - data_lv = self.get_lv(args.data) + data_lv = self.get_lv(self.args.data) if not data_lv: - data_lv = self.prepare_device(args.data, 'data', cluster_fsid, osd_fsid) + data_lv = self.prepare_device(self.args.data, 'data', cluster_fsid, osd_fsid) tags['ceph.data_device'] = data_lv.lv_path tags['ceph.data_uuid'] = data_lv.lv_uuid @@ -262,7 +278,9 @@ class Prepare(object): tags['ceph.encrypted'] = encrypted tags['ceph.vdo'] = api.is_vdo(data_lv.lv_path) - journal_device, journal_uuid, tags = self.setup_device('journal', args.journal, tags) + journal_device, journal_uuid, tags = self.setup_device( + 'journal', self.args.journal, tags + ) tags['ceph.type'] = 'data' data_lv.set_tags(tags) @@ -275,10 +293,10 @@ class Prepare(object): self.osd_id, osd_fsid, ) - elif args.bluestore: - block_lv = self.get_lv(args.data) + elif self.args.bluestore: + block_lv = self.get_lv(self.args.data) if not block_lv: - block_lv = self.prepare_device(args.data, 'block', cluster_fsid, osd_fsid) + block_lv = self.prepare_device(self.args.data, 'block', cluster_fsid, osd_fsid) tags['ceph.block_device'] = block_lv.lv_path tags['ceph.block_uuid'] = block_lv.lv_uuid @@ -286,8 +304,8 @@ class Prepare(object): tags['ceph.encrypted'] = encrypted tags['ceph.vdo'] = api.is_vdo(block_lv.lv_path) - wal_device, wal_uuid, tags = self.setup_device('wal', args.block_wal, tags) - db_device, db_uuid, tags = self.setup_device('db', args.block_db, tags) + wal_device, wal_uuid, tags = self.setup_device('wal', self.args.block_wal, tags) + db_device, db_uuid, tags = self.setup_device('db', self.args.block_db, tags) tags['ceph.type'] = 'block' block_lv.set_tags(tags) @@ -312,43 +330,19 @@ class Prepare(object): Once the OSD is ready, an ad-hoc systemd unit will be enabled so that it can later get activated and the OSD daemon can get started. - Most basic Usage looks like (journal will be collocated from the same volume group): - - ceph-volume lvm prepare --data {volume group name} - Encryption is supported via dmcrypt and the --dmcrypt flag. 
- Example calls for supported scenarios: - - Dedicated volume group for Journal(s) - ------------------------------------- - - Existing logical volume (lv) or device: - - ceph-volume lvm prepare --filestore --data {vg/lv} --journal /path/to/device - - Or: - - ceph-volume lvm prepare --filestore --data {vg/lv} --journal {vg/lv} - - Existing block device, that will be made a group and logical volume: - - ceph-volume lvm prepare --filestore --data /path/to/device --journal {vg/lv} - - Bluestore - --------- - - Existing logical volume (lv): + Existing logical volume (lv): - ceph-volume lvm prepare --bluestore --data {vg/lv} + ceph-volume lvm prepare --data {vg/lv} - Existing block device, that will be made a group and logical volume: + Existing block device, that will be made a group and logical volume: - ceph-volume lvm prepare --bluestore --data /path/to/device + ceph-volume lvm prepare --data /path/to/device - Optionally, can consume db and wal devices or logical volumes: + Optionally, can consume db and wal partitions or logical volumes: - ceph-volume lvm prepare --bluestore --data {vg/lv} --block.wal {device} --block-db {vg/lv} + ceph-volume lvm prepare --data {vg/lv} --block.wal {partition} --block.db {vg/lv} """) parser = prepare_parser( prog='ceph-volume lvm prepare', @@ -358,9 +352,14 @@ class Prepare(object): print(sub_command_help) return exclude_group_options(parser, argv=self.argv, groups=['filestore', 'bluestore']) - args = parser.parse_args(self.argv) + self.args = parser.parse_args(self.argv) + # the unfortunate mix of one superset for both filestore and bluestore + # makes this validation cumbersome + if self.args.filestore: + if not self.args.journal: + raise SystemExit('--journal is required when using --filestore') # Default to bluestore here since defaulting it in add_argument may # cause both to be True - if not args.bluestore and not args.filestore: - args.bluestore = True - self.safe_prepare(args) + if not self.args.bluestore and not self.args.filestore: + self.args.bluestore = True + self.safe_prepare(self.args) diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py index 7b6052607..92dc3a2e9 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py @@ -1,11 +1,12 @@ from __future__ import print_function import json -from uuid import uuid4 -from ceph_volume.util import disk +from ceph_volume.util import disk, prepare from ceph_volume.api import lvm from . 
import validators from ceph_volume.devices.lvm.create import Create +from ceph_volume.devices.lvm.prepare import Prepare from ceph_volume.util import templates +from ceph_volume.exceptions import SizeAllocationError class SingleType(object): @@ -15,20 +16,38 @@ class SingleType(object): def __init__(self, devices, args): self.args = args + self.osds_per_device = args.osds_per_device self.devices = devices + # TODO: add --fast-devices and --slow-devices so these can be customized self.hdds = [device for device in devices if device.sys_api['rotational'] == '1'] self.ssds = [device for device in devices if device.sys_api['rotational'] == '0'] - self.computed = {'osds': [], 'vgs': []} - self.validate() - self.compute() + self.computed = {'osds': [], 'vgs': [], 'filtered_devices': args.filtered_devices} + if self.devices: + self.validate() + self.compute() + else: + self.computed["changed"] = False + + @staticmethod + def type(): + return "bluestore.SingleType" + + @property + def total_osds(self): + if self.hdds: + return len(self.hdds) * self.osds_per_device + else: + return len(self.ssds) * self.osds_per_device def report_json(self): print(json.dumps(self.computed, indent=4, sort_keys=True)) def report_pretty(self): string = "" + if self.args.filtered_devices: + string += templates.filtered_devices(self.args.filtered_devices) string += templates.total_osds.format( - total_osds=len(self.hdds) or len(self.ssds) * 2 + total_osds=self.total_osds, ) string += templates.osd_component_titles @@ -49,7 +68,12 @@ class SingleType(object): met, raise an error if the provided devices would not work """ # validate minimum size for all devices - validators.minimum_device_size(self.devices) + validators.minimum_device_size( + self.devices, osds_per_device=self.osds_per_device + ) + + # make sure that data devices do not have any LVs + validators.no_lvm_membership(self.hdds) def compute(self): """ @@ -57,30 +81,31 @@ class SingleType(object): a dictionary with the result """ osds = self.computed['osds'] - vgs = self.computed['vgs'] for device in self.hdds: - vgs.append({'devices': [device.abspath], 'parts': 1}) - osd = {'data': {}, 'block.db': {}} - osd['data']['path'] = device.abspath - osd['data']['size'] = device.sys_api['size'] - osd['data']['parts'] = 1 - osd['data']['percentage'] = 100 - osd['data']['human_readable_size'] = str(disk.Size(b=device.sys_api['size'])) - osds.append(osd) + for hdd in range(self.osds_per_device): + osd = {'data': {}, 'block.db': {}} + osd['data']['path'] = device.abspath + osd['data']['size'] = device.sys_api['size'] / self.osds_per_device + osd['data']['parts'] = self.osds_per_device + osd['data']['percentage'] = 100 / self.osds_per_device + osd['data']['human_readable_size'] = str( + disk.Size(b=device.sys_api['size']) / self.osds_per_device + ) + osds.append(osd) for device in self.ssds: - # TODO: creates 2 OSDs per device, make this configurable (env var?) 
- extents = lvm.sizing(device.sys_api['size'], parts=2) - vgs.append({'devices': [device.abspath], 'parts': 2}) - for ssd in range(2): + extents = lvm.sizing(device.sys_api['size'], parts=self.osds_per_device) + for ssd in range(self.osds_per_device): osd = {'data': {}, 'block.db': {}} osd['data']['path'] = device.abspath osd['data']['size'] = extents['sizes'] osd['data']['parts'] = extents['parts'] - osd['data']['percentage'] = 50 + osd['data']['percentage'] = 100 / self.osds_per_device osd['data']['human_readable_size'] = str(disk.Size(b=extents['sizes'])) osds.append(osd) + self.computed['changed'] = len(osds) > 0 + def execute(self): """ Create vgs/lvs from the incoming set of devices, assign their roles @@ -110,7 +135,10 @@ class SingleType(object): if self.args.crush_device_class: command.extend(['--crush-device-class', self.args.crush_device_class]) - Create(command).main() + if self.args.prepare: + Prepare(command).main() + else: + Create(command).main() class MixedType(object): @@ -118,34 +146,51 @@ class MixedType(object): def __init__(self, devices, args): self.args = args self.devices = devices + self.osds_per_device = args.osds_per_device + # TODO: add --fast-devices and --slow-devices so these can be customized self.hdds = [device for device in devices if device.sys_api['rotational'] == '1'] self.ssds = [device for device in devices if device.sys_api['rotational'] == '0'] - self.computed = {'osds': [], 'vgs': []} - self.block_db_size = None - # For every HDD we get 1 block.db - self.db_lvs = len(self.hdds) - self.validate() - self.compute() + self.computed = {'osds': [], 'filtered_devices': args.filtered_devices} + self.block_db_size = self.get_block_size() + self.system_vgs = lvm.VolumeGroups() + self.dbs_needed = len(self.hdds) * self.osds_per_device + if self.devices: + self.validate() + self.compute() + else: + self.computed["changed"] = False + + @staticmethod + def type(): + return "bluestore.MixedType" def report_json(self): print(json.dumps(self.computed, indent=4, sort_keys=True)) + def get_block_size(self): + if self.args.block_db_size: + return disk.Size(b=self.args.block_db_size) + else: + return prepare.get_block_db_size(lv_format=False) or disk.Size(b=0) + def report_pretty(self): - vg_extents = lvm.sizing(self.total_ssd_size.b, parts=self.db_lvs) + vg_extents = lvm.sizing(self.total_available_db_space.b, parts=self.dbs_needed) db_size = str(disk.Size(b=(vg_extents['sizes']))) string = "" + if self.args.filtered_devices: + string += templates.filtered_devices(self.args.filtered_devices) string += templates.total_osds.format( - total_osds=len(self.hdds) + total_osds=len(self.hdds) * self.osds_per_device ) string += templates.ssd_volume_group.format( target='block.db', - total_lv_size=str(self.total_ssd_size), - total_lvs=vg_extents['parts'], + total_lv_size=str(self.total_available_db_space), + total_lvs=vg_extents['parts'] * self.osds_per_device, block_lv_size=db_size, block_db_devices=', '.join([ssd.abspath for ssd in self.ssds]), - lv_size=str(disk.Size(b=(vg_extents['sizes']))), + lv_size=self.block_db_size or str(disk.Size(b=(vg_extents['sizes']))), total_osds=len(self.hdds) ) @@ -160,7 +205,7 @@ class MixedType(object): string += templates.osd_component.format( _type='[block.db]', - path='(volume-group/lv)', + path=osd['block.db']['path'], size=osd['block.db']['human_readable_size'], percent=osd['block.db']['percentage']) @@ -168,27 +213,44 @@ class MixedType(object): def compute(self): osds = self.computed['osds'] + + # unconfigured block db size will be 
0, so set it back to using as much + # as possible from looking at extents + if self.block_db_size.b == 0: + self.block_db_size = disk.Size(b=self.vg_extents['sizes']) + + if not self.common_vg: + # there isn't a common vg, so a new one must be created with all + # the blank SSDs + self.computed['vg'] = { + 'devices': ", ".join([ssd.abspath for ssd in self.blank_ssds]), + 'parts': self.dbs_needed, + 'percentages': self.vg_extents['percentages'], + 'sizes': self.block_db_size.b.as_int(), + 'size': self.total_blank_ssd_size.b.as_int(), + 'human_readable_sizes': str(self.block_db_size), + 'human_readable_size': str(self.total_available_db_space), + } + vg_name = 'vg/lv' + else: + vg_name = self.common_vg.name + for device in self.hdds: - osd = {'data': {}, 'block.db': {}} - osd['data']['path'] = device.abspath - osd['data']['size'] = device.sys_api['size'] - osd['data']['percentage'] = 100 - osd['data']['human_readable_size'] = str(disk.Size(b=(device.sys_api['size']))) - osd['block.db']['path'] = None - osd['block.db']['size'] = int(self.block_db_size.b) - osd['block.db']['human_readable_size'] = str(self.block_db_size) - osd['block.db']['percentage'] = self.vg_extents['percentages'] - osds.append(osd) - - self.computed['vgs'] = [{ - 'devices': [d.abspath for d in self.ssds], - 'parts': self.db_lvs, - 'percentages': self.vg_extents['percentages'], - 'sizes': self.vg_extents['sizes'], - 'size': int(self.total_ssd_size.b), - 'human_readable_sizes': str(disk.Size(b=self.vg_extents['sizes'])), - 'human_readable_size': str(self.total_ssd_size), - }] + for hdd in range(self.osds_per_device): + osd = {'data': {}, 'block.db': {}} + osd['data']['path'] = device.abspath + osd['data']['size'] = device.sys_api['size'] / self.osds_per_device + osd['data']['percentage'] = 100 / self.osds_per_device + osd['data']['human_readable_size'] = str( + disk.Size(b=(device.sys_api['size'])) / self.osds_per_device + ) + osd['block.db']['path'] = 'vg: %s' % vg_name + osd['block.db']['size'] = int(self.block_db_size.b) + osd['block.db']['human_readable_size'] = str(self.block_db_size) + osd['block.db']['percentage'] = self.vg_extents['percentages'] + osds.append(osd) + + self.computed['changed'] = len(osds) > 0 def execute(self): """ @@ -196,18 +258,50 @@ class MixedType(object): (block, block.db, block.wal, etc..) 
and offload the OSD creation to ``lvm create`` """ - # create the single vg for all block.db lv's first - vg_info = self.computed['vgs'][0] - vg = lvm.create_vg(vg_info['devices']) - - # now produce all the block.db lvs needed from that single vg - db_lvs = lvm.create_lvs(vg, parts=vg_info['parts'], name_prefix='osd-block-db') + blank_ssd_paths = [d.abspath for d in self.blank_ssds] + data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']]) + + # no common vg is found, create one with all the blank SSDs + if not self.common_vg: + db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs') + + # if a common vg exists then extend it with any blank ssds + elif self.common_vg and blank_ssd_paths: + db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths) + + # one common vg with nothing else to extend can be used directly, + # either this is one device with one vg, or multiple devices with the + # same vg + else: + db_vg = self.common_vg + + # since we are falling back to a block_db_size that might be "as large + # as possible" we can't fully rely on LV format coming from the helper + # function that looks up this value + block_db_size = "%sG" % self.block_db_size.gb.as_int() + + # create 1 vg per data device first, mapping them to the device path, + # when the lv gets created later, it can create as many as needed (or + # even just 1) + for osd in self.computed['osds']: + vg = data_vgs.get(osd['data']['path']) + if not vg: + vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block') + data_vgs[osd['data']['path']] = vg - # create the data lvs, and create the OSD with the matching block.db lvs from before + # create the data lvs, and create the OSD with an lv from the common + # block.db vg from before for osd in self.computed['osds']: - vg = lvm.create_vg(osd['data']['path']) - data_lv = lvm.create_lv('osd-data-%s' % str(uuid4()), vg.name) - db_lv = db_lvs.pop() + data_path = osd['data']['path'] + data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int() + data_vg = data_vgs[data_path] + data_lv_extents = data_vg.sizing(size=data_lv_size)['extents'] + data_lv = lvm.create_lv( + 'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True + ) + db_lv = lvm.create_lv( + 'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True + ) command = [ '--bluestore', '--data', "%s/%s" % (data_lv.vg_name, data_lv.name), @@ -220,7 +314,21 @@ class MixedType(object): if self.args.crush_device_class: command.extend(['--crush-device-class', self.args.crush_device_class]) - Create(command).main() + if self.args.prepare: + Prepare(command).main() + else: + Create(command).main() + + def get_common_vg(self): + # find all the vgs associated with the current device + for ssd in self.ssds: + for pv in ssd.pvs_api: + vg = self.system_vgs.get(vg_name=pv.vg_name) + if not vg: + continue + # this should give us just one VG, it would've been caught by + # the validator otherwise + return vg def validate(self): """ @@ -229,20 +337,65 @@ class MixedType(object): those LVs would be large enough to accommodate a block.db """ # validate minimum size for all devices - validators.minimum_device_size(self.devices) + validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device) + + # make sure that data devices do not have any LVs + validators.no_lvm_membership(self.hdds) + + # do not allow non-common VG to continue + validators.has_common_vg(self.ssds) + + # find the common VG to calculate how much is available + self.common_vg = self.get_common_vg() + + # 
find how many block.db LVs are possible from the common VG + if self.common_vg: + common_vg_size = disk.Size(gb=self.common_vg.free) + else: + common_vg_size = disk.Size(gb=0) + + # non-VG SSDs + self.vg_ssds = set([d for d in self.ssds if d.is_lvm_member]) + self.blank_ssds = set(self.ssds).difference(self.vg_ssds) + self.total_blank_ssd_size = disk.Size(b=0) + for blank_ssd in self.blank_ssds: + self.total_blank_ssd_size += disk.Size(b=blank_ssd.sys_api['size']) + + self.total_available_db_space = self.total_blank_ssd_size + common_vg_size + + # If not configured, we default to 0, which is really "use as much as + # possible" captured by the `else` condition + if self.block_db_size.gb > 0: + try: + self.vg_extents = lvm.sizing( + self.total_available_db_space.b, size=self.block_db_size.b * self.osds_per_device + ) + except SizeAllocationError: + msg = "Not enough space in fast devices (%s) to create %s x %s block.db LV" + raise RuntimeError( + msg % (self.total_available_db_space, self.osds_per_device, self.block_db_size) + ) + else: + self.vg_extents = lvm.sizing( + self.total_available_db_space.b, parts=self.dbs_needed + ) - # add all the size available in solid drives and divide it by the - # expected number of osds, the expected output should be larger than - # the minimum alllowed for block.db - self.total_ssd_size = disk.Size(b=0) - for ssd in self.ssds: - self.total_ssd_size += disk.Size(b=ssd.sys_api['size']) + # validate that number of block.db LVs possible are enough for number of + # OSDs proposed + if self.total_available_db_space.b == 0: + msg = "No space left in fast devices to create block.db LVs" + raise RuntimeError(msg) + + # bluestore_block_db_size was unset, so we must set this to whatever + # size we get by dividing the total available space for block.db LVs + # into the number of block.db LVs needed (i.e. "as large as possible") + if self.block_db_size.b == 0: + self.block_db_size = self.total_available_db_space / self.dbs_needed - self.block_db_size = self.total_ssd_size / self.db_lvs - self.vg_extents = lvm.sizing(self.total_ssd_size.b, parts=self.db_lvs) + total_dbs_possible = self.total_available_db_space / self.block_db_size - # min 2GB of block.db is allowed - msg = 'Total solid size (%s) is not enough for block.db LVs larger than 2 GB' - if self.block_db_size < disk.Size(gb=2): - # use ad-hoc exception here - raise RuntimeError(msg % self.total_ssd_size) + if self.dbs_needed > total_dbs_possible: + msg = "Not enough space (%s) to create %s x %s block.db LVs" % ( + self.total_available_db_space, self.dbs_needed, self.block_db_size, + ) + raise RuntimeError(msg) diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py index 9e80c5cbb..b94cc6ea3 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py @@ -4,7 +4,20 @@ from ceph_volume.util import disk, prepare from ceph_volume.api import lvm from . import validators from ceph_volume.devices.lvm.create import Create +from ceph_volume.devices.lvm.prepare import Prepare from ceph_volume.util import templates +from ceph_volume.exceptions import SizeAllocationError + + +def get_journal_size(args): + """ + Helper for Filestore strategies, to prefer the --journal-size value from + the CLI over anything that might be in a ceph configuration file (if any). 
+ """ + if args.journal_size: + return disk.Size(mb=args.journal_size) + else: + return prepare.get_journal_size(lv_format=False) class SingleType(object): @@ -15,20 +28,38 @@ class SingleType(object): def __init__(self, devices, args): self.args = args + self.osds_per_device = args.osds_per_device self.devices = devices self.hdds = [device for device in devices if device.sys_api['rotational'] == '1'] self.ssds = [device for device in devices if device.sys_api['rotational'] == '0'] - self.computed = {'osds': [], 'vgs': []} - self.validate() - self.compute() + self.computed = {'osds': [], 'vgs': [], 'filtered_devices': args.filtered_devices} + self.journal_size = get_journal_size(args) + if self.devices: + self.validate() + self.compute() + else: + self.computed["changed"] = False + + @staticmethod + def type(): + return "filestore.SingleType" + + @property + def total_osds(self): + if self.hdds: + return len(self.hdds) * self.osds_per_device + else: + return len(self.ssds) * self.osds_per_device def report_json(self): print(json.dumps(self.computed, indent=4, sort_keys=True)) def report_pretty(self): string = "" + if self.args.filtered_devices: + string += templates.filtered_devices(self.args.filtered_devices) string += templates.total_osds.format( - total_osds=len(self.hdds) or len(self.ssds) * 2 + total_osds=self.total_osds ) string += templates.osd_component_titles @@ -55,7 +86,20 @@ class SingleType(object): met, raise an error if the provided devices would not work """ # validate minimum size for all devices - validators.minimum_device_size(self.devices) + validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device) + + # validate collocation + if self.hdds: + validators.minimum_device_collocated_size( + self.hdds, self.journal_size, osds_per_device=self.osds_per_device + ) + else: + validators.minimum_device_collocated_size( + self.ssds, self.journal_size, osds_per_device=self.osds_per_device + ) + + # make sure that data devices do not have any LVs + validators.no_lvm_membership(self.hdds) def compute(self): """ @@ -65,51 +109,60 @@ class SingleType(object): # chose whichever is the one group we have to compute against devices = self.hdds or self.ssds osds = self.computed['osds'] - vgs = self.computed['vgs'] for device in devices: - device_size = disk.Size(b=device.sys_api['size']) - journal_size = prepare.get_journal_size(lv_format=False) - data_size = device_size - journal_size - data_percentage = data_size * 100 / device_size - vgs.append({'devices': [device.abspath], 'parts': 2}) - osd = {'data': {}, 'journal': {}} - osd['data']['path'] = device.abspath - osd['data']['size'] = data_size.b - osd['data']['percentage'] = int(data_percentage) - osd['data']['human_readable_size'] = str(data_size) - osd['journal']['path'] = device.abspath - osd['journal']['size'] = journal_size.b - osd['journal']['percentage'] = int(100 - data_percentage) - osd['journal']['human_readable_size'] = str(journal_size) - osds.append(osd) + for osd in range(self.osds_per_device): + device_size = disk.Size(b=device.sys_api['size']) + osd_size = device_size / self.osds_per_device + journal_size = self.journal_size + data_size = osd_size - journal_size + data_percentage = data_size * 100 / device_size + osd = {'data': {}, 'journal': {}} + osd['data']['path'] = device.abspath + osd['data']['size'] = data_size.b.as_int() + osd['data']['parts'] = self.osds_per_device + osd['data']['percentage'] = int(data_percentage) + osd['data']['human_readable_size'] = str(data_size) + 
osd['journal']['path'] = device.abspath + osd['journal']['size'] = journal_size.b.as_int() + osd['journal']['percentage'] = int(100 - data_percentage) + osd['journal']['human_readable_size'] = str(journal_size) + osds.append(osd) + + self.computed['changed'] = len(osds) > 0 def execute(self): """ Create vgs/lvs from the incoming set of devices, assign their roles (data, journal) and offload the OSD creation to ``lvm create`` """ - osd_vgs = [] + device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']]) - # create the vgs first, one per device (since this is colocating, it - # picks the 'data' path) + # create 1 vg per data device first, mapping them to the device path, + # when the lvs get created later, it can create as many as needed, + # including the journals since it is going to be collocated for osd in self.computed['osds']: - vg = lvm.create_vg(osd['data']['path']) - osd_vgs.append(vg) + vg = device_vgs.get(osd['data']['path']) + if not vg: + vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore') + device_vgs[osd['data']['path']] = vg - journal_size = prepare.get_journal_size() - - # create the lvs from the vgs captured in the beginning - for vg in osd_vgs: - # this is called again, getting us the LVM formatted string + # create the lvs from the per-device vg created in the beginning + for osd in self.computed['osds']: + data_path = osd['data']['path'] + data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int() + device_vg = device_vgs[data_path] + data_lv_extents = device_vg.sizing(size=data_lv_size)['extents'] + journal_lv_extents = device_vg.sizing(size=self.journal_size.gb.as_int())['extents'] + data_lv = lvm.create_lv( + 'osd-data', device_vg.name, extents=data_lv_extents, uuid_name=True + ) journal_lv = lvm.create_lv( - 'osd-journal', vg.name, size=journal_size, uuid_name=True + 'osd-journal', device_vg.name, extents=journal_lv_extents, uuid_name=True ) - # no extents or size means it will use 100%FREE - data_lv = lvm.create_lv('osd-data', vg.name) command = ['--filestore', '--data'] - command.append('%s/%s' % (vg.name, data_lv.name)) - command.extend(['--journal', '%s/%s' % (vg.name, journal_lv.name)]) + command.append('%s/%s' % (device_vg.name, data_lv.name)) + command.extend(['--journal', '%s/%s' % (device_vg.name, journal_lv.name)]) if self.args.dmcrypt: command.append('--dmcrypt') if self.args.no_systemd: @@ -117,7 +170,10 @@ class SingleType(object): if self.args.crush_device_class: command.extend(['--crush-device-class', self.args.crush_device_class]) - Create(command).main() + if self.args.prepare: + Prepare(command).main() + else: + Create(command).main() class MixedType(object): @@ -131,24 +187,41 @@ class MixedType(object): def __init__(self, devices, args): self.args = args + self.osds_per_device = args.osds_per_device self.devices = devices self.hdds = [device for device in devices if device.sys_api['rotational'] == '1'] self.ssds = [device for device in devices if device.sys_api['rotational'] == '0'] - self.computed = {'osds': [], 'vg': None} + self.computed = {'osds': [], 'vg': None, 'filtered_devices': args.filtered_devices} self.blank_ssds = [] - self.journals_needed = len(self.hdds) - self.journal_size = prepare.get_journal_size(lv_format=False) + self.journals_needed = len(self.hdds) * self.osds_per_device + self.journal_size = get_journal_size(args) self.system_vgs = lvm.VolumeGroups() - self.validate() - self.compute() + if self.devices: + self.validate() + self.compute() + else: + self.computed["changed"] = False + 
+ @staticmethod + def type(): + return "filestore.MixedType" def report_json(self): print(json.dumps(self.computed, indent=4, sort_keys=True)) + @property + def total_osds(self): + if self.hdds: + return len(self.hdds) * self.osds_per_device + else: + return len(self.ssds) * self.osds_per_device + def report_pretty(self): string = "" + if self.args.filtered_devices: + string += templates.filtered_devices(self.args.filtered_devices) string += templates.total_osds.format( - total_osds=len(self.hdds) or len(self.ssds) * 2 + total_osds=self.total_osds ) string += templates.ssd_volume_group.format( @@ -196,7 +269,7 @@ class MixedType(object): met, raise an error if the provided devices would not work """ # validate minimum size for all devices - validators.minimum_device_size(self.devices) + validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device) # make sure that data devices do not have any LVs validators.no_lvm_membership(self.hdds) @@ -224,19 +297,20 @@ class MixedType(object): try: self.vg_extents = lvm.sizing( - self.total_available_journal_space.b, size=self.journal_size.b + self.total_available_journal_space.b, size=self.journal_size.b * self.osds_per_device + ) + except SizeAllocationError: + msg = "Not enough space in fast devices (%s) to create %s x %s journal LV" + raise RuntimeError( + msg % (self.total_available_journal_space, self.osds_per_device, self.journal_size) ) - # FIXME with real exception catching from sizing that happens when the - # journal space is not enough - except Exception: - self.vg_extents = {'parts': 0, 'percentages': 0, 'sizes': 0} # validate that number of journals possible are enough for number of # OSDs proposed total_journals_possible = self.total_available_journal_space / self.journal_size - if len(self.hdds) > total_journals_possible: - msg = "Not enough %s journals (%s) can be created for %s OSDs" % ( - self.journal_size, total_journals_possible, len(self.hdds) + if self.osds_per_device > total_journals_possible: + msg = "Not enough space (%s) to create %s x %s journal LVs" % ( + self.total_available_journal_space, self.journals_needed, self.journal_size ) raise RuntimeError(msg) @@ -252,11 +326,11 @@ class MixedType(object): # there isn't a common vg, so a new one must be created with all # the blank SSDs self.computed['vg'] = { - 'devices': self.blank_ssds, + 'devices': ", ".join([ssd.abspath for ssd in self.blank_ssds]), 'parts': self.journals_needed, 'percentages': self.vg_extents['percentages'], - 'sizes': self.journal_size.b, - 'size': int(self.total_blank_ssd_size.b), + 'sizes': self.journal_size.b.as_int(), + 'size': self.total_blank_ssd_size.b.as_int(), 'human_readable_sizes': str(self.journal_size), 'human_readable_size': str(self.total_available_journal_space), } @@ -265,42 +339,59 @@ class MixedType(object): vg_name = self.common_vg.name for device in self.hdds: - device_size = disk.Size(b=device.sys_api['size']) - data_size = device_size - self.journal_size - osd = {'data': {}, 'journal': {}} - osd['data']['path'] = device.path - osd['data']['size'] = data_size.b - osd['data']['percentage'] = 100 - osd['data']['human_readable_size'] = str(device_size) - osd['journal']['path'] = 'vg: %s' % vg_name - osd['journal']['size'] = self.journal_size.b - osd['journal']['percentage'] = int(self.journal_size.gb * 100 / vg_free) - osd['journal']['human_readable_size'] = str(self.journal_size) - osds.append(osd) + for osd in range(self.osds_per_device): + device_size = disk.Size(b=device.sys_api['size']) + data_size = 
device_size / self.osds_per_device + osd = {'data': {}, 'journal': {}} + osd['data']['path'] = device.path + osd['data']['size'] = data_size.b.as_int() + osd['data']['percentage'] = 100 / self.osds_per_device + osd['data']['human_readable_size'] = str(data_size) + osd['journal']['path'] = 'vg: %s' % vg_name + osd['journal']['size'] = self.journal_size.b.as_int() + osd['journal']['percentage'] = int(self.journal_size.gb * 100 / vg_free) + osd['journal']['human_readable_size'] = str(self.journal_size) + osds.append(osd) + + self.computed['changed'] = len(osds) > 0 def execute(self): """ Create vgs/lvs from the incoming set of devices, assign their roles (data, journal) and offload the OSD creation to ``lvm create`` """ - ssd_paths = [d.abspath for d in self.blank_ssds] + blank_ssd_paths = [d.abspath for d in self.blank_ssds] + data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']]) # no common vg is found, create one with all the blank SSDs if not self.common_vg: - journal_vg = lvm.create_vg(ssd_paths, name_prefix='ceph-journals') + journal_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-journals') # a vg exists that can be extended - elif self.common_vg and ssd_paths: - journal_vg = lvm.extend_vg(self.common_vg, ssd_paths) + elif self.common_vg and blank_ssd_paths: + journal_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths) # one common vg with nothing else to extend can be used directly else: journal_vg = self.common_vg journal_size = prepare.get_journal_size(lv_format=True) + # create 1 vg per data device first, mapping them to the device path, + # when the lv gets created later, it can create as many as needed (or + # even just 1) for osd in self.computed['osds']: - data_vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data') - # no extents or size means it will use 100%FREE - data_lv = lvm.create_lv('osd-data', data_vg.name) + vg = data_vgs.get(osd['data']['path']) + if not vg: + vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data') + data_vgs[osd['data']['path']] = vg + + for osd in self.computed['osds']: + data_path = osd['data']['path'] + data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int() + data_vg = data_vgs[data_path] + data_lv_extents = data_vg.sizing(size=data_lv_size)['extents'] + data_lv = lvm.create_lv( + 'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True + ) journal_lv = lvm.create_lv( 'osd-journal', journal_vg.name, size=journal_size, uuid_name=True ) @@ -315,4 +406,7 @@ class MixedType(object): if self.args.crush_device_class: command.extend(['--crush-device-class', self.args.crush_device_class]) - Create(command).main() + if self.args.prepare: + Prepare(command).main() + else: + Create(command).main() diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/validators.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/validators.py index 06c5ebeaf..6b8938720 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/validators.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/validators.py @@ -2,16 +2,30 @@ from ceph_volume.util import disk from ceph_volume.api import lvm -def minimum_device_size(devices): +def minimum_device_size(devices, osds_per_device=1): """ Ensure that the minimum requirements for this type of scenario is met, raise an error if the provided devices would not work """ - msg = 'Unable to use device smaller than 5GB: %s (%s)' + msg = 'Unable to use device %s %s, LVs would be smaller than 5GB' for device in devices: device_size = 
disk.Size(b=device.sys_api['size']) - if device_size < disk.Size(gb=5): - raise RuntimeError(msg % (device, device_size)) + lv_size = device_size / osds_per_device + if lv_size < disk.Size(gb=5): + raise RuntimeError(msg % (device_size, device.path)) + + +def minimum_device_collocated_size(devices, journal_size, osds_per_device=1): + """ + Similar to ``minimum_device_size``, but take into account that the size of + the journal affects the size left of the device + """ + msg = 'Unable to use device %s %s, LVs would be smaller than 5GB' + for device in devices: + device_size = disk.Size(b=device.sys_api['size']) + lv_size = (device_size / osds_per_device) - journal_size + if lv_size < disk.Size(gb=5): + raise RuntimeError(msg % (device_size, device.path)) def no_lvm_membership(devices): diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py index 90c5447c0..8e0e3a3c5 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py @@ -100,15 +100,27 @@ class Zap(object): for vg_name in vgs: mlogger.info("Destroying volume group %s because --destroy was given", vg_name) api.remove_vg(vg_name) - mlogger.info("Destroying physical volume %s because --destroy was given", device) - api.remove_pv(device) + if not lv: + mlogger.info("Destroying physical volume %s because --destroy was given", device) + api.remove_pv(device) wipefs(path) zap_data(path) if lv and not pvs: - # remove all lvm metadata - lv.clear_tags() + if args.destroy: + lvs = api.Volumes() + lvs.filter(vg_name=lv.vg_name) + if len(lvs) <= 1: + mlogger.info('Only 1 LV left in VG, will proceed to destroy volume group %s', lv.vg_name) + api.remove_vg(lv.vg_name) + else: + mlogger.info('More than 1 LV left in VG, will proceed to destroy LV only') + mlogger.info('Removing LV because --destroy was given: %s', lv) + api.remove_lv(lv) + else: + # just remove all lvm metadata, leaving the LV around + lv.clear_tags() terminal.success("Zapping successful for: %s" % ", ".join(args.devices)) diff --git a/ceph/src/ceph-volume/ceph_volume/devices/simple/activate.py b/ceph/src/ceph-volume/ceph_volume/devices/simple/activate.py index a429018bf..814c6fe37 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/simple/activate.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/simple/activate.py @@ -19,9 +19,10 @@ class Activate(object): help = 'Enable systemd units to mount configured devices and start a Ceph OSD' - def __init__(self, argv, systemd=False): + def __init__(self, argv, from_trigger=False): self.argv = argv - self.systemd = systemd + self.from_trigger = from_trigger + self.skip_systemd = False def validate_devices(self, json_config): """ @@ -83,6 +84,43 @@ class Activate(object): return '/dev/mapper/%s' % uuid + def enable_systemd_units(self, osd_id, osd_fsid): + """ + * disables the ceph-disk systemd units to prevent them from running when + a UDEV event matches Ceph rules + * creates the ``simple`` systemd units to handle the activation and + startup of the OSD with ``osd_id`` and ``osd_fsid`` + * enables the OSD systemd unit and finally starts the OSD. 
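+ * skips all of the above when ``skip_systemd`` is set (``--no-systemd``)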
+ """ + if not self.from_trigger and not self.skip_systemd: + # means it was scanned and now activated directly, so ensure that + # ceph-disk units are disabled, and that the `simple` systemd unit + # is created and enabled + + # enable the ceph-volume unit for this OSD + systemctl.enable_volume(osd_id, osd_fsid, 'simple') + + # disable any/all ceph-disk units + systemctl.mask_ceph_disk() + terminal.warning( + ('All ceph-disk systemd units have been disabled to ' + 'prevent OSDs getting triggered by UDEV events') + ) + else: + terminal.info('Skipping enabling of `simple` systemd unit') + terminal.info('Skipping masking of ceph-disk systemd units') + + if not self.skip_systemd: + # enable the OSD + systemctl.enable_osd(osd_id) + + # start the OSD + systemctl.start_osd(osd_id) + else: + terminal.info( + 'Skipping enabling and starting OSD simple systemd unit because --no-systemd was used' + ) + @decorators.needs_root def activate(self, args): with open(args.json_config, 'r') as fp: @@ -148,24 +186,9 @@ class Activate(object): # make sure that the journal has proper permissions system.chown(device) - if not self.systemd: - # enable the ceph-volume unit for this OSD - systemctl.enable_volume(osd_id, osd_fsid, 'simple') - - # disable any/all ceph-disk units - systemctl.mask_ceph_disk() - - # enable the OSD - systemctl.enable_osd(osd_id) - - # start the OSD - systemctl.start_osd(osd_id) + self.enable_systemd_units(osd_id, osd_fsid) terminal.success('Successfully activated OSD %s with FSID %s' % (osd_id, osd_fsid)) - terminal.warning( - ('All ceph-disk systemd units have been disabled to ' - 'prevent OSDs getting triggered by UDEV events') - ) def main(self): sub_command_help = dedent(""" @@ -211,6 +234,12 @@ class Activate(object): '--file', help='The path to a JSON file, from a scanned OSD' ) + parser.add_argument( + '--no-systemd', + dest='skip_systemd', + action='store_true', + help='Skip creating and enabling systemd units and starting OSD services', + ) if len(self.argv) == 0: print(sub_command_help) return @@ -232,4 +261,5 @@ class Activate(object): if not os.path.exists(json_config): raise RuntimeError('Expected JSON config path not found: %s' % json_config) args.json_config = json_config + self.skip_systemd = args.skip_systemd self.activate(args) diff --git a/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py b/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py index f215e2dc9..f2f7d3dc9 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py @@ -8,6 +8,7 @@ from textwrap import dedent from ceph_volume import decorators, terminal, conf from ceph_volume.api import lvm from ceph_volume.util import arg_validators, system, disk, encryption +from ceph_volume.util.device import Device logger = logging.getLogger(__name__) @@ -336,10 +337,12 @@ class Scan(object): return args = parser.parse_args(self.argv) - if disk.is_partition(args.osd_path): - label = disk.lsblk(args.osd_path)['PARTLABEL'] - if 'data' not in label: - raise RuntimeError('Device must be the data partition, but got: %s' % label) + device = Device(args.osd_path) + if device.is_partition: + if device.ceph_disk.type != 'data': + label = device.ceph_disk.partlabel + msg = 'Device must be the ceph data partition, but PARTLABEL reported: "%s"' % label + raise RuntimeError(msg) # Capture some environment status, so that it can be reused all over self.device_mounts = system.get_mounts(devices=True) diff --git 
a/ceph/src/ceph-volume/ceph_volume/devices/simple/trigger.py b/ceph/src/ceph-volume/ceph_volume/devices/simple/trigger.py index aeb5cf1aa..c01d9ae2a 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/simple/trigger.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/simple/trigger.py @@ -67,4 +67,4 @@ class Trigger(object): args = parser.parse_args(self.argv) osd_id = parse_osd_id(args.systemd_data) osd_uuid = parse_osd_uuid(args.systemd_data) - Activate([osd_id, osd_uuid], systemd=True).main() + Activate([osd_id, osd_uuid], from_trigger=True).main() diff --git a/ceph/src/ceph-volume/ceph_volume/inventory/__init__.py b/ceph/src/ceph-volume/ceph_volume/inventory/__init__.py new file mode 100644 index 000000000..c9e0c0ccc --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/inventory/__init__.py @@ -0,0 +1 @@ +from .main import Inventory # noqa diff --git a/ceph/src/ceph-volume/ceph_volume/inventory/main.py b/ceph/src/ceph-volume/ceph_volume/inventory/main.py new file mode 100644 index 000000000..f4c732cab --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/inventory/main.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +import argparse +import pprint + +from ceph_volume.util.device import Devices, Device + + +class Inventory(object): + + help = "Get this nodes available disk inventory" + + def __init__(self, argv): + self.argv = argv + + def main(self): + parser = argparse.ArgumentParser( + prog='ceph-volume inventory', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=self.help, + ) + parser.add_argument( + 'path', + nargs='?', + default=None, + help=('Report on specific disk'), + ) + parser.add_argument( + '--format', + choices=['plain', 'json', 'json-pretty'], + default='plain', + help='Output format', + ) + self.args = parser.parse_args(self.argv) + if self.args.path: + self.format_report(Device(self.args.path)) + else: + self.format_report(Devices()) + + def format_report(self, inventory): + if self.args.format == 'json': + print(inventory.json_report()) + elif self.args.format == 'json-pretty': + pprint.pprint(inventory.json_report()) + else: + print(inventory.pretty_report()) diff --git a/ceph/src/ceph-volume/ceph_volume/main.py b/ceph/src/ceph-volume/ceph_volume/main.py index 94730cbe4..4685cb41c 100644 --- a/ceph/src/ceph-volume/ceph_volume/main.py +++ b/ceph/src/ceph-volume/ceph_volume/main.py @@ -5,9 +5,8 @@ import pkg_resources import sys import logging -import ceph_volume from ceph_volume.decorators import catches -from ceph_volume import log, devices, configuration, conf, exceptions, terminal +from ceph_volume import log, devices, configuration, conf, exceptions, terminal, inventory class Volume(object): @@ -15,8 +14,6 @@ class Volume(object): ceph-volume: Deploy Ceph OSDs using different device technologies like lvm or physical disks. -Version: {version} - Log Path: {log_path} Ceph Conf: {ceph_path} @@ -30,6 +27,7 @@ Ceph Conf: {ceph_path} self.mapper = { 'lvm': devices.lvm.LVM, 'simple': devices.simple.Simple, + 'inventory': inventory.Inventory, } self.plugin_help = "No plugins found/loaded" if argv is None: @@ -43,7 +41,6 @@ Ceph Conf: {ceph_path} warning = 'See "ceph-volume --help" for full list of options.' 
if warning else '' return self._help.format( warning=warning, - version=ceph_volume.__version__, log_path=conf.log_path, ceph_path=self.stat_ceph_conf(), plugins=self.plugin_help, @@ -76,11 +73,6 @@ Ceph Conf: {ceph_path} if self.plugin_help: self.plugin_help = '\nPlugins:\n' + self.plugin_help - def load_ceph_conf_path(self, cluster_name='ceph'): - abspath = '/etc/ceph/%s.conf' % cluster_name - conf.path = os.getenv('CEPH_CONF', abspath) - conf.cluster = cluster_name - def load_log_path(self): conf.log_path = os.getenv('CEPH_VOLUME_LOG_PATH', '/var/log/ceph') @@ -105,7 +97,7 @@ Ceph Conf: {ceph_path} def main(self, argv): # these need to be available for the help, which gets parsed super # early - self.load_ceph_conf_path() + configuration.load_ceph_conf_path() self.load_log_path() self.enable_plugins() main_args, subcommand_args = self._get_split_args() @@ -113,7 +105,7 @@ Ceph Conf: {ceph_path} # argparse which will end up complaning that there are no args if len(argv) <= 1: print(self.help(warning=True)) - return + raise SystemExit(0) parser = argparse.ArgumentParser( prog='ceph-volume', formatter_class=argparse.RawDescriptionHelpFormatter, @@ -143,7 +135,7 @@ Ceph Conf: {ceph_path} logger.info("Running command: ceph-volume %s %s", " ".join(main_args), " ".join(subcommand_args)) # set all variables from args and load everything needed according to # them - self.load_ceph_conf_path(cluster_name=args.cluster) + configuration.load_ceph_conf_path(cluster_name=args.cluster) try: conf.ceph = configuration.load(conf.path) except exceptions.ConfigurationError as error: diff --git a/ceph/src/ceph-volume/ceph_volume/systemd/__init__.py b/ceph/src/ceph-volume/ceph_volume/systemd/__init__.py index e69de29bb..493b8814b 100644 --- a/ceph/src/ceph-volume/ceph_volume/systemd/__init__.py +++ b/ceph/src/ceph-volume/ceph_volume/systemd/__init__.py @@ -0,0 +1 @@ +from .main import main # noqa diff --git a/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py b/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py index d4c0b7231..3dc1ac6b9 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py @@ -33,17 +33,17 @@ class TestParseTags(object): class TestGetAPIVgs(object): def test_report_is_emtpy(self, monkeypatch): - monkeypatch.setattr(api.process, 'call', lambda x: ('\n\n', '', 0)) + monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('\n\n', '', 0)) assert api.get_api_vgs() == [] def test_report_has_stuff(self, monkeypatch): report = [' VolGroup00'] - monkeypatch.setattr(api.process, 'call', lambda x: (report, '', 0)) + monkeypatch.setattr(api.process, 'call', lambda x, **kw: (report, '', 0)) assert api.get_api_vgs() == [{'vg_name': 'VolGroup00'}] def test_report_has_stuff_with_empty_attrs(self, monkeypatch): report = [' VolGroup00 ;;;;;;9g'] - monkeypatch.setattr(api.process, 'call', lambda x: (report, '', 0)) + monkeypatch.setattr(api.process, 'call', lambda x, **kw: (report, '', 0)) result = api.get_api_vgs()[0] assert len(result.keys()) == 7 assert result['vg_name'] == 'VolGroup00' @@ -51,7 +51,7 @@ class TestGetAPIVgs(object): def test_report_has_multiple_items(self, monkeypatch): report = [' VolGroup00;;;;;;;', ' ceph_vg;;;;;;;'] - monkeypatch.setattr(api.process, 'call', lambda x: (report, '', 0)) + monkeypatch.setattr(api.process, 'call', lambda x, **kw: (report, '', 0)) result = api.get_api_vgs() assert result[0]['vg_name'] == 'VolGroup00' assert result[1]['vg_name'] == 'ceph_vg' @@ -60,18 +60,18 @@ class 
TestGetAPIVgs(object): class TestGetAPILvs(object): def test_report_is_emtpy(self, monkeypatch): - monkeypatch.setattr(api.process, 'call', lambda x: ('', '', 0)) + monkeypatch.setattr(api.process, 'call', lambda x, **kw: ('', '', 0)) assert api.get_api_lvs() == [] def test_report_has_stuff(self, monkeypatch): report = [' ;/path;VolGroup00;root'] - monkeypatch.setattr(api.process, 'call', lambda x: (report, '', 0)) + monkeypatch.setattr(api.process, 'call', lambda x, **kw: (report, '', 0)) result = api.get_api_lvs() assert result[0]['lv_name'] == 'VolGroup00' def test_report_has_multiple_items(self, monkeypatch): report = [' ;/path;VolName;root', ';/dev/path;ceph_lv;ceph_vg'] - monkeypatch.setattr(api.process, 'call', lambda x: (report, '', 0)) + monkeypatch.setattr(api.process, 'call', lambda x, **kw: (report, '', 0)) result = api.get_api_lvs() assert result[0]['lv_name'] == 'VolName' assert result[1]['lv_name'] == 'ceph_lv' @@ -79,7 +79,7 @@ class TestGetAPILvs(object): @pytest.fixture def volumes(monkeypatch): - monkeypatch.setattr(process, 'call', lambda x: ('', '', 0)) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) volumes = api.Volumes() volumes._purge() # also patch api.Volumes so that when it is called, it will use the newly @@ -90,7 +90,7 @@ def volumes(monkeypatch): @pytest.fixture def volume_groups(monkeypatch): - monkeypatch.setattr(process, 'call', lambda x: ('', '', 0)) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) vgs = api.VolumeGroups() vgs._purge() return vgs @@ -541,6 +541,12 @@ class TestRemoveLV(object): monkeypatch.setattr(process, 'call', mock_call) assert api.remove_lv("vg/lv") + def test_removes_lv_object(self, fake_call): + foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='') + api.remove_lv(foo_volume) + # last argument from the list passed to process.call + assert fake_call.calls[0]['args'][0][-1] == '/path' + def test_fails_to_remove_lv(self, monkeypatch): def mock_call(cmd, **kw): return ('', '', 1) @@ -657,12 +663,12 @@ class TestCreateVG(object): @pytest.fixture def disable_kvdo_path(monkeypatch): - monkeypatch.setattr('os.path.isdir', lambda x: False) + monkeypatch.setattr('os.path.isdir', lambda x, **kw: False) @pytest.fixture def enable_kvdo_path(monkeypatch): - monkeypatch.setattr('os.path.isdir', lambda x: True) + monkeypatch.setattr('os.path.isdir', lambda x, **kw: True) # Stub for os.listdir @@ -721,28 +727,28 @@ class TestIsVdo(object): assert api.is_vdo('/path') == '0' def test_is_vdo_returns_a_string(self, monkeypatch): - monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', lambda x: True) + monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', lambda x, **kw: True) assert api.is_vdo('/path') == '1' def test_kvdo_dir_no_devices(self, makedirs, enable_kvdo_path, listdir, monkeypatch): kvdo_path = makedirs('sys/kvdo') listdir(paths={'/sys/kvdo': kvdo_path}) - monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x: []) - monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x: []) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: []) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: []) assert api._is_vdo('/dev/mapper/vdo0') is False def test_vdo_slaves_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch): kvdo_path = makedirs('sys/kvdo') listdir(paths={'/sys/kvdo': kvdo_path}) - monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x: ['/dev/dm-3']) - 
monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x: []) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: ['/dev/dm-3']) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: []) assert api._is_vdo('/dev/dm-3') is True def test_vdo_parents_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch): kvdo_path = makedirs('sys/kvdo') listdir(paths={'/sys/kvdo': kvdo_path}) - monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x: []) - monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x: ['/dev/dm-4']) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: []) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: ['/dev/dm-4']) assert api._is_vdo('/dev/dm-4') is True @@ -751,7 +757,7 @@ class TestVdoSlaves(object): def test_slaves_are_not_found(self, makedirs, listdir, monkeypatch): slaves_path = makedirs('sys/block/vdo0/slaves') listdir(paths={'/sys/block/vdo0/slaves': slaves_path}) - monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x: True) + monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True) result = sorted(api._vdo_slaves(['vdo0'])) assert '/dev/mapper/vdo0' in result assert 'vdo0' in result @@ -761,7 +767,7 @@ class TestVdoSlaves(object): makedirs('sys/block/vdo0/slaves/dm-4') makedirs('dev/mapper/vdo0') listdir(paths={'/sys/block/vdo0/slaves': slaves_path}) - monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x: True) + monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True) result = sorted(api._vdo_slaves(['vdo0'])) assert '/dev/dm-4' in result assert 'dm-4' in result @@ -809,14 +815,14 @@ class TestSplitNameParser(object): class TestIsLV(object): def test_is_not_an_lv(self, monkeypatch): - monkeypatch.setattr(api, 'dmsetup_splitname', lambda x: {}) + monkeypatch.setattr(api, 'dmsetup_splitname', lambda x, **kw: {}) assert api.is_lv('/dev/sda1', lvs=[]) is False def test_lvs_not_found(self, monkeypatch, volumes): CephVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo', lv_tags="ceph.type=data") volumes.append(CephVolume) splitname = {'LV_NAME': 'data', 'VG_NAME': 'ceph'} - monkeypatch.setattr(api, 'dmsetup_splitname', lambda x: splitname) + monkeypatch.setattr(api, 'dmsetup_splitname', lambda x, **kw: splitname) assert api.is_lv('/dev/sda1', lvs=volumes) is False def test_is_lv(self, monkeypatch, volumes): @@ -826,5 +832,5 @@ class TestIsLV(object): ) volumes.append(CephVolume) splitname = {'LV_NAME': 'data', 'VG_NAME': 'ceph'} - monkeypatch.setattr(api, 'dmsetup_splitname', lambda x: splitname) + monkeypatch.setattr(api, 'dmsetup_splitname', lambda x, **kw: splitname) assert api.is_lv('/dev/sda1', lvs=volumes) is True diff --git a/ceph/src/ceph-volume/ceph_volume/tests/conftest.py b/ceph/src/ceph-volume/ceph_volume/tests/conftest.py index 65279dc9b..cf7dd5d8f 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/conftest.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/conftest.py @@ -52,6 +52,24 @@ def fake_call(monkeypatch): return fake_call +@pytest.fixture +def fakedevice(factory): + def apply(**kw): + params = dict( + path='/dev/sda', + abspath='/dev/sda', + lv_api=None, + pvs_api=[], + disk_api={}, + sys_api={}, + exists=True, + is_lvm_member=True, + ) + params.update(dict(kw)) + return factory(**params) + return apply + + @pytest.fixture def stub_call(monkeypatch): """ @@ -67,6 +85,22 @@ def stub_call(monkeypatch): return apply +@pytest.fixture(autouse=True) +def 
reset_cluster_name(request, monkeypatch): + """ + The globally available ``ceph_volume.conf.cluster`` might get mangled in + tests, make sure that after evert test, it gets reset, preventing pollution + going into other tests later. + """ + def fin(): + conf.cluster = None + try: + os.environ.pop('CEPH_CONF') + except KeyError: + pass + request.addfinalizer(fin) + + @pytest.fixture def conf_ceph(monkeypatch): """ @@ -103,7 +137,7 @@ def conf_ceph_stub(monkeypatch, tmpfile): @pytest.fixture def volumes(monkeypatch): - monkeypatch.setattr('ceph_volume.process.call', lambda x: ('', '', 0)) + monkeypatch.setattr('ceph_volume.process.call', lambda x, **kw: ('', '', 0)) volumes = lvm_api.Volumes() volumes._purge() return volumes @@ -111,15 +145,22 @@ def volumes(monkeypatch): @pytest.fixture def volume_groups(monkeypatch): - monkeypatch.setattr('ceph_volume.process.call', lambda x: ('', '', 0)) + monkeypatch.setattr('ceph_volume.process.call', lambda x, **kw: ('', '', 0)) vgs = lvm_api.VolumeGroups() vgs._purge() return vgs +@pytest.fixture +def stub_vgs(monkeypatch, volume_groups): + def apply(vgs): + monkeypatch.setattr(lvm_api, 'get_api_vgs', lambda: vgs) + return apply + + @pytest.fixture def pvolumes(monkeypatch): - monkeypatch.setattr('ceph_volume.process.call', lambda x: ('', '', 0)) + monkeypatch.setattr('ceph_volume.process.call', lambda x, **kw: ('', '', 0)) pvolumes = lvm_api.PVolumes() pvolumes._purge() return pvolumes @@ -151,12 +192,18 @@ def tmpfile(tmpdir): @pytest.fixture def device_info(monkeypatch): - def apply(devices=None, lsblk=None, lv=None): + def apply(devices=None, lsblk=None, lv=None, blkid=None): devices = devices if devices else {} lsblk = lsblk if lsblk else {} + blkid = blkid if blkid else {} lv = Factory(**lv) if lv else None monkeypatch.setattr("ceph_volume.sys_info.devices", {}) monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda: devices) - monkeypatch.setattr("ceph_volume.util.device.lvm.get_lv_from_argument", lambda path: lv) + if not devices: + monkeypatch.setattr("ceph_volume.util.device.lvm.get_lv_from_argument", lambda path: lv) + else: + monkeypatch.setattr("ceph_volume.util.device.lvm.get_lv_from_argument", lambda path: None) + monkeypatch.setattr("ceph_volume.util.device.lvm.get_lv", lambda vg_name, lv_uuid: lv) monkeypatch.setattr("ceph_volume.util.device.disk.lsblk", lambda path: lsblk) + monkeypatch.setattr("ceph_volume.util.device.disk.blkid", lambda path: blkid) return apply diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/__init__.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py new file mode 100644 index 000000000..01a813c66 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py @@ -0,0 +1,139 @@ +import pytest +from ceph_volume.devices.lvm.strategies import bluestore + + +class TestSingleType(object): + + def test_hdd_device_is_large_enough(self, fakedevice, factory): + args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + ] + computed_osd = bluestore.SingleType(devices, args).computed['osds'][0] + assert computed_osd['data']['percentage'] == 100 + assert 
computed_osd['data']['parts'] == 1 + assert computed_osd['data']['human_readable_size'] == '5.66 GB' + assert computed_osd['data']['path'] == '/dev/sda' + + def test_sdd_device_is_large_enough(self, fakedevice, factory): + args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)) + ] + computed_osd = bluestore.SingleType(devices, args).computed['osds'][0] + assert computed_osd['data']['percentage'] == 100 + assert computed_osd['data']['parts'] == 1 + assert computed_osd['data']['human_readable_size'] == '5.66 GB' + assert computed_osd['data']['path'] == '/dev/sda' + + def test_device_cannot_have_many_osds_per_device(self, fakedevice, factory): + args = factory(filtered_devices=[], osds_per_device=3, block_db_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + ] + with pytest.raises(RuntimeError) as error: + bluestore.SingleType(devices, args) + assert 'Unable to use device 5.66 GB /dev/sda' in str(error) + + def test_device_is_lvm_member_fails(self, fakedevice, factory): + args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=True, sys_api=dict(rotational='1', size=6073740000)) + ] + with pytest.raises(RuntimeError) as error: + bluestore.SingleType(devices, args) + assert 'Unable to use device, already a member of LVM' in str(error) + + +class TestMixedTypeConfiguredSize(object): + # uses a block.db size that has been configured via ceph.conf, instead of + # defaulting to 'as large as possible' + + def test_hdd_device_is_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph): + # 3GB block.db in ceph.conf + conf_ceph(get_safe=lambda *a: 3147483640) + args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None) + ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)) + hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + devices = [ssd, hdd] + + osd = bluestore.MixedType(devices, args).computed['osds'][0] + assert osd['data']['percentage'] == 100 + assert osd['data']['human_readable_size'] == '5.66 GB' + assert osd['data']['path'] == '/dev/sda' + # a new vg will be created + assert osd['block.db']['path'] == 'vg: vg/lv' + assert osd['block.db']['percentage'] == 100 + + def test_ssd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph): + # 7GB block.db in ceph.conf + conf_ceph(get_safe=lambda *a: 7747483640) + args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None) + ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)) + hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + devices = [ssd, hdd] + + with pytest.raises(RuntimeError) as error: + bluestore.MixedType(devices, args).computed['osds'][0] + expected = 'Not enough space in fast devices (5.66 GB) to create 1 x 7.22 GB block.db LV' + assert expected in str(error) + + def test_multi_hdd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph): + # 3GB block.db in ceph.conf + conf_ceph(get_safe=lambda *a: 3147483640) + args = factory(filtered_devices=[], osds_per_device=2, block_db_size=None) + ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, 
sys_api=dict(rotational='0', size=60737400000)) + hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + devices = [ssd, hdd] + + with pytest.raises(RuntimeError) as error: + bluestore.MixedType(devices, args) + expected = 'Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB' + assert expected in str(error) + + +class TestMixedTypeLargeAsPossible(object): + + def test_hdd_device_is_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: None) + args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None) + ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)) + hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + devices = [ssd, hdd] + + osd = bluestore.MixedType(devices, args).computed['osds'][0] + assert osd['data']['percentage'] == 100 + assert osd['data']['human_readable_size'] == '5.66 GB' + assert osd['data']['path'] == '/dev/sda' + # a new vg will be created + assert osd['block.db']['path'] == 'vg: vg/lv' + # as large as possible + assert osd['block.db']['percentage'] == 100 + + def test_multi_hdd_device_is_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: None) + args = factory(filtered_devices=[], osds_per_device=2, block_db_size=None) + ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=60073740000)) + hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=60073740000)) + devices = [ssd, hdd] + + osd = bluestore.MixedType(devices, args).computed['osds'][0] + assert osd['data']['percentage'] == 50 + assert osd['data']['human_readable_size'] == '27.97 GB' + assert osd['data']['path'] == '/dev/sda' + # a new vg will be created + assert osd['block.db']['path'] == 'vg: vg/lv' + # as large as possible + assert osd['block.db']['percentage'] == 50 + + def test_multi_hdd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: None) + args = factory(filtered_devices=[], osds_per_device=2, block_db_size=None) + ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=60737400000)) + hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + devices = [ssd, hdd] + + with pytest.raises(RuntimeError) as error: + bluestore.MixedType(devices, args) + expected = 'Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB' + assert expected in str(error) diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_filestore.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_filestore.py new file mode 100644 index 000000000..0537e1e08 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_filestore.py @@ -0,0 +1,210 @@ +import pytest +from ceph_volume.devices.lvm.strategies import filestore +from ceph_volume.api import lvm + + +class TestSingleType(object): + + def test_hdd_device_is_large_enough(self, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '5120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=12073740000)) + ] + computed_osd = filestore.SingleType(devices, args).computed['osds'][0] + 
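+ # the 12073740000-byte device is ~11.24 GB; after the 5 GB journal, ~6.24 GB (55%) is left for the data LV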
assert computed_osd['data']['percentage'] == 55 + assert computed_osd['data']['parts'] == 1 + assert computed_osd['data']['human_readable_size'] == '6.24 GB' + assert computed_osd['data']['path'] == '/dev/sda' + + def test_hdd_device_with_large_journal(self, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '5120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + ] + with pytest.raises(RuntimeError) as error: + filestore.SingleType(devices, args) + msg = "Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB" + assert msg in str(error) + + def test_ssd_device_is_large_enough(self, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '5120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=12073740000)) + ] + computed_osd = filestore.SingleType(devices, args).computed['osds'][0] + assert computed_osd['data']['percentage'] == 55 + assert computed_osd['data']['parts'] == 1 + assert computed_osd['data']['human_readable_size'] == '6.24 GB' + assert computed_osd['data']['path'] == '/dev/sda' + + def test_ssd_device_with_large_journal(self, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '5120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)) + ] + with pytest.raises(RuntimeError) as error: + filestore.SingleType(devices, args) + msg = "Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB" + assert msg in str(error) + + def test_ssd_device_multi_osd(self, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '5120') + args = factory(filtered_devices=[], osds_per_device=4, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=16073740000)) + ] + with pytest.raises(RuntimeError) as error: + filestore.SingleType(devices, args) + msg = "Unable to use device 14.97 GB /dev/sda, LVs would be smaller than 5GB" + assert msg in str(error) + + def test_hdd_device_multi_osd(self, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '5120') + args = factory(filtered_devices=[], osds_per_device=4, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=16073740000)) + ] + with pytest.raises(RuntimeError) as error: + filestore.SingleType(devices, args) + msg = "Unable to use device 14.97 GB /dev/sda, LVs would be smaller than 5GB" + assert msg in str(error) + + def test_device_is_lvm_member_fails(self, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '5120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=True, sys_api=dict(rotational='1', size=12073740000)) + ] + with pytest.raises(RuntimeError) as error: + filestore.SingleType(devices, args) + assert 'Unable to use device, already a member of LVM' in str(error) + + def test_hdd_device_with_small_configured_journal(self, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, 
is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + ] + with pytest.raises(RuntimeError) as error: + filestore.SingleType(devices, args) + msg = "journal sizes must be larger than 2GB, detected: 120.00 MB" + assert msg in str(error) + + def test_ssd_device_with_small_configured_journal(self, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)) + ] + with pytest.raises(RuntimeError) as error: + filestore.SingleType(devices, args) + msg = "journal sizes must be larger than 2GB, detected: 120.00 MB" + assert msg in str(error) + + +class TestMixedType(object): + + def test_minimum_size_is_not_met(self, stub_vgs, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)), + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + ] + with pytest.raises(RuntimeError) as error: + filestore.MixedType(devices, args) + msg = "journal sizes must be larger than 2GB, detected: 120.00 MB" + assert msg in str(error) + + def test_ssd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '7120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)), + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + ] + with pytest.raises(RuntimeError) as error: + filestore.MixedType(devices, args) + msg = "Not enough space in fast devices (5.66 GB) to create 1 x 6.95 GB journal LV" + assert msg in str(error) + + def test_hdd_device_is_lvm_member_fails(self, stub_vgs, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '5120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ + fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)), + fakedevice(used_by_ceph=False, is_lvm_member=True, sys_api=dict(rotational='1', size=6073740000)) + ] + with pytest.raises(RuntimeError) as error: + filestore.MixedType(devices, args) + assert 'Unable to use device, already a member of LVM' in str(error) + + def test_ssd_is_lvm_member_doesnt_fail(self, volumes, stub_vgs, fakedevice, factory, conf_ceph): + # fast PV, because ssd is an LVM member + CephPV = lvm.PVolume(vg_name='fast', pv_name='/dev/sda', pv_tags='') + ssd = fakedevice( + used_by_ceph=False, is_lvm_member=True, sys_api=dict(rotational='0', size=6073740000), pvs_api=[CephPV] + ) + hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + # when get_api_vgs() gets called, it will return this one VG + stub_vgs([ + dict( + vg_free='7g', vg_name='fast', lv_name='foo', + lv_path='/dev/vg/foo', lv_tags="ceph.type=data" + ) + ]) + + conf_ceph(get_safe=lambda *a: '5120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ssd, hdd] + result = filestore.MixedType(devices, args).computed['osds'][0] + assert result['journal']['path'] == 'vg: fast' + assert result['journal']['percentage'] == 71 + assert 
result['journal']['human_readable_size'] == '5.00 GB' + + def test_no_common_vg(self, volumes, stub_vgs, fakedevice, factory, conf_ceph): + # fast PV, because ssd is an LVM member + CephPV1 = lvm.PVolume(vg_name='fast1', pv_name='/dev/sda', pv_tags='') + CephPV2 = lvm.PVolume(vg_name='fast2', pv_name='/dev/sdb', pv_tags='') + ssd1 = fakedevice( + used_by_ceph=False, is_lvm_member=True, sys_api=dict(rotational='0', size=6073740000), pvs_api=[CephPV1] + ) + ssd2 = fakedevice( + used_by_ceph=False, is_lvm_member=True, sys_api=dict(rotational='0', size=6073740000), pvs_api=[CephPV2] + ) + hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000)) + # when get_api_vgs() gets called, it will return this one VG + stub_vgs([ + dict( + vg_free='7g', vg_name='fast1', lv_name='foo', + lv_path='/dev/vg/fast1', lv_tags="ceph.type=data" + ), + dict( + vg_free='7g', vg_name='fast2', lv_name='foo', + lv_path='/dev/vg/fast2', lv_tags="ceph.type=data" + ) + ]) + + conf_ceph(get_safe=lambda *a: '5120') + args = factory(filtered_devices=[], osds_per_device=1, journal_size=None) + devices = [ssd1, ssd2, hdd] + with pytest.raises(RuntimeError) as error: + filestore.MixedType(devices, args) + + assert 'Could not find a common VG between devices' in str(error) + + def test_ssd_device_fails_multiple_osds(self, stub_vgs, fakedevice, factory, conf_ceph): + conf_ceph(get_safe=lambda *a: '15120') + args = factory(filtered_devices=[], osds_per_device=2, journal_size=None) + devices = [ + fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=16073740000)), + fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=16073740000)) + ] + with pytest.raises(RuntimeError) as error: + filestore.MixedType(devices, args) + msg = "Not enough space in fast devices (14.97 GB) to create 2 x 14.77 GB journal LV" + assert msg in str(error) diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_validate.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_validate.py new file mode 100644 index 000000000..315ec7c5b --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_validate.py @@ -0,0 +1,52 @@ +import pytest +from ceph_volume.util import disk +from ceph_volume.devices.lvm.strategies import validators + + +class TestMinimumDeviceSize(object): + + def test_size_is_larger_than_5gb(self, fakedevice): + devices = [fakedevice(sys_api=dict(size=6073740000))] + assert validators.minimum_device_size(devices) is None + + def test_size_is_smaller_than_5gb(self, fakedevice): + devices = [fakedevice(sys_api=dict(size=1073740000))] + with pytest.raises(RuntimeError) as error: + validators.minimum_device_size(devices) + msg = "LVs would be smaller than 5GB" + assert msg in str(error) + + def test_large_device_multiple_osds_fails(self, fakedevice): + devices = [fakedevice(sys_api=dict(size=6073740000))] + with pytest.raises(RuntimeError) as error: + validators.minimum_device_size( + devices, osds_per_device=4 + ) + msg = "LVs would be smaller than 5GB" + assert msg in str(error) + + +class TestMinimumCollocatedDeviceSize(object): + + def setup(self): + self.journal_size = disk.Size(gb=5) + + def test_size_is_larger_than_5gb_large_journal(self, fakedevice): + devices = [fakedevice(sys_api=dict(size=6073740000))] + assert validators.minimum_device_collocated_size(devices, disk.Size(mb=1)) is None + + def test_size_is_larger_than_5gb_large_journal_fails(self, fakedevice): + devices = 
[fakedevice(sys_api=dict(size=1073740000))] + with pytest.raises(RuntimeError) as error: + validators.minimum_device_collocated_size(devices, self.journal_size) + msg = "LVs would be smaller than 5GB" + assert msg in str(error) + + def test_large_device_multiple_osds_fails(self, fakedevice): + devices = [fakedevice(sys_api=dict(size=16073740000))] + with pytest.raises(RuntimeError) as error: + validators.minimum_device_collocated_size( + devices, self.journal_size, osds_per_device=3 + ) + msg = "LVs would be smaller than 5GB" + assert msg in str(error) diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py index 45d7b7191..7520e52bf 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py @@ -50,6 +50,7 @@ class TestActivate(object): activate.Activate([]).activate(args) def test_filestore_no_systemd(self, is_root, volumes, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.configuration.load', lambda: None) fake_enable = Capture() fake_start_osd = Capture() monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True) @@ -64,8 +65,8 @@ class TestActivate(object): lv_tags=','.join([ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal", "ceph.journal_uuid=000", "ceph.type=journal", - "ceph.osd_id=0","ceph.osd_fsid=1234"]) - ) + "ceph.osd_id=0", "ceph.osd_fsid=1234"]) + ) DataVolume = api.Volume( lv_name='data', lv_path='/dev/vg/data', @@ -78,9 +79,70 @@ class TestActivate(object): assert fake_enable.calls == [] assert fake_start_osd.calls == [] + def test_filestore_no_systemd_autodetect(self, is_root, volumes, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.configuration.load', lambda: None) + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + JournalVolume = api.Volume( + lv_name='journal', + lv_path='/dev/vg/journal', + lv_uuid='000', + lv_tags=','.join([ + "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal", + "ceph.journal_uuid=000", "ceph.type=journal", + "ceph.osd_id=0", "ceph.osd_fsid=1234"]) + ) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/journal,ceph.journal_uuid=000,ceph.type=data,ceph.osd_id=0,ceph.osd_fsid=1234") + volumes.append(DataVolume) + volumes.append(JournalVolume) + monkeypatch.setattr(api, 'Volumes', lambda: volumes) + args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, filestore=True, auto_detect_objectstore=True) + activate.Activate([]).activate(args) + assert fake_enable.calls == [] + assert fake_start_osd.calls == [] + + def test_filestore_systemd_autodetect(self, is_root, volumes, monkeypatch, capture): + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.configuration.load', lambda: None) + monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) + 
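+ # process.run is stubbed out entirely; enable_volume and start_osd are captured so the asserts at the end can verify both were called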
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + JournalVolume = api.Volume( + lv_name='journal', + lv_path='/dev/vg/journal', + lv_uuid='000', + lv_tags=','.join([ + "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal", + "ceph.journal_uuid=000", "ceph.type=journal", + "ceph.osd_id=0","ceph.osd_fsid=1234"]) + ) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/journal,ceph.journal_uuid=000,ceph.type=data,ceph.osd_id=0,ceph.osd_fsid=1234") + volumes.append(DataVolume) + volumes.append(JournalVolume) + monkeypatch.setattr(api, 'Volumes', lambda: volumes) + args = Args(osd_id=None, osd_fsid='1234', no_systemd=False, filestore=True, auto_detect_objectstore=False) + activate.Activate([]).activate(args) + assert fake_enable.calls != [] + assert fake_start_osd.calls != [] + def test_filestore_systemd(self, is_root, volumes, monkeypatch, capture): fake_enable = Capture() fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.configuration.load', lambda: None) monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True) monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) @@ -145,6 +207,43 @@ class TestActivate(object): assert fake_enable.calls != [] assert fake_start_osd.calls != [] + def test_bluestore_no_systemd_autodetect(self, is_root, volumes, monkeypatch, capture): + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_tags="ceph.cluster_name=ceph,,ceph.block_uuid=000,ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234") + volumes.append(DataVolume) + monkeypatch.setattr(api, 'Volumes', lambda: volumes) + args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, bluestore=True, auto_detect_objectstore=True) + activate.Activate([]).activate(args) + assert fake_enable.calls == [] + assert fake_start_osd.calls == [] + + def test_bluestore_systemd_autodetect(self, is_root, volumes, monkeypatch, capture): + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000,ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234") + volumes.append(DataVolume) + monkeypatch.setattr(api, 'Volumes', lambda: volumes) + args = Args(osd_id=None, osd_fsid='1234', no_systemd=False, bluestore=True, auto_detect_objectstore=False) + activate.Activate([]).activate(args) + assert fake_enable.calls != [] + 
assert fake_start_osd.calls != [] class TestActivateFlags(object): diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py new file mode 100644 index 000000000..d1f9046a0 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py @@ -0,0 +1,61 @@ +from ceph_volume.devices.lvm import batch + + +class TestFilterDevices(object): + + def test_filter_used_device(self, factory): + device1 = factory(used_by_ceph=True, abspath="/dev/sda") + args = factory(devices=[device1], filtered_devices={}) + result = batch.filter_devices(args) + assert not result + assert device1.abspath in args.filtered_devices + + def test_has_unused_devices(self, factory): + device1 = factory( + used_by_ceph=False, + abspath="/dev/sda", + rotational=False, + is_lvm_member=False + ) + args = factory(devices=[device1], filtered_devices={}) + result = batch.filter_devices(args) + assert device1 in result + assert not args.filtered_devices + + def test_filter_device_used_as_a_journal(self, factory): + hdd1 = factory( + used_by_ceph=True, + abspath="/dev/sda", + rotational=True, + is_lvm_member=True, + ) + lv = factory(tags={"ceph.type": "journal"}) + ssd1 = factory( + used_by_ceph=False, + abspath="/dev/nvme0n1", + rotational=False, + is_lvm_member=True, + lvs=[lv], + ) + args = factory(devices=[hdd1, ssd1], filtered_devices={}) + result = batch.filter_devices(args) + assert not result + assert ssd1.abspath in args.filtered_devices + + def test_last_device_is_not_filtered(self, factory): + hdd1 = factory( + used_by_ceph=True, + abspath="/dev/sda", + rotational=True, + is_lvm_member=True, + ) + ssd1 = factory( + used_by_ceph=False, + abspath="/dev/nvme0n1", + rotational=False, + is_lvm_member=False, + ) + args = factory(devices=[hdd1, ssd1], filtered_devices={}) + result = batch.filter_devices(args) + assert result + assert len(args.filtered_devices) == 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py index a08564c26..62790c9eb 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py @@ -17,14 +17,16 @@ class TestCreate(object): assert 'Use the bluestore objectstore' in stdout assert 'A physical device or logical' in stdout - def test_excludes_filestore_bluestore_flags(self, capsys): + def test_excludes_filestore_bluestore_flags(self, capsys, device_info): + device_info() with pytest.raises(SystemExit): lvm.create.Create(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main() stdout, sterr = capsys.readouterr() expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)' assert expected in stdout - def test_excludes_other_filestore_bluestore_flags(self, capsys): + def test_excludes_other_filestore_bluestore_flags(self, capsys, device_info): + device_info() with pytest.raises(SystemExit): lvm.create.Create(argv=[ '--bluestore', '--data', '/dev/sdfoo', @@ -34,7 +36,8 @@ class TestCreate(object): expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)' assert expected in stdout - def test_excludes_block_and_journal_flags(self, capsys): + def test_excludes_block_and_journal_flags(self, capsys, device_info): + device_info() with pytest.raises(SystemExit): lvm.create.Create(argv=[ '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1', diff --git 
a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py index bc2486cf6..c7963bb2f 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py @@ -32,6 +32,21 @@ class TestPrepareDevice(object): assert 'A vg/lv path or an existing device is needed' in str(error) +class TestGetClusterFsid(object): + + def test_fsid_is_passed_in(self, factory): + args = factory(cluster_fsid='aaaa-1111') + prepare_obj = lvm.prepare.Prepare([]) + prepare_obj.args = args + assert prepare_obj.get_cluster_fsid() == 'aaaa-1111' + + def test_fsid_is_read_from_ceph_conf(self, factory, conf_ceph_stub): + conf_ceph_stub('[global]\nfsid = bbbb-2222') + prepare_obj = lvm.prepare.Prepare([]) + prepare_obj.args = factory(cluster_fsid=None) + assert prepare_obj.get_cluster_fsid() == 'bbbb-2222' + + class TestPrepare(object): def test_main_spits_help_with_no_arguments(self, capsys): @@ -47,14 +62,16 @@ class TestPrepare(object): assert 'Use the bluestore objectstore' in stdout assert 'A physical device or logical' in stdout - def test_excludes_filestore_bluestore_flags(self, capsys): + def test_excludes_filestore_bluestore_flags(self, capsys, device_info): + device_info() with pytest.raises(SystemExit): lvm.prepare.Prepare(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main() stdout, stderr = capsys.readouterr() expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)' assert expected in stdout - def test_excludes_other_filestore_bluestore_flags(self, capsys): + def test_excludes_other_filestore_bluestore_flags(self, capsys, device_info): + device_info() with pytest.raises(SystemExit): lvm.prepare.Prepare(argv=[ '--bluestore', '--data', '/dev/sdfoo', @@ -64,7 +81,8 @@ class TestPrepare(object): expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)' assert expected in stdout - def test_excludes_block_and_journal_flags(self, capsys): + def test_excludes_block_and_journal_flags(self, capsys, device_info): + device_info() with pytest.raises(SystemExit): lvm.prepare.Prepare(argv=[ '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1', @@ -74,6 +92,14 @@ class TestPrepare(object): expected = 'Cannot use --block.db (bluestore) with --journal (filestore)' assert expected in stdout + def test_journal_is_required_with_filestore(self, is_root, monkeypatch, device_info): + monkeypatch.setattr("os.path.exists", lambda path: True) + device_info() + with pytest.raises(SystemExit) as error: + lvm.prepare.Prepare(argv=['--filestore', '--data', '/dev/sdfoo']).main() + expected = '--journal is required when using --filestore' + assert expected in str(error) + class TestGetJournalLV(object): diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py index da3b955a0..a275bdd00 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py @@ -23,6 +23,89 @@ class TestActivate(object): assert 'Activate OSDs by mounting devices previously configured' in stdout +class TestEnableSystemdUnits(object): + + def test_nothing_is_activated(self, tmpfile, is_root, capsys): + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--no-systemd', '--file', json_config, '0', '1234'], from_trigger=True) + 
activation.activate = lambda x: True + activation.main() + activation.enable_systemd_units('0', '1234') + out, err = capsys.readouterr() + assert 'Skipping enabling of `simple`' in out + assert 'Skipping masking of ceph-disk' in out + assert 'Skipping enabling and starting OSD simple' in out + + def test_no_systemd_flag_is_true(self, tmpfile, is_root): + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--no-systemd', '--file', json_config, '0', '1234'], from_trigger=True) + activation.activate = lambda x: True + activation.main() + assert activation.skip_systemd is True + + def test_no_systemd_flag_is_false(self, tmpfile, is_root): + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=True) + activation.activate = lambda x: True + activation.main() + assert activation.skip_systemd is False + + def test_masks_ceph_disk(self, tmpfile, is_root, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', capture) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True) + + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False) + activation.activate = lambda x: True + activation.main() + activation.enable_systemd_units('0', '1234') + assert len(capture.calls) == 1 + + def test_enables_simple_unit(self, tmpfile, is_root, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', capture) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True) + + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False) + activation.activate = lambda x: True + activation.main() + activation.enable_systemd_units('0', '1234') + assert len(capture.calls) == 1 + assert capture.calls[0]['args'] == ('0', '1234', 'simple') + + def test_enables_osd_unit(self, tmpfile, is_root, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', capture) + monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True) + + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False) + activation.activate = lambda x: True + activation.main() + activation.enable_systemd_units('0', '1234') + assert len(capture.calls) == 1 + assert capture.calls[0]['args'] == ('0',) + + def test_starts_osd_unit(self, tmpfile, is_root, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', capture) + + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--file', json_config, '0', '1234'], 
from_trigger=False) + activation.activate = lambda x: True + activation.main() + activation.enable_systemd_units('0', '1234') + assert len(capture.calls) == 1 + assert capture.calls[0]['args'] == ('0',) + + class TestValidateDevices(object): def test_filestore_missing_journal(self): diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/Vagrantfile b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/Vagrantfile new file mode 120000 index 000000000..16076e424 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/group_vars/all new file mode 100644 index 000000000..c3335fae6 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/group_vars/all @@ -0,0 +1,32 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "bluestore" +osd_scenario: lvm +dmcrypt: true +num_osds: 2 +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/hosts b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/hosts new file mode 100644 index 000000000..e1c1de6f8 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/setup.yml new file mode 120000 index 000000000..8cf11d4ef --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test.yml new file mode 120000 index 000000000..aa867bcde --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/vagrant_variables.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/vagrant_variables.yml new file mode 100644 index 000000000..7d1a4449a --- /dev/null +++ 
b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/Vagrantfile b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/Vagrantfile new file mode 120000 index 000000000..16076e424 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/group_vars/all new file mode 100644 index 000000000..06b966be5 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/group_vars/all @@ -0,0 +1,31 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "bluestore" +osd_scenario: lvm +num_osds: 2 +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/hosts b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/hosts new file mode 100644 index 000000000..e1c1de6f8 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/setup.yml new file mode 120000 index 000000000..8cf11d4ef --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test.yml new file mode 120000 index 000000000..aa867bcde --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/vagrant_variables.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/vagrant_variables.yml new file mode 
100644 index 000000000..7d1a4449a --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/group_vars/all index 5cb47c8d2..92ca5bce0 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/group_vars/all @@ -1,7 +1,7 @@ --- ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 @@ -21,3 +21,10 @@ ceph_conf_overrides: global: osd_pool_default_pg_num: 8 osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/setup.yml new file mode 120000 index 000000000..30874dfbb --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test.yml index ac5ac6b0d..aa867bcde 120000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test.yml @@ -1 +1 @@ -../../../playbooks/test_bluestore_dmcrypt.yml \ No newline at end of file +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/group_vars/all index 87b031fd4..f71c89ef3 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/group_vars/all @@ -1,7 +1,7 @@ --- ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 @@ -20,3 +20,10 @@ ceph_conf_overrides: global: osd_pool_default_pg_num: 8 osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/setup.yml new file 
mode 120000 index 000000000..30874dfbb --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test.yml index 165d9da29..aa867bcde 120000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test.yml @@ -1 +1 @@ -../../../playbooks/test_bluestore.yml \ No newline at end of file +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/Vagrantfile b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/Vagrantfile new file mode 120000 index 000000000..16076e424 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/group_vars/all new file mode 100644 index 000000000..46480e842 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/group_vars/all @@ -0,0 +1,32 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "filestore" +osd_scenario: lvm +dmcrypt: true +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +num_osds: 2 +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/hosts b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/hosts new file mode 100644 index 000000000..e1c1de6f8 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/setup.yml new file mode 120000 index 000000000..8cf11d4ef --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test.yml new file mode 120000 index 000000000..aa867bcde --- /dev/null 
+++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/vagrant_variables.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/vagrant_variables.yml new file mode 100644 index 000000000..7d1a4449a --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/Vagrantfile b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/Vagrantfile new file mode 120000 index 000000000..16076e424 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/group_vars/all new file mode 100644 index 000000000..8f5c6ed96 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/group_vars/all @@ -0,0 +1,31 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +num_osds: 2 +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/hosts b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/hosts new file mode 100644 index 000000000..e1c1de6f8 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/setup.yml new file mode 120000 index 000000000..8cf11d4ef --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test.yml new file mode 120000 index 000000000..aa867bcde --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/vagrant_variables.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/vagrant_variables.yml new file mode 
100644 index 000000000..7d1a4449a --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/group_vars/all index f6f67a992..719321cb4 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/group_vars/all @@ -1,7 +1,7 @@ --- ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 @@ -23,3 +23,10 @@ ceph_conf_overrides: osd_pool_default_size: 1 osd: osd_journal_size: 2048 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/setup.yml new file mode 120000 index 000000000..30874dfbb --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test.yml index 8ed725f8a..aa867bcde 120000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test.yml @@ -1 +1 @@ -../../../playbooks/test_filestore_dmcrypt.yml \ No newline at end of file +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/group_vars/all index d4b26c383..8cf7a0c97 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/group_vars/all @@ -1,7 +1,7 @@ --- ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 @@ -20,3 +20,10 @@ ceph_conf_overrides: global: osd_pool_default_pg_num: 8 osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/setup.yml new file mode 
120000 index 000000000..30874dfbb --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test.yml index 1a8c37c13..aa867bcde 120000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test.yml @@ -1 +1 @@ -../../../playbooks/test_filestore.yml \ No newline at end of file +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml new file mode 100644 index 000000000..5922ecf2e --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml @@ -0,0 +1,12 @@ +--- + +# Allows to always include a 'setup.yml' file in functional tests, and execute +# only on the ones that actually need it + +- hosts: all + gather_facts: no + + tasks: + + - debug: + msg: "This is an empty setup playbook. The current scenario didn't need any work done" diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml new file mode 100644 index 000000000..af29487fb --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml @@ -0,0 +1,140 @@ + +- hosts: osds + become: yes + tasks: + + - name: create the nvme image systemd unit + copy: + content: | + [Unit] + Description=NVMe loop device + After=local-fs.target + Wants=local-fs.target + + [Service] + Type=simple + ExecStart=/bin/bash /opt/ceph-nvme.sh + StandardOutput=journal + StandardError=journal + + [Install] + WantedBy=multi-user.target + dest: "/etc/systemd/system/ceph-nvme.service" + + - name: create the ceph-nvme startup script + copy: + content: | + set -x + set -e + modprobe nvmet + modprobe nvme_loop + modprobe nvme_fabrics + modprobe loop + losetup -v /dev/loop0 /opt/loop0_nvme0 + losetup -l + nvmetcli restore /opt/loop.json + nvme connect -t loop -n testnqn -q hostnqn + nvme list + dest: "/opt/ceph-nvme.sh" + + - name: ensure ceph-nvme is enabled + service: + name: ceph-nvme + state: stopped + enabled: yes + + - name: install nvme dependencies + package: + name: "{{ item }}" + state: present + with_items: + - nvme-cli + - nvmetcli + + - name: enable NVME kernel modules + modprobe: + name: "{{ item }}" + state: present + with_items: + - nvmet + - nvme_loop + - nvme_fabrics + + - name: check if the nvme file is attached to loop0 + command: losetup -l /dev/loop0 + register: losetup_list + + - name: detach current nvme0 file + command: losetup -d /dev/loop0 + when: '"loop0_nvme0" in losetup_list.stdout' + + - name: remove previous nvme0 file + file: + path: /opt/loop0_nvme0 + state: absent + + - name: create a 15GB sparse file for NVMe + command: fallocate -l 15G /opt/loop0_nvme0 + + - name: setup loop device with sparse file + command: losetup /dev/loop0 /opt/loop0_nvme0 + when: + - '"loop0_nvme0" not in losetup_list.stdout' + + - name: create the loop.json file for nvmetcli + copy: + content: | + { + "hosts": [ + { + "nqn": "hostnqn" + } + 
], + "ports": [ + { + "addr": { + "adrfam": "", + "traddr": "", + "treq": "not specified", + "trsvcid": "", + "trtype": "loop" + }, + "portid": 1, + "referrals": [], + "subsystems": [ + "testnqn" + ] + } + ], + "subsystems": [ + { + "allowed_hosts": [ + "hostnqn" + ], + "attr": { + "allow_any_host": "0" + }, + "namespaces": [ + { + "device": { + "nguid": "ef90689c-6c46-d44c-89c1-4067801309a8", + "path": "/dev/loop0" + }, + "enable": 1, + "nsid": 1 + } + ], + "nqn": "testnqn" + } + ] + } + dest: "/opt/loop.json" + + - name: setup the /dev/loop0 target with nvmetcli + command: nvmetcli restore /opt/loop.json + + - name: connect the new target as an nvme device + command: nvme connect -t loop -n testnqn -q hostnqn + + - name: debug output for nvme list + command: nvme list diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml new file mode 100644 index 000000000..c9375e2b7 --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml @@ -0,0 +1,63 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd daemons + service: + name: "ceph-osd@{{ item }}" + state: stopped + with_items: "{{ osd_ids }}" + + +- hosts: mons + become: yes + tasks: + + - name: purge osds + command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it" + with_items: "{{ osd_ids }}" + + +- hosts: osds + become: yes + tasks: + + - name: zap devices used for OSDs + command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy" + with_items: "{{ devices }}" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: batch create devices again + command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: ensure batch create is idempotent + command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}" + register: batch_cmd + failed_when: false + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: check batch idempotency + fail: + msg: "lvm batch failed idempotency check" + when: + - batch_cmd.rc != 0 + - "'strategy changed' not in batch_cmd.stdout" + + - name: run batch --report to see if devices get filtered + command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}" + register: report_cmd + failed_when: false + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: check batch --report idempotency + fail: + msg: "lvm batch --report failed idempotency check" + when: + - batch_cmd.rc != 0 + - "'strategy changed' not in batch_cmd.stdout" diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore.yml deleted file mode 100644 index 85c702e38..000000000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore.yml +++ /dev/null @@ -1,46 +0,0 @@ - -- hosts: osds - become: yes - tasks: - - - name: stop ceph-osd@1 daemon - service: - name: ceph-osd@1 - state: stopped - - - name: stop ceph-osd@0 daemon - service: - name: ceph-osd@0 - state: stopped - - -- hosts: mons - become: 
yes - tasks: - - - name: destroy osd.1 - command: "ceph osd purge osd.1 --yes-i-really-mean-it" - - - name: destroy osd.0 - command: "ceph osd purge osd.0 --yes-i-really-mean-it" - - -- hosts: osds - become: yes - tasks: - - - name: zap /dev/sdd - command: "ceph-volume lvm zap /dev/sdb --destroy" - environment: - CEPH_VOLUME_DEBUG: 1 - - - - name: zap /dev/sdc - command: "ceph-volume lvm zap /dev/sdc --destroy" - environment: - CEPH_VOLUME_DEBUG: 1 - - - name: batch create /dev/sdb and /dev/sdc again - command: "ceph-volume lvm batch --yes --bluestore /dev/sdb /dev/sdc" - environment: - CEPH_VOLUME_DEBUG: 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore_dmcrypt.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore_dmcrypt.yml deleted file mode 100644 index 9e1a73f65..000000000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore_dmcrypt.yml +++ /dev/null @@ -1,46 +0,0 @@ - -- hosts: osds - become: yes - tasks: - - - name: stop ceph-osd@1 daemon - service: - name: ceph-osd@1 - state: stopped - - - name: stop ceph-osd@0 daemon - service: - name: ceph-osd@0 - state: stopped - - -- hosts: mons - become: yes - tasks: - - - name: destroy osd.1 - command: "ceph osd purge osd.1 --yes-i-really-mean-it" - - - name: destroy osd.0 - command: "ceph osd purge osd.0 --yes-i-really-mean-it" - - -- hosts: osds - become: yes - tasks: - - - name: zap /dev/sdd - command: "ceph-volume lvm zap /dev/sdb --destroy" - environment: - CEPH_VOLUME_DEBUG: 1 - - - - name: zap /dev/sdc - command: "ceph-volume lvm zap /dev/sdc --destroy" - environment: - CEPH_VOLUME_DEBUG: 1 - - - name: batch create /dev/sdb and /dev/sdc again - command: "ceph-volume lvm batch --yes --bluestore --dmcrypt /dev/sdb /dev/sdc" - environment: - CEPH_VOLUME_DEBUG: 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore.yml deleted file mode 100644 index 95909f97b..000000000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore.yml +++ /dev/null @@ -1,46 +0,0 @@ - -- hosts: osds - become: yes - tasks: - - - name: stop ceph-osd@1 daemon - service: - name: ceph-osd@1 - state: stopped - - - name: stop ceph-osd@0 daemon - service: - name: ceph-osd@0 - state: stopped - - -- hosts: mons - become: yes - tasks: - - - name: destroy osd.1 - command: "ceph osd purge osd.1 --yes-i-really-mean-it" - - - name: destroy osd.0 - command: "ceph osd purge osd.0 --yes-i-really-mean-it" - - -- hosts: osds - become: yes - tasks: - - - name: zap /dev/sdd - command: "ceph-volume lvm zap /dev/sdb --destroy" - environment: - CEPH_VOLUME_DEBUG: 1 - - - - name: zap /dev/sdc - command: "ceph-volume lvm zap /dev/sdc --destroy" - environment: - CEPH_VOLUME_DEBUG: 1 - - - name: batch create /dev/sdb and /dev/sdc again - command: "ceph-volume lvm batch --yes --filestore /dev/sdb /dev/sdc" - environment: - CEPH_VOLUME_DEBUG: 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore_dmcrypt.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore_dmcrypt.yml deleted file mode 100644 index 81f84e919..000000000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore_dmcrypt.yml +++ /dev/null @@ -1,46 +0,0 @@ - -- hosts: osds - become: yes - tasks: - - - name: stop ceph-osd@1 daemon - service: - name: ceph-osd@1 - 
state: stopped - - - name: stop ceph-osd@0 daemon - service: - name: ceph-osd@0 - state: stopped - - -- hosts: mons - become: yes - tasks: - - - name: destroy osd.1 - command: "ceph osd purge osd.1 --yes-i-really-mean-it" - - - name: destroy osd.0 - command: "ceph osd purge osd.0 --yes-i-really-mean-it" - - -- hosts: osds - become: yes - tasks: - - - name: zap /dev/sdd - command: "ceph-volume lvm zap /dev/sdb --destroy" - environment: - CEPH_VOLUME_DEBUG: 1 - - - - name: zap /dev/sdc - command: "ceph-volume lvm zap /dev/sdc --destroy" - environment: - CEPH_VOLUME_DEBUG: 1 - - - name: batch create /dev/sdb and /dev/sdc again - command: "ceph-volume lvm batch --yes --filestore --dmcrypt /dev/sdb /dev/sdc" - environment: - CEPH_VOLUME_DEBUG: 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini index 6a43a110e..c2725a09f 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = {centos7,xenial}-{bluestore,filestore}-{single_type,single_type_dmcrypt} +envlist = {centos7,xenial}-{bluestore,filestore}-{single_type,single_type_dmcrypt},centos7-{bluestore,filestore}-{mixed_type, mixed_type_dmcrypt} skipsdist = True [testenv] @@ -17,28 +17,31 @@ setenv= ANSIBLE_SSH_RETRIES = 5 VAGRANT_CWD = {changedir} CEPH_VOLUME_DEBUG = 1 -deps= - ansible~=2.6,<2.7 - testinfra - pytest-xdist - notario>=0.0.13 changedir= centos7-filestore-single_type: {toxinidir}/centos7/filestore/single-type centos7-filestore-single_type_dmcrypt: {toxinidir}/centos7/filestore/single-type-dmcrypt + centos7-filestore-mixed_type: {toxinidir}/centos7/filestore/mixed-type + centos7-filestore-mixed_type_dmcrypt: {toxinidir}/centos7/filestore/mixed-type-dmcrypt centos7-bluestore-single_type: {toxinidir}/centos7/bluestore/single-type centos7-bluestore-single_type_dmcrypt: {toxinidir}/centos7/bluestore/single-type-dmcrypt + centos7-bluestore-mixed_type: {toxinidir}/centos7/bluestore/mixed-type + centos7-bluestore-mixed_type_dmcrypt: {toxinidir}/centos7/bluestore/mixed-type-dmcrypt xenial-filestore-single_type: {toxinidir}/xenial/filestore/single-type xenial-filestore-single_type_dmcrypt: {toxinidir}/xenial/filestore/single-type-dmcrypt xenial-bluestore-single_type: {toxinidir}/xenial/bluestore/single-type xenial-bluestore-single_type_dmcrypt: {toxinidir}/xenial/bluestore/single-type-dmcrypt commands= git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible + pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir} cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible + # individual scenario setup + ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml + # use ceph-ansible to deploy a ceph cluster on the vms ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}" diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/group_vars/all 
b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/group_vars/all index 78228765e..591514268 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/group_vars/all @@ -2,7 +2,7 @@ dmcrypt: True ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 @@ -21,3 +21,10 @@ ceph_conf_overrides: global: osd_pool_default_pg_num: 8 osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/setup.yml new file mode 120000 index 000000000..30874dfbb --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test.yml index ac5ac6b0d..aa867bcde 120000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test.yml @@ -1 +1 @@ -../../../playbooks/test_bluestore_dmcrypt.yml \ No newline at end of file +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/group_vars/all index 87b031fd4..f71c89ef3 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/group_vars/all @@ -1,7 +1,7 @@ --- ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 @@ -20,3 +20,10 @@ ceph_conf_overrides: global: osd_pool_default_pg_num: 8 osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/setup.yml new file mode 120000 index 000000000..30874dfbb --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test.yml index 165d9da29..aa867bcde 120000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test.yml +++ 
b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test.yml @@ -1 +1 @@ -../../../playbooks/test_bluestore.yml \ No newline at end of file +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/group_vars/all index 2f5d6fb2b..a4eafa104 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/group_vars/all @@ -2,7 +2,7 @@ dmcrypt: True ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 @@ -23,3 +23,10 @@ ceph_conf_overrides: osd_pool_default_size: 1 osd: osd_journal_size: 2048 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/setup.yml new file mode 120000 index 000000000..30874dfbb --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test.yml index 8ed725f8a..aa867bcde 120000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test.yml @@ -1 +1 @@ -../../../playbooks/test_filestore_dmcrypt.yml \ No newline at end of file +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/group_vars/all index d4b26c383..8cf7a0c97 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/group_vars/all @@ -1,7 +1,7 @@ --- ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 @@ -20,3 +20,10 @@ ceph_conf_overrides: global: osd_pool_default_pg_num: 8 osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/setup.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/setup.yml new file mode 120000 index 000000000..30874dfbb --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml \ No newline at end of file diff --git 
a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test.yml index 1a8c37c13..aa867bcde 120000 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test.yml @@ -1 +1 @@ -../../../playbooks/test_filestore.yml \ No newline at end of file +../../../playbooks/test.yml \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all index 37e87be31..01ae1dae9 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all @@ -1,7 +1,7 @@ --- ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all index 6f36f4922..9d4f50de7 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all @@ -2,7 +2,7 @@ dmcrypt: True ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml index d882293de..bebe6dc36 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml @@ -17,10 +17,10 @@ tasks: - name: destroy osd.2 - command: "ceph osd destroy osd.2 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" - name: destroy osd.0 - command: "ceph osd destroy osd.0 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" - hosts: osds become: yes @@ -28,23 +28,23 @@ # osd.2 device - name: zap /dev/sdd1 - command: "ceph-volume lvm zap /dev/sdd1 --destroy" + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" environment: CEPH_VOLUME_DEBUG: 1 - name: redeploy osd.2 using /dev/sdd1 - command: "ceph-volume lvm create --bluestore --data /dev/sdd1 --osd-id 2" + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2" environment: CEPH_VOLUME_DEBUG: 1 # osd.0 lv - name: zap test_group/data-lv1 - command: "ceph-volume lvm zap test_group/data-lv1" + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" environment: CEPH_VOLUME_DEBUG: 1 - name: redeploy osd.0 using test_group/data-lv1 - command: "ceph-volume lvm create --bluestore --data test_group/data-lv1 --osd-id 0" + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0" environment: CEPH_VOLUME_DEBUG: 1 @@ -59,7 +59,7 @@ tasks: - name: 
destroy osd.0 - command: "ceph osd destroy osd.0 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" - hosts: osds @@ -68,12 +68,12 @@ - name: zap test_group/data-lv1 - command: "ceph-volume lvm zap test_group/data-lv1" + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" environment: CEPH_VOLUME_DEBUG: 1 - name: prepare osd.0 using test_group/data-lv1 - command: "ceph-volume lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" + command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" environment: CEPH_VOLUME_DEBUG: 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all index af647e911..5af1b7ac4 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all @@ -1,7 +1,7 @@ --- ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all index 8cd6c48c1..7544678b9 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all @@ -2,7 +2,7 @@ dmcrypt: True ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml index 5dc67ade1..c48e4bece 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml @@ -19,10 +19,10 @@ tasks: - name: destroy osd.2 - command: "ceph osd destroy osd.2 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" - name: destroy osd.0 - command: "ceph osd destroy osd.0 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" - hosts: osds @@ -31,33 +31,33 @@ # osd.2 device - name: zap /dev/sdd1 - command: "ceph-volume lvm zap /dev/sdd1 --destroy" + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" environment: CEPH_VOLUME_DEBUG: 1 - name: zap /dev/sdd2 - command: "ceph-volume lvm zap /dev/sdd2 --destroy" + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy" environment: CEPH_VOLUME_DEBUG: 1 - name: redeploy osd.2 using /dev/sdd1 - command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2" + command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2" environment: CEPH_VOLUME_DEBUG: 1 # osd.0 lv - name: zap test_group/data-lv1 - command: "ceph-volume lvm zap test_group/data-lv1" + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" environment: CEPH_VOLUME_DEBUG: 1 
- name: zap /dev/sdc1 - command: "ceph-volume lvm zap /dev/sdc1 --destroy" + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy" environment: CEPH_VOLUME_DEBUG: 1 - name: prepare osd.0 again using test_group/data-lv1 - command: "ceph-volume lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0" + command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0" environment: CEPH_VOLUME_DEBUG: 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml index 90eedbdee..e4e804a70 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml @@ -19,10 +19,10 @@ tasks: - name: destroy osd.2 - command: "ceph osd destroy osd.2 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" - name: destroy osd.0 - command: "ceph osd destroy osd.0 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" - hosts: osds @@ -31,23 +31,23 @@ # osd.2 device - name: zap /dev/sdd1 - command: "ceph-volume lvm zap /dev/sdd1 --destroy" + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" environment: CEPH_VOLUME_DEBUG: 1 - name: redeploy osd.2 using /dev/sdd1 - command: "ceph-volume lvm create --bluestore --data /dev/sdd1 --osd-id 2" + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2" environment: CEPH_VOLUME_DEBUG: 1 - # osd.0 device + # osd.0 device (zap without --destroy that removes the LV) - name: zap test_group/data-lv1 - command: "ceph-volume lvm zap test_group/data-lv1" + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" environment: CEPH_VOLUME_DEBUG: 1 - name: prepare osd.0 again using test_group/data-lv1 - command: "ceph-volume lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" + command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" environment: CEPH_VOLUME_DEBUG: 1 @@ -60,3 +60,42 @@ command: "ceph-volume lvm list" environment: CEPH_VOLUME_DEBUG: 1 + + - name: create temporary directory + tempfile: + state: directory + suffix: sparse + register: tmpdir + + - name: create a 5GB sparse file + command: fallocate -l 5G {{ tmpdir.path }}/sparse.file + + - name: find an empty loop device + command: losetup -f + register: losetup_list + + - name: setup loop device with sparse file + command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file + + - name: create volume group + command: vgcreate test_zap {{ losetup_list.stdout }} + failed_when: false + + - name: create logical volume 1 + command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap + failed_when: false + + - name: create logical volume 2 + command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap + failed_when: false + + # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed + - name: zap test_zap/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap test_zap/data-lv2 + command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git 
a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml index 697950283..4aa3cf19d 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml @@ -19,10 +19,10 @@ tasks: - name: destroy osd.2 - command: "ceph osd destroy osd.2 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" - name: destroy osd.0 - command: "ceph osd destroy osd.0 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" - hosts: osds @@ -31,35 +31,37 @@ # osd.2 device - name: zap /dev/sdd1 - command: "ceph-volume lvm zap /dev/sdd1 --destroy" + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" environment: CEPH_VOLUME_DEBUG: 1 # osd.2 journal - name: zap /dev/sdd2 - command: "ceph-volume lvm zap /dev/sdd2 --destroy" + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy" environment: CEPH_VOLUME_DEBUG: 1 - name: redeploy osd.2 using /dev/sdd1 - command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2" + command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2" environment: CEPH_VOLUME_DEBUG: 1 # osd.0 data lv + # note: we don't use --destroy here to test this works without that flag. + # --destroy is used in the bluestore tests - name: zap test_group/data-lv1 - command: "ceph-volume lvm zap test_group/data-lv1" + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" environment: CEPH_VOLUME_DEBUG: 1 - # osd.0 journal device + # osd.0 journal device (zap without --destroy that removes the LV) - name: zap /dev/sdc1 - command: "ceph-volume lvm zap /dev/sdc1 --destroy" + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1" environment: CEPH_VOLUME_DEBUG: 1 - name: prepare osd.0 again using test_group/data-lv1 - command: "ceph-volume lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0" + command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0" environment: CEPH_VOLUME_DEBUG: 1 @@ -72,3 +74,42 @@ command: "ceph-volume lvm list" environment: CEPH_VOLUME_DEBUG: 1 + + - name: create temporary directory + tempfile: + state: directory + suffix: sparse + register: tmpdir + + - name: create a 5GB sparse file + command: fallocate -l 5G {{ tmpdir.path }}/sparse.file + + - name: find an empty loop device + command: losetup -f + register: losetup_list + + - name: setup loop device with sparse file + command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file + + - name: create volume group + command: vgcreate test_zap {{ losetup_list.stdout }} + failed_when: false + + - name: create logical volume 1 + command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap + failed_when: false + + - name: create logical volume 2 + command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap + failed_when: false + + # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed + - name: zap test_zap/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap test_zap/data-lv2 + command: "ceph-volume --cluster {{ cluster }} lvm zap 
--destroy test_zap/data-lv2" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini index 3e451aab5..d2432c8a8 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini @@ -18,11 +18,6 @@ setenv= ANSIBLE_SSH_RETRIES = 5 VAGRANT_CWD = {changedir} CEPH_VOLUME_DEBUG = 1 -deps= - ansible~=2.6,<2.7 - testinfra - pytest-xdist - notario>=0.0.13 changedir= # plain/unencrypted centos7-filestore-create: {toxinidir}/centos7/filestore/create @@ -42,9 +37,7 @@ changedir= centos7-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate commands= git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible - # XXX Ideally we should be able to consume the requirements for ceph-ansible directly, - # but the master branch doesn't pin dependencies so we can't guarantee to work correctly - #pip install -r {envdir}/tmp/ceph-ansible/requirements.txt + pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir} diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all index 37e87be31..01ae1dae9 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all @@ -1,7 +1,7 @@ --- ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all index 6f36f4922..9d4f50de7 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all @@ -2,7 +2,7 @@ dmcrypt: True ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml index b6db0ac2f..19209b1d2 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml @@ -17,10 +17,10 @@ tasks: - name: destroy osd.2 - command: "ceph osd destroy osd.2 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" - name: destroy osd.0 - command: "ceph osd destroy osd.0 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" - hosts: osds @@ -29,23 +29,23 @@ # osd.2 device - name: zap /dev/sdd1 - command: "ceph-volume lvm zap /dev/sdd1 --destroy" + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" environment: 
CEPH_VOLUME_DEBUG: 1 - name: redeploy osd.2 using /dev/sdd1 - command: "ceph-volume lvm create --bluestore --data /dev/sdd1 --osd-id 2" + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2" environment: CEPH_VOLUME_DEBUG: 1 # osd.0 lv - name: zap test_group/data-lv1 - command: "ceph-volume lvm zap test_group/data-lv1" + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" environment: CEPH_VOLUME_DEBUG: 1 - name: redeploy osd.0 using test_group/data-lv1 - command: "ceph-volume lvm create --bluestore --data test_group/data-lv1 --osd-id 0" + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0" environment: CEPH_VOLUME_DEBUG: 1 @@ -60,7 +60,7 @@ tasks: - name: destroy osd.0 - command: "ceph osd destroy osd.0 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" - hosts: osds @@ -68,12 +68,12 @@ tasks: - name: zap test_group/data-lv1 - command: "ceph-volume lvm zap test_group/data-lv1" + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" environment: CEPH_VOLUME_DEBUG: 1 - name: prepare osd.0 using test_group/data-lv1 - command: "ceph-volume lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" + command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" environment: CEPH_VOLUME_DEBUG: 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all index af647e911..5af1b7ac4 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all @@ -1,7 +1,7 @@ --- ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all index 8cd6c48c1..7544678b9 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all @@ -2,7 +2,7 @@ dmcrypt: True ceph_dev: True -cluster: ceph +cluster: test public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" monitor_interface: eth1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml index 5dc67ade1..c48e4bece 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml @@ -19,10 +19,10 @@ tasks: - name: destroy osd.2 - command: "ceph osd destroy osd.2 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" - name: destroy osd.0 - command: "ceph osd destroy osd.0 --yes-i-really-mean-it" + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" - hosts: osds @@ -31,33 +31,33 @@ # osd.2 device - name: zap /dev/sdd1 - command: "ceph-volume lvm zap /dev/sdd1 --destroy" + 
command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" environment: CEPH_VOLUME_DEBUG: 1 - name: zap /dev/sdd2 - command: "ceph-volume lvm zap /dev/sdd2 --destroy" + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy" environment: CEPH_VOLUME_DEBUG: 1 - name: redeploy osd.2 using /dev/sdd1 - command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2" + command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2" environment: CEPH_VOLUME_DEBUG: 1 # osd.0 lv - name: zap test_group/data-lv1 - command: "ceph-volume lvm zap test_group/data-lv1" + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" environment: CEPH_VOLUME_DEBUG: 1 - name: zap /dev/sdc1 - command: "ceph-volume lvm zap /dev/sdc1 --destroy" + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy" environment: CEPH_VOLUME_DEBUG: 1 - name: prepare osd.0 again using test_group/data-lv1 - command: "ceph-volume lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0" + command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0" environment: CEPH_VOLUME_DEBUG: 1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml index ff3954b21..3564cf3cd 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml @@ -73,51 +73,49 @@ - mgrs gather_facts: false become: True + any_errors_fatal: true roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - - role: ceph-config - tags: ['ceph_update_config'] - -- hosts: mons - gather_facts: false - become: True - roles: - - role: ceph-defaults - - role: ceph-common - - role: ceph-mon - -- hosts: mgrs - gather_facts: false - become: True - roles: - - role: ceph-defaults - - role: ceph-common - - role: ceph-mgr - -- hosts: osds - gather_facts: false - become: True tasks: - name: rsync ceph-volume to test nodes on centos synchronize: src: "{{ toxinidir}}/../../../../ceph_volume" dest: "/usr/lib/python2.7/site-packages" use_ssh_args: true - when: ansible_os_family == "RedHat" + when: + - ansible_os_family == "RedHat" + - inventory_hostname in groups.get(osd_group_name, []) - name: rsync ceph-volume to test nodes on ubuntu synchronize: src: "{{ toxinidir}}/../../../../ceph_volume" dest: "/usr/lib/python2.7/dist-packages" use_ssh_args: true - when: ansible_os_family == "Debian" + when: + - ansible_os_family == "Debian" + - inventory_hostname in groups.get(osd_group_name, []) -- hosts: osds - gather_facts: false - become: True - roles: - - role: ceph-defaults - - role: ceph-common - - role: ceph-osd + - name: run ceph-config role + import_role: + name: ceph-config + + - name: run ceph-mon role + import_role: + name: ceph-mon + when: + - inventory_hostname in groups.get(mon_group_name, []) + + - name: run ceph-mgr role + import_role: + name: ceph-mgr + when: + - inventory_hostname in groups.get(mgr_group_name, []) + + - name: run ceph-osd role + import_role: + name: ceph-osd + when: + - inventory_hostname in groups.get(osd_group_name, []) diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/ceph/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini index 
1e813cedf..391fb4ae9 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini @@ -18,11 +18,6 @@ setenv= ANSIBLE_SSH_RETRIES = 5 VAGRANT_CWD = {changedir} CEPH_VOLUME_DEBUG = 1 -deps= - ansible~=2.6,<2.7 - testinfra - pytest-xdist - notario>=0.0.13 changedir= centos7-filestore-activate: {toxinidir}/centos7/filestore/activate centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate @@ -38,9 +33,7 @@ changedir= centos7-filestore-dmcrypt_luks: {toxinidir}/centos7/filestore/dmcrypt-luks commands= git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible - # XXX Ideally we should be able to consume the requirements for ceph-ansible directly, - # but the master branch doesn't pin dependencies so we can't guarantee to work correctly - #pip install -r {envdir}/tmp/ceph-ansible/requirements.txt + pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir} diff --git a/ceph/src/ceph-volume/ceph_volume/tests/systemd/test_main.py b/ceph/src/ceph-volume/ceph_volume/tests/systemd/test_main.py index 86a685ab5..0af52e8d1 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/systemd/test_main.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/systemd/test_main.py @@ -1,16 +1,16 @@ import pytest from ceph_volume import exceptions, conf -from ceph_volume.systemd import main +from ceph_volume.systemd.main import parse_subcommand, main, process class TestParseSubcommand(object): def test_no_subcommand_found(self): with pytest.raises(exceptions.SuffixParsingError): - main.parse_subcommand('') + parse_subcommand('') def test_sub_command_is_found(self): - result = main.parse_subcommand('lvm-1-sha-1-something-0') + result = parse_subcommand('lvm-1-sha-1-something-0') assert result == 'lvm' @@ -33,16 +33,16 @@ class TestMain(object): def test_no_arguments_parsing_error(self): with pytest.raises(RuntimeError): - main.main(args=[]) + main(args=[]) def test_parsing_suffix_error(self): with pytest.raises(exceptions.SuffixParsingError): - main.main(args=['asdf']) + main(args=['asdf']) def test_correct_command(self, monkeypatch): run = Capture() - monkeypatch.setattr(main.process, 'run', run) - main.main(args=['ceph-volume-systemd', 'lvm-8715BEB4-15C5-49DE-BA6F-401086EC7B41-0' ]) + monkeypatch.setattr(process, 'run', run) + main(args=['ceph-volume-systemd', 'lvm-8715BEB4-15C5-49DE-BA6F-401086EC7B41-0' ]) command = run.calls[0][0] assert command == [ 'ceph-volume', diff --git a/ceph/src/ceph-volume/ceph_volume/tests/test_inventory.py b/ceph/src/ceph-volume/ceph_volume/tests/test_inventory.py new file mode 100644 index 000000000..71cb027ed --- /dev/null +++ b/ceph/src/ceph-volume/ceph_volume/tests/test_inventory.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- + +import pytest +from ceph_volume.util.device import Devices + + +@pytest.fixture +def device_report_keys(device_info): + device_info(devices={ + # example output of disk.get_devices() + '/dev/sdb': {'human_readable_size': '1.82 TB', + 'locked': 0, + 'model': 'PERC H700', + 'nr_requests': '128', + 'partitions': {}, + 'path': '/dev/sdb', + 'removable': '0', + 'rev': '2.10', + 'ro': '0', + 'rotational': '1', + 'sas_address': '', + 'sas_device_handle': '', + 'scheduler_mode': 'cfq', + 'sectors': 0, + 'sectorsize': '512', + 
'size': 1999844147200.0, + 'support_discard': '', + 'vendor': 'DELL'} + } + ) + report = Devices().json_report()[0] + return list(report.keys()) + +@pytest.fixture +def device_sys_api_keys(device_info): + device_info(devices={ + # example output of disk.get_devices() + '/dev/sdb': {'human_readable_size': '1.82 TB', + 'locked': 0, + 'model': 'PERC H700', + 'nr_requests': '128', + 'partitions': {}, + 'path': '/dev/sdb', + 'removable': '0', + 'rev': '2.10', + 'ro': '0', + 'rotational': '1', + 'sas_address': '', + 'sas_device_handle': '', + 'scheduler_mode': 'cfq', + 'sectors': 0, + 'sectorsize': '512', + 'size': 1999844147200.0, + 'support_discard': '', + 'vendor': 'DELL'} + } + ) + report = Devices().json_report()[0] + return list(report['sys_api'].keys()) + + +class TestInventory(object): + + expected_keys = [ + 'path', + 'rejected_reasons', + 'sys_api', + 'available', + 'lvs', + ] + + expected_sys_api_keys = [ + 'human_readable_size', + 'locked', + 'model', + 'nr_requests', + 'partitions', + 'path', + 'removable', + 'rev', + 'ro', + 'rotational', + 'sas_address', + 'sas_device_handle', + 'scheduler_mode', + 'sectors', + 'sectorsize', + 'size', + 'support_discard', + 'vendor', + ] + + def test_json_inventory_keys_unexpected(self, device_report_keys): + for k in device_report_keys: + assert k in self.expected_keys, "unexpected key {} in report".format(k) + + def test_json_inventory_keys_missing(self, device_report_keys): + for k in self.expected_keys: + assert k in device_report_keys, "expected key {} in report".format(k) + + def test_sys_api_keys_unexpected(self, device_sys_api_keys): + for k in device_sys_api_keys: + assert k in self.expected_sys_api_keys, "unexpected key {} in sys_api field".format(k) + + def test_sys_api_keys_missing(self, device_sys_api_keys): + for k in self.expected_sys_api_keys: + assert k in device_sys_api_keys, "expected key {} in sys_api field".format(k) + diff --git a/ceph/src/ceph-volume/ceph_volume/tests/test_main.py b/ceph/src/ceph-volume/ceph_volume/tests/test_main.py index 42fbec155..45dcfff85 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/test_main.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/test_main.py @@ -6,18 +6,21 @@ from ceph_volume import main class TestVolume(object): def test_main_spits_help_with_no_arguments(self, capsys): - main.Volume(argv=[]) + with pytest.raises(SystemExit): + main.Volume(argv=[]) stdout, stderr = capsys.readouterr() assert 'Log Path' in stdout def test_warn_about_using_help_for_full_options(self, capsys): - main.Volume(argv=[]) + with pytest.raises(SystemExit): + main.Volume(argv=[]) stdout, stderr = capsys.readouterr() assert 'See "ceph-volume --help" for full list' in stdout def test_environ_vars_show_up(self, capsys): os.environ['CEPH_CONF'] = '/opt/ceph.conf' - main.Volume(argv=[]) + with pytest.raises(SystemExit): + main.Volume(argv=[]) stdout, stderr = capsys.readouterr() assert 'CEPH_CONF' in stdout assert '/opt/ceph.conf' in stdout diff --git a/ceph/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py b/ceph/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py index 9f20edbf7..2167aeac1 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py @@ -4,31 +4,6 @@ from ceph_volume import exceptions from ceph_volume.util import arg_validators -invalid_lv_paths = [ - '', 'lv_name', '/lv_name', 'lv_name/', - '/dev/lv_group/lv_name' -] - - -class TestLVPath(object): - - def setup(self): - self.validator = 
arg_validators.LVPath() - - @pytest.mark.parametrize('path', invalid_lv_paths) - def test_no_slash_is_an_error(self, path): - with pytest.raises(argparse.ArgumentError): - self.validator(path) - - def test_is_valid(self): - path = 'vg/lv' - assert self.validator(path) == path - - def test_abspath_is_valid(self): - path = '/' - assert self.validator(path) == path - - class TestOSDPath(object): def setup(self): diff --git a/ceph/src/ceph-volume/ceph_volume/tests/util/test_device.py b/ceph/src/ceph-volume/ceph_volume/tests/util/test_device.py index 225b06031..99e1d494c 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/util/test_device.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/util/test_device.py @@ -1,3 +1,4 @@ +import pytest from ceph_volume.util import device from ceph_volume.api import lvm as api @@ -12,7 +13,7 @@ class TestDevice(object): assert "foo" in disk.sys_api def test_is_lv(self, device_info): - data = {"lv_path": "vg/lv"} + data = {"lv_path": "vg/lv", "vg_name": "vg", "name": "lv"} device_info(lv=data) disk = device.Device("vg/lv") assert disk.is_lv @@ -55,8 +56,36 @@ class TestDevice(object): disk = device.Device("/dev/sda") assert not disk.is_mapper + def test_is_ceph_disk_member_lsblk(self, device_info): + lsblk = {"PARTLABEL": "ceph data"} + device_info(lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.is_ceph_disk_member + + def test_is_not_ceph_disk_member_lsblk(self, device_info): + lsblk = {"PARTLABEL": "gluster partition"} + device_info(lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.is_ceph_disk_member is False + + def test_is_ceph_disk_member_blkid(self, device_info): + # falls back to blkid + lsblk = {"PARTLABEL": ""} + blkid = {"PARTLABEL": "ceph data"} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/sda") + assert disk.is_ceph_disk_member + + def test_is_not_ceph_disk_member_blkid(self, device_info): + # falls back to blkid + lsblk = {"PARTLABEL": ""} + blkid = {"PARTLABEL": "gluster partition"} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/sda") + assert disk.is_ceph_disk_member is False + def test_pv_api(self, device_info, pvolumes, monkeypatch): - FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", pv_tags={}, vg_name="vg") + FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000", pv_tags={}, vg_name="vg") pvolumes.append(FooPVolume) monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes) data = {"/dev/sda": {"foo": "bar"}} @@ -64,3 +93,153 @@ class TestDevice(object): device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.pvs_api + + @pytest.mark.parametrize("ceph_type", ["data", "block"]) + def test_used_by_ceph(self, device_info, pvolumes, monkeypatch, ceph_type): + FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000", pv_tags={}, vg_name="vg") + pvolumes.append(FooPVolume) + monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes) + data = {"/dev/sda": {"foo": "bar"}} + lsblk = {"TYPE": "part"} + lv_data = {"lv_path": "vg/lv", "vg_name": "vg", "lv_uuid": "0000", "tags": {"ceph.osd_id": 0, "ceph.type": ceph_type}} + device_info(devices=data, lsblk=lsblk, lv=lv_data) + disk = device.Device("/dev/sda") + assert disk.used_by_ceph + + def test_not_used_by_ceph(self, device_info, pvolumes, monkeypatch): + FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000", pv_tags={}, vg_name="vg") + pvolumes.append(FooPVolume) + monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes) + data = 
{"/dev/sda": {"foo": "bar"}} + lsblk = {"TYPE": "part"} + lv_data = {"lv_path": "vg/lv", "vg_name": "vg", "lv_uuid": "0000", "tags": {"ceph.osd_id": 0, "ceph.type": "journal"}} + device_info(devices=data, lsblk=lsblk, lv=lv_data) + disk = device.Device("/dev/sda") + assert not disk.used_by_ceph + + +class TestDeviceOrdering(object): + + def setup(self): + self.data = { + "/dev/sda": {"removable": 0}, + "/dev/sdb": {"removable": 1}, # invalid + "/dev/sdc": {"removable": 0}, + "/dev/sdd": {"removable": 1}, # invalid + } + + def test_valid_before_invalid(self, device_info): + device_info(devices=self.data) + sda = device.Device("/dev/sda") + sdb = device.Device("/dev/sdb") + + assert sda < sdb + assert sdb > sda + + def test_valid_alphabetical_ordering(self, device_info): + device_info(devices=self.data) + sda = device.Device("/dev/sda") + sdc = device.Device("/dev/sdc") + + assert sda < sdc + assert sdc > sda + + def test_invalid_alphabetical_ordering(self, device_info): + device_info(devices=self.data) + sdb = device.Device("/dev/sdb") + sdd = device.Device("/dev/sdd") + + assert sdb < sdd + assert sdd > sdb + + +ceph_partlabels = [ + 'ceph data', 'ceph journal', 'ceph block', + 'ceph block.wal', 'ceph block.db', 'ceph lockbox' +] + + +class TestCephDiskDevice(object): + + def test_partlabel_lsblk(self, device_info): + lsblk = {"PARTLABEL": ""} + device_info(lsblk=lsblk) + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.partlabel == '' + + def test_partlabel_blkid(self, device_info): + lsblk = {"PARTLABEL": ""} + blkid = {"PARTLABEL": "ceph data"} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.partlabel == 'ceph data' + + @pytest.mark.parametrize("label", ceph_partlabels) + def test_is_member_blkid(self, device_info, label): + lsblk = {"PARTLABEL": ""} + blkid = {"PARTLABEL": label} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.is_member is True + + def test_reject_removable_device(self, device_info): + data = {"/dev/sdb": {"removable": 1}} + device_info(devices=data) + disk = device.Device("/dev/sdb") + assert not disk.available + + def test_accept_non_removable_device(self, device_info): + data = {"/dev/sdb": {"removable": 0}} + device_info(devices=data) + disk = device.Device("/dev/sdb") + assert disk.available + + def test_reject_readonly_device(self, device_info): + data = {"/dev/cdrom": {"ro": 1}} + device_info(devices=data) + disk = device.Device("/dev/cdrom") + assert not disk.available + + def test_accept_non_readonly_device(self, device_info): + data = {"/dev/sda": {"ro": 0}} + device_info(devices=data) + disk = device.Device("/dev/sda") + assert disk.available + + @pytest.mark.parametrize("label", ceph_partlabels) + def test_is_member_lsblk(self, device_info, label): + lsblk = {"PARTLABEL": label} + device_info(lsblk=lsblk) + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.is_member is True + + def test_unknown_type(self, device_info): + lsblk = {"PARTLABEL": "gluster"} + device_info(lsblk=lsblk) + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.type == 'unknown' + + @pytest.mark.parametrize("label", ceph_partlabels) + def test_type_blkid(self, device_info, label): + expected = label.split()[-1].split('.')[-1] + lsblk = {"PARTLABEL": ""} + blkid = {"PARTLABEL": label} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert 
disk.type == expected + + @pytest.mark.parametrize("label", ceph_partlabels) + def test_type_lsblk(self, device_info, label): + expected = label.split()[-1].split('.')[-1] + lsblk = {"PARTLABEL": label} + blkid = {"PARTLABEL": ''} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.type == expected diff --git a/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py b/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py index ae5fbe508..5d1bd82b6 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py @@ -16,6 +16,36 @@ class TestLsblkParser(object): assert result['SIZE'] == '10M' +class TestBlkidParser(object): + + def test_parses_whitespace_values(self): + output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa + result = disk._blkid_parser(output) + assert result['PARTLABEL'] == 'ceph data' + + def test_ignores_unmapped(self): + output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa + result = disk._blkid_parser(output) + assert len(result.keys()) == 4 + + def test_translates_to_partuuid(self): + output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa + result = disk._blkid_parser(output) + assert result['PARTUUID'] == 'b89c03bc-bf58-4338-a8f8-a2f484852b4f' + + +class TestBlkid(object): + + def test_parses_translated(self, stub_call): + output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa + stub_call((output.split(), [], 0)) + result = disk.blkid('/dev/sdb1') + assert result['PARTUUID'] == 'b89c03bc-bf58-4338-a8f8-a2f484852b4f' + assert result['PARTLABEL'] == 'ceph data' + assert result['UUID'] == '62416664-cbaf-40bd-9689-10bd337379c3' + assert result['TYPE'] == 'xfs' + + class TestDeviceFamily(object): def test_groups_multiple_devices(self, stub_call): @@ -167,6 +197,15 @@ class TestGetDevices(object): _mapper_path=str(tmpdir)) assert result == {} + def test_no_devices_are_found_errors(self, tmpdir): + block_path, dev_path, mapper_path = self.setup_paths(tmpdir) + os.makedirs(os.path.join(block_path, 'sda')) + result = disk.get_devices( + _sys_block_path=block_path, # has 1 device + _dev_path=str(tmpdir), # exists but no devices + _mapper_path='/does/not/exist/path') # does not exist + assert result == {} + def test_sda_block_is_found(self, tmpfile, tmpdir): block_path, dev_path, mapper_path = self.setup_paths(tmpdir) dev_sda_path = os.path.join(dev_path, 'sda') @@ -181,19 +220,6 @@ class TestGetDevices(object): assert result[dev_sda_path]['model'] == '' assert result[dev_sda_path]['partitions'] == {} - def test_sda_is_removable_gets_skipped(self, tmpfile, tmpdir): - block_path, dev_path, mapper_path = self.setup_paths(tmpdir) - dev_sda_path = os.path.join(dev_path, 'sda') - block_sda_path = os.path.join(block_path, 'sda') - os.makedirs(block_sda_path) - os.makedirs(dev_sda_path) - - tmpfile('removable', contents='1', directory=block_sda_path) - result = disk.get_devices( - 
_sys_block_path=block_path, - _dev_path=dev_path, - _mapper_path=mapper_path) - assert result == {} def test_dm_device_is_not_used(self, monkeypatch, tmpdir): # the link to the mapper is used instead @@ -430,10 +456,16 @@ class TestSizeOperations(object): assert int(result) == 1024 def test_division_with_non_size_objects(self): + base = disk.Size(gb=1) + result = base / 2 + assert result.mb == 512 + assert result.mb.as_int() == 512 + + def test_division_with_non_size_objects_without_state(self): base = disk.Size(gb=1) base / 2 - assert base.mb == 512 - assert base.mb.as_int() == 512 + assert base.gb == 1 + assert base.gb.as_int() == 1 class TestSizeAttributes(object): diff --git a/ceph/src/ceph-volume/ceph_volume/util/arg_validators.py b/ceph/src/ceph-volume/ceph_volume/util/arg_validators.py index d0144fc58..534c9aa64 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/arg_validators.py +++ b/ceph/src/ceph-volume/ceph_volume/util/arg_validators.py @@ -6,51 +6,27 @@ from ceph_volume.util import disk from ceph_volume.util.device import Device -class LVPath(object): - """ - A simple validator to ensure that a logical volume is specified like:: - - / - - Or a full path to a device, like ``/dev/sda`` +class ValidDevice(object): - Because for LVM it is better to be specific on what group does an lv - belongs to. - """ + def __init__(self, as_string=False): + self.as_string = as_string def __call__(self, string): + device = Device(string) error = None - if string.startswith('/'): - if not os.path.exists(string): - error = "Argument (device) does not exist: %s" % string - raise argparse.ArgumentError(None, error) - else: - return string - try: - vg, lv = string.split('/') - except ValueError: - error = "Logical volume must be specified as 'volume_group/logical_volume' but got: %s" % string - raise argparse.ArgumentError(None, error) - - if not vg: - error = "Didn't specify a volume group like 'volume_group/logical_volume', got: %s" % string - if not lv: - error = "Didn't specify a logical volume like 'volume_group/logical_volume', got: %s" % string + if not device.exists: + error = "Unable to proceed with non-existing device: %s" % string + elif device.has_gpt_headers: + error = "GPT headers found, they must be removed on: %s" % string if error: raise argparse.ArgumentError(None, error) - return string - - -class ValidDevice(object): - - def __call__(self, string): - device = Device(string) - if not device.exists: - raise argparse.ArgumentError( - None, "Unable to proceed with non-existing device: %s" % string - ) + if self.as_string: + if device.is_lv: + # all codepaths expect an lv path to be returned in this format + return "{}/{}".format(device.vg_name, device.lv_name) + return string return device diff --git a/ceph/src/ceph-volume/ceph_volume/util/device.py b/ceph/src/ceph-volume/ceph_volume/util/device.py index 7dca60a49..181044886 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/device.py +++ b/ceph/src/ceph-volume/ceph_volume/util/device.py @@ -1,40 +1,123 @@ +# -*- coding: utf-8 -*- + import os +from functools import total_ordering from ceph_volume import sys_info from ceph_volume.api import lvm from ceph_volume.util import disk +report_template = """ +{dev:<25} {size:<12} {rot!s:<7} {available!s:<9} {model}""" + + +class Devices(object): + """ + A container for Device instances with reporting + """ + + def __init__(self, devices=None): + if not sys_info.devices: + sys_info.devices = disk.get_devices() + self.devices = [Device(k) for k in + sys_info.devices.keys()] + + def 
pretty_report(self, all=True): + output = [ + report_template.format( + dev='Device Path', + size='Size', + rot='rotates', + model='Model name', + available='available', + )] + for device in sorted(self.devices): + output.append(device.report()) + return ''.join(output) + def json_report(self): + output = [] + for device in sorted(self.devices): + output.append(device.json_report()) + return output + +@total_ordering class Device(object): + pretty_template = """ + {attr:<25} {value}""" + + report_fields = [ + 'rejected_reasons', + 'available', + 'path', + 'sys_api', + ] + pretty_report_sys_fields = [ + 'human_readable_size', + 'model', + 'removable', + 'ro', + 'rotational', + 'sas_address', + 'scheduler_mode', + 'vendor', + ] + def __init__(self, path): self.path = path # LVs can have a vg/lv path, while disks will have /dev/sda self.abspath = path self.lv_api = None + self.lvs = [] + self.vg_name = None + self.lv_name = None self.pvs_api = [] self.disk_api = {} + self.blkid_api = {} self.sys_api = {} self._exists = None self._is_lvm_member = None self._parse() + self.available, self.rejected_reasons = self._check_reject_reasons() + + def __lt__(self, other): + ''' + Implementing this method and __eq__ allows the @total_ordering + decorator to turn the Device class into a totally ordered type. + This can slower then implementing all comparison operations. + This sorting should put available devices before unavailable devices + and sort on the path otherwise (str sorting). + ''' + if self.available == other.available: + return self.path < other.path + return self.available and not other.available + + def __eq__(self, other): + return self.path == other.path def _parse(self): + if not sys_info.devices: + sys_info.devices = disk.get_devices() + self.sys_api = sys_info.devices.get(self.abspath, {}) + # start with lvm since it can use an absolute or relative path lv = lvm.get_lv_from_argument(self.path) if lv: self.lv_api = lv + self.lvs = [lv] self.abspath = lv.lv_path + self.vg_name = lv.vg_name + self.lv_name = lv.name else: dev = disk.lsblk(self.path) + self.blkid_api = disk.blkid(self.path) self.disk_api = dev device_type = dev.get('TYPE', '') # always check is this is an lvm member if device_type in ['part', 'disk']: self._set_lvm_membership() - if not sys_info.devices: - sys_info.devices = disk.get_devices() - self.sys_api = sys_info.devices.get(self.abspath, {}) + self.ceph_disk = CephDiskDevice(self) def __repr__(self): prefix = 'Unknown' @@ -46,37 +129,118 @@ class Device(object): prefix = 'Raw Device' return '<%s: %s>' % (prefix, self.abspath) - def _set_lvm_membership(self): - if self._is_lvm_member is None: - # check if there was a pv created with the - # name of device - pvs = lvm.PVolumes() - pvs.filter(pv_name=self.abspath) - if not pvs: - self._is_lvm_member = False - return self._is_lvm_member - has_vgs = [pv.vg_name for pv in pvs if pv.vg_name] - if has_vgs: - self._is_lvm_member = True - self.pvs_api = pvs + def pretty_report(self): + def format_value(v): + if isinstance(v, list): + return ', '.join(v) else: - # this is contentious, if a PV is recognized by LVM but has no - # VGs, should we consider it as part of LVM? We choose not to - # here, because most likely, we need to use VGs from this PV. 
- self._is_lvm_member = False + return v + def format_key(k): + return k.strip('_').replace('_', ' ') + output = ['\n====== Device report {} ======\n'.format(self.path)] + output.extend( + [self.pretty_template.format( + attr=format_key(k), + value=format_value(v)) for k, v in vars(self).items() if k in + self.report_fields and k != 'disk_api' and k != 'sys_api'] ) + output.extend( + [self.pretty_template.format( + attr=format_key(k), + value=format_value(v)) for k, v in self.sys_api.items() if k in + self.pretty_report_sys_fields]) + for lv in self.lvs: + output.append(""" + --- Logical Volume ---""") + output.extend( + [self.pretty_template.format( + attr=format_key(k), + value=format_value(v)) for k, v in lv.report().items()]) + return ''.join(output) + + def report(self): + return report_template.format( + dev=self.abspath, + size=self.size_human, + rot=self.rotational, + available=self.available, + model=self.model, + ) + def json_report(self): + output = {k.strip('_'): v for k, v in vars(self).items() if k in + self.report_fields} + output['lvs'] = [lv.report() for lv in self.lvs] + return output + + def _set_lvm_membership(self): + if self._is_lvm_member is None: + # this is contentious, if a PV is recognized by LVM but has no + # VGs, should we consider it as part of LVM? We choose not to + # here, because most likely, we need to use VGs from this PV. + self._is_lvm_member = False + for path in self._get_pv_paths(): + # check if there was a pv created with the + # name of device + pvs = lvm.PVolumes() + pvs.filter(pv_name=path) + has_vgs = [pv.vg_name for pv in pvs if pv.vg_name] + if has_vgs: + # a pv can only be in one vg, so this should be safe + self.vg_name = has_vgs[0] + self._is_lvm_member = True + self.pvs_api = pvs + for pv in pvs: + if pv.vg_name and pv.lv_uuid: + lv = lvm.get_lv(vg_name=pv.vg_name, lv_uuid=pv.lv_uuid) + if lv: + self.lvs.append(lv) return self._is_lvm_member + def _get_pv_paths(self): + """ + For block devices LVM can reside on the raw block device or on a + partition. Return a list of paths to be checked for a pv. + """ + paths = [self.abspath] + path_dir = os.path.dirname(self.abspath) + for part in self.sys_api.get('partitions', {}).keys(): + paths.append(os.path.join(path_dir, part)) + return paths + @property def exists(self): return os.path.exists(self.abspath) + @property + def has_gpt_headers(self): + return self.blkid_api.get("PTTYPE") == "gpt" + + @property + def rotational(self): + return self.sys_api['rotational'] == '1' + + @property + def model(self): + return self.sys_api['model'] + + @property + def size_human(self): + return self.sys_api['human_readable_size'] + + @property + def size(self): + return self.sys_api['size'] + @property def is_lvm_member(self): if self._is_lvm_member is None: self._set_lvm_membership() return self._is_lvm_member + @property + def is_ceph_disk_member(self): + return self.ceph_disk.is_member + @property def is_mapper(self): return self.path.startswith('/dev/mapper') @@ -96,3 +260,71 @@ class Device(object): if self.disk_api: return self.disk_api['TYPE'] == 'device' return False + + @property + def used_by_ceph(self): + # only filter out data devices as journals could potentially be reused + osd_ids = [lv.tags.get("ceph.osd_id") is not None for lv in self.lvs + if lv.tags.get("ceph.type") in ["data", "block"]] + return any(osd_ids) + + def _check_reject_reasons(self): + """ + This checks a number of potential reject reasons for a drive and + returns a tuple (boolean, list). 
The first element denotes whether a + drive is available or not, the second element lists reasons in case a + drive is not available. + """ + reasons = [ + ('removable', 1, 'removable'), + ('ro', 1, 'read-only'), + ('locked', 1, 'locked'), + ] + rejected = [reason for (k, v, reason) in reasons if + self.sys_api.get(k, '') == v] + return len(rejected) == 0, rejected + + +class CephDiskDevice(object): + """ + Detect devices that have been created by ceph-disk, report their type + (journal, data, etc..). Requires a ``Device`` object as input. + """ + + def __init__(self, device): + self.device = device + self._is_ceph_disk_member = None + + @property + def partlabel(self): + """ + In containers, the 'PARTLABEL' attribute might not be detected + correctly via ``lsblk``, so we poke at the value with ``lsblk`` first, + falling back to ``blkid`` (which works correclty in containers). + """ + lsblk_partlabel = self.device.disk_api.get('PARTLABEL') + if lsblk_partlabel: + return lsblk_partlabel + return self.device.blkid_api.get('PARTLABEL', '') + + @property + def is_member(self): + if self._is_ceph_disk_member is None: + if 'ceph' in self.partlabel: + self._is_ceph_disk_member = True + return True + return False + return self._is_ceph_disk_member + + @property + def type(self): + types = [ + 'data', 'wal', 'db', 'lockbox', 'journal', + # ceph-disk uses 'ceph block' when placing data in bluestore, but + # keeps the regular OSD files in 'ceph data' :( :( :( :( + 'block', + ] + for t in types: + if t in self.partlabel: + return t + return 'unknown' diff --git a/ceph/src/ceph-volume/ceph_volume/util/disk.py b/ceph/src/ceph-volume/ceph_volume/util/disk.py index 053338972..ccc2ff7a1 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/disk.py +++ b/ceph/src/ceph-volume/ceph_volume/util/disk.py @@ -29,6 +29,79 @@ def get_partuuid(device): return ' '.join(out).strip() +def _blkid_parser(output): + """ + Parses the output from a system ``blkid`` call, requires output to be + produced using the ``-p`` flag which bypasses the cache, mangling the + names. These names are corrected to what it would look like without the + ``-p`` flag. + + Normal output:: + + /dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" [...] + """ + # first spaced separated item is garbage, gets tossed: + output = ' '.join(output.split()[1:]) + # split again, respecting possible whitespace in quoted values + pairs = output.split('" ') + raw = {} + processed = {} + mapping = { + 'UUID': 'UUID', + 'TYPE': 'TYPE', + 'PART_ENTRY_NAME': 'PARTLABEL', + 'PART_ENTRY_UUID': 'PARTUUID', + 'PTTYPE': 'PTTYPE', + } + + for pair in pairs: + try: + column, value = pair.split('=') + except ValueError: + continue + raw[column] = value.strip().strip().strip('"') + + for key, value in raw.items(): + new_key = mapping.get(key) + if not new_key: + continue + processed[new_key] = value + + return processed + + +def blkid(device): + """ + The blkid interface to its CLI, creating an output similar to what is + expected from ``lsblk``. In most cases, ``lsblk()`` should be the preferred + method for extracting information about a device. There are some corner + cases where it might provide information that is otherwise unavailable. + + The system call uses the ``-p`` flag which bypasses the cache, the caveat + being that the keys produced are named completely different to expected + names. + + For example, instead of ``PARTLABEL`` it provides a ``PART_ENTRY_NAME``. 
+ A bit of translation between these known keys is done, which is why + ``lsblk`` should always be preferred: the output provided here is not as + rich, given that a translation of keys is required for a uniform interface + with the ``-p`` flag. + + Label name to expected output chart: + + cache bypass name expected name + + UUID UUID + TYPE TYPE + PART_ENTRY_NAME PARTLABEL + PART_ENTRY_UUID PARTUUID + """ + out, err, rc = process.call( + ['blkid', '-p', device] + ) + return _blkid_parser(' '.join(out)) + + def get_part_entry_type(device): """ Parses the ``ID_PART_ENTRY_TYPE`` from the "low level" (bypasses the cache) @@ -509,14 +582,14 @@ class Size(object): def __truediv__(self, other): if isinstance(other, Size): return self._b / other._b - self._b = self._b / other - return self + _b = self._b / other + return Size(b=_b) def __div__(self, other): if isinstance(other, Size): return self._b / other._b - self._b = self._b / other - return self + _b = self._b / other + return Size(b=_b) def __getattr__(self, unit): """ @@ -581,6 +654,28 @@ def is_mapper_device(device_name): return device_name.startswith(('/dev/mapper', '/dev/dm-')) +def is_locked_raw_device(disk_path): + """ + A device can be locked by a third party software like a database. + To detect that case, the device is opened in Read/Write and exclusive mode + """ + open_flags = (os.O_RDWR | os.O_EXCL) + open_mode = 0 + fd = None + + try: + fd = os.open(disk_path, open_flags, open_mode) + except OSError: + return 1 + + try: + os.close(fd) + except OSError: + return 1 + + return 0 + + def get_devices(_sys_block_path='/sys/block', _dev_path='/dev', _mapper_path='/dev/mapper'): """ Captures all available devices from /sys/block/, including its partitions, @@ -614,18 +709,20 @@ def get_devices(_sys_block_path='/sys/block', _dev_path='/dev', _mapper_path='/d # Ensure that the diskname is an absolute path and that it never points # to a /dev/dm-* device diskname = mapper_devs.get(block) or dev_devs.get(block) + if not diskname: + continue # If the mapper device is a logical volume it gets excluded if is_mapper_device(diskname): if lvm.is_lv(diskname): continue - # If the device reports itself as 'removable', get it excluded metadata['removable'] = get_file_contents(os.path.join(sysdir, 'removable')) - if metadata['removable'] == '1': - continue + # Is the device read-only ? 
+ metadata['ro'] = get_file_contents(os.path.join(sysdir, 'ro')) - for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']: + + for key in ['vendor', 'model', 'rev', 'sas_address', 'sas_device_handle']: metadata[key] = get_file_contents(sysdir + "/device/" + key) for key in ['sectors', 'size']: @@ -636,7 +733,9 @@ def get_devices(_sys_block_path='/sys/block', _dev_path='/dev', _mapper_path='/d metadata['partitions'] = get_partitions_facts(sysdir) - metadata['rotational'] = get_file_contents(sysdir + "/queue/rotational") + for key in ['rotational', 'nr_requests']: + metadata[key] = get_file_contents(sysdir + "/queue/" + key) + metadata['scheduler_mode'] = "" scheduler = get_file_contents(sysdir + "/queue/scheduler") if scheduler is not None: @@ -653,6 +752,7 @@ def get_devices(_sys_block_path='/sys/block', _dev_path='/dev', _mapper_path='/d metadata['human_readable_size'] = human_readable_size(float(size) * 512) metadata['size'] = float(size) * 512 metadata['path'] = diskname + metadata['locked'] = is_locked_raw_device(metadata['path']) device_facts[diskname] = metadata return device_facts diff --git a/ceph/src/ceph-volume/ceph_volume/util/encryption.py b/ceph/src/ceph-volume/ceph_volume/util/encryption.py index cc594a07e..f6e3fdd7e 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/encryption.py +++ b/ceph/src/ceph-volume/ceph_volume/util/encryption.py @@ -3,6 +3,7 @@ import os import logging from ceph_volume import process, conf from ceph_volume.util import constants, system +from ceph_volume.util.device import Device from .prepare import write_keyring from .disk import lsblk, device_family, get_part_entry_type @@ -189,7 +190,8 @@ def status(device): 'status', device, ] - out, err, code = process.call(command, show_command=True) + out, err, code = process.call(command, show_command=True, verbose_on_failure=False) + metadata = {} if code != 0: logger.warning('failed to detect device mapper information') @@ -250,9 +252,9 @@ def legacy_encrypted(device): return metadata parent_device = disk_meta['PKNAME'] # With the parent device set, we can now look for the lockbox listing associated devices - devices = device_family(parent_device) - for i in devices: - if 'lockbox' in i.get('PARTLABEL', ''): - metadata['lockbox'] = i['NAME'] + devices = [Device(i['NAME']) for i in device_family(parent_device)] + for d in devices: + if d.ceph_disk.type == 'lockbox': + metadata['lockbox'] = d.abspath break return metadata diff --git a/ceph/src/ceph-volume/ceph_volume/util/prepare.py b/ceph/src/ceph-volume/ceph_volume/util/prepare.py index 687a5892b..3afa061c8 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/prepare.py +++ b/ceph/src/ceph-volume/ceph_volume/util/prepare.py @@ -70,6 +70,41 @@ def get_journal_size(lv_format=True): return journal_size +def get_block_db_size(lv_format=True): + """ + Helper to retrieve the size (defined in megabytes in ceph.conf) to create + the block.db logical volume, it "translates" the string into a float value, + then converts that into gigabytes, and finally (optionally) it formats it + back as a string so that it can be used for creating the LV. + + :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size + would result in '5G', otherwise it will return a ``Size`` object. + + .. 
note: Configuration values are in bytes, unlike journals which + are defined in gigabytes + """ + conf_db_size = None + try: + conf_db_size = conf.ceph.get_safe('osd', 'bluestore_block_db_size', None) + except RuntimeError: + logger.exception("failed to load ceph configuration, will use defaults") + + if not conf_db_size: + logger.debug( + 'block.db has no size configuration, will fallback to using as much as possible' + ) + return None + logger.debug('bluestore_block_db_size set to %s' % conf_db_size) + db_size = disk.Size(b=str_to_int(conf_db_size)) + + if db_size < disk.Size(gb=2): + mlogger.error('Refusing to continue with configured size for block.db') + raise RuntimeError('block.db sizes must be larger than 2GB, detected: %s' % db_size) + if lv_format: + return '%sG' % db_size.gb.as_int() + return db_size + + def create_id(fsid, json_secrets, osd_id=None): """ :param fsid: The osd fsid to create, always required diff --git a/ceph/src/ceph-volume/ceph_volume/util/templates.py b/ceph/src/ceph-volume/ceph_volume/util/templates.py index 90858d62d..78edd63d7 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/templates.py +++ b/ceph/src/ceph-volume/ceph_volume/util/templates.py @@ -15,6 +15,22 @@ total_osds = """ Total OSDs: {total_osds} """ + +def filtered_devices(devices): + string = """ +Filtered Devices:""" + for device, info in devices.iteritems(): + string += """ + %s""" % device + + for reason in info['reasons']: + string += """ + %s""" % reason + + string += "\n" + return string + + ssd_volume_group = """ Solid State VG: Targets: {target: <25} Total size: {total_lv_size: <25} diff --git a/ceph/src/ceph-volume/setup.py b/ceph/src/ceph-volume/setup.py index cfdb1e1a9..9bd48178c 100644 --- a/ceph/src/ceph-volume/setup.py +++ b/ceph/src/ceph-volume/setup.py @@ -17,7 +17,12 @@ setup( 'pytest >=2.1.3', 'tox', ], - scripts = ['bin/ceph-volume', 'bin/ceph-volume-systemd'], + entry_points = dict( + console_scripts = [ + 'ceph-volume = ceph_volume.main:Volume', + 'ceph-volume-systemd = ceph_volume.systemd:main', + ], + ), classifiers = [ 'Environment :: Console', 'Intended Audience :: Information Technology', @@ -29,5 +34,4 @@ setup( 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ] - ) diff --git a/ceph/src/ceph_osd.cc b/ceph/src/ceph_osd.cc index b2c845065..efab291e6 100644 --- a/ceph/src/ceph_osd.cc +++ b/ceph/src/ceph_osd.cc @@ -234,7 +234,7 @@ int main(int argc, const char **argv) { char fn[PATH_MAX]; snprintf(fn, sizeof(fn), "%s/type", g_conf->osd_data.c_str()); - int fd = ::open(fn, O_RDONLY); + int fd = ::open(fn, O_RDONLY|O_CLOEXEC); if (fd >= 0) { bufferlist bl; bl.read_fd(fd, 64); diff --git a/ceph/src/client/Client.cc b/ceph/src/client/Client.cc index 7043f9058..fc99ad53b 100644 --- a/ceph/src/client/Client.cc +++ b/ceph/src/client/Client.cc @@ -1465,6 +1465,10 @@ mds_rank_t Client::choose_target_mds(MetaRequest *req, Inode** phash_diri) mds = in->fragmap[fg]; if (phash_diri) *phash_diri = in; + } else if (in->auth_cap) { + mds = in->auth_cap->session->mds_num; + } + if (mds >= 0) { ldout(cct, 10) << "choose_target_mds from dirfragtree hash" << dendl; goto out; } @@ -2989,10 +2993,15 @@ Dentry* Client::link(Dir *dir, const string& name, Inode *in, Dentry *dn) dn->dir = dir; dir->dentries[dn->name] = dn; lru.lru_insert_mid(dn); // mid or top? 
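Stepping back to the ``get_block_db_size`` helper added above: under the constraints stated there (configured value in bytes, 2 GB minimum, integer-gigabyte string for creating the LV), its behaviour can be sketched with plain integers. The helper name below is hypothetical, not ceph-volume's API::

    GB = 1024 ** 3

    def block_db_lv_size(conf_db_size_bytes):
        # no size configured: ceph-volume falls back to "as much as possible"
        if not conf_db_size_bytes:
            return None
        if conf_db_size_bytes < 2 * GB:
            raise RuntimeError('block.db sizes must be larger than 2GB')
        # integer gigabytes, formatted for creating the LV
        return '%dG' % (conf_db_size_bytes // GB)

    print(block_db_lv_size(5 * GB))   # 5G
    print(block_db_lv_size(None))     # None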
+ if (!in) + dir->num_null_dentries++; ldout(cct, 15) << "link dir " << dir->parent_inode << " '" << name << "' to inode " << in << " dn " << dn << " (new dn)" << dendl; } else { + assert(!dn->inode); + if (in) + dir->num_null_dentries--; ldout(cct, 15) << "link dir " << dir->parent_inode << " '" << name << "' to inode " << in << " dn " << dn << " (old dn)" << dendl; } @@ -3049,11 +3058,15 @@ void Client::unlink(Dentry *dn, bool keepdir, bool keepdentry) if (keepdentry) { dn->lease_mds = -1; + if (in) + dn->dir->num_null_dentries++; } else { ldout(cct, 15) << "unlink removing '" << dn->name << "' dn " << dn << dendl; // unlink from dir dn->dir->dentries.erase(dn->name); + if (!in) + dn->dir->num_null_dentries--; if (dn->dir->is_empty() && !keepdir) close_dir(dn->dir); dn->dir = 0; @@ -3936,7 +3949,7 @@ void Client::add_update_cap(Inode *in, MetaSession *mds_session, uint64_t cap_id unsigned old_caps = cap->issued; cap->cap_id = cap_id; - cap->issued |= issued; + cap->issued = issued; cap->implemented |= issued; cap->seq = seq; cap->issue_seq = seq; @@ -4045,11 +4058,15 @@ void Client::remove_session_caps(MetaSession *s) sync_cond.Signal(); } -int Client::_do_remount(void) +int Client::_do_remount(bool retry_on_error) { + uint64_t max_retries = cct->_conf->get_val("mds_max_retries_on_remount_failure"); + errno = 0; int r = remount_cb(callback_handle); - if (r != 0) { + if (r == 0) { + retries_on_invalidate = 0; + } else { int e = errno; client_t whoami = get_nodeid(); if (r == -1) { @@ -4061,8 +4078,10 @@ int Client::_do_remount(void) "failed to remount (to trim kernel dentries): " "return code = " << r << dendl; } - bool should_abort = cct->_conf->get_val("client_die_on_failed_remount") || - cct->_conf->get_val("client_die_on_failed_dentry_invalidate"); + bool should_abort = + (cct->_conf->get_val("client_die_on_failed_remount") || + cct->_conf->get_val("client_die_on_failed_dentry_invalidate")) && + !(retry_on_error && (++retries_on_invalidate < max_retries)); if (should_abort && !unmounting) { lderr(cct) << "failed to remount for kernel dentry trimming; quitting!" 
<< dendl; ceph_abort(); @@ -4078,7 +4097,7 @@ public: explicit C_Client_Remount(Client *c) : client(c) {} void finish(int r) override { assert(r == 0); - client->_do_remount(); + client->_do_remount(true); } }; @@ -4102,6 +4121,31 @@ void Client::_invalidate_kernel_dcache() } } +void Client::_trim_negative_child_dentries(InodeRef& in) +{ + if (!in->is_dir()) + return; + + Dir* dir = in->dir; + if (dir && dir->dentries.size() == dir->num_null_dentries) { + for (auto p = dir->dentries.begin(); p != dir->dentries.end(); ) { + Dentry *dn = p->second; + ++p; + assert(!dn->inode); + if (dn->lru_is_expireable()) + unlink(dn, true, false); // keep dir, drop dentry + } + if (dir->dentries.empty()) { + close_dir(dir); + } + } + + if (in->flags & I_SNAPDIR_OPEN) { + InodeRef snapdir = open_snapdir(in.get()); + _trim_negative_child_dentries(snapdir); + } +} + void Client::trim_caps(MetaSession *s, uint64_t max) { mds_rank_t mds = s->mds_num; @@ -4132,6 +4176,7 @@ void Client::trim_caps(MetaSession *s, uint64_t max) } } else { ldout(cct, 20) << " trying to trim dentries for " << *in << dendl; + _trim_negative_child_dentries(in); bool all = true; set::iterator q = in->dn_set.begin(); while (q != in->dn_set.end()) { @@ -9360,7 +9405,7 @@ success: } // mtime - in->mtime = ceph_clock_now(); + in->mtime = in->ctime = ceph_clock_now(); in->change_attr++; in->mark_caps_dirty(CEPH_CAP_FILE_WR); @@ -9666,6 +9711,7 @@ int Client::statfs(const char *path, struct statvfs *stbuf, { Mutex::Locker l(client_lock); tout(cct) << "statfs" << std::endl; + unsigned long int total_files_on_fs; if (unmounting) return -ENOTCONN; @@ -9682,6 +9728,8 @@ int Client::statfs(const char *path, struct statvfs *stbuf, client_lock.Unlock(); int rval = cond.wait(); + assert(root); + total_files_on_fs = root->rstat.rfiles + root->rstat.rsubdirs; client_lock.Lock(); if (rval < 0) { @@ -9703,8 +9751,8 @@ int Client::statfs(const char *path, struct statvfs *stbuf, const int CEPH_BLOCK_SHIFT = 22; stbuf->f_frsize = 1 << CEPH_BLOCK_SHIFT; stbuf->f_bsize = 1 << CEPH_BLOCK_SHIFT; - stbuf->f_files = stats.num_objects; - stbuf->f_ffree = -1; + stbuf->f_files = total_files_on_fs; + stbuf->f_ffree = 0; stbuf->f_favail = -1; stbuf->f_fsid = -1; // ?? stbuf->f_flag = 0; // ?? 
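An aside on the ``_do_remount`` change earlier in this hunk: the abort decision is now gated on a retry budget. A hypothetical Python rendering of that boolean (option names shortened, sample values made up) looks like::

    def should_abort(die_on_failed_remount, die_on_failed_invalidate,
                     retry_on_error, retries_so_far, max_retries):
        # an abort is only on the table if one of the die_on_* options is set
        want_abort = die_on_failed_remount or die_on_failed_invalidate
        # ...and even then it is deferred while remount retries remain
        can_retry = retry_on_error and (retries_so_far + 1) < max_retries
        return want_abort and not can_retry

    # trim-triggered remounts (_do_remount(true)) tolerate a few failures:
    print(should_abort(True, False, True, 0, 5))    # False
    # the dentry-handling self test (_do_remount(false)) aborts right away:
    print(should_abort(True, False, False, 0, 5))   # True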
@@ -10109,7 +10157,7 @@ int Client::test_dentry_handling(bool can_invalidate) r = 0; } else if (remount_cb) { ldout(cct, 1) << "using remount_cb" << dendl; - r = _do_remount(); + r = _do_remount(false); } if (r) { bool should_abort = cct->_conf->get_val("client_die_on_failed_dentry_invalidate"); @@ -13041,7 +13089,7 @@ int Client::_fallocate(Fh *fh, int mode, int64_t offset, int64_t length) in->inline_data = bl; in->inline_version++; } - in->mtime = ceph_clock_now(); + in->mtime = in->ctime = ceph_clock_now(); in->change_attr++; in->mark_caps_dirty(CEPH_CAP_FILE_WR); } else { @@ -13067,7 +13115,7 @@ int Client::_fallocate(Fh *fh, int mode, int64_t offset, int64_t length) offset, length, ceph::real_clock::now(), 0, true, onfinish); - in->mtime = ceph_clock_now(); + in->mtime = in->ctime = ceph_clock_now(); in->change_attr++; in->mark_caps_dirty(CEPH_CAP_FILE_WR); @@ -13083,7 +13131,7 @@ int Client::_fallocate(Fh *fh, int mode, int64_t offset, int64_t length) uint64_t size = offset + length; if (size > in->size) { in->size = size; - in->mtime = ceph_clock_now(); + in->mtime = in->ctime = ceph_clock_now(); in->change_attr++; in->mark_caps_dirty(CEPH_CAP_FILE_WR); @@ -13159,14 +13207,15 @@ int Client::fallocate(int fd, int mode, loff_t offset, loff_t length) int Client::ll_release(Fh *fh) { Mutex::Locker lock(client_lock); + + if (unmounting) + return -ENOTCONN; + ldout(cct, 3) << "ll_release (fh)" << fh << " " << fh->inode->ino << " " << dendl; tout(cct) << "ll_release (fh)" << std::endl; tout(cct) << (unsigned long)fh << std::endl; - if (unmounting) - return -ENOTCONN; - if (ll_unclosed_fh_set.count(fh)) ll_unclosed_fh_set.erase(fh); return _release_fh(fh); diff --git a/ceph/src/client/Client.h b/ceph/src/client/Client.h index 2616f6d71..dd9e70ace 100644 --- a/ceph/src/client/Client.h +++ b/ceph/src/client/Client.h @@ -539,6 +539,7 @@ protected: void trim_dentry(Dentry *dn); void trim_caps(MetaSession *s, uint64_t max); void _invalidate_kernel_dcache(); + void _trim_negative_child_dentries(InodeRef& in); void dump_inode(Formatter *f, Inode *in, set& did, bool disconnected); void dump_cache(Formatter *f); // debug @@ -764,7 +765,7 @@ private: int _release_fh(Fh *fh); void _put_fh(Fh *fh); - int _do_remount(void); + int _do_remount(bool retry_on_error); friend class C_Client_Remount; struct C_Readahead : public Context { @@ -1253,6 +1254,9 @@ public: uint32_t get_deleg_timeout() { return deleg_timeout; } int set_deleg_timeout(uint32_t timeout); int ll_delegation(Fh *fh, unsigned cmd, ceph_deleg_cb_t cb, void *priv); + +private: + uint64_t retries_on_invalidate = 0; }; /** diff --git a/ceph/src/client/Dir.h b/ceph/src/client/Dir.h index e6d7f99c9..731a2038e 100644 --- a/ceph/src/client/Dir.h +++ b/ceph/src/client/Dir.h @@ -7,6 +7,8 @@ class Dir { public: Inode *parent_inode; // my inode ceph::unordered_map dentries; + unsigned num_null_dentries = 0; + vector readdir_cache; explicit Dir(Inode* in) { parent_inode = in; } diff --git a/ceph/src/client/fuse_ll.cc b/ceph/src/client/fuse_ll.cc index 018653f7e..5d0e81d7b 100644 --- a/ceph/src/client/fuse_ll.cc +++ b/ceph/src/client/fuse_ll.cc @@ -404,7 +404,7 @@ static void fuse_ll_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name, cfuse->client->cct->_conf->fuse_multithreaded && cfuse->client->cct->_conf->fuse_syncfs_on_mksnap) { int err = 0; - int fd = ::open(cfuse->mountpoint, O_RDONLY | O_DIRECTORY); + int fd = ::open(cfuse->mountpoint, O_RDONLY | O_DIRECTORY | O_CLOEXEC); if (fd < 0) { err = errno; } else { diff --git 
a/ceph/src/cls/rgw/cls_rgw.cc b/ceph/src/cls/rgw/cls_rgw.cc index cc47818a8..13b3e92dc 100644 --- a/ceph/src/cls/rgw/cls_rgw.cc +++ b/ceph/src/cls/rgw/cls_rgw.cc @@ -311,12 +311,20 @@ static void split_key(const string& key, list& vals) } } +static string escape_str(const string& s) +{ + int len = escape_json_attr_len(s.c_str(), s.size()); + char escaped[len]; + escape_json_attr(s.c_str(), s.size(), escaped); + return string(escaped); +} + /* * list index key structure: * * \0[v\0i] */ -static void decode_list_index_key(const string& index_key, cls_rgw_obj_key *key, uint64_t *ver) +static int decode_list_index_key(const string& index_key, cls_rgw_obj_key *key, uint64_t *ver) { size_t len = strlen(index_key.c_str()); @@ -325,19 +333,25 @@ static void decode_list_index_key(const string& index_key, cls_rgw_obj_key *key, if (len == index_key.size()) { key->name = index_key; - return; + return 0; } list vals; split_key(index_key, vals); - assert(!vals.empty()); + if (vals.empty()) { + CLS_LOG(0, "ERROR: %s(): bad index_key (%s): split_key() returned empty vals", __func__, escape_str(index_key).c_str()); + return -EIO; + } list::iterator iter = vals.begin(); key->name = *iter; ++iter; - assert(iter != vals.end()); + if (iter == vals.end()) { + CLS_LOG(0, "ERROR: %s(): bad index_key (%s): no vals", __func__, escape_str(index_key).c_str()); + return -EIO; + } for (; iter != vals.end(); ++iter) { string& val = *iter; @@ -347,9 +361,14 @@ static void decode_list_index_key(const string& index_key, cls_rgw_obj_key *key, string err; const char *s = val.c_str() + 1; *ver = strict_strtoll(s, 10, &err); - assert(err.empty()); + if (!err.empty()) { + CLS_LOG(0, "ERROR: %s(): bad index_key (%s): could not parse val (v=%s)", __func__, escape_str(index_key).c_str(), s); + return -EIO; + } } } + + return 0; } static int read_bucket_header(cls_method_context_t hctx, struct rgw_bucket_dir_header *header) @@ -430,11 +449,16 @@ int rgw_bucket_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out) cls_rgw_obj_key key; uint64_t ver; - decode_list_index_key(kiter->first, &key, &ver); start_key = kiter->first; CLS_LOG(20, "start_key=%s len=%zu", start_key.c_str(), start_key.size()); + int ret = decode_list_index_key(kiter->first, &key, &ver); + if (ret < 0) { + CLS_LOG(0, "ERROR: failed to decode list index key (%s)\n", escape_str(kiter->first).c_str()); + continue; + } + if (!entry.is_valid()) { CLS_LOG(20, "entry %s[%s] is not valid\n", key.name.c_str(), key.instance.c_str()); continue; @@ -573,6 +597,7 @@ int rgw_bucket_update_stats(cls_method_context_t hctx, bufferlist *in, bufferlis dest.total_size += s.second.total_size; dest.total_size_rounded += s.second.total_size_rounded; dest.num_entries += s.second.num_entries; + dest.actual_size += s.second.actual_size; } } @@ -985,14 +1010,6 @@ static void update_olh_log(struct rgw_bucket_olh_entry& olh_data_entry, OLHLogOp log.push_back(log_entry); } -static string escape_str(const string& s) -{ - int len = escape_json_attr_len(s.c_str(), s.size()); - char escaped[len]; - escape_json_attr(s.c_str(), s.size(), escaped); - return string(escaped); -} - static int write_obj_instance_entry(cls_method_context_t hctx, struct rgw_bucket_dir_entry& instance_entry, const string& instance_idx) { CLS_LOG(20, "write_entry() instance=%s idx=%s flags=%d", escape_str(instance_entry.key.instance).c_str(), instance_idx.c_str(), instance_entry.flags); @@ -3290,7 +3307,7 @@ static int gc_iterate_entries(cls_method_context_t hctx, const string& marker, b get_time_key(now, 
&now_str); prepend_index_prefix(now_str, GC_OBJ_TIME_INDEX, &end_key); - CLS_LOG(0, "gc_iterate_entries end_key=%s\n", end_key.c_str()); + CLS_LOG(10, "gc_iterate_entries end_key=%s\n", end_key.c_str()); } string filter; diff --git a/ceph/src/cls/rgw/cls_rgw_types.cc b/ceph/src/cls/rgw/cls_rgw_types.cc index 51ee342e9..adfc1aa79 100644 --- a/ceph/src/cls/rgw/cls_rgw_types.cc +++ b/ceph/src/cls/rgw/cls_rgw_types.cc @@ -253,6 +253,7 @@ bool rgw_cls_bi_entry::get_info(cls_rgw_obj_key *key, uint8_t *category, rgw_buc accounted_stats->num_entries++; accounted_stats->total_size += entry.meta.accounted_size; accounted_stats->total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size); + accounted_stats->actual_size += entry.meta.size; account = true; } break; @@ -575,6 +576,54 @@ void rgw_bucket_dir::dump(Formatter *f) const f->close_section(); } +void rgw_usage_log_entry::dump(Formatter *f) const +{ + f->dump_string("owner", owner.to_str()); + f->dump_string("payer", payer.to_str()); + f->dump_string("bucket", bucket); + f->dump_unsigned("epoch", epoch); + + f->open_object_section("total_usage"); + f->dump_unsigned("bytes_sent", total_usage.bytes_sent); + f->dump_unsigned("bytes_received", total_usage.bytes_received); + f->dump_unsigned("ops", total_usage.ops); + f->dump_unsigned("successful_ops", total_usage.successful_ops); + f->close_section(); + + f->open_array_section("categories"); + if (usage_map.size() > 0) { + map::const_iterator it; + for (it = usage_map.begin(); it != usage_map.end(); it++) { + const rgw_usage_data& total_usage = it->second; + f->open_object_section("entry"); + f->dump_string("category", it->first.c_str()); + f->dump_unsigned("bytes_sent", total_usage.bytes_sent); + f->dump_unsigned("bytes_received", total_usage.bytes_received); + f->dump_unsigned("ops", total_usage.ops); + f->dump_unsigned("successful_ops", total_usage.successful_ops); + f->close_section(); + } + } + f->close_section(); +} + +void rgw_usage_log_entry::generate_test_instances(list &o) +{ + rgw_usage_log_entry *entry = new rgw_usage_log_entry; + rgw_usage_data usage_data{1024, 2048}; + entry->owner = rgw_user("owner"); + entry->payer = rgw_user("payer"); + entry->bucket = "bucket"; + entry->epoch = 1234; + entry->total_usage.bytes_sent = usage_data.bytes_sent; + entry->total_usage.bytes_received = usage_data.bytes_received; + entry->total_usage.ops = usage_data.ops; + entry->total_usage.successful_ops = usage_data.successful_ops; + entry->usage_map["get_obj"] = usage_data; + o.push_back(entry); + o.push_back(new rgw_usage_log_entry); +} + void cls_rgw_reshard_entry::generate_key(const string& tenant, const string& bucket_name, string *key) { *key = tenant + ":" + bucket_name; diff --git a/ceph/src/cls/rgw/cls_rgw_types.h b/ceph/src/cls/rgw/cls_rgw_types.h index 97b53742b..51107c325 100644 --- a/ceph/src/cls/rgw/cls_rgw_types.h +++ b/ceph/src/cls/rgw/cls_rgw_types.h @@ -854,6 +854,10 @@ struct rgw_usage_log_entry { usage_map[category].aggregate(data); total_usage.aggregate(data); } + + void dump(Formatter* f) const; + static void generate_test_instances(list& o); + }; WRITE_CLASS_ENCODER(rgw_usage_log_entry) diff --git a/ceph/src/cls/user/cls_user.cc b/ceph/src/cls/user/cls_user.cc index 9a9c5bd9f..984524b2d 100644 --- a/ceph/src/cls/user/cls_user.cc +++ b/ceph/src/cls/user/cls_user.cc @@ -145,6 +145,9 @@ static int cls_user_set_buckets_info(cls_method_context_t hctx, bufferlist *in, entry = update_entry; ret = 0; + } else if (op.add) { + // bucket id may have changed (ie reshard) + 
entry.bucket.bucket_id = update_entry.bucket.bucket_id; } if (ret < 0) { @@ -254,17 +257,23 @@ static int cls_user_remove_bucket(cls_method_context_t hctx, bufferlist *in, buf return ret; } - if (entry.user_stats_sync) { - dec_header_stats(&header.stats, entry); - } - CLS_LOG(20, "removing entry at %s", key.c_str()); ret = remove_entry(hctx, key); if (ret < 0) return ret; - - return 0; + + if (!entry.user_stats_sync) { + return 0; + } + + dec_header_stats(&header.stats, entry); + + CLS_LOG(20, "header: total bytes=%lld entries=%lld", (long long)header.stats.total_bytes, (long long)header.stats.total_entries); + + bufferlist bl; + encode(header, bl); + return cls_cxx_map_write_header(hctx, &bl); } static int cls_user_list_buckets(cls_method_context_t hctx, bufferlist *in, bufferlist *out) diff --git a/ceph/src/common/HeartbeatMap.cc b/ceph/src/common/HeartbeatMap.cc index ae1f8e8fa..f6822852d 100644 --- a/ceph/src/common/HeartbeatMap.cc +++ b/ceph/src/common/HeartbeatMap.cc @@ -165,7 +165,7 @@ void HeartbeatMap::check_touch_file() if (is_healthy()) { string path = m_cct->_conf->heartbeat_file; if (path.length()) { - int fd = ::open(path.c_str(), O_WRONLY|O_CREAT, 0644); + int fd = ::open(path.c_str(), O_WRONLY|O_CREAT|O_CLOEXEC, 0644); if (fd >= 0) { ::utimes(path.c_str(), NULL); ::close(fd); diff --git a/ceph/src/common/OutputDataSocket.cc b/ceph/src/common/OutputDataSocket.cc index bbee667f3..f2244cde8 100644 --- a/ceph/src/common/OutputDataSocket.cc +++ b/ceph/src/common/OutputDataSocket.cc @@ -14,9 +14,9 @@ #include "common/OutputDataSocket.h" #include "common/errno.h" -#include "common/pipe.h" #include "common/safe_io.h" #include "include/compat.h" +#include "include/sock_compat.h" #include #include @@ -117,10 +117,10 @@ OutputDataSocket::~OutputDataSocket() std::string OutputDataSocket::create_shutdown_pipe(int *pipe_rd, int *pipe_wr) { int pipefd[2]; - int ret = pipe_cloexec(pipefd); - if (ret < 0) { + if (pipe_cloexec(pipefd) < 0) { + int e = errno; ostringstream oss; - oss << "OutputDataSocket::create_shutdown_pipe error: " << cpp_strerror(ret); + oss << "OutputDataSocket::create_shutdown_pipe error: " << cpp_strerror(e); return oss.str(); } @@ -142,7 +142,7 @@ std::string OutputDataSocket::bind_and_listen(const std::string &sock_path, int << (sizeof(address.sun_path) - 1); return oss.str(); } - int sock_fd = socket(PF_UNIX, SOCK_STREAM, 0); + int sock_fd = socket_cloexec(PF_UNIX, SOCK_STREAM, 0); if (sock_fd < 0) { int err = errno; ostringstream oss; @@ -150,14 +150,6 @@ std::string OutputDataSocket::bind_and_listen(const std::string &sock_path, int << "failed to create socket: " << cpp_strerror(err); return oss.str(); } - int r = fcntl(sock_fd, F_SETFD, FD_CLOEXEC); - if (r < 0) { - r = errno; - VOID_TEMP_FAILURE_RETRY(::close(sock_fd)); - ostringstream oss; - oss << "OutputDataSocket::bind_and_listen: failed to fcntl on socket: " << cpp_strerror(r); - return oss.str(); - } memset(&address, 0, sizeof(struct sockaddr_un)); address.sun_family = AF_UNIX; snprintf(address.sun_path, sizeof(address.sun_path), @@ -241,15 +233,15 @@ bool OutputDataSocket::do_accept() struct sockaddr_un address; socklen_t address_length = sizeof(address); ldout(m_cct, 30) << "OutputDataSocket: calling accept" << dendl; - int connection_fd = accept(m_sock_fd, (struct sockaddr*) &address, + int connection_fd = accept_cloexec(m_sock_fd, (struct sockaddr*) &address, &address_length); - ldout(m_cct, 30) << "OutputDataSocket: finished accept" << dendl; if (connection_fd < 0) { int err = errno; lderr(m_cct) << 
"OutputDataSocket: do_accept error: '" << cpp_strerror(err) << dendl; return false; } + ldout(m_cct, 30) << "OutputDataSocket: finished accept" << dendl; handle_connection(connection_fd); close_connection(connection_fd); diff --git a/ceph/src/common/Preforker.h b/ceph/src/common/Preforker.h index f6671a5d3..a05a7dba4 100644 --- a/ceph/src/common/Preforker.h +++ b/ceph/src/common/Preforker.h @@ -9,8 +9,10 @@ #include #include "include/assert.h" -#include "common/safe_io.h" #include "common/errno.h" +#include "common/safe_io.h" +#include "include/compat.h" +#include "include/sock_compat.h" /** * pre-fork fork/daemonize helper class @@ -34,22 +36,23 @@ public: int prefork(std::string &err) { assert(!forked); - int r = ::socketpair(AF_UNIX, SOCK_STREAM, 0, fd); std::ostringstream oss; + int r = socketpair_cloexec(AF_UNIX, SOCK_STREAM, 0, fd); if (r < 0) { - oss << "[" << getpid() << "]: unable to create socketpair: " << cpp_strerror(errno); + int e = errno; + oss << "[" << getpid() << "]: unable to create socketpair: " << cpp_strerror(e); err = oss.str(); - return r; + return (errno = e, -1); } forked = true; childpid = fork(); if (childpid < 0) { - r = -errno; - oss << "[" << getpid() << "]: unable to fork: " << cpp_strerror(errno); + int e = errno; + oss << "[" << getpid() << "]: unable to fork: " << cpp_strerror(e); err = oss.str(); - return r; + return (errno = e, -1); } if (is_child()) { ::close(fd[0]); diff --git a/ceph/src/common/PriorityCache.cc b/ceph/src/common/PriorityCache.cc new file mode 100644 index 000000000..d62e61cc6 --- /dev/null +++ b/ceph/src/common/PriorityCache.cc @@ -0,0 +1,29 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2018 Red Hat + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ + +#include "PriorityCache.h" + +namespace PriorityCache { + int64_t get_chunk(uint64_t usage, uint64_t chunk_bytes) { + // Add a chunk of headroom and round up to the near chunk + uint64_t val = usage + chunk_bytes; + uint64_t r = (val) % chunk_bytes; + if (r > 0) + val = val + chunk_bytes - r; + return val; + } + + PriCache::~PriCache() { + } +} diff --git a/ceph/src/common/PriorityCache.h b/ceph/src/common/PriorityCache.h new file mode 100644 index 000000000..c31f896e5 --- /dev/null +++ b/ceph/src/common/PriorityCache.h @@ -0,0 +1,69 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2018 Red Hat + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ + +#ifndef CEPH_PRIORITY_CACHE_H +#define CEPH_PRIORITY_CACHE_H + +#include +#include + +namespace PriorityCache { + enum Priority { + PRI0, // Reserved for special items + PRI1, // High priority cache items + PRI2, // Medium priority cache items + PRI3, // Low priority cache items + LAST = PRI3, + }; + + int64_t get_chunk(uint64_t usage, uint64_t chunk_bytes); + + struct PriCache { + virtual ~PriCache(); + + /* Ask the cache to request memory for the given priority rounded up to + * the nearst chunk_bytes. 
This for example, may return the size of all + * items associated with this priority plus some additional space for + * future growth. Note that the cache may ultimately be allocated less + * memory than it requests here. + */ + virtual int64_t request_cache_bytes(PriorityCache::Priority pri, uint64_t chunk_bytes) const = 0; + + // Get the number of bytes currently allocated to the given priority. + virtual int64_t get_cache_bytes(PriorityCache::Priority pri) const = 0; + + // Get the number of bytes currently allocated to all priorities. + virtual int64_t get_cache_bytes() const = 0; + + // Allocate bytes for a given priority. + virtual void set_cache_bytes(PriorityCache::Priority pri, int64_t bytes) = 0; + + // Allocate additional bytes for a given priority. + virtual void add_cache_bytes(PriorityCache::Priority pri, int64_t bytes) = 0; + + // Commit the current number of bytes allocated to the cache. + virtual int64_t commit_cache_size() = 0; + + // Get the ratio of available memory this cache should target. + virtual double get_cache_ratio() const = 0; + + // Set the ratio of available memory this cache should target. + virtual void set_cache_ratio(double ratio) = 0; + + // Get the name of this cache. + virtual std::string get_cache_name() const = 0; + }; +} + +#endif diff --git a/ceph/src/common/admin_socket.cc b/ceph/src/common/admin_socket.cc index f5edc5647..62c05ce50 100644 --- a/ceph/src/common/admin_socket.cc +++ b/ceph/src/common/admin_socket.cc @@ -15,7 +15,6 @@ #include "common/admin_socket.h" #include "common/admin_socket_client.h" #include "common/errno.h" -#include "common/pipe.h" #include "common/safe_io.h" #include "common/version.h" #include "include/compat.h" @@ -25,6 +24,8 @@ // re-include our assert to clobber the system one; fix dout: #include "include/assert.h" +#include "include/compat.h" +#include "include/sock_compat.h" #define dout_subsys ceph_subsys_asok #undef dout_prefix @@ -121,10 +122,10 @@ AdminSocket::~AdminSocket() std::string AdminSocket::create_shutdown_pipe(int *pipe_rd, int *pipe_wr) { int pipefd[2]; - int ret = pipe_cloexec(pipefd); - if (ret < 0) { + if (pipe_cloexec(pipefd) < 0) { + int e = errno; ostringstream oss; - oss << "AdminSocket::create_shutdown_pipe error: " << cpp_strerror(ret); + oss << "AdminSocket::create_shutdown_pipe error: " << cpp_strerror(e); return oss.str(); } @@ -173,7 +174,7 @@ std::string AdminSocket::bind_and_listen(const std::string &sock_path, int *fd) << (sizeof(address.sun_path) - 1); return oss.str(); } - int sock_fd = socket(PF_UNIX, SOCK_STREAM, 0); + int sock_fd = socket_cloexec(PF_UNIX, SOCK_STREAM, 0); if (sock_fd < 0) { int err = errno; ostringstream oss; @@ -181,14 +182,6 @@ std::string AdminSocket::bind_and_listen(const std::string &sock_path, int *fd) << "failed to create socket: " << cpp_strerror(err); return oss.str(); } - int r = fcntl(sock_fd, F_SETFD, FD_CLOEXEC); - if (r < 0) { - r = errno; - VOID_TEMP_FAILURE_RETRY(::close(sock_fd)); - ostringstream oss; - oss << "AdminSocket::bind_and_listen: failed to fcntl on socket: " << cpp_strerror(r); - return oss.str(); - } memset(&address, 0, sizeof(struct sockaddr_un)); address.sun_family = AF_UNIX; snprintf(address.sun_path, sizeof(address.sun_path), @@ -299,15 +292,15 @@ bool AdminSocket::do_accept() struct sockaddr_un address; socklen_t address_length = sizeof(address); ldout(m_cct, 30) << "AdminSocket: calling accept" << dendl; - int connection_fd = accept(m_sock_fd, (struct sockaddr*) &address, + int connection_fd = accept_cloexec(m_sock_fd, (struct 
sockaddr*) &address, &address_length); - ldout(m_cct, 30) << "AdminSocket: finished accept" << dendl; if (connection_fd < 0) { int err = errno; lderr(m_cct) << "AdminSocket: do_accept error: '" << cpp_strerror(err) << dendl; return false; } + ldout(m_cct, 30) << "AdminSocket: finished accept" << dendl; char cmd[1024]; unsigned pos = 0; diff --git a/ceph/src/common/admin_socket_client.cc b/ceph/src/common/admin_socket_client.cc index 32eb66479..9886bba6c 100644 --- a/ceph/src/common/admin_socket_client.cc +++ b/ceph/src/common/admin_socket_client.cc @@ -12,12 +12,18 @@ * */ +#include +#include +#include +#include + #include "common/admin_socket.h" #include "common/errno.h" #include "common/safe_io.h" #include "common/admin_socket_client.h" -#include +#include "include/compat.h" +#include "include/sock_compat.h" using std::ostringstream; @@ -41,7 +47,7 @@ const char* get_rand_socket_path() static std::string asok_connect(const std::string &path, int *fd) { - int socket_fd = socket(PF_UNIX, SOCK_STREAM, 0); + int socket_fd = socket_cloexec(PF_UNIX, SOCK_STREAM, 0); if(socket_fd < 0) { int err = errno; ostringstream oss; diff --git a/ceph/src/common/autovector.h b/ceph/src/common/autovector.h new file mode 100644 index 000000000..5fbaa8328 --- /dev/null +++ b/ceph/src/common/autovector.h @@ -0,0 +1,334 @@ +// Copyright (c) 2018-Present Red Hat Inc. All rights reserved. +// +// Copyright (c) 2011-2018, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 and Apache 2.0 License + +#ifndef CEPH_AUTOVECTOR_H +#define CEPH_AUTOVECTOR_H + +#include +#include +#include +#include +#include +#include + +// A vector that leverages pre-allocated stack-based array to achieve better +// performance for array with small amount of items. +// +// The interface resembles that of vector, but with less features since we aim +// to solve the problem that we have in hand, rather than implementing a +// full-fledged generic container. +// +// Currently we don't support: +// * reserve()/shrink_to_fit() +// If used correctly, in most cases, people should not touch the +// underlying vector at all. +// * random insert()/erase(), please only use push_back()/pop_back(). +// * No move/swap operations. Each autovector instance has a +// stack-allocated array and if we want support move/swap operations, we +// need to copy the arrays other than just swapping the pointers. In this +// case we'll just explicitly forbid these operations since they may +// lead users to make false assumption by thinking they are inexpensive +// operations. +// +// Naming style of public methods almost follows that of the STL's. +namespace ceph { + +template +class autovector { + public: + // General STL-style container member types. 
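For orientation, the small-vector idea described in the ``autovector`` header comment above can be sketched in Python. This is a hypothetical illustration of the layout (fixed-size slot first, heap spill-over after), not a translation of the template::

    class SmallVector(object):
        """Keep the first k items in a fixed slot, spill the rest to a list."""
        def __init__(self, k=4):
            self.k = k
            self.stack = []   # stands in for the fixed array values_[kSize]
            self.heap = []    # stands in for the overflow std::vector

        def push_back(self, item):
            if len(self.stack) < self.k:
                self.stack.append(item)
            else:
                self.heap.append(item)

        def __len__(self):
            return len(self.stack) + len(self.heap)

        def __getitem__(self, n):
            return self.stack[n] if n < self.k else self.heap[n - self.k]

    v = SmallVector(k=2)
    for i in range(5):
        v.push_back(i)
    print(len(v), v[0], v[4], v.heap)   # 5 0 4 [2, 3, 4]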
+ typedef T value_type; + typedef typename std::vector::difference_type difference_type; + typedef typename std::vector::size_type size_type; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef value_type* pointer; + typedef const value_type* const_pointer; + + // This class is the base for regular/const iterator + template + class iterator_impl { + public: + // -- iterator traits + typedef iterator_impl self_type; + typedef TValueType value_type; + typedef TValueType& reference; + typedef TValueType* pointer; + typedef typename TAutoVector::difference_type difference_type; + typedef std::random_access_iterator_tag iterator_category; + + iterator_impl(TAutoVector* vect, size_t index) + : vect_(vect), index_(index) {}; + iterator_impl(const iterator_impl&) = default; + ~iterator_impl() {} + iterator_impl& operator=(const iterator_impl&) = default; + + // -- Advancement + // ++iterator + self_type& operator++() { + ++index_; + return *this; + } + + // iterator++ + self_type operator++(int) { + auto old = *this; + ++index_; + return old; + } + + // --iterator + self_type& operator--() { + --index_; + return *this; + } + + // iterator-- + self_type operator--(int) { + auto old = *this; + --index_; + return old; + } + + self_type operator-(difference_type len) const { + return self_type(vect_, index_ - len); + } + + difference_type operator-(const self_type& other) const { + assert(vect_ == other.vect_); + return index_ - other.index_; + } + + self_type operator+(difference_type len) const { + return self_type(vect_, index_ + len); + } + + self_type& operator+=(difference_type len) { + index_ += len; + return *this; + } + + self_type& operator-=(difference_type len) { + index_ -= len; + return *this; + } + + // -- Reference + reference operator*() { + assert(vect_->size() >= index_); + return (*vect_)[index_]; + } + + const_reference operator*() const { + assert(vect_->size() >= index_); + return (*vect_)[index_]; + } + + pointer operator->() { + assert(vect_->size() >= index_); + return &(*vect_)[index_]; + } + + const_pointer operator->() const { + assert(vect_->size() >= index_); + return &(*vect_)[index_]; + } + + + // -- Logical Operators + bool operator==(const self_type& other) const { + assert(vect_ == other.vect_); + return index_ == other.index_; + } + + bool operator!=(const self_type& other) const { return !(*this == other); } + + bool operator>(const self_type& other) const { + assert(vect_ == other.vect_); + return index_ > other.index_; + } + + bool operator<(const self_type& other) const { + assert(vect_ == other.vect_); + return index_ < other.index_; + } + + bool operator>=(const self_type& other) const { + assert(vect_ == other.vect_); + return index_ >= other.index_; + } + + bool operator<=(const self_type& other) const { + assert(vect_ == other.vect_); + return index_ <= other.index_; + } + + private: + TAutoVector* vect_ = nullptr; + size_t index_ = 0; + }; + + typedef iterator_impl iterator; + typedef iterator_impl const_iterator; + typedef std::reverse_iterator reverse_iterator; + typedef std::reverse_iterator const_reverse_iterator; + + autovector() = default; + + autovector(std::initializer_list init_list) { + for (const T& item : init_list) { + push_back(item); + } + } + + ~autovector() = default; + + // -- Immutable operations + // Indicate if all data resides in in-stack data structure. + bool only_in_stack() const { + // If no element was inserted at all, the vector's capacity will be `0`. 
+ return vect_.capacity() == 0; + } + + size_type size() const { return num_stack_items_ + vect_.size(); } + + // resize does not guarantee anything about the contents of the newly + // available elements + void resize(size_type n) { + if (n > kSize) { + vect_.resize(n - kSize); + num_stack_items_ = kSize; + } else { + vect_.clear(); + num_stack_items_ = n; + } + } + + bool empty() const { return size() == 0; } + + const_reference operator[](size_type n) const { + assert(n < size()); + return n < kSize ? values_[n] : vect_[n - kSize]; + } + + reference operator[](size_type n) { + assert(n < size()); + return n < kSize ? values_[n] : vect_[n - kSize]; + } + + const_reference at(size_type n) const { + assert(n < size()); + return (*this)[n]; + } + + reference at(size_type n) { + assert(n < size()); + return (*this)[n]; + } + + reference front() { + assert(!empty()); + return *begin(); + } + + const_reference front() const { + assert(!empty()); + return *begin(); + } + + reference back() { + assert(!empty()); + return *(end() - 1); + } + + const_reference back() const { + assert(!empty()); + return *(end() - 1); + } + + // -- Mutable Operations + void push_back(T&& item) { + if (num_stack_items_ < kSize) { + values_[num_stack_items_++] = std::move(item); + } else { + vect_.push_back(item); + } + } + + void push_back(const T& item) { + if (num_stack_items_ < kSize) { + values_[num_stack_items_++] = item; + } else { + vect_.push_back(item); + } + } + + template + void emplace_back(Args&&... args) { + push_back(value_type(args...)); + } + + void pop_back() { + assert(!empty()); + if (!vect_.empty()) { + vect_.pop_back(); + } else { + --num_stack_items_; + } + } + + void clear() { + num_stack_items_ = 0; + vect_.clear(); + } + + // -- Copy and Assignment + autovector& assign(const autovector& other); + + autovector(const autovector& other) { assign(other); } + + autovector& operator=(const autovector& other) { return assign(other); } + + // -- Iterator Operations + iterator begin() { return iterator(this, 0); } + + const_iterator begin() const { return const_iterator(this, 0); } + + iterator end() { return iterator(this, this->size()); } + + const_iterator end() const { return const_iterator(this, this->size()); } + + reverse_iterator rbegin() { return reverse_iterator(end()); } + + const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + + reverse_iterator rend() { return reverse_iterator(begin()); } + + const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + private: + size_type num_stack_items_ = 0; // current number of items + value_type values_[kSize]; // the first `kSize` items + // used only if there are more than `kSize` items. 
+ std::vector vect_; +}; + +template +autovector& autovector::assign(const autovector& other) { + // copy the internal vector + vect_.assign(other.vect_.begin(), other.vect_.end()); + + // copy array + num_stack_items_ = other.num_stack_items_; + std::copy(other.values_, other.values_ + num_stack_items_, values_); + + return *this; +} +} // namespace ceph +#endif // CEPH_AUTOVECTOR_H diff --git a/ceph/src/common/buffer.cc b/ceph/src/common/buffer.cc index 5ad822f12..09dcc67b2 100644 --- a/ceph/src/common/buffer.cc +++ b/ceph/src/common/buffer.cc @@ -2255,7 +2255,7 @@ void buffer::list::decode_base64(buffer::list& e) int buffer::list::read_file(const char *fn, std::string *error) { - int fd = TEMP_FAILURE_RETRY(::open(fn, O_RDONLY)); + int fd = TEMP_FAILURE_RETRY(::open(fn, O_RDONLY|O_CLOEXEC)); if (fd < 0) { int err = errno; std::ostringstream oss; @@ -2332,7 +2332,7 @@ int buffer::list::read_fd_zero_copy(int fd, size_t len) int buffer::list::write_file(const char *fn, int mode) { - int fd = TEMP_FAILURE_RETRY(::open(fn, O_WRONLY|O_CREAT|O_TRUNC, mode)); + int fd = TEMP_FAILURE_RETRY(::open(fn, O_WRONLY|O_CREAT|O_TRUNC|O_CLOEXEC, mode)); if (fd < 0) { int err = errno; cerr << "bufferlist::write_file(" << fn << "): failed to open file: " diff --git a/ceph/src/common/ceph_time.h b/ceph/src/common/ceph_time.h index d3d5dfa36..33777c4c2 100644 --- a/ceph/src/common/ceph_time.h +++ b/ceph/src/common/ceph_time.h @@ -178,6 +178,14 @@ namespace ceph { return from_timespec(ts); } + static bool is_zero(const time_point& t) { + return (t == time_point::min()); + } + + static time_point zero() { + return time_point::min(); + } + static time_t to_time_t(const time_point& t) noexcept { return duration_cast(t.time_since_epoch()).count(); } diff --git a/ceph/src/common/compat.cc b/ceph/src/common/compat.cc index 18b75874a..7a6bf7212 100644 --- a/ceph/src/common/compat.cc +++ b/ceph/src/common/compat.cc @@ -1,19 +1,34 @@ // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2011 New Dream Network + * Copyright (C) 2018 Red Hat, Inc. + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ +#include #include #include -#include #include -#include -#include -#include #include +#include +#include +#include +#include +#include #if defined(__linux__) #include #endif -#include "include/compat.h" +#include "include/compat.h" +#include "include/sock_compat.h" #include "common/safe_io.h" // The type-value for a ZFS FS in fstatfs. @@ -76,3 +91,96 @@ int ceph_posix_fallocate(int fd, off_t offset, off_t len) { #endif } +int pipe_cloexec(int pipefd[2]) +{ +#if defined(HAVE_PIPE2) + return pipe2(pipefd, O_CLOEXEC); +#else + if (pipe(pipefd) == -1) + return -1; + + /* + * The old-fashioned, race-condition prone way that we have to fall + * back on if pipe2 does not exist. 
+ */ + if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) < 0) { + goto fail; + } + + if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) < 0) { + goto fail; + } + + return 0; +fail: + int save_errno = errno; + VOID_TEMP_FAILURE_RETRY(close(pipefd[0])); + VOID_TEMP_FAILURE_RETRY(close(pipefd[1])); + return (errno = save_errno, -1); +#endif +} + + +int socket_cloexec(int domain, int type, int protocol) +{ +#ifdef SOCK_CLOEXEC + return socket(domain, type|SOCK_CLOEXEC, protocol); +#else + int fd = socket(domain, type, protocol); + if (fd == -1) + return -1; + + if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) + goto fail; + + return fd; +fail: + int save_errno = errno; + VOID_TEMP_FAILURE_RETRY(close(fd)); + return (errno = save_errno, -1); +#endif +} + +int socketpair_cloexec(int domain, int type, int protocol, int sv[2]) +{ +#ifdef SOCK_CLOEXEC + return socketpair(domain, type|SOCK_CLOEXEC, protocol, sv); +#else + int rc = socketpair(domain, type, protocol, sv); + if (rc == -1) + return -1; + + if (fcntl(sv[0], F_SETFD, FD_CLOEXEC) < 0) + goto fail; + + if (fcntl(sv[1], F_SETFD, FD_CLOEXEC) < 0) + goto fail; + + return 0; +fail: + int save_errno = errno; + VOID_TEMP_FAILURE_RETRY(close(sv[0])); + VOID_TEMP_FAILURE_RETRY(close(sv[1])); + return (errno = save_errno, -1); +#endif +} + +int accept_cloexec(int sockfd, struct sockaddr* addr, socklen_t* addrlen) +{ +#ifdef HAVE_ACCEPT4 + return accept4(sockfd, addr, addrlen, SOCK_CLOEXEC); +#else + int fd = accept(sockfd, addr, addrlen); + if (fd == -1) + return -1; + + if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) + goto fail; + + return fd; +fail: + int save_errno = errno; + VOID_TEMP_FAILURE_RETRY(close(fd)); + return (errno = save_errno, -1); +#endif +} diff --git a/ceph/src/common/config.cc b/ceph/src/common/config.cc index 88e3da9b5..b3a98a595 100644 --- a/ceph/src/common/config.cc +++ b/ceph/src/common/config.cc @@ -59,7 +59,7 @@ int ceph_resolve_file_search(const std::string& filename_list, int ret = -ENOENT; list::iterator iter; for (iter = ls.begin(); iter != ls.end(); ++iter) { - int fd = ::open(iter->c_str(), O_RDONLY); + int fd = ::open(iter->c_str(), O_RDONLY|O_CLOEXEC); if (fd < 0) { ret = -errno; continue; diff --git a/ceph/src/common/hostname.cc b/ceph/src/common/hostname.cc index 32436534e..879fc9396 100644 --- a/ceph/src/common/hostname.cc +++ b/ceph/src/common/hostname.cc @@ -18,6 +18,12 @@ std::string ceph_get_hostname() { + // are we in a container? if so we would prefer the *real* hostname. + const char *node_name = getenv("NODE_NAME"); + if (node_name) { + return node_name; + } + char buf[1024]; gethostname(buf, 1024); return std::string(buf); diff --git a/ceph/src/common/legacy_config_opts.h b/ceph/src/common/legacy_config_opts.h index 38b36a60c..a51870ef6 100644 --- a/ceph/src/common/legacy_config_opts.h +++ b/ceph/src/common/legacy_config_opts.h @@ -169,6 +169,10 @@ OPTION(ms_async_rdma_roce_ver, OPT_INT) // 0=RoCEv1, 1=RoCEv2, 2=RoCEv1. OPTION(ms_async_rdma_sl, OPT_INT) // in RoCE, this means PCP OPTION(ms_async_rdma_dscp, OPT_INT) // in RoCE, this means DSCP +// when there are enough accept failures, indicating there are unrecoverable failures, +// just do ceph_abort() . Here we make it configurable. 
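One more illustrative aside, on the ``ceph_get_hostname`` change above: in containers an injected ``NODE_NAME`` environment variable wins over ``gethostname()``. A hypothetical Python equivalent (the variable value below is made up) looks like::

    import os
    import socket

    def get_hostname():
        # prefer the node name injected by the container environment, if any
        node_name = os.environ.get('NODE_NAME')
        if node_name:
            return node_name
        return socket.gethostname()

    os.environ['NODE_NAME'] = 'storage-node-01'   # hypothetical value
    print(get_hostname())                         # storage-node-01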
+OPTION(ms_max_accept_failures, OPT_INT) + OPTION(ms_dpdk_port_id, OPT_INT) SAFE_OPTION(ms_dpdk_coremask, OPT_STR) // it is modified in unittest so that use SAFE_OPTION to declare OPTION(ms_dpdk_memory_channel, OPT_STR) @@ -469,8 +473,6 @@ OPTION(mds_bal_split_rd, OPT_FLOAT) OPTION(mds_bal_split_wr, OPT_FLOAT) OPTION(mds_bal_split_bits, OPT_INT) OPTION(mds_bal_merge_size, OPT_INT) -OPTION(mds_bal_interval, OPT_INT) // seconds -OPTION(mds_bal_fragment_interval, OPT_INT) // seconds OPTION(mds_bal_fragment_size_max, OPT_INT) // order of magnitude higher than split size OPTION(mds_bal_fragment_fast_factor, OPT_FLOAT) // multiple of size_max that triggers immediate split OPTION(mds_bal_idle_threshold, OPT_FLOAT) @@ -1081,7 +1083,6 @@ OPTION(bluestore_cache_size_hdd, OPT_U64) OPTION(bluestore_cache_size_ssd, OPT_U64) OPTION(bluestore_cache_meta_ratio, OPT_DOUBLE) OPTION(bluestore_cache_kv_ratio, OPT_DOUBLE) -OPTION(bluestore_cache_kv_max, OPT_INT) // limit the maximum amount of cache for the kv store OPTION(bluestore_kvbackend, OPT_STR) OPTION(bluestore_allocator, OPT_STR) // stupid | bitmap OPTION(bluestore_freelist_blocks_per_key, OPT_INT) @@ -1342,6 +1343,7 @@ OPTION(rgw_content_length_compat, OPT_BOOL) // Check both HTTP_CONTENT_LENGTH an OPTION(rgw_lifecycle_work_time, OPT_STR) //job process lc at 00:00-06:00s OPTION(rgw_lc_lock_max_time, OPT_INT) // total run time for a single lc processor work OPTION(rgw_lc_max_objs, OPT_INT) +OPTION(rgw_lc_max_rules, OPT_U32) // Max rules set on one bucket OPTION(rgw_lc_debug_interval, OPT_INT) // Debug run interval, in seconds OPTION(rgw_script_uri, OPT_STR) // alternative value for SCRIPT_URI if not set in request OPTION(rgw_request_uri, OPT_STR) // alternative value for REQUEST_URI if not set in request diff --git a/ceph/src/common/options.cc b/ceph/src/common/options.cc index 1ed027c9b..ff3bb1a1b 100644 --- a/ceph/src/common/options.cc +++ b/ceph/src/common/options.cc @@ -812,6 +812,11 @@ std::vector