git.proxmox.com Git - ceph.git/commit
update source to 12.2.11
author: Alwin Antreich <a.antreich@proxmox.com>
Wed, 6 Feb 2019 08:29:01 +0000 (09:29 +0100)
committer: Alwin Antreich <a.antreich@proxmox.com>
Wed, 6 Feb 2019 09:34:02 +0000 (10:34 +0100)
commit: f64942e41c1f59e95cdc1205bbe5d32ed6dfd429
tree: bbf67247deb6135cfa04c24fd95cf21cc9bead36
parent: fc2b1783e3727b66315cc667af9d663d30fe7ed4
update source to 12.2.11

Signed-off-by: Alwin Antreich <a.antreich@proxmox.com>
348 files changed:
Makefile
ceph/CMakeLists.txt
ceph/PendingReleaseNotes
ceph/alpine/APKBUILD
ceph/ceph.spec
ceph/ceph.spec.in
ceph/debian/changelog
ceph/debian/control
ceph/doc/README.md [new file with mode: 0644]
ceph/doc/_ext/edit_on_github.py [new file with mode: 0644]
ceph/doc/_static/js/ceph.js [new file with mode: 0644]
ceph/doc/_templates/page.html [new file with mode: 0644]
ceph/doc/ceph-volume/lvm/zap.rst
ceph/doc/cephfs/dirfrags.rst
ceph/doc/cephfs/eviction.rst
ceph/doc/cephfs/fuse.rst
ceph/doc/cephfs/health-messages.rst
ceph/doc/cephfs/mds-config-ref.rst
ceph/doc/conf.py
ceph/doc/man/8/ceph-volume.rst
ceph/doc/man/8/crushtool.rst
ceph/doc/mgr/balancer.rst
ceph/doc/rados/configuration/bluestore-config-ref.rst
ceph/doc/rados/operations/add-or-rm-mons.rst
ceph/doc/rados/operations/crush-map-edits.rst
ceph/doc/rados/operations/crush-map.rst
ceph/doc/rados/operations/user-management.rst
ceph/doc/rados/troubleshooting/troubleshooting-mon.rst
ceph/doc/radosgw/adminops.rst
ceph/doc/radosgw/config-ref.rst
ceph/doc/radosgw/encryption.rst
ceph/doc/radosgw/frontends.rst
ceph/doc/start/hardware-recommendations.rst
ceph/doc/start/quick-ceph-deploy.rst
ceph/examples/librados/Makefile
ceph/examples/librados/hello_world.readme
ceph/install-deps.sh
ceph/qa/cephfs/clusters/1-mds-1-client-coloc.yaml [new file with mode: 0644]
ceph/qa/cephfs/clusters/1-mds-1-client.yaml
ceph/qa/cephfs/clusters/1-mds-2-client-coloc.yaml [new file with mode: 0644]
ceph/qa/cephfs/clusters/1-mds-2-client.yaml
ceph/qa/cephfs/clusters/1-mds-3-client.yaml [new file with mode: 0644]
ceph/qa/cephfs/clusters/1-mds-4-client-coloc.yaml [new file with mode: 0644]
ceph/qa/cephfs/clusters/1-mds-4-client.yaml
ceph/qa/cephfs/clusters/3-mds.yaml
ceph/qa/cephfs/clusters/9-mds.yaml
ceph/qa/cephfs/clusters/fixed-2-ucephfs.yaml
ceph/qa/run-standalone.sh
ceph/qa/standalone/ceph-helpers.sh
ceph/qa/standalone/scrub/osd-scrub-repair.sh
ceph/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml
ceph/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml
ceph/qa/suites/fs/basic_functional/clusters/1-mds-4-client-coloc.yaml [new symlink]
ceph/qa/suites/fs/basic_functional/clusters/4-remote-clients.yaml [deleted file]
ceph/qa/suites/fs/basic_functional/tasks/damage.yaml
ceph/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml
ceph/qa/suites/fs/bugs/client_trim_caps/clusters/small-cluster.yaml
ceph/qa/suites/fs/multiclient/clusters/1-mds-2-client.yaml [new symlink]
ceph/qa/suites/fs/multiclient/clusters/1-mds-3-client.yaml [new symlink]
ceph/qa/suites/fs/multiclient/clusters/three_clients.yaml [deleted file]
ceph/qa/suites/fs/multiclient/clusters/two_clients.yaml [deleted file]
ceph/qa/suites/fs/multifs/clusters/2-remote-clients.yaml [deleted file]
ceph/qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml
ceph/qa/suites/fs/thrash/clusters/1-mds-1-client-coloc.yaml [new symlink]
ceph/qa/suites/fs/thrash/clusters/mds-1active-1standby.yaml [deleted file]
ceph/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml
ceph/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml
ceph/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml
ceph/qa/suites/kcephfs/recovery/tasks/damage.yaml
ceph/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml
ceph/qa/suites/rados/singleton/all/mon-config-key-caps.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/jewel-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/jewel-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/jewel-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/jewel-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/jewel-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/jewel-x/stress-split/2-partial-upgrade/firsthalf.yaml
ceph/qa/suites/upgrade/jewel-x/stress-split/5-finish-upgrade.yaml
ceph/qa/suites/upgrade/luminous-p2p/% [deleted file]
ceph/qa/suites/upgrade/luminous-p2p/.qa [deleted symlink]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-parallel/% [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-parallel/.qa [new symlink]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-parallel/point-to-point-upgrade.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-parallel/supported [new symlink]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/% [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/0-cluster/+ [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/0-cluster/.qa [new symlink]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/0-cluster/openstack.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/0-cluster/start.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/1-ceph-install/luminous.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/1.1-pg-log-overrides/normal_pg_log.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/1.1-pg-log-overrides/short_pg_log.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/2-partial-upgrade/.qa [new symlink]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/2-partial-upgrade/firsthalf.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/3-thrash/.qa [new symlink]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/3-thrash/default.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/4-workload/+ [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/4-workload/.qa [new symlink]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/4-workload/radosbench.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/4-workload/rbd-cls.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/4-workload/rbd-import-export.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/4-workload/rbd_api.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/4-workload/readwrite.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/4-workload/snaps-few-objects.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/5-finish-upgrade.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/7-final-workload/+ [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/7-final-workload/.qa [new symlink]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/7-final-workload/rbd-python.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/7-final-workload/rgw-swift.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/7-final-workload/snaps-many-objects.yaml [new file with mode: 0644]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/supported [new symlink]
ceph/qa/suites/upgrade/luminous-p2p/luminous-p2p-stress-split/thrashosds-health.yaml [new symlink]
ceph/qa/suites/upgrade/luminous-p2p/point-to-point-upgrade.yaml [deleted file]
ceph/qa/suites/upgrade/luminous-p2p/supported [deleted symlink]
ceph/qa/tasks/cephfs/filesystem.py
ceph/qa/tasks/cephfs/fuse_mount.py
ceph/qa/tasks/cephfs/kernel_mount.py
ceph/qa/tasks/cephfs/test_client_limits.py
ceph/qa/tasks/cephfs/test_client_recovery.py
ceph/qa/tasks/cephfs/test_damage.py
ceph/qa/tasks/cephfs/test_data_scan.py
ceph/qa/tasks/cephfs/test_flush.py
ceph/qa/tasks/cephfs/test_forward_scrub.py
ceph/qa/tasks/cephfs/test_fragment.py
ceph/qa/tasks/cephfs/test_journal_migration.py
ceph/qa/tasks/cephfs/test_journal_repair.py
ceph/qa/tasks/cephfs/test_misc.py
ceph/qa/tasks/cephfs/test_recovery_pool.py
ceph/qa/tasks/qemu.py
ceph/qa/tasks/thrashosds-health.yaml
ceph/qa/tasks/workunit.py
ceph/qa/workunits/ceph-tests/ceph-admin-commands.sh
ceph/qa/workunits/mon/test_config_key_caps.sh [new file with mode: 0755]
ceph/qa/workunits/rados/test_librados_build.sh
ceph/qa/workunits/rbd/run_devstack_tempest.sh
ceph/qa/workunits/suites/cephfs_journal_tool_smoke.sh
ceph/run-make-check.sh
ceph/src/.git_version
ceph/src/auth/AuthSessionHandler.cc
ceph/src/ceph-create-keys
ceph/src/ceph-volume/ceph_volume/api/lvm.py
ceph/src/ceph-volume/ceph_volume/devices/lvm/activate.py
ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py
ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py
ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py
ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/strategies.py [new file with mode: 0644]
ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py
ceph/src/ceph-volume/ceph_volume/inventory/main.py
ceph/src/ceph-volume/ceph_volume/tests/conftest.py
ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py [new file with mode: 0644]
ceph/src/ceph-volume/ceph_volume/tests/devices/test_zap.py
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml [new file with mode: 0644]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test_zap.yml [new symlink]
ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml
ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml
ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml
ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml
ceph/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml
ceph/src/ceph-volume/ceph_volume/tests/util/test_device.py
ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py
ceph/src/ceph-volume/ceph_volume/tests/util/test_encryption.py
ceph/src/ceph-volume/ceph_volume/tests/util/test_util.py
ceph/src/ceph-volume/ceph_volume/util/__init__.py
ceph/src/ceph-volume/ceph_volume/util/arg_validators.py
ceph/src/ceph-volume/ceph_volume/util/device.py
ceph/src/ceph-volume/ceph_volume/util/disk.py
ceph/src/ceph-volume/ceph_volume/util/encryption.py
ceph/src/client/Client.cc
ceph/src/cls/lock/cls_lock.cc
ceph/src/cls/lock/cls_lock_client.cc
ceph/src/cls/lock/cls_lock_client.h
ceph/src/cls/lock/cls_lock_ops.cc
ceph/src/cls/lock/cls_lock_ops.h
ceph/src/cls/lock/cls_lock_types.h
ceph/src/cls/rgw/cls_rgw.cc
ceph/src/cls/rgw/cls_rgw_client.cc
ceph/src/cls/rgw/cls_rgw_client.h
ceph/src/cls/rgw/cls_rgw_types.h
ceph/src/common/Cond.h
ceph/src/common/CondVar.h [new file with mode: 0644]
ceph/src/common/TrackedOp.cc
ceph/src/common/WeightedPriorityQueue.h
ceph/src/common/buffer.cc
ceph/src/common/ceph_context.cc
ceph/src/common/cmdparse.h
ceph/src/common/config.cc
ceph/src/common/config.h
ceph/src/common/hobject.h
ceph/src/common/legacy_config_opts.h
ceph/src/common/options.cc
ceph/src/crush/CrushCompiler.cc
ceph/src/crush/CrushTester.cc
ceph/src/crush/CrushTester.h
ceph/src/crush/CrushWrapper.cc
ceph/src/crush/CrushWrapper.h
ceph/src/include/buffer.h
ceph/src/include/ceph_features.h
ceph/src/include/ceph_fs.h
ceph/src/include/cephfs/libcephfs.h
ceph/src/include/config-h.in.cmake
ceph/src/include/rados.h
ceph/src/include/rados/librados.hpp
ceph/src/librados/librados.cc
ceph/src/librbd/librbd.cc
ceph/src/librbd/operation/ResizeRequest.cc
ceph/src/mds/CInode.cc
ceph/src/mds/CInode.h
ceph/src/mds/FSMap.cc
ceph/src/mds/Locker.cc
ceph/src/mds/MDBalancer.cc
ceph/src/mds/MDCache.cc
ceph/src/mds/MDCache.h
ceph/src/mds/MDLog.cc
ceph/src/mds/MDSDaemon.cc
ceph/src/mds/MDSMap.h
ceph/src/mds/MDSRank.cc
ceph/src/mds/MDSRank.h
ceph/src/mds/PurgeQueue.cc
ceph/src/mds/PurgeQueue.h
ceph/src/mds/Server.cc
ceph/src/mds/Server.h
ceph/src/mds/SessionMap.h
ceph/src/mds/StrayManager.cc
ceph/src/mgr/DaemonServer.cc
ceph/src/mgr/DaemonState.cc
ceph/src/mon/AuthMonitor.cc
ceph/src/mon/ConfigKeyService.cc
ceph/src/mon/FSCommands.cc
ceph/src/mon/LogMonitor.cc
ceph/src/mon/MDSMonitor.cc
ceph/src/mon/MgrMonitor.cc
ceph/src/mon/MonCap.cc
ceph/src/mon/MonCommands.h
ceph/src/mon/Monitor.cc
ceph/src/mon/MonmapMonitor.cc
ceph/src/mon/OSDMonitor.cc
ceph/src/os/bluestore/BlueFS.cc
ceph/src/os/bluestore/BlueFS.h
ceph/src/os/bluestore/BlueStore.cc
ceph/src/os/bluestore/BlueStore.h
ceph/src/os/bluestore/bluestore_tool.cc
ceph/src/os/filestore/LFNIndex.h
ceph/src/osd/OSD.cc
ceph/src/osd/OSD.h
ceph/src/osd/OSDMap.cc
ceph/src/osd/OSDMap.h
ceph/src/osd/PG.cc
ceph/src/osd/PG.h
ceph/src/osd/PGLog.cc
ceph/src/osd/PGLog.h
ceph/src/osd/PrimaryLogPG.cc
ceph/src/osd/PrimaryLogPG.h
ceph/src/osd/osd_types.h
ceph/src/osdc/Journaler.cc
ceph/src/osdc/ObjectCacher.cc
ceph/src/osdc/ObjectCacher.h
ceph/src/osdc/Objecter.cc
ceph/src/pybind/ceph_volume_client.py
ceph/src/pybind/mgr/balancer/module.py
ceph/src/pybind/mgr/influx/module.py
ceph/src/pybind/mgr/prometheus/module.py
ceph/src/pybind/mgr/restful/common.py
ceph/src/pybind/mgr/restful/module.py
ceph/src/pybind/mgr/status/module.py
ceph/src/pybind/rbd/rbd.pyx
ceph/src/rgw/CMakeLists.txt
ceph/src/rgw/librgw.cc
ceph/src/rgw/rgw_admin.cc
ceph/src/rgw/rgw_asio_client.cc
ceph/src/rgw/rgw_asio_client.h
ceph/src/rgw/rgw_asio_frontend.cc
ceph/src/rgw/rgw_auth.cc
ceph/src/rgw/rgw_auth_s3.cc
ceph/src/rgw/rgw_bucket.cc
ceph/src/rgw/rgw_bucket.h
ceph/src/rgw/rgw_common.cc
ceph/src/rgw/rgw_common.h
ceph/src/rgw/rgw_cr_rados.cc
ceph/src/rgw/rgw_crypt.cc
ceph/src/rgw/rgw_data_sync.cc
ceph/src/rgw/rgw_file.h
ceph/src/rgw/rgw_iam_policy.cc
ceph/src/rgw/rgw_metadata.cc
ceph/src/rgw/rgw_metadata.h
ceph/src/rgw/rgw_op.cc
ceph/src/rgw/rgw_op.h
ceph/src/rgw/rgw_quota.cc
ceph/src/rgw/rgw_quota.h
ceph/src/rgw/rgw_rados.cc
ceph/src/rgw/rgw_rados.h
ceph/src/rgw/rgw_reshard.cc
ceph/src/rgw/rgw_reshard.h
ceph/src/rgw/rgw_rest.cc
ceph/src/rgw/rgw_rest_s3.cc
ceph/src/rgw/rgw_rest_swift.cc
ceph/src/rgw/rgw_rest_user.cc
ceph/src/rgw/rgw_sync_log_trim.cc
ceph/src/rgw/rgw_sync_module_es.cc
ceph/src/rgw/rgw_user.cc
ceph/src/test/cli/crushtool/crush-classes/a [new file with mode: 0644]
ceph/src/test/cli/crushtool/crush-classes/b [new file with mode: 0644]
ceph/src/test/cli/crushtool/crush-classes/beesly [new file with mode: 0644]
ceph/src/test/cli/crushtool/crush-classes/c [new file with mode: 0644]
ceph/src/test/cli/crushtool/crush-classes/d [new file with mode: 0644]
ceph/src/test/cli/crushtool/crush-classes/e [new file with mode: 0644]
ceph/src/test/cli/crushtool/crush-classes/f [new file with mode: 0644]
ceph/src/test/cli/crushtool/crush-classes/flax [new file with mode: 0644]
ceph/src/test/cli/crushtool/crush-classes/g [new file with mode: 0644]
ceph/src/test/cli/crushtool/crush-classes/gabe [new file with mode: 0644]
ceph/src/test/cli/crushtool/crush-classes/gabe2 [new file with mode: 0644]
ceph/src/test/cli/crushtool/help.t [changed mode: 0755->0644]
ceph/src/test/cli/crushtool/reclassify.t [new file with mode: 0644]
ceph/src/test/cli/radosgw-admin/help.t
ceph/src/test/cls_lock/test_cls_lock.cc
ceph/src/test/cls_rgw/test_cls_rgw.cc
ceph/src/test/compressor/CMakeLists.txt
ceph/src/test/encoding/readable.sh
ceph/src/test/librados/aio.cc
ceph/src/test/librados/lock.cc
ceph/src/test/librados_test_stub/LibradosTestStub.cc
ceph/src/test/objectstore/store_test.cc
ceph/src/test/osd/TestOSDMap.cc
ceph/src/test/rgw/rgw_multi/multisite.py
ceph/src/test/rgw/rgw_multi/tests.py
ceph/src/test/rgw/test_rgw_iam_policy.cc
ceph/src/tools/cephfs/JournalTool.cc
ceph/src/tools/cephfs/JournalTool.h
ceph/src/tools/cephfs/RoleSelector.cc
ceph/src/tools/cephfs/RoleSelector.h
ceph/src/tools/crushtool.cc
ceph/src/tools/rados/rados.cc
ceph/src/tools/rbd_mirror/ImageReplayer.cc