From f91f0fd59dc16d284d230f8953e42d49a893715d Mon Sep 17 00:00:00 2001
From: Thomas Lamprecht
Date: Thu, 17 Dec 2020 12:55:37 +0100
Subject: [PATCH] Import ceph 15.2.8

Signed-off-by: Thomas Lamprecht
---
 ceph/.github/CODEOWNERS | 1 +
 ceph/.readthedocs.yml | 16 +
 ceph/CMakeLists.txt | 2 +-
 ceph/PendingReleaseNotes | 31 +-
 ceph/admin/build-doc | 9 +-
 ceph/admin/doc-read-the-docs.txt | 2 +
 ceph/admin/doc-requirements.txt | 2 +
 ceph/alpine/APKBUILD | 6 +-
 ceph/ceph.spec | 19 +-
 ceph/ceph.spec.in | 13 +-
 ceph/changelog.upstream | 16 +-
 ceph/debian/cephadm.install | 1 -
 ceph/debian/control | 5 +
 ceph/debian/librgw-dev.install | 2 -
 ceph/debian/librgw2.install | 1 -
 ceph/debian/rules | 1 -
 ceph/doc/architecture.rst | 60 +-
 ceph/doc/ceph-volume/intro.rst | 11 +-
 ceph/doc/ceph-volume/lvm/batch.rst | 249 +-
 ceph/doc/ceph-volume/lvm/prepare.rst | 28 +-
 ceph/doc/cephadm/adoption.rst | 4 +-
 ceph/doc/cephadm/client-setup.rst | 8 +-
 ceph/doc/cephadm/concepts.rst | 4 +-
 ceph/doc/cephadm/drivegroups.rst | 56 +-
 ceph/doc/cephadm/install.rst | 3 +
 ceph/doc/cephadm/monitoring.rst | 48 +-
 ceph/doc/cephadm/operations.rst | 19 +-
 ceph/doc/cephadm/troubleshooting.rst | 32 +-
 ceph/doc/cephfs/cephfs-io-path.rst | 1 +
 ceph/doc/cephfs/fs-nfs-exports.rst | 98 +-
 ceph/doc/cephfs/fs-volumes.rst | 30 +-
 ceph/doc/cephfs/health-messages.rst | 4 +-
 ceph/doc/conf.py | 49 +-
 ceph/doc/dev/deduplication.rst | 3 +-
 ceph/doc/dev/msgr2.rst | 40 +-
 .../install/ceph-deploy/quick-ceph-deploy.rst | 1 +
 ceph/doc/install/ceph-deploy/quick-cephfs.rst | 1 +
 ceph/doc/install/ceph-deploy/quick-common.rst | 3 +-
 ceph/doc/install/get-packages.rst | 97 +-
 ceph/doc/install/install-vm-cloud.rst | 4 +-
 ceph/doc/install/manual-deployment.rst | 1 +
 .../doc/install/manual-freebsd-deployment.rst | 1 +
 ceph/doc/man/8/cephadm.rst | 30 +-
 ceph/doc/man/8/osdmaptool.rst | 4 +
 ceph/doc/man/8/rbd.rst | 16 +-
 ceph/doc/mgr/dashboard.rst | 18 +
 ceph/doc/mgr/orchestrator.rst | 265 +-
 ceph/doc/rados/api/librados-intro.rst | 10 +-
 ceph/doc/rados/configuration/ceph-conf.rst | 7 +
 .../rados/configuration/mon-config-ref.rst | 7 +-
 .../configuration/mon-osd-interaction.rst | 15 +-
 .../configuration/network-config-ref.rst | 2 +-
 ceph/doc/rados/operations/cache-tiering.rst | 2 +-
 ceph/doc/rados/operations/devices.rst | 17 +
 .../rados/operations/monitoring-osd-pg.rst | 12 +-
 .../doc/rados/operations/placement-groups.rst | 1 -
 ceph/doc/rados/operations/user-management.rst | 3 +-
 ceph/doc/radosgw/STS.rst | 81 +-
 ceph/doc/radosgw/STSLite.rst | 2 +-
 ceph/doc/radosgw/admin.rst | 3 +-
 ceph/doc/radosgw/frontends.rst | 9 +
 ceph/doc/radosgw/index.rst | 5 +-
 ceph/doc/radosgw/oidc.rst | 97 +
 ceph/doc/radosgw/swift/tutorial.rst | 4 +-
 ceph/doc/radosgw/vault.rst | 4 +-
 ceph/doc/rbd/index.rst | 4 +-
 ceph/doc/rbd/libvirt.rst | 4 +-
 ceph/doc/rbd/qemu-rbd.rst | 4 +-
 ceph/doc/rbd/rbd-cloudstack.rst | 4 +-
 ceph/doc/rbd/rbd-kubernetes.rst | 3 +-
 ceph/doc/rbd/rbd-live-migration.rst | 4 +-
 ceph/doc/rbd/rbd-openstack.rst | 4 +-
 ceph/doc/rbd/rbd-persistent-cache.rst | 4 +-
 ceph/doc/rbd/rbd-snapshot.rst | 12 +-
 ceph/doc/start/intro.rst | 4 +-
 ceph/doc/start/quick-rbd.rst | 3 +-
 .../grafana/dashboards/host-details.json | 4 +-
 ceph/qa/cephfs/clusters/3-mds.yaml | 3 +-
 ceph/qa/cephfs/clusters/9-mds.yaml | 3 +-
 ceph/qa/cephfs/conf/client.yaml | 2 +
 ceph/qa/cephfs/conf/mds.yaml | 2 +
 ceph/qa/cephfs/overrides/session_timeout.yaml | 4 +
 ceph/qa/config/rados.yaml | 2 +
 ceph/qa/debug/mgr.yaml | 1 +
 ceph/qa/distros/all/rhel_8.2.yaml | 6 +
 ceph/qa/distros/all/ubuntu_18.04_podman.yaml | 7 +-
.../supported-all-distro/rhel_8.2.yaml | 1 + .../distros/supported-all-distro/rhel_8.yaml | 1 - .../supported-random-distro$/rhel_latest.yaml | 2 +- ceph/qa/distros/supported/rhel_latest.yaml | 2 +- .../erasure-code/ec-feature-plugins-v3.yaml | 98 - ceph/qa/machine_types/schedule_rados_ovh.sh | 3 - ceph/qa/machine_types/schedule_subset.sh | 8 +- ceph/qa/rbd/krbd_blkroset.t | 9 + ceph/qa/standalone/mon/mon-handle-forward.sh | 6 +- .../qa/standalone/mon/mon-last-epoch-clean.sh | 2 +- ceph/qa/standalone/osd/osd-rep-recov-eio.sh | 7 +- .../fs/basic_functional/tasks/cap-flush.yaml | 5 +- .../fs/basic_functional/tasks/volumes.yaml | 3 + .../overrides/session_timeout.yaml | 1 + .../fs/thrash/overrides/session_timeout.yaml | 1 + .../old_client/overrides/multimds/no.yaml | 3 +- .../old_client/overrides/multimds/yes.yaml | 3 +- .../overrides/multimds/no.yaml | 3 +- .../overrides/multimds/yes.yaml | 3 +- .../fs/verify/overrides/session_timeout.yaml | 1 + .../rbd-nomount/tasks/krbd_udev_netns.yaml | 5 + .../rbd-nomount/tasks/krbd_udev_symlinks.yaml | 5 + .../suites/rados/cephadm/smoke/fixed-2.yaml | 1 + .../upgrade/2-repo_digest/defaut.yaml} | 0 .../upgrade/2-repo_digest/repo_digest.yaml | 4 + ...tart-upgrade.yaml => 3-start-upgrade.yaml} | 0 .../upgrade/{3-wait.yaml => 4-wait.yaml} | 0 ceph/qa/suites/rados/monthrash/ceph.yaml | 4 + .../thrash-old-clients/1-install/hammer.yaml | 41 - .../distro$/centos_7.6.yaml | 1 - .../distro$/ubuntu_18.04.yaml | 1 + ceph/qa/suites/rbd/immutable-object-cache/% | 0 ceph/qa/suites/rbd/immutable-object-cache/.qa | 1 + .../rbd/immutable-object-cache/clusters/+ | 0 .../rbd/immutable-object-cache/clusters/.qa | 1 + .../clusters/fix-2.yaml | 3 + .../clusters/openstack.yaml | 4 + .../pool/ceph_and_immutable_object_cache.yaml | 11 + .../supported-random-distro$ | 1 + .../rbd/immutable-object-cache/workloads/.qa | 1 + .../workloads/c_api_tests_with_defaults.yaml | 1 + .../fio_on_immutable_object_cache.yaml | 11 + ..._on_immutable_object_cache_and_thrash.yaml | 11 + .../buildpackages/tasks/branch.yaml | 10 - ceph/qa/tasks/ceph.conf.template | 1 + ceph/qa/tasks/ceph.py | 66 +- ceph/qa/tasks/ceph_manager.py | 39 +- ceph/qa/tasks/ceph_test_case.py | 6 +- ceph/qa/tasks/cephadm.py | 86 +- ceph/qa/tasks/cephadm_cases/test_cli.py | 3 + ceph/qa/tasks/cephfs/cephfs_test_case.py | 2 + ceph/qa/tasks/cephfs/filesystem.py | 16 +- ceph/qa/tasks/cephfs/fuse_mount.py | 39 +- ceph/qa/tasks/cephfs/mount.py | 9 +- ceph/qa/tasks/cephfs/test_admin.py | 4 + ceph/qa/tasks/cephfs/test_client_limits.py | 73 +- ceph/qa/tasks/cephfs/test_misc.py | 16 + ceph/qa/tasks/cephfs/test_nfs.py | 106 +- ceph/qa/tasks/cephfs/test_scrub_checks.py | 112 +- ceph/qa/tasks/cephfs/test_volume_client.py | 214 +- ceph/qa/tasks/cephfs/test_volumes.py | 19 + ceph/qa/tasks/immutable_object_cache.py | 66 + .../qa/tasks/immutable_object_cache_thrash.py | 79 + ceph/qa/tasks/mgr/dashboard/test_ganesha.py | 31 +- ceph/qa/tasks/mgr/dashboard/test_osd.py | 6 + ceph/qa/tasks/mgr/dashboard/test_rgw.py | 19 +- ceph/qa/tasks/mgr/test_dashboard.py | 37 +- ceph/qa/tasks/rbd_fio.py | 2 + ceph/qa/tasks/vstart_runner.py | 1 + ceph/qa/workunits/cephadm/test_adoption.sh | 1 - ceph/qa/workunits/cephadm/test_cephadm.sh | 24 +- ceph/qa/workunits/cephtool/test.sh | 5 + ceph/qa/workunits/mon/rbd_snaps_ops.sh | 10 +- ceph/qa/workunits/rbd/krbd_udev_netns.sh | 86 + ceph/qa/workunits/rbd/krbd_udev_symlinks.sh | 116 + ceph/qa/workunits/rbd/rbd-nbd.sh | 41 +- ceph/qa/workunits/rbd/rbd_mirror_helpers.sh | 4 + 
ceph/qa/workunits/rgw/test_rgw_orphan_list.sh | 5 +- ceph/run-make-check.sh | 5 +- ceph/src/.git_version | 4 +- ceph/src/CMakeLists.txt | 1 - ceph/src/ceph-rbdnamer | 17 +- ceph/src/ceph-volume/ceph_volume/api/lvm.py | 12 +- .../ceph_volume/devices/lvm/batch.py | 678 ++- .../ceph_volume/devices/lvm/common.py | 243 +- .../ceph_volume/devices/lvm/main.py | 4 +- .../ceph_volume/devices/lvm/prepare.py | 75 +- .../devices/lvm/strategies/__init__.py | 1 - .../devices/lvm/strategies/bluestore.py | 539 -- .../devices/lvm/strategies/filestore.py | 391 -- .../devices/lvm/strategies/strategies.py | 70 - .../devices/lvm/strategies/validators.py | 61 - .../ceph_volume/devices/lvm/zap.py | 9 +- .../ceph_volume/devices/simple/activate.py | 17 +- .../ceph_volume/devices/simple/scan.py | 11 +- .../ceph_volume/drive_group/main.py | 1 - .../ceph-volume/ceph_volume/inventory/main.py | 23 +- ceph/src/ceph-volume/ceph_volume/log.py | 9 +- ceph/src/ceph-volume/ceph_volume/main.py | 3 +- .../ceph_volume/tests/api/test_lvm.py | 1 + .../ceph-volume/ceph_volume/tests/conftest.py | 60 +- .../devices/lvm/strategies/test_bluestore.py | 203 - .../devices/lvm/strategies/test_filestore.py | 218 - .../devices/lvm/strategies/test_validate.py | 52 - .../tests/devices/lvm/test_batch.py | 409 +- .../tests/devices/lvm/test_common.py | 8 + .../tests/devices/lvm/test_prepare.py | 46 +- .../batch/playbooks/test_explicit.yml | 8 +- .../ceph_volume/tests/test_inventory.py | 144 + .../ceph_volume/tests/test_main.py | 18 + .../tests/util/test_arg_validators.py | 4 +- .../ceph_volume/tests/util/test_device.py | 104 +- .../ceph_volume/util/arg_validators.py | 37 +- .../ceph-volume/ceph_volume/util/device.py | 108 +- ceph/src/ceph-volume/ceph_volume/util/disk.py | 10 +- .../ceph-volume/ceph_volume/util/lsmdisk.py | 196 + .../ceph-volume/ceph_volume/util/prepare.py | 23 +- .../ceph-volume/ceph_volume/util/templates.py | 8 +- ceph/src/ceph.in | 6 +- ceph/src/cephadm/cephadm | 1315 ++++- .../src/cephadm/samples/custom_container.json | 35 + ceph/src/cephadm/tests/test_cephadm.py | 149 +- ceph/src/cephadm/tox.ini | 2 +- ceph/src/client/Client.cc | 49 +- ceph/src/client/Client.h | 1 + ceph/src/client/Dentry.h | 4 + ceph/src/client/Inode.cc | 16 +- ceph/src/client/Inode.h | 1 + ceph/src/cls/rbd/cls_rbd_types.cc | 3 + ceph/src/cls/rbd/cls_rbd_types.h | 1 + ceph/src/cls/rgw_gc/cls_rgw_gc.cc | 15 + ceph/src/common/Cond.h | 6 +- ceph/src/common/WorkQueue.h | 9 + ceph/src/common/admin_socket.cc | 54 +- ceph/src/common/admin_socket.h | 5 + ceph/src/common/ceph_context.cc | 12 + ceph/src/common/ceph_context.h | 19 + ceph/src/common/cohort_lru.h | 14 +- ceph/src/common/legacy_config_opts.h | 5 +- ceph/src/common/options.cc | 54 +- ceph/src/common/strtol.h | 116 +- ceph/src/compressor/zlib/CMakeLists.txt | 45 +- .../compressor/zstd/CompressionPluginZstd.h | 2 +- ceph/src/compressor/zstd/ZstdCompressor.h | 8 +- ceph/src/erasure-code/CMakeLists.txt | 4 +- ceph/src/erasure-code/isa/CMakeLists.txt | 139 +- ceph/src/include/CMakeLists.txt | 3 +- ceph/src/include/ceph_features.h | 3 +- ceph/src/include/config-h.in.cmake | 3 + ceph/src/include/encoding.h | 35 +- ceph/src/include/krbd.h | 35 +- ceph/src/include/mempool.h | 10 +- ceph/src/include/rbd/librbd.h | 1 + ceph/src/include/rgw/librgw_admin_user.h | 63 - ceph/src/isa-l/.drone.yml | 89 + ceph/src/isa-l/.travis.yml | 107 +- ceph/src/isa-l/CONTRIBUTING.md | 6 +- ceph/src/isa-l/Doxyfile | 6 +- ceph/src/isa-l/LICENSE | 2 +- ceph/src/isa-l/Makefile.am | 48 +- ceph/src/isa-l/Makefile.nmake | 72 +- 
ceph/src/isa-l/Makefile.unx | 20 +- ceph/src/isa-l/README.md | 30 +- ceph/src/isa-l/Release_notes.txt | 166 +- ceph/src/isa-l/configure.ac | 257 +- ceph/src/isa-l/crc/Makefile.am | 36 +- ceph/src/isa-l/crc/aarch64/Makefile.am | 47 + .../crc/aarch64/crc16_t10dif_copy_pmull.S | 423 ++ .../isa-l/crc/aarch64/crc16_t10dif_pmull.S | 404 ++ .../crc/aarch64/crc32_gzip_refl_hw_fold.S | 176 + .../isa-l/crc/aarch64/crc32_gzip_refl_pmull.S | 33 + .../isa-l/crc/aarch64/crc32_gzip_refl_pmull.h | 87 + .../isa-l/crc/aarch64/crc32_ieee_norm_pmull.S | 33 + .../isa-l/crc/aarch64/crc32_ieee_norm_pmull.h | 87 + .../crc/aarch64/crc32_iscsi_refl_hw_fold.S | 172 + .../crc/aarch64/crc32_iscsi_refl_pmull.S | 53 + .../crc/aarch64/crc32_iscsi_refl_pmull.h | 87 + .../crc/aarch64/crc32_norm_common_pmull.h | 135 + .../crc/aarch64/crc32_refl_common_pmull.h | 126 + .../isa-l/crc/aarch64/crc64_ecma_norm_pmull.S | 33 + .../isa-l/crc/aarch64/crc64_ecma_norm_pmull.h | 200 + .../isa-l/crc/aarch64/crc64_ecma_refl_pmull.S | 33 + .../isa-l/crc/aarch64/crc64_ecma_refl_pmull.h | 196 + .../isa-l/crc/aarch64/crc64_iso_norm_pmull.S | 33 + .../isa-l/crc/aarch64/crc64_iso_norm_pmull.h | 201 + .../isa-l/crc/aarch64/crc64_iso_refl_pmull.S | 33 + .../isa-l/crc/aarch64/crc64_iso_refl_pmull.h | 197 + .../crc/aarch64/crc64_jones_norm_pmull.S | 33 + .../crc/aarch64/crc64_jones_norm_pmull.h | 200 + .../crc/aarch64/crc64_jones_refl_pmull.S | 33 + .../crc/aarch64/crc64_jones_refl_pmull.h | 196 + .../crc/aarch64/crc64_norm_common_pmull.h | 129 + .../crc/aarch64/crc64_refl_common_pmull.h | 126 + .../crc/aarch64/crc_aarch64_dispatcher.c | 145 + ceph/src/isa-l/crc/aarch64/crc_common_pmull.h | 302 ++ .../isa-l/crc/aarch64/crc_multibinary_arm.S | 42 + ceph/src/isa-l/crc/crc16_t10dif_01.asm | 2 +- ceph/src/isa-l/crc/crc16_t10dif_02.asm | 653 +++ ceph/src/isa-l/crc/crc16_t10dif_by16_10.asm | 590 ++ ceph/src/isa-l/crc/crc16_t10dif_by4.asm | 2 +- ceph/src/isa-l/crc/crc16_t10dif_copy_by4.asm | 598 +++ .../isa-l/crc/crc16_t10dif_copy_by4_02.asm | 595 +++ .../crc16_t10dif_copy_perf.c} | 69 +- ceph/src/isa-l/crc/crc16_t10dif_copy_test.c | 175 + ceph/src/isa-l/crc/crc16_t10dif_op_perf.c | 116 + ceph/src/isa-l/crc/crc16_t10dif_perf.c | 14 +- ceph/src/isa-l/crc/crc16_t10dif_test.c | 58 +- ceph/src/isa-l/crc/crc32_funcs_test.c | 324 ++ .../src/isa-l/crc/crc32_gzip_refl_by16_10.asm | 568 ++ .../crc32_gzip_refl_by8.asm} | 33 +- ceph/src/isa-l/crc/crc32_gzip_refl_by8_02.asm | 555 ++ .../crc32_gzip_refl_perf.c} | 74 +- ceph/src/isa-l/crc/crc32_ieee_01.asm | 2 +- ceph/src/isa-l/crc/crc32_ieee_02.asm | 651 +++ ceph/src/isa-l/crc/crc32_ieee_by16_10.asm | 584 ++ ceph/src/isa-l/crc/crc32_ieee_by4.asm | 2 +- ceph/src/isa-l/crc/crc32_ieee_perf.c | 14 +- ceph/src/isa-l/crc/crc32_ieee_test.c | 174 - ceph/src/isa-l/crc/crc32_iscsi_00.asm | 37 +- ceph/src/isa-l/crc/crc32_iscsi_01.asm | 44 +- ceph/src/isa-l/crc/crc32_iscsi_perf.c | 14 +- ceph/src/isa-l/crc/crc32_iscsi_test.c | 171 - ceph/src/isa-l/crc/crc64_base.c | 877 ++- .../src/isa-l/crc/crc64_ecma_norm_by16_10.asm | 61 + ceph/src/isa-l/crc/crc64_ecma_norm_by8.asm | 2 +- .../src/isa-l/crc/crc64_ecma_refl_by16_10.asm | 61 + ceph/src/isa-l/crc/crc64_ecma_refl_by8.asm | 2 +- ceph/src/isa-l/crc/crc64_funcs_perf.c | 16 +- ceph/src/isa-l/crc/crc64_funcs_test.c | 109 +- ceph/src/isa-l/crc/crc64_iso_norm_by16_10.asm | 524 ++ ceph/src/isa-l/crc/crc64_iso_norm_by8.asm | 2 +- ceph/src/isa-l/crc/crc64_iso_refl_by16_10.asm | 494 ++ ceph/src/isa-l/crc/crc64_iso_refl_by8.asm | 2 +- .../isa-l/crc/crc64_jones_norm_by16_10.asm | 61 + 
ceph/src/isa-l/crc/crc64_jones_norm_by8.asm | 2 +- .../isa-l/crc/crc64_jones_refl_by16_10.asm | 61 + ceph/src/isa-l/crc/crc64_jones_refl_by8.asm | 2 +- ceph/src/isa-l/crc/crc64_multibinary.asm | 27 +- ceph/src/isa-l/crc/crc64_ref.h | 148 + ceph/src/isa-l/crc/crc_base.c | 253 +- ceph/src/isa-l/crc/crc_base_aliases.c | 10 + ceph/src/isa-l/crc/crc_multibinary.asm | 145 +- ceph/src/isa-l/crc/crc_ref.h | 140 + ceph/src/isa-l/crc/crc_simple_test.c | 1 + ceph/src/isa-l/erasure_code/Makefile.am | 71 +- .../isa-l/erasure_code/aarch64/Makefile.am | 45 + .../aarch64/ec_aarch64_dispatcher.c | 69 + .../aarch64/ec_aarch64_highlevel_func.c | 127 + .../erasure_code/aarch64/ec_multibinary_arm.S | 36 + .../aarch64/gf_2vect_dot_prod_neon.S | 399 ++ .../erasure_code/aarch64/gf_2vect_mad_neon.S | 401 ++ .../aarch64/gf_3vect_dot_prod_neon.S | 358 ++ .../erasure_code/aarch64/gf_3vect_mad_neon.S | 381 ++ .../aarch64/gf_4vect_dot_prod_neon.S | 421 ++ .../erasure_code/aarch64/gf_4vect_mad_neon.S | 455 ++ .../aarch64/gf_5vect_dot_prod_neon.S | 481 ++ .../erasure_code/aarch64/gf_5vect_mad_neon.S | 534 ++ .../erasure_code/aarch64/gf_6vect_mad_neon.S | 609 +++ .../aarch64/gf_vect_dot_prod_neon.S | 298 ++ .../erasure_code/aarch64/gf_vect_mad_neon.S | 314 ++ .../erasure_code/aarch64/gf_vect_mul_neon.S | 235 + ceph/src/isa-l/erasure_code/ec_base.c | 9 +- ceph/src/isa-l/erasure_code/ec_base.h | 10 +- .../isa-l/erasure_code/ec_highlevel_func.c | 102 +- .../src/isa-l/erasure_code/ec_multibinary.asm | 24 +- .../erasure_code/erasure_code_base_perf.c | 96 +- .../erasure_code/erasure_code_base_test.c | 2 +- .../isa-l/erasure_code/erasure_code_perf.c | 97 +- .../erasure_code/erasure_code_sse_perf.c | 168 - .../erasure_code/erasure_code_sse_test.c | 764 --- .../isa-l/erasure_code/erasure_code_test.c | 5 +- .../erasure_code/erasure_code_update_perf.c | 189 +- .../erasure_code/erasure_code_update_test.c | 8 +- .../isa-l/erasure_code/gen_rs_matrix_limits.c | 115 + .../erasure_code/gf_2vect_dot_prod_avx.asm | 4 +- .../erasure_code/gf_2vect_dot_prod_avx2.asm | 4 +- .../erasure_code/gf_2vect_dot_prod_avx512.asm | 4 +- .../erasure_code/gf_2vect_dot_prod_sse.asm | 4 +- .../erasure_code/gf_2vect_dot_prod_sse_perf.c | 216 - .../erasure_code/gf_2vect_dot_prod_sse_test.c | 2 +- .../isa-l/erasure_code/gf_2vect_mad_avx.asm | 4 +- .../isa-l/erasure_code/gf_2vect_mad_avx2.asm | 4 +- .../erasure_code/gf_2vect_mad_avx512.asm | 4 +- .../isa-l/erasure_code/gf_2vect_mad_sse.asm | 4 +- .../erasure_code/gf_3vect_dot_prod_avx.asm | 4 +- .../erasure_code/gf_3vect_dot_prod_avx2.asm | 4 +- .../erasure_code/gf_3vect_dot_prod_avx512.asm | 4 +- .../erasure_code/gf_3vect_dot_prod_sse.asm | 4 +- .../erasure_code/gf_3vect_dot_prod_sse_perf.c | 246 - .../erasure_code/gf_3vect_dot_prod_sse_test.c | 2 +- .../isa-l/erasure_code/gf_3vect_mad_avx.asm | 4 +- .../isa-l/erasure_code/gf_3vect_mad_avx2.asm | 4 +- .../erasure_code/gf_3vect_mad_avx512.asm | 4 +- .../isa-l/erasure_code/gf_3vect_mad_sse.asm | 4 +- .../erasure_code/gf_4vect_dot_prod_avx.asm | 4 +- .../erasure_code/gf_4vect_dot_prod_avx2.asm | 4 +- .../erasure_code/gf_4vect_dot_prod_avx512.asm | 4 +- .../erasure_code/gf_4vect_dot_prod_sse.asm | 4 +- .../erasure_code/gf_4vect_dot_prod_sse_perf.c | 281 - .../erasure_code/gf_4vect_dot_prod_sse_test.c | 2 +- .../isa-l/erasure_code/gf_4vect_mad_avx.asm | 4 +- .../isa-l/erasure_code/gf_4vect_mad_avx2.asm | 4 +- .../erasure_code/gf_4vect_mad_avx512.asm | 4 +- .../isa-l/erasure_code/gf_4vect_mad_sse.asm | 4 +- .../erasure_code/gf_5vect_dot_prod_avx.asm | 4 +- 
.../erasure_code/gf_5vect_dot_prod_avx2.asm | 4 +- .../erasure_code/gf_5vect_dot_prod_avx512.asm | 335 ++ .../erasure_code/gf_5vect_dot_prod_sse.asm | 4 +- .../erasure_code/gf_5vect_dot_prod_sse_perf.c | 319 -- .../erasure_code/gf_5vect_dot_prod_sse_test.c | 2 +- .../isa-l/erasure_code/gf_5vect_mad_avx.asm | 4 +- .../isa-l/erasure_code/gf_5vect_mad_avx2.asm | 4 +- .../erasure_code/gf_5vect_mad_avx512.asm | 287 + .../isa-l/erasure_code/gf_5vect_mad_sse.asm | 4 +- .../erasure_code/gf_6vect_dot_prod_avx.asm | 4 +- .../erasure_code/gf_6vect_dot_prod_avx2.asm | 4 +- .../erasure_code/gf_6vect_dot_prod_avx512.asm | 354 ++ .../erasure_code/gf_6vect_dot_prod_sse.asm | 4 +- .../erasure_code/gf_6vect_dot_prod_sse_perf.c | 352 -- .../erasure_code/gf_6vect_dot_prod_sse_test.c | 2 +- .../isa-l/erasure_code/gf_6vect_mad_avx.asm | 4 +- .../isa-l/erasure_code/gf_6vect_mad_avx2.asm | 4 +- .../erasure_code/gf_6vect_mad_avx512.asm | 321 ++ .../isa-l/erasure_code/gf_6vect_mad_sse.asm | 4 +- ceph/src/isa-l/erasure_code/gf_inverse_test.c | 2 +- .../erasure_code/gf_vect_dot_prod_1tbl.c | 62 +- .../erasure_code/gf_vect_dot_prod_avx.asm | 4 +- .../erasure_code/gf_vect_dot_prod_avx2.asm | 4 +- .../erasure_code/gf_vect_dot_prod_avx512.asm | 4 +- .../erasure_code/gf_vect_dot_prod_avx_perf.c | 184 - .../erasure_code/gf_vect_dot_prod_avx_test.c | 525 -- .../erasure_code/gf_vect_dot_prod_base_test.c | 2 +- .../erasure_code/gf_vect_dot_prod_perf.c | 54 +- .../erasure_code/gf_vect_dot_prod_sse.asm | 4 +- .../erasure_code/gf_vect_dot_prod_sse_perf.c | 184 - .../erasure_code/gf_vect_dot_prod_sse_test.c | 528 -- .../erasure_code/gf_vect_dot_prod_test.c | 2 +- .../isa-l/erasure_code/gf_vect_mad_avx.asm | 4 +- .../isa-l/erasure_code/gf_vect_mad_avx2.asm | 4 +- .../isa-l/erasure_code/gf_vect_mad_avx512.asm | 4 +- .../src/isa-l/erasure_code/gf_vect_mad_perf.c | 385 -- .../isa-l/erasure_code/gf_vect_mad_sse.asm | 4 +- .../src/isa-l/erasure_code/gf_vect_mad_test.c | 14 +- .../isa-l/erasure_code/gf_vect_mul_avx.asm | 4 +- .../isa-l/erasure_code/gf_vect_mul_avx_test.c | 143 - .../erasure_code/gf_vect_mul_base_test.c | 2 +- .../src/isa-l/erasure_code/gf_vect_mul_perf.c | 31 +- .../isa-l/erasure_code/gf_vect_mul_sse.asm | 4 +- .../isa-l/erasure_code/gf_vect_mul_sse_test.c | 160 - .../src/isa-l/erasure_code/gf_vect_mul_test.c | 50 +- .../isa-l/erasure_code/ppc64le/Makefile.am | 15 + .../isa-l/erasure_code/ppc64le/ec_base_vsx.c | 97 + .../isa-l/erasure_code/ppc64le/ec_base_vsx.h | 338 ++ .../ppc64le/gf_2vect_dot_prod_vsx.c | 83 + .../erasure_code/ppc64le/gf_2vect_mad_vsx.c | 65 + .../ppc64le/gf_3vect_dot_prod_vsx.c | 104 + .../erasure_code/ppc64le/gf_3vect_mad_vsx.c | 84 + .../ppc64le/gf_4vect_dot_prod_vsx.c | 124 + .../erasure_code/ppc64le/gf_4vect_mad_vsx.c | 103 + .../ppc64le/gf_5vect_dot_prod_vsx.c | 145 + .../erasure_code/ppc64le/gf_5vect_mad_vsx.c | 122 + .../ppc64le/gf_6vect_dot_prod_vsx.c | 166 + .../erasure_code/ppc64le/gf_6vect_mad_vsx.c | 142 + .../ppc64le/gf_vect_dot_prod_vsx.c | 85 + .../erasure_code/ppc64le/gf_vect_mad_vsx.c | 48 + .../erasure_code/ppc64le/gf_vect_mul_vsx.c | 61 + ceph/src/isa-l/examples/ec/Makefile.am | 33 + ceph/src/isa-l/examples/ec/Makefile.unx | 8 + .../isa-l/examples/ec/ec_piggyback_example.c | 506 ++ .../src/isa-l/examples/ec/ec_simple_example.c | 277 + ceph/src/isa-l/igzip/Makefile.am | 76 +- .../src/isa-l/igzip/aarch64/bitbuf2_aarch64.h | 57 + .../isa-l/igzip/aarch64/data_struct_aarch64.h | 226 + ceph/src/isa-l/igzip/aarch64/encode_df.S | 159 + ceph/src/isa-l/igzip/aarch64/gen_icf_map.S | 266 + 
.../src/isa-l/igzip/aarch64/huffman_aarch64.h | 173 + .../igzip_decode_huffman_code_block_aarch64.S | 689 +++ .../aarch64/igzip_deflate_body_aarch64.S | 261 + .../aarch64/igzip_deflate_finish_aarch64.S | 264 + .../aarch64/igzip_deflate_hash_aarch64.S | 95 + .../aarch64/igzip_inflate_multibinary_arm64.S | 32 + .../igzip/aarch64/igzip_isal_adler32_neon.S | 178 + .../igzip_multibinary_aarch64_dispatcher.c | 188 + .../igzip/aarch64/igzip_multibinary_arm64.S | 50 + .../igzip/aarch64/igzip_set_long_icf_fg.S | 194 + .../aarch64/isal_deflate_icf_body_hash_hist.S | 364 ++ .../isal_deflate_icf_finish_hash_hist.S | 397 ++ .../igzip/aarch64/isal_update_histogram.S | 311 ++ .../isa-l/igzip/aarch64/lz0a_const_aarch64.h | 72 + .../src/isa-l/igzip/aarch64/options_aarch64.h | 71 + ceph/src/isa-l/igzip/aarch64/stdmac_aarch64.h | 57 + ceph/src/isa-l/igzip/adler32_avx2_4.asm | 292 + ceph/src/isa-l/igzip/adler32_base.c | 63 + ...igzip_sync_flush_perf.c => adler32_perf.c} | 76 +- ceph/src/isa-l/igzip/adler32_sse.asm | 249 + ceph/src/isa-l/igzip/bitbuf2.asm | 166 +- ceph/src/isa-l/igzip/bitbuf2.h | 88 +- ceph/src/isa-l/igzip/checksum32_funcs_test.c | 308 ++ .../{crc_inflate.h => checksum_test_ref.h} | 39 +- ceph/src/isa-l/igzip/crc32_gzip_base.c | 106 - ceph/src/isa-l/igzip/data_struct2.asm | 108 +- ceph/src/isa-l/igzip/encode_df.c | 4 +- ceph/src/isa-l/igzip/encode_df.h | 11 +- ceph/src/isa-l/igzip/encode_df_04.asm | 574 +- .../{encode_df_asm.asm => encode_df_06.asm} | 365 +- .../isa-l/igzip/generate_custom_hufftables.c | 11 +- .../src/isa-l/igzip/generate_static_inflate.c | 163 + ceph/src/isa-l/igzip/heap_macros.asm | 29 + ceph/src/isa-l/igzip/huff_codes.c | 1355 ++--- ceph/src/isa-l/igzip/huff_codes.h | 148 +- ceph/src/isa-l/igzip/huffman.asm | 1 + ceph/src/isa-l/igzip/huffman.h | 130 +- ceph/src/isa-l/igzip/hufftables_c.c | 10 + ceph/src/isa-l/igzip/igzip.c | 1367 +++-- ceph/src/isa-l/igzip/igzip_base.c | 60 +- ceph/src/isa-l/igzip/igzip_base_aliases.c | 90 +- ceph/src/isa-l/igzip/igzip_body.asm | 485 +- ceph/src/isa-l/igzip/igzip_body_01.asm | 7 - ceph/src/isa-l/igzip/igzip_body_02.asm | 7 - ceph/src/isa-l/igzip/igzip_body_04.asm | 8 - .../isa-l/igzip/igzip_build_hash_table_perf.c | 38 + ceph/src/isa-l/igzip/igzip_checksums.h | 12 + ceph/src/isa-l/igzip/igzip_compare_types.asm | 470 +- .../igzip/igzip_decode_block_stateless.asm | 391 +- ceph/src/isa-l/igzip/igzip_deflate_hash.asm | 165 + ceph/src/isa-l/igzip/igzip_file_perf.c | 253 +- ceph/src/isa-l/igzip/igzip_finish.asm | 39 +- ceph/src/isa-l/igzip/igzip_fuzz_inflate.c | 104 - .../isa-l/igzip/igzip_gen_icf_map_lh1_04.asm | 741 +++ .../isa-l/igzip/igzip_gen_icf_map_lh1_06.asm | 576 ++ ceph/src/isa-l/igzip/igzip_hist_perf.c | 43 +- ceph/src/isa-l/igzip/igzip_icf_base.c | 217 +- ceph/src/isa-l/igzip/igzip_icf_body.asm | 513 -- ceph/src/isa-l/igzip/igzip_icf_body.c | 326 ++ ceph/src/isa-l/igzip/igzip_icf_body_01.asm | 7 - ceph/src/isa-l/igzip/igzip_icf_body_02.asm | 7 - ceph/src/isa-l/igzip/igzip_icf_body_04.asm | 8 - .../isa-l/igzip/igzip_icf_body_h1_gr_bt.asm | 901 ++++ ceph/src/isa-l/igzip/igzip_icf_finish.asm | 137 +- ceph/src/isa-l/igzip/igzip_inflate.c | 1953 +++++-- .../isa-l/igzip/igzip_inflate_multibinary.asm | 6 - ceph/src/isa-l/igzip/igzip_inflate_perf.c | 241 - ceph/src/isa-l/igzip/igzip_inflate_test.c | 37 +- .../src/isa-l/igzip/igzip_level_buf_structs.h | 36 +- ceph/src/isa-l/igzip/igzip_multibinary.asm | 89 +- ceph/src/isa-l/igzip/igzip_perf.c | 818 ++- ceph/src/isa-l/igzip/igzip_rand_test.c | 1652 ++++-- 
.../isa-l/igzip/igzip_semi_dyn_file_perf.c | 198 +- .../isa-l/igzip/igzip_set_long_icf_fg_04.asm | 295 + .../isa-l/igzip/igzip_set_long_icf_fg_06.asm | 367 ++ .../isa-l/igzip/igzip_stateless_file_perf.c | 227 - .../isa-l/igzip/igzip_sync_flush_file_perf.c | 163 - .../isa-l/igzip/igzip_update_histogram.asm | 85 +- ceph/src/isa-l/igzip/igzip_wrapper.h | 52 + ceph/src/isa-l/igzip/igzip_wrapper_hdr_test.c | 890 ++++ ceph/src/isa-l/igzip/inflate_data_structs.asm | 39 +- ceph/src/isa-l/igzip/inflate_std_vects.h | 90 +- ceph/src/isa-l/igzip/lz0a_const.asm | 17 +- ceph/src/isa-l/igzip/options.asm | 9 - ceph/src/isa-l/igzip/proc_heap.asm | 29 + ceph/src/isa-l/igzip/proc_heap_base.c | 9 +- ceph/src/isa-l/igzip/rfc1951_lookup.asm | 31 +- ceph/src/isa-l/igzip/static_inflate.h | 1346 +++++ ceph/src/isa-l/igzip/stdmac.asm | 214 +- ceph/src/isa-l/include/aarch64_multibinary.h | 221 + ceph/src/isa-l/include/crc.h | 80 +- ceph/src/isa-l/include/crc64.h | 2 +- ceph/src/isa-l/include/erasure_code.h | 338 +- ceph/src/isa-l/include/gf_vect_mul.h | 18 +- ceph/src/isa-l/include/igzip_lib.h | 417 +- ceph/src/isa-l/include/mem_routines.h | 64 + ceph/src/isa-l/include/multibinary.asm | 149 +- ceph/src/isa-l/include/raid.h | 3 + ceph/src/isa-l/include/reg_sizes.asm | 105 +- ceph/src/isa-l/include/test.h | 258 +- ceph/src/isa-l/include/types.h | 13 +- ceph/src/isa-l/include/unaligned.h | 76 + ceph/src/isa-l/isa-l.def | 15 + ceph/src/isa-l/make.inc | 128 +- ceph/src/isa-l/mem/Makefile.am | 48 + ceph/src/isa-l/mem/aarch64/Makefile.am | 33 + .../mem/aarch64/mem_aarch64_dispatcher.c | 39 + .../isa-l/mem/aarch64/mem_multibinary_arm.S | 33 + .../isa-l/mem/aarch64/mem_zero_detect_neon.S | 243 + .../mem_multibinary.asm} | 57 +- ceph/src/isa-l/mem/mem_zero_detect_avx.asm | 189 + ceph/src/isa-l/mem/mem_zero_detect_base.c | 69 + .../isa-l/mem/mem_zero_detect_base_aliases.c | 38 + ceph/src/isa-l/mem/mem_zero_detect_perf.c | 60 + ceph/src/isa-l/mem/mem_zero_detect_sse.asm | 176 + ceph/src/isa-l/mem/mem_zero_detect_test.c | 226 + ceph/src/isa-l/programs/Makefile.am | 38 + ceph/src/isa-l/programs/igzip.1 | 87 + ceph/src/isa-l/programs/igzip.1.h2m | 31 + ceph/src/isa-l/programs/igzip_cli.c | 1155 ++++ ceph/src/isa-l/programs/igzip_cli_check.sh | 245 + ceph/src/isa-l/raid/Makefile.am | 5 +- ceph/src/isa-l/raid/aarch64/Makefile.am | 36 + ceph/src/isa-l/raid/aarch64/pq_check_neon.S | 341 ++ ceph/src/isa-l/raid/aarch64/pq_gen_neon.S | 282 + .../raid/aarch64/raid_aarch64_dispatcher.c | 61 + .../isa-l/raid/aarch64/raid_multibinary_arm.S | 36 + ceph/src/isa-l/raid/aarch64/xor_check_neon.S | 271 + ceph/src/isa-l/raid/aarch64/xor_gen_neon.S | 264 + ceph/src/isa-l/raid/pq_check_sse.asm | 2 +- ceph/src/isa-l/raid/pq_check_sse_i32.asm | 2 +- ceph/src/isa-l/raid/pq_check_test.c | 2 +- ceph/src/isa-l/raid/pq_gen_avx.asm | 2 +- ceph/src/isa-l/raid/pq_gen_avx2.asm | 2 +- ceph/src/isa-l/raid/pq_gen_avx512.asm | 2 +- ceph/src/isa-l/raid/pq_gen_perf.c | 17 +- ceph/src/isa-l/raid/pq_gen_sse.asm | 2 +- ceph/src/isa-l/raid/pq_gen_sse_i32.asm | 2 +- ceph/src/isa-l/raid/pq_gen_test.c | 2 +- ceph/src/isa-l/raid/raid_base.c | 2 +- ceph/src/isa-l/raid/raid_multibinary.asm | 20 +- ceph/src/isa-l/raid/raid_multibinary_i32.asm | 6 - ceph/src/isa-l/raid/xor_check_sse.asm | 2 +- ceph/src/isa-l/raid/xor_check_test.c | 2 +- ceph/src/isa-l/raid/xor_example.c | 10 +- ceph/src/isa-l/raid/xor_gen_avx.asm | 2 +- ceph/src/isa-l/raid/xor_gen_avx512.asm | 2 +- ceph/src/isa-l/raid/xor_gen_perf.c | 18 +- ceph/src/isa-l/raid/xor_gen_sse.asm | 2 +- 
ceph/src/isa-l/raid/xor_gen_test.c | 2 +- ceph/src/isa-l/tests/fuzz/Makefile.am | 52 + ceph/src/isa-l/tests/fuzz/Makefile.unx | 12 + .../fuzz/igzip_checked_inflate_fuzz_test.c | 72 + .../tests/fuzz/igzip_dump_inflate_corpus.c | 40 + .../src/isa-l/tests/fuzz/igzip_fuzz_inflate.c | 41 + .../fuzz/igzip_simple_inflate_fuzz_test.c | 22 + .../fuzz/igzip_simple_round_trip_fuzz_test.c | 130 + ceph/src/isa-l/tools/check_format.sh | 87 + ceph/src/isa-l/tools/iindent | 2 +- .../isa-l/tools/remove_trailing_whitespace.sh | 2 + ceph/src/isa-l/tools/test_autorun.sh | 63 + ceph/src/isa-l/tools/test_checks.sh | 115 + ceph/src/isa-l/tools/test_extended.sh | 208 + ceph/src/isa-l/tools/test_fuzz.sh | 171 + ceph/src/isa-l/tools/test_tools.sh | 11 + ceph/src/journal/ObjectRecorder.cc | 20 +- ceph/src/journal/ObjectRecorder.h | 2 +- ceph/src/krbd.cc | 57 +- ceph/src/librados/RadosClient.cc | 38 +- ceph/src/librados/RadosClient.h | 9 +- ceph/src/librbd/api/Migration.cc | 127 +- ceph/src/librbd/api/Migration.h | 3 + .../librbd/cache/ParentCacheObjectDispatch.cc | 117 +- .../librbd/cache/ParentCacheObjectDispatch.h | 29 +- ceph/src/librbd/image/ListWatchersRequest.cc | 14 + ceph/src/librbd/image/OpenRequest.cc | 7 +- ceph/src/librbd/image/RefreshRequest.cc | 68 +- ceph/src/librbd/image/RefreshRequest.h | 5 +- ceph/src/librbd/io/AioCompletion.cc | 10 +- ceph/src/librbd/io/ImageRequest.h | 2 +- ceph/src/librbd/io/ImageRequestWQ.cc | 10 +- ceph/src/librbd/io/ObjectRequest.cc | 50 +- ceph/src/librbd/io/Utils.cc | 54 + ceph/src/librbd/io/Utils.h | 12 + .../operation/DisableFeaturesRequest.cc | 2 +- ceph/src/mds/Anchor.cc | 7 +- ceph/src/mds/Anchor.h | 6 +- ceph/src/mds/BatchOp.h | 2 +- ceph/src/mds/Beacon.cc | 8 +- ceph/src/mds/CDir.cc | 46 +- ceph/src/mds/CDir.h | 8 +- ceph/src/mds/CInode.cc | 75 +- ceph/src/mds/CInode.h | 20 +- ceph/src/mds/Capability.cc | 7 +- ceph/src/mds/Capability.h | 3 + ceph/src/mds/FSMap.cc | 2 +- ceph/src/mds/Locker.cc | 149 +- ceph/src/mds/Locker.h | 6 +- ceph/src/mds/MDCache.cc | 131 +- ceph/src/mds/MDCache.h | 10 +- ceph/src/mds/MDSMap.cc | 4 +- ceph/src/mds/MDSRank.cc | 56 +- ceph/src/mds/MDSRank.h | 2 + ceph/src/mds/Migrator.cc | 32 + ceph/src/mds/Mutation.cc | 30 +- ceph/src/mds/Mutation.h | 11 +- ceph/src/mds/OpenFileTable.cc | 179 +- ceph/src/mds/OpenFileTable.h | 14 +- ceph/src/mds/PurgeQueue.cc | 12 +- ceph/src/mds/ScrubStack.h | 4 + ceph/src/mds/Server.cc | 187 +- ceph/src/mds/Server.h | 5 +- ceph/src/mds/SessionMap.cc | 3 +- ceph/src/mds/SessionMap.h | 4 + ceph/src/mds/StrayManager.cc | 3 +- ceph/src/mds/mdstypes.h | 5 + ceph/src/messages/MMDSBeacon.h | 12 +- ceph/src/messages/MMgrReport.h | 8 +- ceph/src/mgr/DaemonServer.cc | 17 +- ceph/src/mgr/DaemonServer.h | 3 +- ceph/src/mgr/MgrClient.cc | 1 + ceph/src/mgr/MgrStandby.cc | 2 +- ceph/src/mgr/PyModuleRegistry.h | 3 +- ceph/src/mon/MDSMonitor.cc | 6 +- ceph/src/mon/MgrStatMonitor.cc | 9 +- ceph/src/mon/MonClient.cc | 7 +- ceph/src/mon/MonMap.cc | 77 +- ceph/src/mon/MonMap.h | 17 +- ceph/src/mon/Monitor.cc | 11 +- ceph/src/mon/MonmapMonitor.cc | 27 +- ceph/src/mon/OSDMonitor.cc | 1 + ceph/src/mon/PGMap.cc | 5 +- ceph/src/mon/Session.h | 3 + ceph/src/msg/async/ProtocolV2.cc | 6 +- ceph/src/msg/async/Stack.cc | 4 +- ceph/src/msg/msg_types.cc | 16 +- ceph/src/mypy.ini | 4 + ceph/src/neorados/RADOSImpl.cc | 121 + ceph/src/os/ObjectStore.h | 7 + ceph/src/os/bluestore/BlueFS.cc | 33 +- ceph/src/os/bluestore/BlueStore.cc | 417 +- ceph/src/os/bluestore/BlueStore.h | 35 +- ceph/src/os/bluestore/bluestore_types.cc | 6 + 
ceph/src/os/bluestore/bluestore_types.h | 7 +- ceph/src/os/kstore/KStore.cc | 27 +- ceph/src/osd/OSD.cc | 2 +- ceph/src/osd/PG.cc | 36 +- ceph/src/osd/PGBackend.cc | 51 +- ceph/src/osd/PGBackend.h | 1 + ceph/src/osd/PrimaryLogPG.h | 3 + ceph/src/osdc/Journaler.cc | 6 +- ceph/src/osdc/ObjectCacher.cc | 9 + ceph/src/osdc/Objecter.cc | 28 +- ceph/src/osdc/Objecter.h | 4 +- ceph/src/pybind/ceph_volume_client.py | 110 +- ceph/src/pybind/cephfs/cephfs.pyx | 26 +- ceph/src/pybind/mgr/cephadm/inventory.py | 135 +- ceph/src/pybind/mgr/cephadm/migrations.py | 2 +- ceph/src/pybind/mgr/cephadm/module.py | 1037 ++-- ceph/src/pybind/mgr/cephadm/schedule.py | 72 +- ceph/src/pybind/mgr/cephadm/serve.py | 634 +++ .../mgr/cephadm/services/cephadmservice.py | 332 +- .../pybind/mgr/cephadm/services/container.py | 29 + ceph/src/pybind/mgr/cephadm/services/iscsi.py | 18 +- .../pybind/mgr/cephadm/services/monitoring.py | 40 +- ceph/src/pybind/mgr/cephadm/services/nfs.py | 278 +- ceph/src/pybind/mgr/cephadm/services/osd.py | 103 +- ceph/src/pybind/mgr/cephadm/template.py | 48 +- .../templates/blink_device_light_cmd.j2 | 1 + .../templates/services/nfs/ganesha.conf.j2 | 5 + .../services/prometheus/prometheus.yml.j2 | 3 +- ceph/src/pybind/mgr/cephadm/tests/fixtures.py | 25 +- .../pybind/mgr/cephadm/tests/test_cephadm.py | 308 +- .../mgr/cephadm/tests/test_completion.py | 1 - .../mgr/cephadm/tests/test_migration.py | 124 +- .../mgr/cephadm/tests/test_osd_removal.py | 32 +- .../mgr/cephadm/tests/test_scheduling.py | 280 +- .../pybind/mgr/cephadm/tests/test_services.py | 84 +- .../src/pybind/mgr/cephadm/tests/test_spec.py | 96 +- .../pybind/mgr/cephadm/tests/test_template.py | 5 +- .../pybind/mgr/cephadm/tests/test_upgrade.py | 65 +- .../pybind/mgr/cephadm/tests/test_utils.py | 34 - ceph/src/pybind/mgr/cephadm/upgrade.py | 143 +- ceph/src/pybind/mgr/cephadm/utils.py | 42 +- .../mgr/dashboard/controllers/__init__.py | 15 + .../pybind/mgr/dashboard/controllers/auth.py | 4 +- .../mgr/dashboard/controllers/cephfs.py | 8 +- .../pybind/mgr/dashboard/controllers/docs.py | 4 +- .../pybind/mgr/dashboard/controllers/host.py | 6 +- .../pybind/mgr/dashboard/controllers/iscsi.py | 99 +- .../mgr/dashboard/controllers/mgr_modules.py | 5 +- .../mgr/dashboard/controllers/nfsganesha.py | 77 +- .../pybind/mgr/dashboard/controllers/osd.py | 26 +- .../pybind/mgr/dashboard/controllers/rbd.py | 9 +- .../dashboard/controllers/rbd_mirroring.py | 4 +- .../pybind/mgr/dashboard/controllers/rgw.py | 24 +- .../mgr/dashboard/controllers/service.py | 4 +- .../pybind/mgr/dashboard/controllers/user.py | 3 +- .../cluster/mgr-modules.e2e-spec.ts | 2 +- .../integration/ui/dashboard.e2e-spec.ts | 21 +- .../dist/en-US/1.4a38b529302ffa3f0c24.js | 1 - .../dist/en-US/1.9e79c41bbaed982a50af.js | 1 + .../frontend/dist/en-US/3rdpartylicenses.txt | 24 + .../dist/en-US/5.3532f17ccbf4b268177b.js | 1 + .../dist/en-US/5.d6b08e1c042ba6fccdd6.js | 1 - .../dist/en-US/6.1818f80c2e905e85a90f.js | 1 - .../dist/en-US/6.9a60f7741889f52ed7ae.js | 1 + .../dist/en-US/7.1891b10149a7c2d765ac.js | 1 + .../dist/en-US/7.8c5c2bbb556260a5cc3e.js | 1 - .../dist/en-US/8.88af57a0fd5b75779391.js | 1 + .../dist/en-US/8.94051a156c6d11b38ada.js | 1 - .../dashboard/frontend/dist/en-US/index.html | 4 +- .../dist/en-US/main.c43d13b597196a5f022f.js | 2 + .../main.c43d13b597196a5f022f.js.LICENSE.txt | 115 + .../dist/en-US/main.cb10cd7f4a550e7a33c5.js | 1 - ...2.js => polyfills.14628a6f4ca10d3018fd.js} | 1 + ...yfills.14628a6f4ca10d3018fd.js.LICENSE.txt | 7 + 
.../en-US/runtime.0907482258dfadeab004.js | 1 - .../en-US/runtime.0e1c754813ff535e4bd5.js | 1 + .../en-US/scripts.76632aba1e576c7cc54a.js | 3 +- ...cripts.76632aba1e576c7cc54a.js.LICENSE.txt | 6 + .../en-US/styles.0d3cd206c82d5fe7076a.css | 17 + .../en-US/styles.58a2c96c3b87bd02e7a7.css | 17 - .../mgr/dashboard/frontend/package-lock.json | 4738 ++++++++++------- .../mgr/dashboard/frontend/package.json | 10 +- .../frontend/src/app/app-routing.module.ts | 2 +- .../iscsi-target-list.component.spec.ts | 12 +- .../iscsi-target-list.component.ts | 20 +- .../rbd-configuration-form.component.html | 4 +- .../rbd-details/rbd-details.component.html | 8 +- .../rbd-details/rbd-details.component.ts | 12 +- .../block/rbd-form/rbd-form.component.html | 4 +- .../block/rbd-list/rbd-list.component.spec.ts | 2 +- .../ceph/block/rbd-list/rbd-list.component.ts | 17 +- .../rbd-namespace-list.component.ts | 9 +- .../rbd-snapshot-actions.model.ts | 18 +- .../rbd-trash-move-modal.component.spec.ts | 2 +- .../configuration-form.component.html | 4 +- .../cluster/crushmap/crushmap.component.scss | 7 + .../host-details/host-details.component.html | 2 +- .../cluster/hosts/hosts.component.spec.ts | 50 +- .../app/ceph/cluster/hosts/hosts.component.ts | 35 +- .../mgr-module-list.component.ts | 15 +- .../osd-creation-preview-modal.component.html | 2 +- .../osd-details/osd-details.component.html | 2 +- .../osd-details/osd-details.component.spec.ts | 7 +- .../osd-flags-modal.component.html | 3 +- .../osd/osd-list/osd-list.component.html | 2 +- .../osd-pg-scrub-modal.component.html | 7 +- .../osd-recv-speed-modal.component.html | 3 +- .../rules-list/rules-list.component.html | 15 +- .../rules-list/rules-list.component.spec.ts | 4 +- .../telemetry/telemetry.component.html | 10 +- .../health-pie/health-pie-color.enum.ts | 7 - .../health-pie/health-pie.component.html | 1 + .../health-pie/health-pie.component.scss | 7 +- .../health-pie/health-pie.component.spec.ts | 10 + .../health-pie/health-pie.component.ts | 151 +- .../dashboard/health/health.component.html | 159 +- .../dashboard/health/health.component.spec.ts | 36 +- .../ceph/dashboard/health/health.component.ts | 160 +- .../info-card/info-card.component.html | 4 +- .../info-card/info-card.component.scss | 4 +- .../ceph/nfs/nfs-form/nfs-form.component.ts | 3 +- .../pool-details/pool-details.component.html | 2 +- .../pool/pool-form/pool-form.component.html | 4 + .../pool-form/pool-form.component.spec.ts | 11 + .../pool-list/pool-list.component.spec.ts | 8 +- .../pool/pool-list/pool-list.component.ts | 19 +- .../rgw-bucket-details.component.scss | 7 + .../rgw-bucket-details.component.spec.ts | 16 +- .../rgw-bucket-details.component.ts | 16 +- .../src/app/ceph/shared/ceph-shared.module.ts | 5 +- .../smart-list/smart-list.component.html | 75 +- .../smart-list/smart-list.component.spec.ts | 100 +- .../shared/smart-list/smart-list.component.ts | 8 +- .../login-password-form.component.html | 2 +- .../app/core/auth/login/login.component.scss | 4 - .../auth/user-form/user-form.component.html | 2 + .../navigation/navigation.component.html | 2 +- .../notifications.component.html | 3 + .../notifications.component.scss | 18 + .../notifications.component.spec.ts | 15 + .../notifications/notifications.component.ts | 8 + .../src/app/shared/api/nfs.service.spec.ts | 4 +- .../src/app/shared/api/nfs.service.ts | 4 +- .../app/shared/api/rgw-bucket.service.spec.ts | 29 +- .../src/app/shared/api/rgw-bucket.service.ts | 17 +- .../components/grafana/grafana.component.scss | 4 + 
.../components/grafana/grafana.component.ts | 3 +- .../telemetry-notification.component.spec.ts | 41 +- .../telemetry-notification.component.ts | 25 +- .../table-actions.component.spec.ts | 6 +- .../table-actions/table-actions.component.ts | 7 +- .../datatable/table/table.component.html | 5 +- .../shared/datatable/table/table.component.ts | 13 +- .../copy2clipboard-button.directive.spec.ts | 55 +- .../copy2clipboard-button.directive.ts | 37 +- .../shared/directives/directives.module.ts | 13 +- .../form-input-disable.directive.spec.ts | 75 + .../form-input-disable.directive.ts | 30 + .../directives/form-scope.directive.spec.ts | 8 + .../shared/directives/form-scope.directive.ts | 8 + .../src/app/shared/models/cd-table-action.ts | 16 +- .../mgr/dashboard/frontend/src/styles.scss | 34 +- .../src/styles/bootstrap-extends.scss | 19 + .../frontend/src/styles/defaults.scss | 15 + ceph/src/pybind/mgr/dashboard/module.py | 23 +- .../pybind/mgr/dashboard/services/ganesha.py | 316 +- .../mgr/dashboard/services/iscsi_client.py | 9 + .../mgr/dashboard/services/orchestrator.py | 11 +- .../mgr/dashboard/services/rgw_client.py | 41 +- ceph/src/pybind/mgr/dashboard/services/sso.py | 10 +- .../mgr/dashboard/tests/test_ganesha.py | 466 +- .../pybind/mgr/dashboard/tests/test_iscsi.py | 88 + .../mgr/dashboard/tests/test_rgw_client.py | 74 +- ceph/src/pybind/mgr/dashboard/tools.py | 15 +- ceph/src/pybind/mgr/devicehealth/module.py | 77 +- ceph/src/pybind/mgr/insights/tox.ini | 1 + .../src/pybind/mgr/orchestrator/_interface.py | 10 +- ceph/src/pybind/mgr/orchestrator/module.py | 140 +- .../orchestrator/tests/test_orchestrator.py | 62 +- ceph/src/pybind/mgr/prometheus/module.py | 9 +- ceph/src/pybind/mgr/rook/module.py | 13 +- ceph/src/pybind/mgr/telemetry/module.py | 16 +- ceph/src/pybind/mgr/tests/__init__.py | 22 +- .../src/pybind/mgr/volumes/fs/async_cloner.py | 3 + ceph/src/pybind/mgr/volumes/fs/async_job.py | 27 + ceph/src/pybind/mgr/volumes/fs/nfs.py | 5 +- .../pybind/mgr/volumes/fs/operations/lock.py | 6 + ceph/src/pybind/mgr/volumes/fs/volume.py | 3 +- ceph/src/pybind/mgr/volumes/module.py | 101 +- ceph/src/pybind/mgr/zabbix/module.py | 2 +- ceph/src/pybind/rados/rados.pyx | 20 +- ceph/src/pybind/rbd/rbd.pyx | 2 + .../ceph/deployment/drive_group.py | 30 +- .../deployment/drive_selection/selector.py | 12 +- .../ceph/deployment/inventory.py | 5 +- .../ceph/deployment/service_spec.py | 116 +- .../ceph/deployment/translate.py | 18 - .../python-common/ceph/deployment/utils.py | 32 + .../ceph/tests/test_disk_selector.py | 26 +- .../ceph/tests/test_drive_group.py | 30 +- .../ceph/tests/test_service_spec.py | 20 +- .../python-common/ceph/tests/test_utils.py | 37 + ceph/src/rgw/CMakeLists.txt | 45 +- ceph/src/rgw/jwt-cpp/base.h | 168 + ceph/src/rgw/jwt-cpp/jwt.h | 1605 ++++++ ceph/src/rgw/librgw.cc | 2 +- ceph/src/rgw/librgw_admin_user.cc | 184 - ceph/src/rgw/picojson/picojson.h | 1174 ++++ ceph/src/rgw/rgw-orphan-list | 60 +- ceph/src/rgw/rgw_admin.cc | 27 +- ceph/src/rgw/rgw_asio_frontend.cc | 74 +- ceph/src/rgw/rgw_asio_frontend.h | 1 + ceph/src/rgw/rgw_auth.cc | 71 +- ceph/src/rgw/rgw_auth.h | 33 +- ceph/src/rgw/rgw_auth_filters.h | 11 + ceph/src/rgw/rgw_auth_s3.cc | 2 +- ceph/src/rgw/rgw_auth_s3.h | 7 +- ceph/src/rgw/rgw_basic_types.h | 18 +- ceph/src/rgw/rgw_bucket.cc | 2 +- ceph/src/rgw/rgw_cache.cc | 3 + ceph/src/rgw/rgw_common.cc | 39 +- ceph/src/rgw/rgw_common.h | 12 +- ceph/src/rgw/rgw_file.cc | 11 +- ceph/src/rgw/rgw_file.h | 2 +- ceph/src/rgw/rgw_gc.cc | 15 +- ceph/src/rgw/rgw_iam_policy.cc | 19 + 
ceph/src/rgw/rgw_iam_policy.h | 6 +- ceph/src/rgw/rgw_json_enc.cc | 23 +- ceph/src/rgw/rgw_lc.cc | 24 +- ceph/src/rgw/rgw_lc.h | 14 +- ceph/src/rgw/rgw_lib_frontend.h | 2 + ceph/src/rgw/rgw_oidc_provider.cc | 296 + ceph/src/rgw/rgw_oidc_provider.h | 124 + ceph/src/rgw/rgw_op.cc | 12 +- ceph/src/rgw/rgw_rados.cc | 24 +- ceph/src/rgw/rgw_rest_client.cc | 11 +- ceph/src/rgw/rgw_rest_iam.cc | 9 + ceph/src/rgw/rgw_rest_oidc_provider.cc | 223 + ceph/src/rgw/rgw_rest_oidc_provider.h | 70 + ceph/src/rgw/rgw_rest_s3.cc | 36 +- ceph/src/rgw/rgw_rest_sts.cc | 308 +- ceph/src/rgw/rgw_rest_sts.h | 22 +- ceph/src/rgw/rgw_role.h | 4 +- ceph/src/rgw/rgw_sal.cc | 2 +- ceph/src/rgw/rgw_sts.cc | 5 + ceph/src/rgw/rgw_sts.h | 10 +- ceph/src/rgw/rgw_swift_auth.h | 14 +- ceph/src/rgw/rgw_web_idp.h | 4 +- ceph/src/rgw/rgw_zone.cc | 2 + ceph/src/rgw/rgw_zone.h | 11 +- ceph/src/rgw/services/svc_sys_obj_cache.cc | 24 +- ceph/src/test/CMakeLists.txt | 19 +- ceph/src/test/cli/osdmaptool/help.t | 1 + ceph/src/test/erasure-code/CMakeLists.txt | 4 +- .../immutable_object_cache/MockCacheDaemon.h | 10 +- .../test_object_store.cc | 3 +- ceph/src/test/libcephfs/CMakeLists.txt | 1 + ceph/src/test/libcephfs/monconfig.cc | 99 + ceph/src/test/libcephfs/test.cc | 8 +- ceph/src/test/librados/c_read_operations.cc | 12 +- ceph/src/test/librados/io.cc | 7 +- ceph/src/test/librados/misc_cxx.cc | 6 +- ceph/src/test/librbd/CMakeLists.txt | 2 +- ...=> test_mock_ParentCacheObjectDispatch.cc} | 174 +- ceph/src/test/librbd/fsx.cc | 2 +- .../librbd/io/test_mock_ImageRequestWQ.cc | 14 +- .../test/librbd/io/test_mock_ObjectRequest.cc | 74 +- ceph/src/test/librbd/mock/MockImageCtx.h | 1 + ceph/src/test/librbd/test_Migration.cc | 31 + ceph/src/test/mon/CMakeLists.txt | 7 + ceph/src/test/mon/MonMap.cc | 67 +- ceph/src/test/mon/test_mon_workloadgen.cc | 2 +- ceph/src/test/objectstore/store_test.cc | 215 +- .../test/objectstore/store_test_fixture.cc | 4 + .../src/test/objectstore/store_test_fixture.h | 9 + ceph/src/test/pybind/test_cephfs.py | 9 + ceph/src/test/strtol.cc | 248 +- ceph/src/test/test_librgw_admin_user.cc | 104 - ceph/src/tools/cephfs/JournalScanner.cc | 14 +- ceph/src/tools/cephfs/MDSUtility.cc | 2 +- .../immutable_object_cache/CacheClient.cc | 4 +- .../immutable_object_cache/CacheController.cc | 9 +- .../immutable_object_cache/CacheSession.cc | 8 + .../immutable_object_cache/CacheSession.h | 5 + .../ObjectCacheStore.cc | 43 +- .../immutable_object_cache/ObjectCacheStore.h | 1 + .../src/tools/immutable_object_cache/Policy.h | 1 + .../immutable_object_cache/SimplePolicy.cc | 8 +- .../src/tools/immutable_object_cache/Types.cc | 16 +- ceph/src/tools/immutable_object_cache/Types.h | 4 +- ceph/src/tools/immutable_object_cache/main.cc | 2 +- ceph/src/tools/osdmaptool.cc | 12 + ceph/src/tools/rados/rados.cc | 47 +- ceph/src/tools/rbd/action/Kernel.cc | 63 +- ceph/src/tools/rbd/action/MirrorPool.cc | 6 +- ceph/src/tools/rbd/action/Status.cc | 3 + ceph/src/tools/rbd_mirror/PoolWatcher.cc | 8 +- ceph/src/tools/rbd_nbd/rbd-nbd.cc | 4 +- ceph/src/vstart.sh | 2 +- ceph/sudoers.d/cephadm | 7 - ceph/systemd/ceph-fuse.target | 1 + .../ceph-immutable-object-cache.target | 1 + ceph/systemd/ceph-mds.target | 3 + ceph/systemd/ceph-mds@.service.in | 5 +- ceph/systemd/ceph-mgr.target | 3 + ceph/systemd/ceph-mgr@.service.in | 7 +- ceph/systemd/ceph-mon.target | 2 + ceph/systemd/ceph-mon@.service.in | 7 +- ceph/systemd/ceph-osd.target | 3 + ceph/systemd/ceph-osd@.service.in | 5 +- ceph/systemd/ceph-radosgw.target | 3 + ceph/systemd/ceph-radosgw@.service.in 
| 5 +- ceph/systemd/ceph-rbd-mirror.target | 1 + ceph/systemd/ceph.target | 1 + ceph/systemd/rbdmap.service.in | 5 +- ceph/udev/50-rbd.rules | 4 +- 1020 files changed, 70586 insertions(+), 20740 deletions(-) create mode 100644 ceph/.readthedocs.yml create mode 100644 ceph/admin/doc-read-the-docs.txt create mode 100644 ceph/doc/radosgw/oidc.rst create mode 100644 ceph/qa/cephfs/overrides/session_timeout.yaml create mode 100644 ceph/qa/distros/all/rhel_8.2.yaml create mode 120000 ceph/qa/distros/supported-all-distro/rhel_8.2.yaml delete mode 120000 ceph/qa/distros/supported-all-distro/rhel_8.yaml delete mode 100644 ceph/qa/erasure-code/ec-feature-plugins-v3.yaml create mode 120000 ceph/qa/suites/fs/basic_workload/overrides/session_timeout.yaml create mode 120000 ceph/qa/suites/fs/thrash/overrides/session_timeout.yaml create mode 120000 ceph/qa/suites/fs/verify/overrides/session_timeout.yaml create mode 100644 ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netns.yaml create mode 100644 ceph/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_symlinks.yaml rename ceph/{src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/__init__.py => qa/suites/rados/cephadm/upgrade/2-repo_digest/defaut.yaml} (100%) create mode 100644 ceph/qa/suites/rados/cephadm/upgrade/2-repo_digest/repo_digest.yaml rename ceph/qa/suites/rados/cephadm/upgrade/{2-start-upgrade.yaml => 3-start-upgrade.yaml} (100%) rename ceph/qa/suites/rados/cephadm/upgrade/{3-wait.yaml => 4-wait.yaml} (100%) delete mode 100644 ceph/qa/suites/rados/thrash-old-clients/1-install/hammer.yaml delete mode 120000 ceph/qa/suites/rados/thrash-old-clients/distro$/centos_7.6.yaml create mode 120000 ceph/qa/suites/rados/thrash-old-clients/distro$/ubuntu_18.04.yaml create mode 100644 ceph/qa/suites/rbd/immutable-object-cache/% create mode 120000 ceph/qa/suites/rbd/immutable-object-cache/.qa create mode 100644 ceph/qa/suites/rbd/immutable-object-cache/clusters/+ create mode 120000 ceph/qa/suites/rbd/immutable-object-cache/clusters/.qa create mode 100644 ceph/qa/suites/rbd/immutable-object-cache/clusters/fix-2.yaml create mode 100644 ceph/qa/suites/rbd/immutable-object-cache/clusters/openstack.yaml create mode 100644 ceph/qa/suites/rbd/immutable-object-cache/pool/ceph_and_immutable_object_cache.yaml create mode 120000 ceph/qa/suites/rbd/immutable-object-cache/supported-random-distro$ create mode 120000 ceph/qa/suites/rbd/immutable-object-cache/workloads/.qa create mode 120000 ceph/qa/suites/rbd/immutable-object-cache/workloads/c_api_tests_with_defaults.yaml create mode 100644 ceph/qa/suites/rbd/immutable-object-cache/workloads/fio_on_immutable_object_cache.yaml create mode 100644 ceph/qa/suites/rbd/immutable-object-cache/workloads/qemu_on_immutable_object_cache_and_thrash.yaml delete mode 100644 ceph/qa/suites/teuthology/buildpackages/tasks/branch.yaml create mode 100644 ceph/qa/tasks/immutable_object_cache.py create mode 100644 ceph/qa/tasks/immutable_object_cache_thrash.py create mode 100755 ceph/qa/workunits/rbd/krbd_udev_netns.sh create mode 100755 ceph/qa/workunits/rbd/krbd_udev_symlinks.sh delete mode 100644 ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/__init__.py delete mode 100644 ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py delete mode 100644 ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py delete mode 100644 ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/strategies.py delete mode 100644 ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/validators.py delete mode 100644 
ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py delete mode 100644 ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_filestore.py delete mode 100644 ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_validate.py create mode 100644 ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py create mode 100644 ceph/src/ceph-volume/ceph_volume/util/lsmdisk.py create mode 100644 ceph/src/cephadm/samples/custom_container.json delete mode 100644 ceph/src/include/rgw/librgw_admin_user.h create mode 100644 ceph/src/isa-l/.drone.yml create mode 100644 ceph/src/isa-l/crc/aarch64/Makefile.am create mode 100644 ceph/src/isa-l/crc/aarch64/crc16_t10dif_copy_pmull.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc16_t10dif_pmull.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc32_gzip_refl_hw_fold.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc32_gzip_refl_pmull.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc32_gzip_refl_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc32_ieee_norm_pmull.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc32_ieee_norm_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc32_iscsi_refl_hw_fold.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc32_iscsi_refl_pmull.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc32_iscsi_refl_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc32_norm_common_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc32_refl_common_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_ecma_norm_pmull.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_ecma_norm_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_ecma_refl_pmull.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_ecma_refl_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_iso_norm_pmull.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_iso_norm_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_iso_refl_pmull.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_iso_refl_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_jones_norm_pmull.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_jones_norm_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_jones_refl_pmull.S create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_jones_refl_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_norm_common_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc64_refl_common_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc_aarch64_dispatcher.c create mode 100644 ceph/src/isa-l/crc/aarch64/crc_common_pmull.h create mode 100644 ceph/src/isa-l/crc/aarch64/crc_multibinary_arm.S create mode 100644 ceph/src/isa-l/crc/crc16_t10dif_02.asm create mode 100644 ceph/src/isa-l/crc/crc16_t10dif_by16_10.asm create mode 100644 ceph/src/isa-l/crc/crc16_t10dif_copy_by4.asm create mode 100644 ceph/src/isa-l/crc/crc16_t10dif_copy_by4_02.asm rename ceph/src/isa-l/{erasure_code/gf_vect_mul_sse_perf.c => crc/crc16_t10dif_copy_perf.c} (63%) create mode 100644 ceph/src/isa-l/crc/crc16_t10dif_copy_test.c create mode 100644 ceph/src/isa-l/crc/crc16_t10dif_op_perf.c create mode 100644 ceph/src/isa-l/crc/crc32_funcs_test.c create mode 100644 ceph/src/isa-l/crc/crc32_gzip_refl_by16_10.asm rename ceph/src/isa-l/{igzip/crc32_gzip.asm => crc/crc32_gzip_refl_by8.asm} (97%) create mode 100644 ceph/src/isa-l/crc/crc32_gzip_refl_by8_02.asm rename ceph/src/isa-l/{erasure_code/gf_vect_mul_avx_perf.c => crc/crc32_gzip_refl_perf.c} (63%) create mode 
100644 ceph/src/isa-l/crc/crc32_ieee_02.asm create mode 100644 ceph/src/isa-l/crc/crc32_ieee_by16_10.asm delete mode 100644 ceph/src/isa-l/crc/crc32_ieee_test.c delete mode 100644 ceph/src/isa-l/crc/crc32_iscsi_test.c create mode 100644 ceph/src/isa-l/crc/crc64_ecma_norm_by16_10.asm create mode 100644 ceph/src/isa-l/crc/crc64_ecma_refl_by16_10.asm create mode 100644 ceph/src/isa-l/crc/crc64_iso_norm_by16_10.asm create mode 100644 ceph/src/isa-l/crc/crc64_iso_refl_by16_10.asm create mode 100644 ceph/src/isa-l/crc/crc64_jones_norm_by16_10.asm create mode 100644 ceph/src/isa-l/crc/crc64_jones_refl_by16_10.asm create mode 100644 ceph/src/isa-l/crc/crc64_ref.h create mode 100644 ceph/src/isa-l/crc/crc_ref.h create mode 100644 ceph/src/isa-l/erasure_code/aarch64/Makefile.am create mode 100644 ceph/src/isa-l/erasure_code/aarch64/ec_aarch64_dispatcher.c create mode 100644 ceph/src/isa-l/erasure_code/aarch64/ec_aarch64_highlevel_func.c create mode 100644 ceph/src/isa-l/erasure_code/aarch64/ec_multibinary_arm.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_2vect_dot_prod_neon.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_2vect_mad_neon.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_3vect_dot_prod_neon.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_3vect_mad_neon.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_4vect_dot_prod_neon.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_4vect_mad_neon.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_5vect_dot_prod_neon.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_5vect_mad_neon.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_6vect_mad_neon.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_vect_dot_prod_neon.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_vect_mad_neon.S create mode 100644 ceph/src/isa-l/erasure_code/aarch64/gf_vect_mul_neon.S delete mode 100644 ceph/src/isa-l/erasure_code/erasure_code_sse_perf.c delete mode 100644 ceph/src/isa-l/erasure_code/erasure_code_sse_test.c create mode 100644 ceph/src/isa-l/erasure_code/gen_rs_matrix_limits.c delete mode 100644 ceph/src/isa-l/erasure_code/gf_2vect_dot_prod_sse_perf.c delete mode 100644 ceph/src/isa-l/erasure_code/gf_3vect_dot_prod_sse_perf.c delete mode 100644 ceph/src/isa-l/erasure_code/gf_4vect_dot_prod_sse_perf.c create mode 100644 ceph/src/isa-l/erasure_code/gf_5vect_dot_prod_avx512.asm delete mode 100644 ceph/src/isa-l/erasure_code/gf_5vect_dot_prod_sse_perf.c create mode 100644 ceph/src/isa-l/erasure_code/gf_5vect_mad_avx512.asm create mode 100644 ceph/src/isa-l/erasure_code/gf_6vect_dot_prod_avx512.asm delete mode 100644 ceph/src/isa-l/erasure_code/gf_6vect_dot_prod_sse_perf.c create mode 100644 ceph/src/isa-l/erasure_code/gf_6vect_mad_avx512.asm delete mode 100644 ceph/src/isa-l/erasure_code/gf_vect_dot_prod_avx_perf.c delete mode 100644 ceph/src/isa-l/erasure_code/gf_vect_dot_prod_avx_test.c delete mode 100644 ceph/src/isa-l/erasure_code/gf_vect_dot_prod_sse_perf.c delete mode 100644 ceph/src/isa-l/erasure_code/gf_vect_dot_prod_sse_test.c delete mode 100644 ceph/src/isa-l/erasure_code/gf_vect_mad_perf.c delete mode 100644 ceph/src/isa-l/erasure_code/gf_vect_mul_avx_test.c delete mode 100644 ceph/src/isa-l/erasure_code/gf_vect_mul_sse_test.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/Makefile.am create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/ec_base_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/ec_base_vsx.h create mode 100644 
ceph/src/isa-l/erasure_code/ppc64le/gf_2vect_dot_prod_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_2vect_mad_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_3vect_dot_prod_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_3vect_mad_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_4vect_dot_prod_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_4vect_mad_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_5vect_dot_prod_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_5vect_mad_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_6vect_dot_prod_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_6vect_mad_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_vect_dot_prod_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_vect_mad_vsx.c create mode 100644 ceph/src/isa-l/erasure_code/ppc64le/gf_vect_mul_vsx.c create mode 100644 ceph/src/isa-l/examples/ec/Makefile.am create mode 100644 ceph/src/isa-l/examples/ec/Makefile.unx create mode 100644 ceph/src/isa-l/examples/ec/ec_piggyback_example.c create mode 100644 ceph/src/isa-l/examples/ec/ec_simple_example.c create mode 100644 ceph/src/isa-l/igzip/aarch64/bitbuf2_aarch64.h create mode 100644 ceph/src/isa-l/igzip/aarch64/data_struct_aarch64.h create mode 100644 ceph/src/isa-l/igzip/aarch64/encode_df.S create mode 100644 ceph/src/isa-l/igzip/aarch64/gen_icf_map.S create mode 100644 ceph/src/isa-l/igzip/aarch64/huffman_aarch64.h create mode 100644 ceph/src/isa-l/igzip/aarch64/igzip_decode_huffman_code_block_aarch64.S create mode 100644 ceph/src/isa-l/igzip/aarch64/igzip_deflate_body_aarch64.S create mode 100644 ceph/src/isa-l/igzip/aarch64/igzip_deflate_finish_aarch64.S create mode 100644 ceph/src/isa-l/igzip/aarch64/igzip_deflate_hash_aarch64.S create mode 100644 ceph/src/isa-l/igzip/aarch64/igzip_inflate_multibinary_arm64.S create mode 100644 ceph/src/isa-l/igzip/aarch64/igzip_isal_adler32_neon.S create mode 100644 ceph/src/isa-l/igzip/aarch64/igzip_multibinary_aarch64_dispatcher.c create mode 100644 ceph/src/isa-l/igzip/aarch64/igzip_multibinary_arm64.S create mode 100644 ceph/src/isa-l/igzip/aarch64/igzip_set_long_icf_fg.S create mode 100644 ceph/src/isa-l/igzip/aarch64/isal_deflate_icf_body_hash_hist.S create mode 100644 ceph/src/isa-l/igzip/aarch64/isal_deflate_icf_finish_hash_hist.S create mode 100644 ceph/src/isa-l/igzip/aarch64/isal_update_histogram.S create mode 100644 ceph/src/isa-l/igzip/aarch64/lz0a_const_aarch64.h create mode 100644 ceph/src/isa-l/igzip/aarch64/options_aarch64.h create mode 100644 ceph/src/isa-l/igzip/aarch64/stdmac_aarch64.h create mode 100644 ceph/src/isa-l/igzip/adler32_avx2_4.asm create mode 100644 ceph/src/isa-l/igzip/adler32_base.c rename ceph/src/isa-l/igzip/{igzip_sync_flush_perf.c => adler32_perf.c} (59%) create mode 100644 ceph/src/isa-l/igzip/adler32_sse.asm create mode 100644 ceph/src/isa-l/igzip/checksum32_funcs_test.c rename ceph/src/isa-l/igzip/{crc_inflate.h => checksum_test_ref.h} (83%) delete mode 100644 ceph/src/isa-l/igzip/crc32_gzip_base.c rename ceph/src/isa-l/igzip/{encode_df_asm.asm => encode_df_06.asm} (54%) create mode 100644 ceph/src/isa-l/igzip/generate_static_inflate.c delete mode 100644 ceph/src/isa-l/igzip/igzip_body_01.asm delete mode 100644 ceph/src/isa-l/igzip/igzip_body_02.asm delete mode 100644 ceph/src/isa-l/igzip/igzip_body_04.asm create mode 100644 ceph/src/isa-l/igzip/igzip_build_hash_table_perf.c create mode 100644 
ceph/src/isa-l/igzip/igzip_checksums.h create mode 100644 ceph/src/isa-l/igzip/igzip_deflate_hash.asm delete mode 100644 ceph/src/isa-l/igzip/igzip_fuzz_inflate.c create mode 100644 ceph/src/isa-l/igzip/igzip_gen_icf_map_lh1_04.asm create mode 100644 ceph/src/isa-l/igzip/igzip_gen_icf_map_lh1_06.asm delete mode 100644 ceph/src/isa-l/igzip/igzip_icf_body.asm create mode 100644 ceph/src/isa-l/igzip/igzip_icf_body.c delete mode 100644 ceph/src/isa-l/igzip/igzip_icf_body_01.asm delete mode 100644 ceph/src/isa-l/igzip/igzip_icf_body_02.asm delete mode 100644 ceph/src/isa-l/igzip/igzip_icf_body_04.asm create mode 100644 ceph/src/isa-l/igzip/igzip_icf_body_h1_gr_bt.asm delete mode 100644 ceph/src/isa-l/igzip/igzip_inflate_perf.c create mode 100644 ceph/src/isa-l/igzip/igzip_set_long_icf_fg_04.asm create mode 100644 ceph/src/isa-l/igzip/igzip_set_long_icf_fg_06.asm delete mode 100644 ceph/src/isa-l/igzip/igzip_stateless_file_perf.c delete mode 100644 ceph/src/isa-l/igzip/igzip_sync_flush_file_perf.c create mode 100644 ceph/src/isa-l/igzip/igzip_wrapper.h create mode 100644 ceph/src/isa-l/igzip/igzip_wrapper_hdr_test.c create mode 100644 ceph/src/isa-l/igzip/static_inflate.h create mode 100644 ceph/src/isa-l/include/aarch64_multibinary.h create mode 100644 ceph/src/isa-l/include/mem_routines.h create mode 100644 ceph/src/isa-l/include/unaligned.h create mode 100644 ceph/src/isa-l/mem/Makefile.am create mode 100644 ceph/src/isa-l/mem/aarch64/Makefile.am create mode 100644 ceph/src/isa-l/mem/aarch64/mem_aarch64_dispatcher.c create mode 100644 ceph/src/isa-l/mem/aarch64/mem_multibinary_arm.S create mode 100644 ceph/src/isa-l/mem/aarch64/mem_zero_detect_neon.S rename ceph/src/isa-l/{igzip/detect_repeated_char.asm => mem/mem_multibinary.asm} (64%) create mode 100644 ceph/src/isa-l/mem/mem_zero_detect_avx.asm create mode 100644 ceph/src/isa-l/mem/mem_zero_detect_base.c create mode 100644 ceph/src/isa-l/mem/mem_zero_detect_base_aliases.c create mode 100644 ceph/src/isa-l/mem/mem_zero_detect_perf.c create mode 100644 ceph/src/isa-l/mem/mem_zero_detect_sse.asm create mode 100644 ceph/src/isa-l/mem/mem_zero_detect_test.c create mode 100644 ceph/src/isa-l/programs/Makefile.am create mode 100644 ceph/src/isa-l/programs/igzip.1 create mode 100644 ceph/src/isa-l/programs/igzip.1.h2m create mode 100644 ceph/src/isa-l/programs/igzip_cli.c create mode 100755 ceph/src/isa-l/programs/igzip_cli_check.sh create mode 100644 ceph/src/isa-l/raid/aarch64/Makefile.am create mode 100644 ceph/src/isa-l/raid/aarch64/pq_check_neon.S create mode 100644 ceph/src/isa-l/raid/aarch64/pq_gen_neon.S create mode 100644 ceph/src/isa-l/raid/aarch64/raid_aarch64_dispatcher.c create mode 100644 ceph/src/isa-l/raid/aarch64/raid_multibinary_arm.S create mode 100644 ceph/src/isa-l/raid/aarch64/xor_check_neon.S create mode 100644 ceph/src/isa-l/raid/aarch64/xor_gen_neon.S create mode 100644 ceph/src/isa-l/tests/fuzz/Makefile.am create mode 100644 ceph/src/isa-l/tests/fuzz/Makefile.unx create mode 100644 ceph/src/isa-l/tests/fuzz/igzip_checked_inflate_fuzz_test.c create mode 100644 ceph/src/isa-l/tests/fuzz/igzip_dump_inflate_corpus.c create mode 100644 ceph/src/isa-l/tests/fuzz/igzip_fuzz_inflate.c create mode 100644 ceph/src/isa-l/tests/fuzz/igzip_simple_inflate_fuzz_test.c create mode 100644 ceph/src/isa-l/tests/fuzz/igzip_simple_round_trip_fuzz_test.c create mode 100755 ceph/src/isa-l/tools/check_format.sh create mode 100755 ceph/src/isa-l/tools/remove_trailing_whitespace.sh create mode 100755 ceph/src/isa-l/tools/test_autorun.sh create 
mode 100755 ceph/src/isa-l/tools/test_checks.sh create mode 100755 ceph/src/isa-l/tools/test_extended.sh create mode 100755 ceph/src/isa-l/tools/test_fuzz.sh create mode 100755 ceph/src/isa-l/tools/test_tools.sh mode change 100644 => 100755 ceph/src/mds/CDir.cc create mode 100644 ceph/src/neorados/RADOSImpl.cc create mode 100644 ceph/src/pybind/mgr/cephadm/serve.py create mode 100644 ceph/src/pybind/mgr/cephadm/services/container.py create mode 100644 ceph/src/pybind/mgr/cephadm/templates/blink_device_light_cmd.j2 delete mode 100644 ceph/src/pybind/mgr/cephadm/tests/test_utils.py delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/1.4a38b529302ffa3f0c24.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/1.9e79c41bbaed982a50af.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/5.3532f17ccbf4b268177b.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/5.d6b08e1c042ba6fccdd6.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/6.1818f80c2e905e85a90f.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/6.9a60f7741889f52ed7ae.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/7.1891b10149a7c2d765ac.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/7.8c5c2bbb556260a5cc3e.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/8.88af57a0fd5b75779391.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/8.94051a156c6d11b38ada.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.c43d13b597196a5f022f.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.c43d13b597196a5f022f.js.LICENSE.txt delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.cb10cd7f4a550e7a33c5.js rename ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/{polyfills.513b02b42d061373f212.js => polyfills.14628a6f4ca10d3018fd.js} (99%) create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/polyfills.14628a6f4ca10d3018fd.js.LICENSE.txt delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.0907482258dfadeab004.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.0e1c754813ff535e4bd5.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/scripts.76632aba1e576c7cc54a.js.LICENSE.txt create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/styles.0d3cd206c82d5fe7076a.css delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/styles.58a2c96c3b87bd02e7a7.css delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health-pie/health-pie-color.enum.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/directives/form-input-disable.directive.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/directives/form-input-disable.directive.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/directives/form-scope.directive.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/directives/form-scope.directive.ts create mode 100644 ceph/src/python-common/ceph/deployment/utils.py create mode 100644 ceph/src/python-common/ceph/tests/test_utils.py create mode 100644 ceph/src/rgw/jwt-cpp/base.h create mode 100644 ceph/src/rgw/jwt-cpp/jwt.h delete mode 100644 ceph/src/rgw/librgw_admin_user.cc create mode 100644 ceph/src/rgw/picojson/picojson.h create mode 100644 
ceph/src/rgw/rgw_oidc_provider.cc create mode 100644 ceph/src/rgw/rgw_oidc_provider.h create mode 100644 ceph/src/rgw/rgw_rest_oidc_provider.cc create mode 100644 ceph/src/rgw/rgw_rest_oidc_provider.h create mode 100644 ceph/src/test/libcephfs/monconfig.cc rename ceph/src/test/librbd/cache/{test_mock_ParentImageCache.cc => test_mock_ParentCacheObjectDispatch.cc} (68%) delete mode 100644 ceph/src/test/test_librgw_admin_user.cc delete mode 100644 ceph/sudoers.d/cephadm diff --git a/ceph/.github/CODEOWNERS b/ceph/.github/CODEOWNERS index 8eecd7ef5..5b1139f7f 100644 --- a/ceph/.github/CODEOWNERS +++ b/ceph/.github/CODEOWNERS @@ -32,6 +32,7 @@ /doc/mgr/orchestrator_modules.rst @ceph/orchestrators /doc/cephadm @ceph/orchestrators /doc/dev/cephadm.rst @ceph/orchestrators +/doc/man/8/cephadm.rst @ceph/orchestrators #ceph-volume /src/ceph-volume @ceph/ceph-volume diff --git a/ceph/.readthedocs.yml b/ceph/.readthedocs.yml new file mode 100644 index 000000000..24815ce24 --- /dev/null +++ b/ceph/.readthedocs.yml @@ -0,0 +1,16 @@ +--- +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +version: 2 +formats: [] +build: + image: latest +python: + version: 3 + install: + - requirements: admin/doc-requirements.txt + - requirements: admin/doc-read-the-docs.txt +sphinx: + builder: dirhtml + configuration: doc/conf.py diff --git a/ceph/CMakeLists.txt b/ceph/CMakeLists.txt index e5218c1e5..7a71e53ad 100644 --- a/ceph/CMakeLists.txt +++ b/ceph/CMakeLists.txt @@ -667,4 +667,4 @@ add_custom_target(tags DEPENDS ctags) find_package(CppCheck) find_package(IWYU) -set(VERSION 15.2.6) +set(VERSION 15.2.8) diff --git a/ceph/PendingReleaseNotes b/ceph/PendingReleaseNotes index 1c7ef2040..a1f05a987 100644 --- a/ceph/PendingReleaseNotes +++ b/ceph/PendingReleaseNotes @@ -1,15 +1,22 @@ ->=15.2.5 --------- +15.2.8 +------ -* CephFS: Automatic static subtree partitioning policies may now be configured - using the new distributed and random ephemeral pinning extended attributes on - directories. See the documentation for more information: - https://docs.ceph.com/docs/master/cephfs/multimds/ +* ceph-volume: The ``lvm batch` subcommand received a major rewrite. This closed + a number of bugs and improves usability in terms of size specification and + calculation, as well as idempotency behaviour and disk replacement process. + Please refer to https://docs.ceph.com/en/latest/ceph-volume/lvm/batch/ for + more detailed information. -* Monitors now have a config option ``mon_osd_warn_num_repaired``, 10 by default. - If any OSD has repaired more than this many I/O errors in stored data a - ``OSD_TOO_MANY_REPAIRS`` health warning is generated. +* MON: The cluster log now logs health detail every ``mon_health_to_clog_interval``, + which has been changed from 1hr to 10min. Logging of health detail will be + skipped if there is no change in health summary since last known. -* Now when noscrub and/or nodeep-scrub flags are set globally or per pool, - scheduled scrubs of the type disabled will be aborted. All user initiated - scrubs are NOT interrupted. +* The ``ceph df`` command now lists the number of pgs in each pool. + +* The ``bluefs_preextend_wal_files`` option has been removed. + +* It is now possible to specify the initial monitor to contact for Ceph tools + and daemons using the ``mon_host_override`` config option or + ``--mon-host-override `` command-line switch. 
This generally should only + be used for debugging and only affects initial communication with Ceph's + monitor cluster. diff --git a/ceph/admin/build-doc b/ceph/admin/build-doc index 5a81dad26..289a2a688 100755 --- a/ceph/admin/build-doc +++ b/ceph/admin/build-doc @@ -85,7 +85,7 @@ for bind in rados rbd cephfs rgw; do BUILD_DOC=1 \ CFLAGS="-iquote$TOPDIR/src/include" \ CPPFLAGS="-iquote$TOPDIR/src/include" \ - LDFLAGS="-L$vdir/lib -Wl,--no-as-needed" \ + LDFLAGS="-L$vdir/lib -Wl,--no-as-needed,-rpath=$vdir/lib" \ $vdir/bin/pip install --upgrade $TOPDIR/src/pybind/${bind} # rgwfile_version(), librgw_create(), rgw_mount() # since py3.5, distutils adds postfix in between ${bind} and so @@ -93,7 +93,12 @@ for bind in rados rbd cephfs rgw; do if [ ! -e $lib_fn ]; then lib_fn=$vdir/lib/python*/*-packages/${bind}.so fi - nm $lib_fn | grep -E "U (lib)?${bind}" | \ + if [ ${bind} = "cephfs" ]; then + func_prefix="ceph" + else + func_prefix="(lib)?${bind}" + fi + nm $lib_fn | grep -E "U ${func_prefix}" | \ awk '{ print "void "$2"(void) {}" }' | \ gcc -shared -o $vdir/lib/lib${bind}.so.1 -xc - if [ ${bind} != rados ]; then diff --git a/ceph/admin/doc-read-the-docs.txt b/ceph/admin/doc-read-the-docs.txt new file mode 100644 index 000000000..b65cc4638 --- /dev/null +++ b/ceph/admin/doc-read-the-docs.txt @@ -0,0 +1,2 @@ +plantweb +git+https://github.com/readthedocs/readthedocs-sphinx-search@master diff --git a/ceph/admin/doc-requirements.txt b/ceph/admin/doc-requirements.txt index cc6230d9c..6110e4be4 100644 --- a/ceph/admin/doc-requirements.txt +++ b/ceph/admin/doc-requirements.txt @@ -5,4 +5,6 @@ pyyaml >= 5.1.2 Cython prettytable sphinx-autodoc-typehints +sphinx-prompt +Sphinx-Substitution-Extensions typed-ast diff --git a/ceph/alpine/APKBUILD b/ceph/alpine/APKBUILD index 5a4bca6f2..8b1d3e98d 100644 --- a/ceph/alpine/APKBUILD +++ b/ceph/alpine/APKBUILD @@ -1,7 +1,7 @@ # Contributor: John Coyle # Maintainer: John Coyle pkgname=ceph -pkgver=15.2.6 +pkgver=15.2.8 pkgrel=0 pkgdesc="Ceph is a distributed object store and file system" pkgusers="ceph" @@ -63,7 +63,7 @@ makedepends=" xmlstarlet yasm " -source="ceph-15.2.6.tar.bz2" +source="ceph-15.2.8.tar.bz2" subpackages=" $pkgname-base $pkgname-common @@ -116,7 +116,7 @@ _sysconfdir=/etc _udevrulesdir=/etc/udev/rules.d _python_sitelib=/usr/lib/python2.7/site-packages -builddir=$srcdir/ceph-15.2.6 +builddir=$srcdir/ceph-15.2.8 build() { export CEPH_BUILD_VIRTUALENV=$builddir diff --git a/ceph/ceph.spec b/ceph/ceph.spec index a3502a7f8..d41e5b110 100644 --- a/ceph/ceph.spec +++ b/ceph/ceph.spec @@ -98,7 +98,7 @@ # main package definition ################################################################################# Name: ceph -Version: 15.2.6 +Version: 15.2.8 Release: 0%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 @@ -114,7 +114,7 @@ License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD- Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-15.2.6.tar.bz2 +Source0: %{?_remote_tarball_prefix}ceph-15.2.8.tar.bz2 %if 0%{?suse_version} # _insert_obs_source_lines_here ExclusiveArch: x86_64 aarch64 ppc64le s390x @@ -475,6 +475,10 @@ Group: System/Filesystems %endif Provides: ceph-test:/usr/bin/ceph-monstore-tool Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +%if 0%{?weak_deps} +Recommends: nvme-cli +Recommends: smartmontools +%endif %description mon ceph-mon is the cluster monitor daemon for the Ceph distributed file system. 
One or more instances of ceph-mon form a Paxos part-time @@ -751,6 +755,10 @@ Requires: lvm2 Requires: sudo Requires: libstoragemgmt Requires: python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release} +%if 0%{?weak_deps} +Recommends: nvme-cli +Recommends: smartmontools +%endif %description osd ceph-osd is the object storage daemon for the Ceph distributed file system. It is responsible for storing objects on a local file system @@ -1126,7 +1134,7 @@ This package provides Ceph’s default alerts for Prometheus. # common ################################################################################# %prep -%autosetup -p1 -n ceph-15.2.6 +%autosetup -p1 -n ceph-15.2.8 %build # LTO can be enabled as soon as the following GCC bug is fixed: @@ -1302,7 +1310,6 @@ install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules # sudoers.d install -m 0600 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl -install -m 0600 -D sudoers.d/cephadm %{buildroot}%{_sysconfdir}/sudoers.d/cephadm %if 0%{?rhel} >= 8 pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/* @@ -1459,7 +1466,6 @@ exit 0 %files -n cephadm %{_sbindir}/cephadm %{_mandir}/man8/cephadm.8* -%{_sysconfdir}/sudoers.d/cephadm %attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm %attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm/.ssh %attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys @@ -2139,7 +2145,6 @@ fi %files -n librgw2 %{_libdir}/librgw.so.* -%{_libdir}/librgw_admin_user.so.* %if %{with lttng} %{_libdir}/librgw_op_tp.so.* %{_libdir}/librgw_rados_tp.so.* @@ -2152,10 +2157,8 @@ fi %files -n librgw-devel %dir %{_includedir}/rados %{_includedir}/rados/librgw.h -%{_includedir}/rados/librgw_admin_user.h %{_includedir}/rados/rgw_file.h %{_libdir}/librgw.so -%{_libdir}/librgw_admin_user.so %if %{with lttng} %{_libdir}/librgw_op_tp.so %{_libdir}/librgw_rados_tp.so diff --git a/ceph/ceph.spec.in b/ceph/ceph.spec.in index 04122c997..70942aca7 100644 --- a/ceph/ceph.spec.in +++ b/ceph/ceph.spec.in @@ -475,6 +475,10 @@ Group: System/Filesystems %endif Provides: ceph-test:/usr/bin/ceph-monstore-tool Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +%if 0%{?weak_deps} +Recommends: nvme-cli +Recommends: smartmontools +%endif %description mon ceph-mon is the cluster monitor daemon for the Ceph distributed file system. One or more instances of ceph-mon form a Paxos part-time @@ -751,6 +755,10 @@ Requires: lvm2 Requires: sudo Requires: libstoragemgmt Requires: python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release} +%if 0%{?weak_deps} +Recommends: nvme-cli +Recommends: smartmontools +%endif %description osd ceph-osd is the object storage daemon for the Ceph distributed file system. 
It is responsible for storing objects on a local file system @@ -1302,7 +1310,6 @@ install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules # sudoers.d install -m 0600 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl -install -m 0600 -D sudoers.d/cephadm %{buildroot}%{_sysconfdir}/sudoers.d/cephadm %if 0%{?rhel} >= 8 pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/* @@ -1459,7 +1466,6 @@ exit 0 %files -n cephadm %{_sbindir}/cephadm %{_mandir}/man8/cephadm.8* -%{_sysconfdir}/sudoers.d/cephadm %attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm %attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm/.ssh %attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys @@ -2139,7 +2145,6 @@ fi %files -n librgw2 %{_libdir}/librgw.so.* -%{_libdir}/librgw_admin_user.so.* %if %{with lttng} %{_libdir}/librgw_op_tp.so.* %{_libdir}/librgw_rados_tp.so.* @@ -2152,10 +2157,8 @@ fi %files -n librgw-devel %dir %{_includedir}/rados %{_includedir}/rados/librgw.h -%{_includedir}/rados/librgw_admin_user.h %{_includedir}/rados/rgw_file.h %{_libdir}/librgw.so -%{_libdir}/librgw_admin_user.so %if %{with lttng} %{_libdir}/librgw_op_tp.so %{_libdir}/librgw_rados_tp.so diff --git a/ceph/changelog.upstream b/ceph/changelog.upstream index 3ff5c5df0..8f14a46c1 100644 --- a/ceph/changelog.upstream +++ b/ceph/changelog.upstream @@ -1,7 +1,19 @@ -ceph (15.2.6-1bionic) bionic; urgency=medium +ceph (15.2.8-1bionic) bionic; urgency=medium - -- Jenkins Build Slave User Tue, 17 Nov 2020 18:25:05 +0000 + -- Jenkins Build Slave User Wed, 16 Dec 2020 18:29:12 +0000 + +ceph (15.2.8-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Wed, 16 Dec 2020 17:29:47 +0000 + +ceph (15.2.7-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Mon, 30 Nov 2020 19:58:28 +0000 ceph (15.2.6-1) stable; urgency=medium diff --git a/ceph/debian/cephadm.install b/ceph/debian/cephadm.install index 49d6cc34d..f30ed9c5f 100644 --- a/ceph/debian/cephadm.install +++ b/ceph/debian/cephadm.install @@ -1,3 +1,2 @@ usr/sbin/cephadm usr/share/man/man8/cephadm.8 -etc/sudoers.d/cephadm diff --git a/ceph/debian/control b/ceph/debian/control index 9abc2bd75..b5d49ced3 100644 --- a/ceph/debian/control +++ b/ceph/debian/control @@ -216,6 +216,7 @@ Depends: ceph-base (= ${binary:Version}), ceph-mgr-modules-core (= ${binary:Version}), python3-bcrypt, python3-cherrypy3, + python3-distutils, python3-jwt, python3-openssl, python3-pecan, @@ -382,6 +383,8 @@ Depends: ceph-base (= ${binary:Version}), ${shlibs:Depends}, Replaces: ceph (<< 10), ceph-test (<< 12.2.2-14) Breaks: ceph (<< 10), ceph-test (<< 12.2.2-14) +Recommends: nvme-cli, + smartmontools, Description: monitor server for the ceph storage system Ceph is a massively scalable, open-source, distributed storage system that runs on commodity hardware and delivers object, @@ -415,6 +418,8 @@ Depends: ceph-base (= ${binary:Version}), ${shlibs:Depends}, Replaces: ceph (<< 10), ceph-test (<< 12.2.2-14) Breaks: ceph (<< 10), ceph-test (<< 12.2.2-14) +Recommends: nvme-cli, + smartmontools, Description: OSD server for the ceph storage system Ceph is a massively scalable, open-source, distributed storage system that runs on commodity hardware and delivers object, diff --git a/ceph/debian/librgw-dev.install b/ceph/debian/librgw-dev.install index 1f4f1b190..ed2a81dba 100644 --- a/ceph/debian/librgw-dev.install +++ b/ceph/debian/librgw-dev.install @@ -1,5 +1,3 @@ 
usr/include/rados/librgw.h -usr/include/rados/librgw_admin_user.h usr/include/rados/rgw_file.h usr/lib/librgw.so -usr/lib/librgw_admin_user.so diff --git a/ceph/debian/librgw2.install b/ceph/debian/librgw2.install index 725f341af..b86fb891e 100644 --- a/ceph/debian/librgw2.install +++ b/ceph/debian/librgw2.install @@ -1,2 +1 @@ usr/lib/librgw.so.* -usr/lib/librgw_admin_user.so.* diff --git a/ceph/debian/rules b/ceph/debian/rules index 8f2e9afa4..dc32da308 100755 --- a/ceph/debian/rules +++ b/ceph/debian/rules @@ -62,7 +62,6 @@ override_dh_auto_install: install -D -m 644 src/etc-rbdmap $(DESTDIR)/etc/ceph/rbdmap install -D -m 644 etc/sysctl/90-ceph-osd.conf $(DESTDIR)/etc/sysctl.d/30-ceph-osd.conf install -D -m 600 sudoers.d/ceph-osd-smartctl $(DESTDIR)/etc/sudoers.d/ceph-osd-smartctl - install -D -m 600 sudoers.d/cephadm $(DESTDIR)/etc/sudoers.d/cephadm install -m 755 src/cephadm/cephadm $(DESTDIR)/usr/sbin/cephadm diff --git a/ceph/doc/architecture.rst b/ceph/doc/architecture.rst index d89ddb5b8..c1f7f5ff9 100644 --- a/ceph/doc/architecture.rst +++ b/ceph/doc/architecture.rst @@ -27,7 +27,9 @@ A Ceph Storage Cluster consists of two types of daemons: - :term:`Ceph Monitor` - :term:`Ceph OSD Daemon` -.. ditaa:: +---------------+ +---------------+ +.. ditaa:: + + +---------------+ +---------------+ | OSDs | | Monitors | +---------------+ +---------------+ @@ -56,7 +58,9 @@ comes through a :term:`Ceph Block Device`, :term:`Ceph Object Storage`, the file in a filesystem, which is stored on an :term:`Object Storage Device`. Ceph OSD Daemons handle the read/write operations on the storage disks. -.. ditaa:: /-----\ +-----+ +-----+ +.. ditaa:: + + /-----\ +-----+ +-----+ | obj |------>| {d} |------>| {s} | \-----/ +-----+ +-----+ @@ -70,7 +74,9 @@ attributes such as the file owner, created date, last modified date, and so forth. -.. ditaa:: /------+------------------------------+----------------\ +.. ditaa:: + + /------+------------------------------+----------------\ | ID | Binary Data | Metadata | +------+------------------------------+----------------+ | 1234 | 0101010101010100110101010010 | name1 = value1 | @@ -229,7 +235,9 @@ the client and the monitor share a secret key. .. note:: The ``client.admin`` user must provide the user ID and secret key to the user in a secure manner. -.. ditaa:: +---------+ +---------+ +.. ditaa:: + + +---------+ +---------+ | Client | | Monitor | +---------+ +---------+ | request to | @@ -252,7 +260,9 @@ user's secret key and transmits it back to the client. The client decrypts the ticket and uses it to sign requests to OSDs and metadata servers throughout the cluster. -.. ditaa:: +---------+ +---------+ +.. ditaa:: + + +---------+ +---------+ | Client | | Monitor | +---------+ +---------+ | authenticate | @@ -283,7 +293,9 @@ machine and the Ceph servers. Each message sent between a client and server, subsequent to the initial authentication, is signed using a ticket that the monitors, OSDs and metadata servers can verify with their shared secret. -.. ditaa:: +---------+ +---------+ +-------+ +-------+ +.. ditaa:: + + +---------+ +---------+ +-------+ +-------+ | Client | | Monitor | | MDS | | OSD | +---------+ +---------+ +-------+ +-------+ | request to | | | @@ -393,7 +405,8 @@ ability to leverage this computing power leads to several major benefits: and tertiary OSDs (as many OSDs as additional replicas), and responds to the client once it has confirmed the object was stored successfully. -.. ditaa:: +.. 
ditaa:: + +----------+ | Client | | | @@ -443,7 +456,8 @@ Ceph Clients retrieve a `Cluster Map`_ from a Ceph Monitor, and write objects to pools. The pool's ``size`` or number of replicas, the CRUSH rule and the number of placement groups determine how Ceph will place the data. -.. ditaa:: +.. ditaa:: + +--------+ Retrieves +---------------+ | Client |------------>| Cluster Map | +--------+ +---------------+ @@ -488,7 +502,8 @@ rebalance dynamically when new Ceph OSD Daemons and the underlying OSD devices come online. The following diagram depicts how CRUSH maps objects to placement groups, and placement groups to OSDs. -.. ditaa:: +.. ditaa:: + /-----\ /-----\ /-----\ /-----\ /-----\ | obj | | obj | | obj | | obj | | obj | \-----/ \-----/ \-----/ \-----/ \-----/ @@ -614,7 +629,8 @@ and each OSD gets some added capacity, so there are no load spikes on the new OSD after rebalancing is complete. -.. ditaa:: +.. ditaa:: + +--------+ +--------+ Before | OSD 1 | | OSD 2 | +--------+ +--------+ @@ -685,6 +701,7 @@ name. Chunk 1 contains ``ABC`` and is stored on **OSD5** while chunk 4 contains .. ditaa:: + +-------------------+ name | NYAN | +-------------------+ @@ -739,6 +756,7 @@ three chunks are read: **OSD2** was the slowest and its chunk was not taken into account. .. ditaa:: + +-------------------+ name | NYAN | +-------------------+ @@ -804,6 +822,7 @@ version 1). .. ditaa:: + Primary OSD +-------------+ @@ -934,6 +953,7 @@ object can be removed: ``D1v1`` on **OSD 1**, ``D2v1`` on **OSD 2** and ``C1v1`` on **OSD 3**. .. ditaa:: + Primary OSD +-------------+ @@ -972,6 +992,7 @@ to be available on all OSDs in the previous acting set ) is ``1,1`` and that will be the head of the new authoritative log. .. ditaa:: + +-------------+ | OSD 1 | | (down) | @@ -1017,6 +1038,7 @@ the erasure coding library during scrubbing and stored on the new primary .. ditaa:: + Primary OSD +-------------+ @@ -1068,7 +1090,8 @@ tier. So the cache tier and the backing storage tier are completely transparent to Ceph clients. -.. ditaa:: +.. ditaa:: + +-------------+ | Ceph Client | +------+------+ @@ -1150,7 +1173,8 @@ Cluster. Ceph packages this functionality into the ``librados`` library so that you can create your own custom Ceph Clients. The following diagram depicts the basic architecture. -.. ditaa:: +.. ditaa:: + +---------------------------------+ | Ceph Storage Cluster Protocol | | (librados) | @@ -1193,7 +1217,9 @@ notification. This enables a client to use any object as a synchronization/communication channel. -.. ditaa:: +----------+ +----------+ +----------+ +---------------+ +.. ditaa:: + + +----------+ +----------+ +----------+ +---------------+ | Client 1 | | Client 2 | | Client 3 | | OSD:Object ID | +----------+ +----------+ +----------+ +---------------+ | | | | @@ -1269,7 +1295,8 @@ take maximum advantage of Ceph's ability to distribute data across placement groups, and consequently doesn't improve performance very much. The following diagram depicts the simplest form of striping: -.. ditaa:: +.. ditaa:: + +---------------+ | Client Data | | Format | @@ -1327,7 +1354,8 @@ set (``object set 2`` in the following diagram), and begins writing to the first stripe (``stripe unit 16``) in the first object in the new object set (``object 4`` in the diagram below). -.. ditaa:: +.. ditaa:: + +---------------+ | Client Data | | Format | @@ -1443,6 +1471,7 @@ and high availability. The following diagram depicts the high-level architecture. .. 
ditaa:: + +--------------+ +----------------+ +-------------+ | Block Device | | Object Storage | | CephFS | +--------------+ +----------------+ +-------------+ @@ -1527,6 +1556,7 @@ Cluster. Ceph Clients mount a CephFS filesystem as a kernel object or as a Filesystem in User Space (FUSE). .. ditaa:: + +-----------------------+ +------------------------+ | CephFS Kernel Object | | CephFS FUSE | +-----------------------+ +------------------------+ diff --git a/ceph/doc/ceph-volume/intro.rst b/ceph/doc/ceph-volume/intro.rst index c49531d55..d59dd2b6c 100644 --- a/ceph/doc/ceph-volume/intro.rst +++ b/ceph/doc/ceph-volume/intro.rst @@ -74,16 +74,11 @@ involved at all. ------------------- By making use of :term:`LVM tags`, the :ref:`ceph-volume-lvm` sub-command is able to store and later re-discover and query devices associated with OSDs so -that they can later activated. This includes support for lvm-based technologies -like dm-cache as well. - -For ``ceph-volume``, the use of dm-cache is transparent, there is no difference -for the tool, and it treats dm-cache like a plain logical volume. +that they can later be activated. LVM performance penalty ----------------------- In short: we haven't been able to notice any significant performance penalties associated with the change to LVM. By being able to work closely with LVM, the -ability to work with other device mapper technologies (for example ``dmcache``) -was a given: there is no technical difficulty in working with anything that can -sit below a Logical Volume. +ability to work with other device mapper technologies was a given: there is no +technical difficulty in working with anything that can sit below a Logical Volume. diff --git a/ceph/doc/ceph-volume/lvm/batch.rst b/ceph/doc/ceph-volume/lvm/batch.rst index 17cbeecd4..6033b5aaf 100644 --- a/ceph/doc/ceph-volume/lvm/batch.rst +++ b/ceph/doc/ceph-volume/lvm/batch.rst @@ -2,142 +2,177 @@ ``batch`` =========== -This subcommand allows for multiple OSDs to be created at the same time given -an input of devices. Depending on the device type (spinning drive, or solid -state), the internal engine will decide the best approach to create the OSDs. +The subcommand allows to create multiple OSDs at the same time given +an input of devices. The ``batch`` subcommand is closely related to +drive-groups. One individual drive group specification translates to a single +``batch`` invocation. -This decision abstracts away the many nuances when creating an OSD: how large -should a ``block.db`` be? How can one mix a solid state device with spinning -devices in an efficient way? - -The process is similar to :ref:`ceph-volume-lvm-create`, and will do the -preparation and activation at once, following the same workflow for each OSD. -However, If the ``--prepare`` flag is passed then only the prepare step is taken -and the OSDs are not activated. +The subcommand is based to :ref:`ceph-volume-lvm-create`, and will use the very +same code path. All ``batch`` does is to calculate the appropriate sizes of all +volumes and skip over already created volumes. All the features that ``ceph-volume lvm create`` supports, like ``dmcrypt``, avoiding ``systemd`` units from starting, defining bluestore or filestore, -are supported. Any fine-grained option that may affect a single OSD is not -supported, for example: specifying where journals should be placed. - +are supported. +.. _ceph-volume-lvm-batch_auto: -.. 
_ceph-volume-lvm-batch_bluestore: - -``bluestore`` -------------- -The :term:`bluestore` objectstore (the default) is used when creating multiple OSDs -with the ``batch`` sub-command. It allows a few different scenarios depending -on the input of devices: +Automatic sorting of disks +-------------------------- +If ``batch`` receives only a single list of data devices and other options are +passed , ``ceph-volume`` will auto-sort disks by its rotational +property and use non-rotating disks for ``block.db`` or ``journal`` depending +on the objectstore used. If all devices are to be used for standalone OSDs, +no matter if rotating or solid state, pass ``--no-auto``. +For example assuming :term:`bluestore` is used and ``--no-auto`` is not passed, +the deprecated behavior would deploy the following, depending on the devices +passed: #. Devices are all spinning HDDs: 1 OSD is created per device #. Devices are all SSDs: 2 OSDs are created per device #. Devices are a mix of HDDs and SSDs: data is placed on the spinning device, the ``block.db`` is created on the SSD, as large as possible. - .. note:: Although operations in ``ceph-volume lvm create`` allow usage of - ``block.wal`` it isn't supported with the ``batch`` sub-command + ``block.wal`` it isn't supported with the ``auto`` behavior. +This default auto-sorting behavior is now DEPRECATED and will be changed in future releases. +Instead devices are not automatically sorted unless the ``--auto`` option is passed -.. _ceph-volume-lvm-batch_filestore: +It is recommended to make use of the explicit device lists for ``block.db``, + ``block.wal`` and ``journal``. -``filestore`` -------------- -The :term:`filestore` objectstore can be used when creating multiple OSDs -with the ``batch`` sub-command. It allows two different scenarios depending -on the input of devices: +.. _ceph-volume-lvm-batch_bluestore: -#. Devices are all the same type (for example all spinning HDD or all SSDs): - 1 OSD is created per device, collocating the journal in the same HDD. -#. Devices are a mix of HDDs and SSDs: data is placed on the spinning device, - while the journal is created on the SSD using the sizing options from - ceph.conf and falling back to the default journal size of 5GB. +Reporting +========= +By default ``batch`` will print a report of the computed OSD layout and ask the +user to confirm. This can be overridden by passing ``--yes``. +If one wants to try out several invocations with being asked to deploy +``--report`` can be passed. ``ceph-volume`` will exit after printing the report. -When a mix of solid and spinning devices are used, ``ceph-volume`` will try to -detect existing volume groups on the solid devices. If a VG is found, it will -try to create the logical volume from there, otherwise raising an error if -space is insufficient. +Consider the following invocation:: -If a raw solid device is used along with a device that has a volume group in -addition to some spinning devices, ``ceph-volume`` will try to extend the -existing volume group and then create a logical volume. + $ ceph-volume lvm batch --report /dev/sdb /dev/sdc /dev/sdd --db-devices /dev/nvme0n1 -.. _ceph-volume-lvm-batch_report: +This will deploy three OSDs with external ``db`` and ``wal`` volumes on +an NVME device. -Reporting -========= -When a call is received to create OSDs, the tool will prompt the user to -continue if the pre-computed output is acceptable. This output is useful to -understand the outcome of the received devices. 
Once confirmation is accepted, -the process continues. +**pretty reporting** +The ``pretty`` report format (the default) would +look like this:: -Although prompts are good to understand outcomes, it is incredibly useful to -try different inputs to find the best product possible. With the ``--report`` -flag, one can prevent any actual operations and just verify outcomes from -inputs. + $ ceph-volume lvm batch --report /dev/sdb /dev/sdc /dev/sdd --db-devices /dev/nvme0n1 + --> passed data devices: 3 physical, 0 LVM + --> relative data size: 1.0 + --> passed block_db devices: 1 physical, 0 LVM -**pretty reporting** -For two spinning devices, this is how the ``pretty`` report (the default) would -look:: + Total OSDs: 3 - $ ceph-volume lvm batch --report /dev/sdb /dev/sdc + Type Path LV Size % of device + ---------------------------------------------------------------------------------------------------- + data /dev/sdb 300.00 GB 100.00% + block_db /dev/nvme0n1 66.67 GB 33.33% + ---------------------------------------------------------------------------------------------------- + data /dev/sdc 300.00 GB 100.00% + block_db /dev/nvme0n1 66.67 GB 33.33% + ---------------------------------------------------------------------------------------------------- + data /dev/sdd 300.00 GB 100.00% + block_db /dev/nvme0n1 66.67 GB 33.33% - Total OSDs: 2 - Type Path LV Size % of device - -------------------------------------------------------------------------------- - [data] /dev/sdb 10.74 GB 100% - -------------------------------------------------------------------------------- - [data] /dev/sdc 10.74 GB 100% **JSON reporting** -Reporting can produce a richer output with ``JSON``, which gives a few more -hints on sizing. This feature might be better for other tooling to consume -information that will need to be transformed. - -For two spinning devices, this is how the ``JSON`` report would look:: - - $ ceph-volume lvm batch --report --format=json /dev/sdb /dev/sdc - { - "osds": [ - { - "block.db": {}, - "data": { - "human_readable_size": "10.74 GB", - "parts": 1, - "path": "/dev/sdb", - "percentage": 100, - "size": 11534336000.0 - } - }, - { - "block.db": {}, - "data": { - "human_readable_size": "10.74 GB", - "parts": 1, - "path": "/dev/sdc", - "percentage": 100, - "size": 11534336000.0 - } - } - ], - "vgs": [ - { - "devices": [ - "/dev/sdb" - ], - "parts": 1 - }, - { - "devices": [ - "/dev/sdc" - ], - "parts": 1 - } - ] - } +Reporting can produce a structured output with ``--format json`` or +``--format json-pretty``:: + + $ ceph-volume lvm batch --report --format json-pretty /dev/sdb /dev/sdc /dev/sdd --db-devices /dev/nvme0n1 + --> passed data devices: 3 physical, 0 LVM + --> relative data size: 1.0 + --> passed block_db devices: 1 physical, 0 LVM + [ + { + "block_db": "/dev/nvme0n1", + "block_db_size": "66.67 GB", + "data": "/dev/sdb", + "data_size": "300.00 GB", + "encryption": "None" + }, + { + "block_db": "/dev/nvme0n1", + "block_db_size": "66.67 GB", + "data": "/dev/sdc", + "data_size": "300.00 GB", + "encryption": "None" + }, + { + "block_db": "/dev/nvme0n1", + "block_db_size": "66.67 GB", + "data": "/dev/sdd", + "data_size": "300.00 GB", + "encryption": "None" + } + ] + +Sizing +====== +When no sizing arguments are passed, `ceph-volume` will derive the sizing from +the passed device lists (or the sorted lists when using the automatic sorting). +`ceph-volume batch` will attempt to fully utilize a device's available capacity. +Relying on automatic sizing is recommended. 
+ +If one requires a different sizing policy for wal, db or journal devices, +`ceph-volume` offers implicit and explicit sizing rules. + +Implicit sizing +--------------- +Scenarios in which either devices are under-comitted or not all data devices are +currently ready for use (due to a broken disk for example), one can still rely +on `ceph-volume` automatic sizing. +Users can provide hints to `ceph-volume` as to how many data devices should have +their external volumes on a set of fast devices. These options are: + +* ``--block-db-slots`` +* ``--block-wal-slots`` +* ``--journal-slots`` + +For example, consider an OSD host that is supposed to contain 5 data devices and +one device for wal/db volumes. However, one data device is currently broken and +is being replaced. Instead of calculating the explicit sizes for the wal/db +volume, one can simply call:: + + $ ceph-volume lvm batch --report /dev/sdb /dev/sdc /dev/sdd /dev/sde --db-devices /dev/nvme0n1 --block-db-slots 5 + +Explicit sizing +--------------- +It is also possible to provide explicit sizes to `ceph-volume` via the arguments + +* ``--block-db-size`` +* ``--block-wal-size`` +* ``--journal-size`` + +`ceph-volume` will try to satisfy the requested sizes given the passed disks. If +this is not possible, no OSDs will be deployed. + + +Idempotency and disk replacements +================================= +`ceph-volume lvm batch` intends to be idempotent, i.e. calling the same command +repeatedly must result in the same outcome. For example calling:: + + $ ceph-volume lvm batch --report /dev/sdb /dev/sdc /dev/sdd --db-devices /dev/nvme0n1 + +will result in three deployed OSDs (if all disks were available). Calling this +command again, you will still end up with three OSDs and ceph-volume will exit +with return code 0. + +Suppose /dev/sdc goes bad and needs to be replaced. After destroying the OSD and +replacing the hardware, you can again call the same command and `ceph-volume` +will detect that only two out of the three wanted OSDs are setup and re-create +the missing OSD. + +This idempotency notion is tightly coupled to and extensively used by :ref:`drivegroups`. diff --git a/ceph/doc/ceph-volume/lvm/prepare.rst b/ceph/doc/ceph-volume/lvm/prepare.rst index 3683f6f5b..fd40b7e1e 100644 --- a/ceph/doc/ceph-volume/lvm/prepare.rst +++ b/ceph/doc/ceph-volume/lvm/prepare.rst @@ -253,28 +253,20 @@ work for both bluestore and filestore OSDs:: ``multipath`` support --------------------- -Devices that come from ``multipath`` are not supported as-is. The tool will -refuse to consume a raw multipath device and will report a message like:: +``multipath`` devices are support if ``lvm`` is configured properly. - --> RuntimeError: Cannot use device (/dev/mapper/). A vg/lv path or an existing device is needed +**Leave it to LVM** -The reason for not supporting multipath is that depending on the type of the -multipath setup, if using an active/passive array as the underlying physical -devices, filters are required in ``lvm.conf`` to exclude the disks that are part of -those underlying devices. +Most Linux distributions should ship their LVM2 package with +``multipath_component_detection = 1`` in the default configuration. With this +setting ``LVM`` ignores any device that is a multipath component and +``ceph-volume`` will accordingly not touch these devices. -It is unfeasible for ceph-volume to understand what type of configuration is -needed for LVM to be able to work in various different multipath scenarios. 
The -functionality to create the LV for you is merely a (naive) convenience, -anything that involves different settings or configuration must be provided by -a config management system which can then provide VGs and LVs for ceph-volume -to consume. - -This situation will only arise when trying to use the ceph-volume functionality -that creates a volume group and logical volume from a device. If a multipath -device is already a logical volume it *should* work, given that the LVM -configuration is done correctly to avoid issues. +**Using filters** +Should this setting be unavailable, a correct ``filter`` expression must be +provided in ``lvm.conf``. ``ceph-volume`` must not be able to use both the +multipath device and its multipath components. Storing metadata ---------------- diff --git a/ceph/doc/cephadm/adoption.rst b/ceph/doc/cephadm/adoption.rst index 701c3d75f..5c1d2ad1b 100644 --- a/ceph/doc/cephadm/adoption.rst +++ b/ceph/doc/cephadm/adoption.rst @@ -75,11 +75,11 @@ Adoption process #. Generate an SSH key:: # ceph cephadm generate-key - # ceph cephadm get-pub-key > ceph.pub + # ceph cephadm get-pub-key > ~/ceph.pub #. Install the cluster SSH key on each host in the cluster:: - # ssh-copy-id -f -i ceph.pub root@ + # ssh-copy-id -f -i ~/ceph.pub root@ .. note:: It is also possible to import an existing ssh key. See diff --git a/ceph/doc/cephadm/client-setup.rst b/ceph/doc/cephadm/client-setup.rst index dd0bc3285..3efc1cc11 100644 --- a/ceph/doc/cephadm/client-setup.rst +++ b/ceph/doc/cephadm/client-setup.rst @@ -15,7 +15,9 @@ Config File Setup Client machines can generally get away with a smaller config file than a full-fledged cluster member. To generate a minimal config file, log into a host that is already configured as a client or running a cluster -daemon, and then run:: +daemon, and then run + +.. code-block:: bash ceph config generate-minimal-conf @@ -28,7 +30,9 @@ Keyring Setup Most Ceph clusters are run with authentication enabled, and the client will need keys in order to communicate with cluster machines. To generate a keyring file with credentials for `client.fs`, log into an extant cluster -member and run:: +member and run + +.. code-block:: bash ceph auth get-or-create client.fs diff --git a/ceph/doc/cephadm/concepts.rst b/ceph/doc/cephadm/concepts.rst index 8b1743799..7d11d22db 100644 --- a/ceph/doc/cephadm/concepts.rst +++ b/ceph/doc/cephadm/concepts.rst @@ -49,7 +49,7 @@ host name: domain name (the part after the first dot). You can check the FQDN using ``hostname --fqdn`` or the domain name using ``dnsdomainname``. - :: + .. code-block:: none You cannot change the FQDN with hostname or dnsdomainname. @@ -117,4 +117,4 @@ candidate hosts. However, there is a special cases that cephadm needs to consider. In case the are fewer hosts selected by the placement specification than -demanded by ``count``, cephadm will only deploy on selected hosts. \ No newline at end of file +demanded by ``count``, cephadm will only deploy on selected hosts. diff --git a/ceph/doc/cephadm/drivegroups.rst b/ceph/doc/cephadm/drivegroups.rst index f1dd523e2..a1397af01 100644 --- a/ceph/doc/cephadm/drivegroups.rst +++ b/ceph/doc/cephadm/drivegroups.rst @@ -8,9 +8,11 @@ OSD Service Specification It gives the user an abstract way tell ceph which disks should turn into an OSD with which configuration without knowing the specifics of device names and paths. -Instead of doing this:: +Instead of doing this - [monitor 1] # ceph orch daemon add osd **:** +.. 
prompt:: bash [monitor.1]# + + ceph orch daemon add osd **:** for each device and each host, we can define a yaml|json file that allows us to describe the layout. Here's the most basic example. @@ -32,9 +34,11 @@ Turn any available(ceph-volume decides what 'available' is) into an OSD on all h the glob pattern '*'. (The glob pattern matches against the registered hosts from `host ls`) There will be a more detailed section on host_pattern down below. -and pass it to `osd create` like so:: +and pass it to `osd create` like so + +.. prompt:: bash [monitor.1]# - [monitor 1] # ceph orch apply osd -i /path/to/osd_spec.yml + ceph orch apply osd -i /path/to/osd_spec.yml This will go out on all the matching hosts and deploy these OSDs. @@ -43,9 +47,11 @@ Since we want to have more complex setups, there are more filters than just the Also, there is a `--dry-run` flag that can be passed to the `apply osd` command, which gives you a synopsis of the proposed layout. -Example:: +Example - [monitor 1] # ceph orch apply osd -i /path/to/osd_spec.yml --dry-run +.. prompt:: bash [monitor.1]# + + [monitor.1]# ceph orch apply osd -i /path/to/osd_spec.yml --dry-run @@ -64,7 +70,9 @@ Filters You can assign disks to certain groups by their attributes using filters. The attributes are based off of ceph-volume's disk query. You can retrieve the information -with:: +with + +.. code-block:: bash ceph-volume inventory @@ -105,20 +113,28 @@ Size specification of format can be of form: Concrete examples: -Includes disks of an exact size:: +Includes disks of an exact size + +.. code-block:: yaml size: '10G' -Includes disks which size is within the range:: +Includes disks which size is within the range + +.. code-block:: yaml size: '10G:40G' -Includes disks less than or equal to 10G in size:: +Includes disks less than or equal to 10G in size + +.. code-block:: yaml size: ':10G' -Includes disks equal to or greater than 40G in size:: +Includes disks equal to or greater than 40G in size + +.. code-block:: yaml size: '40G:' @@ -206,7 +222,9 @@ Examples The simple case --------------- -All nodes with the same setup:: +All nodes with the same setup + +.. code-block:: none 20 HDDs Vendor: VendorA @@ -265,7 +283,9 @@ Note: All of the above DriveGroups are equally valid. Which of those you want to The advanced case ----------------- -Here we have two distinct setups:: +Here we have two distinct setups + +.. code-block:: none 20 HDDs Vendor: VendorA @@ -317,7 +337,9 @@ The advanced case (with non-uniform nodes) The examples above assumed that all nodes have the same drives. That's however not always the case. -Node1-5:: +Node1-5 + +.. code-block:: none 20 HDDs Vendor: Intel @@ -328,7 +350,9 @@ Node1-5:: Model: MC-55-44-ZX Size: 512GB -Node6-10:: +Node6-10 + +.. code-block:: none 5 NVMEs Vendor: Intel @@ -371,7 +395,7 @@ Dedicated wal + db All previous cases co-located the WALs with the DBs. It's however possible to deploy the WAL on a dedicated device as well, if it makes sense. -:: +.. code-block:: none 20 HDDs Vendor: VendorA diff --git a/ceph/doc/cephadm/install.rst b/ceph/doc/cephadm/install.rst index e486231c6..a8c5de804 100644 --- a/ceph/doc/cephadm/install.rst +++ b/ceph/doc/cephadm/install.rst @@ -405,6 +405,9 @@ Alternatively, the realm, zonegroup, and zone can be manually created using ``ra See :ref:`orchestrator-cli-placement-spec` for details of the placement specification. + +.. 
_deploy-cephadm-nfs-ganesha: + Deploying NFS ganesha ===================== diff --git a/ceph/doc/cephadm/monitoring.rst b/ceph/doc/cephadm/monitoring.rst index 4b6471caf..b1a415773 100644 --- a/ceph/doc/cephadm/monitoring.rst +++ b/ceph/doc/cephadm/monitoring.rst @@ -41,24 +41,34 @@ did not do this (by passing ``--skip-monitoring-stack``, or if you converted an existing cluster to cephadm management, you can set up monitoring by following the steps below. -#. Enable the prometheus module in the ceph-mgr daemon. This exposes the internal Ceph metrics so that prometheus can scrape them.:: +#. Enable the prometheus module in the ceph-mgr daemon. This exposes the internal Ceph metrics so that prometheus can scrape them. + + .. code-block:: bash ceph mgr module enable prometheus -#. Deploy a node-exporter service on every node of the cluster. The node-exporter provides host-level metrics like CPU and memory utilization.:: +#. Deploy a node-exporter service on every node of the cluster. The node-exporter provides host-level metrics like CPU and memory utilization. + + .. code-block:: bash ceph orch apply node-exporter '*' -#. Deploy alertmanager:: +#. Deploy alertmanager + + .. code-block:: bash ceph orch apply alertmanager 1 #. Deploy prometheus. A single prometheus instance is sufficient, but - for HA you may want to deploy two.:: + for HA you may want to deploy two. + + .. code-block:: bash ceph orch apply prometheus 1 # or 2 -#. Deploy grafana:: +#. Deploy grafana + + .. code-block:: bash ceph orch apply grafana 1 @@ -66,7 +76,9 @@ Cephadm handles the prometheus, grafana, and alertmanager configurations automatically. It may take a minute or two for services to be deployed. Once -completed, you should see something like this from ``ceph orch ls``:: +completed, you should see something like this from ``ceph orch ls`` + +.. code-block:: console $ ceph orch ls NAME RUNNING REFRESHED IMAGE NAME IMAGE ID SPEC @@ -88,11 +100,15 @@ configuration first. The following configuration options are available. - ``container_image_alertmanager`` - ``container_image_node_exporter`` -Custom images can be set with the ``ceph config`` command:: +Custom images can be set with the ``ceph config`` command + +.. code-block:: bash ceph config set mgr mgr/cephadm/ -For example:: +For example + +.. code-block:: bash ceph config set mgr mgr/cephadm/container_image_prometheus prom/prometheus:v1.4.1 @@ -107,11 +123,15 @@ For example:: If you choose to go with the recommendations instead, you can reset the custom image you have set before. After that, the default value will be - used again. Use ``ceph config rm`` to reset the configuration option:: + used again. Use ``ceph config rm`` to reset the configuration option + + .. code-block:: bash ceph config rm mgr mgr/cephadm/ - For example:: + For example + + .. code-block:: bash ceph config rm mgr mgr/cephadm/container_image_prometheus @@ -119,7 +139,9 @@ Disabling monitoring -------------------- If you have deployed monitoring and would like to remove it, you can do -so with:: +so with + +.. code-block:: bash ceph orch rm grafana ceph orch rm prometheus --force # this will delete metrics data collected so far @@ -135,7 +157,9 @@ If you have an existing prometheus monitoring infrastructure, or would like to manage it yourself, you need to configure it to integrate with your Ceph cluster. -* Enable the prometheus module in the ceph-mgr daemon:: +* Enable the prometheus module in the ceph-mgr daemon + + .. 
code-block:: bash ceph mgr module enable prometheus diff --git a/ceph/doc/cephadm/operations.rst b/ceph/doc/cephadm/operations.rst index 198286a3a..456f48628 100644 --- a/ceph/doc/cephadm/operations.rst +++ b/ceph/doc/cephadm/operations.rst @@ -278,19 +278,30 @@ You can disable this health warning with:: /etc/ceph/ceph.conf =================== -Cephadm uses a minimized ``ceph.conf`` that only contains +Cephadm distributes a minimized ``ceph.conf`` that only contains a minimal set of information to connect to the Ceph cluster. -To update the configuration settings, use:: +To update the configuration settings, instead of manually editing +the ``ceph.conf`` file, use the config database instead:: ceph config set ... +See :ref:`ceph-conf-database` for details. -To set up an initial configuration before calling -`bootstrap`, create an initial ``ceph.conf`` file. For example:: +By default, cephadm does not deploy that minimized ``ceph.conf`` across the +cluster. To enable the management of ``/etc/ceph/ceph.conf`` files on all +hosts, please enable this by running:: + + ceph config set mgr mgr/cephadm/manage_etc_ceph_ceph_conf true + +To set up an initial configuration before bootstrapping +the cluster, create an initial ``ceph.conf`` file. For example:: cat < /etc/ceph/ceph.conf [global] osd crush chooseleaf type = 0 EOF + +Then, run bootstrap referencing this file:: + cephadm bootstrap -c /root/ceph.conf ... diff --git a/ceph/doc/cephadm/troubleshooting.rst b/ceph/doc/cephadm/troubleshooting.rst index a439b3d7d..5d7a29c3a 100644 --- a/ceph/doc/cephadm/troubleshooting.rst +++ b/ceph/doc/cephadm/troubleshooting.rst @@ -110,44 +110,44 @@ ssh errors Error message:: - xxxxxx.gateway_bootstrap.HostNotFound: -F /tmp/cephadm-conf-kbqvkrkw root@10.10.1.2 - raise OrchestratorError('Failed to connect to %s (%s). Check that the host is reachable and accepts connections using the cephadm SSH key' % (host, addr)) from - orchestrator._interface.OrchestratorError: Failed to connect to 10.10.1.2 (10.10.1.2). Check that the host is reachable and accepts connections using the cephadm SSH key + execnet.gateway_bootstrap.HostNotFound: -F /tmp/cephadm-conf-73z09u6g -i /tmp/cephadm-identity-ky7ahp_5 root@10.10.1.2 + ... + raise OrchestratorError(msg) from e + orchestrator._interface.OrchestratorError: Failed to connect to 10.10.1.2 (10.10.1.2). + Please make sure that the host is reachable and accepts connections using the cephadm SSH key + ... Things users can do: 1. Ensure cephadm has an SSH identity key:: - - [root@mon1~]# cephadm shell -- ceph config-key get mgr/cephadm/ssh_identity_key > key + + [root@mon1~]# cephadm shell -- ceph config-key get mgr/cephadm/ssh_identity_key > ~/cephadm_private_key INFO:cephadm:Inferring fsid f8edc08a-7f17-11ea-8707-000c2915dd98 INFO:cephadm:Using recent ceph image docker.io/ceph/ceph:v15 obtained 'mgr/cephadm/ssh_identity_key' - [root@mon1 ~] # chmod 0600 key + [root@mon1 ~] # chmod 0600 ~/cephadm_private_key If this fails, cephadm doesn't have a key. Fix this by running the following command:: - + [root@mon1 ~]# cephadm shell -- ceph cephadm generate-ssh-key or:: - - [root@mon1 ~]# cat key | cephadm shell -- ceph cephadm set-ssk-key -i - + + [root@mon1 ~]# cat ~/cephadm_private_key | cephadm shell -- ceph cephadm set-ssk-key -i - 2. Ensure that the ssh config is correct:: - + [root@mon1 ~]# cephadm shell -- ceph cephadm get-ssh-config > config 3. 
Verify that we can connect to the host:: - - [root@mon1 ~]# ssh -F config -i key root@mon1 - - + [root@mon1 ~]# ssh -F config -i ~/cephadm_private_key root@mon1 Verifying that the Public Key is Listed in the authorized_keys file ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To verify that the public key is in the authorized_keys file, run the following commands:: - [root@mon1 ~]# cephadm shell -- ceph config-key get mgr/cephadm/ssh_identity_pub > key.pub - [root@mon1 ~]# grep "`cat key.pub`" /root/.ssh/authorized_keys + [root@mon1 ~]# cephadm shell -- ceph cephadm get-pub-key > ~/ceph.pub + [root@mon1 ~]# grep "`cat ~/ceph.pub`" /root/.ssh/authorized_keys Failed to infer CIDR network error ---------------------------------- diff --git a/ceph/doc/cephfs/cephfs-io-path.rst b/ceph/doc/cephfs/cephfs-io-path.rst index 61ce379f5..8c7810ba0 100644 --- a/ceph/doc/cephfs/cephfs-io-path.rst +++ b/ceph/doc/cephfs/cephfs-io-path.rst @@ -19,6 +19,7 @@ client cache. .. ditaa:: + +---------------------+ | Application | +---------------------+ diff --git a/ceph/doc/cephfs/fs-nfs-exports.rst b/ceph/doc/cephfs/fs-nfs-exports.rst index 9740404bd..e655c9233 100644 --- a/ceph/doc/cephfs/fs-nfs-exports.rst +++ b/ceph/doc/cephfs/fs-nfs-exports.rst @@ -17,15 +17,39 @@ Create NFS Ganesha Cluster .. code:: bash - $ ceph nfs cluster create [] + $ ceph nfs cluster create [] -This creates a common recovery pool for all Ganesha daemons, new user based on -cluster_id and common ganesha config rados object. +This creates a common recovery pool for all NFS Ganesha daemons, new user based on +cluster_id, and a common NFS Ganesha config RADOS object. -Here type is export type and placement specifies the size of cluster and hosts. -For more details on placement specification refer the `orchestrator doc -`_. -Currently only CephFS export type is supported. +NOTE: Since this command also brings up NFS Ganesha daemons using a ceph-mgr +orchestrator module (see :doc:`/mgr/orchestrator`) such as "mgr/cephadm", at +least one such module must be enabled for it to work. + + signifies the export type, which corresponds to the NFS Ganesha file +system abstraction layer (FSAL). Permissible values are "cephfs" or "rgw", but +currently only "cephfs" is supported. + + is an arbitrary string by which this NFS Ganesha cluster will be +known. + + is an optional string signifying which hosts should have NFS Ganesha +daemon containers running on them and, optionally, the total number of NFS +Ganesha daemons the cluster (should you want to have more than one NFS Ganesha +daemon running per node). For example, the following placement string means +"deploy NFS Ganesha daemons on nodes host1 and host2 (one daemon per host): + + "host1,host2" + +and this placement specification says to deploy two NFS Ganesha daemons each +on nodes host1 and host2 (for a total of four NFS Ganesha daemons in the +cluster): + + "4 host1,host2" + +For more details on placement specification refer to the `orchestrator doc +`_ +but keep in mind that specifying the placement via a YAML file is not supported. Update NFS Ganesha Cluster ========================== @@ -63,8 +87,8 @@ Show NFS Ganesha Cluster Information This displays ip and port of deployed cluster. -Set Customized Ganesha Configuration -==================================== +Set Customized NFS Ganesha Configuration +======================================== .. 
code:: bash @@ -73,8 +97,8 @@ Set Customized Ganesha Configuration With this the nfs cluster will use the specified config and it will have precedence over default config blocks. -Reset Ganesha Configuration -=========================== +Reset NFS Ganesha Configuration +=============================== .. code:: bash @@ -89,8 +113,14 @@ Create CephFS Export $ ceph nfs export create cephfs [--readonly] [--path=/path/in/cephfs] -It creates export rados objects containing the export block. Here binding is -the pseudo root name and type is export type. +This creates export RADOS objects containing the export block, where + +``fsname`` is the name of the FS volume used by the NFS Ganesha cluster that will +serve this export. + +``clusterid`` is the NFS Ganesha cluster ID. + +``binding`` is the pseudo root path (must be an absolute path). Delete CephFS Export ==================== @@ -99,17 +129,24 @@ Delete CephFS Export $ ceph nfs export delete -It deletes an export in cluster based on pseudo root name (binding). +This deletes an export in an NFS Ganesha cluster, where: + +``clusterid`` is the NFS Ganesha cluster ID. + +``binding`` is the pseudo root path (must be an absolute path). -List CephFS Export -================== +List CephFS Exports +=================== .. code:: bash $ ceph nfs export ls [--detailed] -It lists export for a cluster. With detailed option enabled it shows entire -export block. +It lists exports for a cluster, where: + +``clusterid`` is the NFS Ganesha cluster ID. + +With the ``--detailed`` option enabled it shows entire export block. Get CephFS Export ================= @@ -118,18 +155,25 @@ Get CephFS Export $ ceph nfs export get -It displays export block for a cluster based on pseudo root name (binding). +This displays export block for a cluster based on pseudo root name (binding), +where: -Configuring NFS-Ganesha to export CephFS with vstart +``clusterid`` is the NFS Ganesha cluster ID. + +``binding`` is the pseudo root path (must be an absolute path). + +Configuring NFS Ganesha to export CephFS with vstart ==================================================== -1) Using cephadm +1) Using ``cephadm`` .. code:: bash $ MDS=1 MON=1 OSD=3 NFS=1 ../src/vstart.sh -n -d --cephadm - It can deploy only single ganesha daemon with vstart on default ganesha port. + This will deploy a single NFS Ganesha daemon using ``vstart.sh``, where: + + The daemon will listen on the default NFS Ganesha port. 2) Using test orchestrator @@ -137,15 +181,17 @@ Configuring NFS-Ganesha to export CephFS with vstart $ MDS=1 MON=1 OSD=3 NFS=1 ../src/vstart.sh -n -d - It can deploy multiple ganesha daemons on random port. But this requires - ganesha packages to be installed. + This will deploy multiple NFS Ganesha daemons, each listening on a random + port, where: + + ``NFS`` is the number of NFS Ganesha clusters to be created. -NFS: It is the number of NFS-Ganesha clusters to be created. + NOTE: NFS Ganesha packages must be pre-installed for this to work. Mount ===== -After the exports are successfully created and Ganesha daemons are no longer in +After the exports are successfully created and NFS Ganesha daemons are no longer in grace period. The exports can be mounted by .. code:: bash diff --git a/ceph/doc/cephfs/fs-volumes.rst b/ceph/doc/cephfs/fs-volumes.rst index 4efe26d8b..807340334 100644 --- a/ceph/doc/cephfs/fs-volumes.rst +++ b/ceph/doc/cephfs/fs-volumes.rst @@ -45,9 +45,29 @@ Create a volume using:: $ ceph fs volume create [] -This creates a CephFS file system and its data and metadata pools. 
It also tries -to create MDSes for the filesystem using the enabled ceph-mgr orchestrator -module (see :doc:`/mgr/orchestrator`) , e.g., rook. +This creates a CephFS file system and its data and metadata pools. It can also +try to create MDSes for the filesystem using the enabled ceph-mgr orchestrator +module (see :doc:`/mgr/orchestrator`), e.g. rook. + +<vol_name> is the volume name (an arbitrary string), and + +<placement> is an optional string signifying which hosts should have MDS +daemon containers running on them and, optionally, the total number of MDS +daemons in the cluster (should you want to have more than one MDS +daemon running per node). For example, the following placement string means +"deploy MDS daemons on nodes host1 and host2" (one daemon per host): + + "host1,host2" + +and this placement specification says to deploy two MDS daemons each +on nodes host1 and host2 (for a total of four MDS daemons in the +cluster): + + "4 host1,host2" + +For more details on placement specification refer to the `orchestrator doc +`_ +but keep in mind that specifying the placement via a YAML file is not supported. Remove a volume using:: @@ -251,6 +271,10 @@ Similar to specifying a pool layout when creating a subvolume, pool layout can b $ ceph fs subvolume snapshot clone --pool_layout +Configure maximum number of concurrent clones. The default is set to 4:: + + $ ceph config set mgr mgr/volumes/max_concurrent_clones + To check the status of a clone operation use:: $ ceph fs clone status [--group_name ] diff --git a/ceph/doc/cephfs/health-messages.rst b/ceph/doc/cephfs/health-messages.rst index 2e79c7bfa..54e334111 100644 --- a/ceph/doc/cephfs/health-messages.rst +++ b/ceph/doc/cephfs/health-messages.rst @@ -59,8 +59,8 @@ by the setting ``mds_log_max_segments``, and when the number of segments exceeds that setting the MDS starts writing back metadata so that it can remove (trim) the oldest segments. If this writeback is happening too slowly, or a software bug is preventing trimming, then this health -message may appear. The threshold for this message to appear is for the -number of segments to be double ``mds_log_max_segments``. +message may appear. The threshold for this message to appear is controlled by +the config option ``mds_log_warn_factor``; the default is 2.0. Message: "Client *name* failing to respond to capability release" Code: MDS_HEALTH_CLIENT_LATE_RELEASE, MDS_HEALTH_CLIENT_LATE_RELEASE_MANY diff --git a/ceph/doc/conf.py b/ceph/doc/conf.py index 6ed0f7ee5..7c84eaf84 100644 --- a/ceph/doc/conf.py +++ b/ceph/doc/conf.py @@ -1,3 +1,4 @@ +import shutil import sys import os @@ -53,14 +54,35 @@ extensions = [ 'sphinx_autodoc_typehints', 'sphinx.ext.graphviz', 'sphinx.ext.todo', - 'sphinxcontrib.ditaa', + 'sphinx-prompt', + 'sphinx_substitution_extensions', 'breathe', 'edit_on_github', 'ceph_releases', ] -ditaa = 'ditaa' + +ditaa = shutil.which("ditaa") +if ditaa is not None: + extensions += ['sphinxcontrib.ditaa'] +else: + extensions += ['plantweb.directive'] + plantweb_defaults = { 'engine': 'ditaa' } + +build_with_rtd = os.environ.get('READTHEDOCS') == 'True' +if build_with_rtd: + extensions += ['sphinx_search.extension'] + +# sphinx.ext.todo todo_include_todos = True + +# sphinx_substitution_extensions +# TODO: read from doc/releases/releases.yml +rst_prolog = """ +..
|stable-release| replace:: octopus +""" + top_level = os.path.dirname( os.path.dirname( os.path.abspath(__file__) @@ -87,6 +109,10 @@ edit_on_github_branch = 'master' # handles edit-on-github and old version warning display def setup(app): app.add_javascript('js/ceph.js') + if ditaa is None: + # add "ditaa" as an alias of "diagram" + from plantweb.directive import DiagramDirective + app.add_directive('ditaa', DiagramDirective) # mocking ceph_module offered by ceph-mgr. `ceph_module` is required by # mgr.mgr_module @@ -110,8 +136,21 @@ class Mock(object): sys.modules['ceph_module'] = Mock() -for pybind in [os.path.join(top_level, 'src/pybind'), - os.path.join(top_level, 'src/pybind/mgr'), - os.path.join(top_level, 'src/python-common')]: +if build_with_rtd: + exclude_patterns += ['**/api/*', + '**/api.rst'] + autodoc_mock_imports = ['cephfs', + 'rados', + 'rbd', + 'ceph'] + pybinds = ['pybind/mgr', + 'python-common'] +else: + pybinds = ['pybind', + 'pybind/mgr', + 'python-common'] + +for c in pybinds: + pybind = os.path.join(top_level, 'src', c) if pybind not in sys.path: sys.path.insert(0, pybind) diff --git a/ceph/doc/dev/deduplication.rst b/ceph/doc/dev/deduplication.rst index ffb2a283b..def15955e 100644 --- a/ceph/doc/dev/deduplication.rst +++ b/ceph/doc/dev/deduplication.rst @@ -50,7 +50,8 @@ More details in https://ieeexplore.ieee.org/document/8416369 Design ====== -.. ditaa:: +.. ditaa:: + +-------------+ | Ceph Client | +------+------+ diff --git a/ceph/doc/dev/msgr2.rst b/ceph/doc/dev/msgr2.rst index 0eed9e67f..585dc34d2 100644 --- a/ceph/doc/dev/msgr2.rst +++ b/ceph/doc/dev/msgr2.rst @@ -74,7 +74,9 @@ If the remote party advertises required features we don't support, we can disconnect. -.. ditaa:: +---------+ +--------+ +.. ditaa:: + + +---------+ +--------+ | Client | | Server | +---------+ +--------+ | send banner | @@ -291,7 +293,9 @@ Authentication Example of authentication phase interaction when the client uses an allowed authentication method: -.. ditaa:: +---------+ +--------+ +.. ditaa:: + + +---------+ +--------+ | Client | | Server | +---------+ +--------+ | auth request | @@ -308,7 +312,9 @@ allowed authentication method: Example of authentication phase interaction when the client uses a forbidden authentication method as the first attempt: -.. ditaa:: +---------+ +--------+ +.. ditaa:: + + +---------+ +--------+ | Client | | Server | +---------+ +--------+ | auth request | @@ -615,7 +621,9 @@ Example of failure scenarios: * First client's client_ident message is lost, and then client reconnects. -.. ditaa:: +---------+ +--------+ +.. ditaa:: + + +---------+ +--------+ | Client | | Server | +---------+ +--------+ | | @@ -633,7 +641,9 @@ Example of failure scenarios: * Server's server_ident message is lost, and then client reconnects. -.. ditaa:: +---------+ +--------+ +.. ditaa:: + + +---------+ +--------+ | Client | | Server | +---------+ +--------+ | | @@ -654,7 +664,9 @@ Example of failure scenarios: * Server's server_ident message is lost, and then server reconnects. -.. ditaa:: +---------+ +--------+ +.. ditaa:: + + +---------+ +--------+ | Client | | Server | +---------+ +--------+ | | @@ -678,7 +690,9 @@ Example of failure scenarios: * Connection failure after session is established, and then client reconnects. -.. ditaa:: +---------+ +--------+ +.. 
ditaa:: + + +---------+ +--------+ | Client | | Server | +---------+ +--------+ | | @@ -696,7 +710,9 @@ Example of failure scenarios: * Connection failure after session is established because server reset, and then client reconnects. -.. ditaa:: +---------+ +--------+ +.. ditaa:: + + +---------+ +--------+ | Client | | Server | +---------+ +--------+ | | @@ -722,7 +738,9 @@ of the connection. * Connection failure after session is established because client reset, and then client reconnects. -.. ditaa:: +---------+ +--------+ +.. ditaa:: + + +---------+ +--------+ | Client | | Server | +---------+ +--------+ | | @@ -789,7 +807,9 @@ Example of protocol interaction (WIP) _____________________________________ -.. ditaa:: +---------+ +--------+ +.. ditaa:: + + +---------+ +--------+ | Client | | Server | +---------+ +--------+ | send banner | diff --git a/ceph/doc/install/ceph-deploy/quick-ceph-deploy.rst b/ceph/doc/install/ceph-deploy/quick-ceph-deploy.rst index c4589c7b3..03a59636b 100644 --- a/ceph/doc/install/ceph-deploy/quick-ceph-deploy.rst +++ b/ceph/doc/install/ceph-deploy/quick-ceph-deploy.rst @@ -163,6 +163,7 @@ cluster. Then add a Ceph Monitor and Ceph Manager to ``node2`` and ``node3`` to improve reliability and availability. .. ditaa:: + /------------------\ /----------------\ | ceph-deploy | | node1 | | Admin Node | | cCCC | diff --git a/ceph/doc/install/ceph-deploy/quick-cephfs.rst b/ceph/doc/install/ceph-deploy/quick-cephfs.rst index e8ca28f86..989721957 100644 --- a/ceph/doc/install/ceph-deploy/quick-cephfs.rst +++ b/ceph/doc/install/ceph-deploy/quick-cephfs.rst @@ -44,6 +44,7 @@ For example:: Now, your Ceph cluster would look like this: .. ditaa:: + /------------------\ /----------------\ | ceph-deploy | | node1 | | Admin Node | | cCCC | diff --git a/ceph/doc/install/ceph-deploy/quick-common.rst b/ceph/doc/install/ceph-deploy/quick-common.rst index 915a7b886..25668f798 100644 --- a/ceph/doc/install/ceph-deploy/quick-common.rst +++ b/ceph/doc/install/ceph-deploy/quick-common.rst @@ -1,4 +1,5 @@ -.. ditaa:: +.. ditaa:: + /------------------\ /-----------------\ | admin-node | | node1 | | +-------->+ cCCC | diff --git a/ceph/doc/install/get-packages.rst b/ceph/doc/install/get-packages.rst index ab518c033..c14ea60a1 100644 --- a/ceph/doc/install/get-packages.rst +++ b/ceph/doc/install/get-packages.rst @@ -127,32 +127,42 @@ Debian Packages Add a Ceph package repository to your system's list of APT sources. For newer versions of Debian/Ubuntu, call ``lsb_release -sc`` on the command line to -get the short codename, and replace ``{codename}`` in the following command. :: +get the short codename, and replace ``{codename}`` in the following command. - sudo apt-add-repository 'deb https://download.ceph.com/debian-luminous/ {codename} main' +.. prompt:: bash $ + :substitutions: -For early Linux distributions, you may execute the following command:: + sudo apt-add-repository 'deb https://download.ceph.com/debian-|stable-release|/ {codename} main' - echo deb https://download.ceph.com/debian-luminous/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list +For early Linux distributions, you may execute the following command + +.. prompt:: bash $ + :substitutions: + + echo deb https://download.ceph.com/debian-|stable-release|/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list For earlier Ceph releases, replace ``{release-name}`` with the name with the name of the Ceph release. 
You may call ``lsb_release -sc`` on the command line to get the short codename, and replace ``{codename}`` in the following command. -:: +.. prompt:: bash $ - sudo apt-add-repository 'deb https://download.ceph.com/debian-{release-name}/ {codename} main' + sudo apt-add-repository 'deb https://download.ceph.com/debian-{release-name}/ {codename} main' For older Linux distributions, replace ``{release-name}`` with the name of the -release:: +release + +.. prompt:: bash $ echo deb https://download.ceph.com/debian-{release-name}/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list For development release packages, add our package repository to your system's list of APT sources. See `the testing Debian repository`_ for a complete list -of Debian and Ubuntu releases supported. :: +of Debian and Ubuntu releases supported. - echo deb https://download.ceph.com/debian-testing/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list +.. prompt:: bash $ + + echo deb https://download.ceph.com/debian-testing/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list .. tip:: For non-US users: There might be a mirror close to you where to download Ceph from. For more information see: `Ceph Mirrors`_. @@ -166,12 +176,14 @@ RHEL For major releases, you may add a Ceph entry to the ``/etc/yum.repos.d`` directory. Create a ``ceph.repo`` file. In the example below, replace -``{ceph-release}`` with a major release of Ceph (e.g., ``luminous``, ``mimic``, ``nautilus``, -etc.) and ``{distro}`` with your Linux distribution (e.g., ``el7``, etc.). You +``{ceph-release}`` with a major release of Ceph (e.g., ``|stable-release|``) +and ``{distro}`` with your Linux distribution (e.g., ``el8``, etc.). You may view https://download.ceph.com/rpm-{ceph-release}/ directory to see which distributions Ceph supports. Some Ceph packages (e.g., EPEL) must take priority over standard packages, so you must ensure that you set -``priority=2``. :: +``priority=2``. + +.. code-block:: ini [ceph] name=Ceph packages for $basearch @@ -206,13 +218,17 @@ weeks of QA prior to release. The repository package installs the repository details on your local system for use with ``yum``. Replace ``{distro}`` with your Linux distribution, and -``{release}`` with the specific release of Ceph:: +``{release}`` with the specific release of Ceph + +.. prompt:: bash $ su -c 'rpm -Uvh https://download.ceph.com/rpms/{distro}/x86_64/ceph-{release}.el7.noarch.rpm' -You can download the RPMs directly from:: +You can download the RPMs directly from + +.. code-block:: none - https://download.ceph.com/rpm-testing + https://download.ceph.com/rpm-testing .. tip:: For non-US users: There might be a mirror close to you where to download Ceph from. For more information see: `Ceph Mirrors`_. @@ -220,7 +236,9 @@ You can download the RPMs directly from:: openSUSE Leap 15.1 ^^^^^^^^^^^^^^^^^^ -You need to add the Ceph package repository to your list of zypper sources. This can be done with the following command :: +You need to add the Ceph package repository to your list of zypper sources. This can be done with the following command + +.. code-block:: bash zypper ar https://download.opensuse.org/repositories/filesystems:/ceph/openSUSE_Leap_15.1/filesystems:ceph.repo @@ -248,7 +266,9 @@ only. Add the package repository to your system's list of APT sources, but replace ``{BRANCH}`` with the branch you'd like to use (e.g., wip-hack, master). See `the shaman page`_ for a complete -list of distributions we build. 
:: +list of distributions we build. + +.. prompt:: bash $ curl -L https://shaman.ceph.com/api/repos/ceph/{BRANCH}/latest/ubuntu/$(lsb_release -sc)/repo/ | sudo tee /etc/apt/sources.list.d/shaman.list @@ -256,7 +276,9 @@ list of distributions we build. :: The use of ``latest`` in the url, means it will figure out which is the last commit that has been built. Alternatively, a specific sha1 can be specified. -For Ubuntu Xenial and the master branch of Ceph, it would look like:: +For Ubuntu Xenial and the master branch of Ceph, it would look like + +.. prompt:: bash $ curl -L https://shaman.ceph.com/api/repos/ceph/master/53e772a45fdf2d211c0c383106a66e1feedec8fd/ubuntu/xenial/repo/ | sudo tee /etc/apt/sources.list.d/shaman.list @@ -268,13 +290,17 @@ RPM Packages For current development branches, you may add a Ceph entry to the ``/etc/yum.repos.d`` directory. The `the shaman page`_ can be used to retrieve the full details -of a repo file. It can be retrieved via an HTTP request, for example:: +of a repo file. It can be retrieved via an HTTP request, for example + +.. prompt:: bash $ curl -L https://shaman.ceph.com/api/repos/ceph/{BRANCH}/latest/centos/7/repo/ | sudo tee /etc/yum.repos.d/shaman.repo The use of ``latest`` in the url, means it will figure out which is the last commit that has been built. Alternatively, a specific sha1 can be specified. -For CentOS 7 and the master branch of Ceph, it would look like:: +For CentOS 7 and the master branch of Ceph, it would look like + +.. prompt:: bash $ curl -L https://shaman.ceph.com/api/repos/ceph/master/53e772a45fdf2d211c0c383106a66e1feedec8fd/centos/7/repo/ | sudo tee /etc/apt/sources.list.d/shaman.list @@ -309,7 +335,7 @@ use with ``apt``. Replace ``{release}`` with the latest Ceph release. Replace ``{version}`` with the latest Ceph version number. Replace ``{distro}`` with your Linux distribution codename. Replace ``{arch}`` with the CPU architecture. -:: +.. prompt:: bash $ wget -q https://download.ceph.com/debian-{release}/pool/main/c/ceph/ceph_{version}{distro}_{arch}.deb @@ -318,7 +344,9 @@ RPM Packages ~~~~~~~~~~~~ Ceph requires additional additional third party libraries. -To add the EPEL repository, execute the following:: +To add the EPEL repository, execute the following + +.. prompt:: bash $ sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm @@ -333,22 +361,33 @@ Ceph requires the following packages: Packages are currently built for the RHEL/CentOS7 (``el7``) platforms. The repository package installs the repository details on your local system for use -with ``yum``. Replace ``{distro}`` with your distribution. :: +with ``yum``. Replace ``{distro}`` with your distribution. - su -c 'rpm -Uvh https://download.ceph.com/rpm-luminous/{distro}/noarch/ceph-{version}.{distro}.noarch.rpm' +.. prompt:: bash $ + :substitutions: -For example, for CentOS 7 (``el7``):: + su -c 'rpm -Uvh https://download.ceph.com/rpm-|stable-release|/{distro}/noarch/ceph-{version}.{distro}.noarch.rpm' - su -c 'rpm -Uvh https://download.ceph.com/rpm-luminous/el7/noarch/ceph-release-1-0.el7.noarch.rpm' +For example, for CentOS 8 (``el8``) -You can download the RPMs directly from:: +.. prompt:: bash $ + :substitutions: - https://download.ceph.com/rpm-luminous + su -c 'rpm -Uvh https://download.ceph.com/rpm-|stable-release|/el8/noarch/ceph-release-1-0.el8.noarch.rpm' + +You can download the RPMs directly from + +.. 
code-block:: none + :substitutions: + + https://download.ceph.com/rpm-|stable-release| For earlier Ceph releases, replace ``{release-name}`` with the name with the name of the Ceph release. You may call ``lsb_release -sc`` on the command -line to get the short codename. :: +line to get the short codename. + +.. prompt:: bash $ su -c 'rpm -Uvh https://download.ceph.com/rpm-{release-name}/{distro}/noarch/ceph-{version}.{distro}.noarch.rpm' diff --git a/ceph/doc/install/install-vm-cloud.rst b/ceph/doc/install/install-vm-cloud.rst index 3e1db1237..876422865 100644 --- a/ceph/doc/install/install-vm-cloud.rst +++ b/ceph/doc/install/install-vm-cloud.rst @@ -9,7 +9,9 @@ Examples of VMs include: QEMU/KVM, XEN, VMWare, LXC, VirtualBox, etc. Examples of Cloud Platforms include OpenStack, CloudStack, OpenNebula, etc. -.. ditaa:: +---------------------------------------------------+ +.. ditaa:: + + +---------------------------------------------------+ | libvirt | +------------------------+--------------------------+ | diff --git a/ceph/doc/install/manual-deployment.rst b/ceph/doc/install/manual-deployment.rst index a42a80f83..103c4dcd5 100644 --- a/ceph/doc/install/manual-deployment.rst +++ b/ceph/doc/install/manual-deployment.rst @@ -18,6 +18,7 @@ OSD nodes. .. ditaa:: + /------------------\ /----------------\ | Admin Node | | node1 | | +-------->+ | diff --git a/ceph/doc/install/manual-freebsd-deployment.rst b/ceph/doc/install/manual-freebsd-deployment.rst index d4eb14718..5f8f768c9 100644 --- a/ceph/doc/install/manual-freebsd-deployment.rst +++ b/ceph/doc/install/manual-freebsd-deployment.rst @@ -22,6 +22,7 @@ OSD nodes. .. ditaa:: + /------------------\ /----------------\ | Admin Node | | node1 | | +-------->+ | diff --git a/ceph/doc/man/8/cephadm.rst b/ceph/doc/man/8/cephadm.rst index 72bbf22ed..7a2473485 100644 --- a/ceph/doc/man/8/cephadm.rst +++ b/ceph/doc/man/8/cephadm.rst @@ -21,13 +21,14 @@ Synopsis | **cephadm** **inspect-image** -| **cephadm** **ls** +| **cephadm** **ls** [-h] [--no-detail] [--legacy-dir LEGACY_DIR] | **cephadm** **list-networks** | **cephadm** **adopt** [-h] --name NAME --style STYLE [--cluster CLUSTER] | [--legacy-dir LEGACY_DIR] [--config-json CONFIG_JSON] | [--skip-firewalld] [--skip-pull] +| [--container-init] | **cephadm** **rm-daemon** [-h] --name NAME --fsid FSID [--force] | [--force-delete-data] @@ -37,7 +38,7 @@ Synopsis | **cephadm** **run** [-h] --name NAME --fsid FSID | **cephadm** **shell** [-h] [--fsid FSID] [--name NAME] [--config CONFIG] - [--keyring KEYRING] [--mount MOUNT] [--env ENV] + [--keyring KEYRING] --mount [MOUNT [MOUNT ...]] [--env ENV] [--] [command [command ...]] | **cephadm** **enter** [-h] [--fsid FSID] --name NAME [command [command ...]] @@ -77,6 +78,7 @@ Synopsis | [--registry-username REGISTRY_USERNAME] | [--registry-password REGISTRY_PASSWORD] | [--registry-json REGISTRY_JSON] +| [--container-init] @@ -84,6 +86,7 @@ Synopsis | [--config-json CONFIG_JSON] [--keyring KEYRING] | [--key KEY] [--osd-fsid OSD_FSID] [--skip-firewalld] | [--tcp-ports TCP_PORTS] [--reconfig] [--allow-ptrace] +| [--container-init] | **cephadm** **check-host** [-h] [--expect-hostname EXPECT_HOSTNAME] @@ -235,6 +238,8 @@ Arguments: * [--registry-username REGISTRY_USERNAME] username of account to login to on custom registry * [--registry-password REGISTRY_PASSWORD] password of account to login to on custom registry * [--registry-json REGISTRY_JSON] JSON file containing registry login info (see registry-login command documentation) +* [--container-init] Run 
podman/docker with `--init` + ceph-volume ----------- @@ -284,6 +289,7 @@ Arguments: * [--tcp-ports List of tcp ports to open in the host firewall * [--reconfig] Reconfigure a previously deployed daemon * [--allow-ptrace] Allow SYS_PTRACE on daemon container +* [--container-init] Run podman/docker with `--init` enter @@ -346,6 +352,11 @@ list daemon instances known to cephadm on **this** host:: }, ... +Arguments: + +* [--no-detail] Do not include daemon status +* [--legacy-dir LEGACY_DIR] Base directory for legacy daemon data + logs ---- @@ -357,6 +368,21 @@ This is similar to:: journalctl -u mgr.myhost.ysubfo +Can also specify additional journal arguments:: + + cephadm logs --name mgr.myhost.ysubfo -- -n 20 # last 20 lines + cephadm logs --name mgr.myhost.ysubfo -- -f # follow the log + + +Positional arguments: + +* [command] command (optional) + +Arguments: + +* [--fsid FSID] cluster FSID +* [--name NAME, -n NAME] daemon name (type.id) + prepare-host ------------ diff --git a/ceph/doc/man/8/osdmaptool.rst b/ceph/doc/man/8/osdmaptool.rst index 81a962a00..131e981fa 100644 --- a/ceph/doc/man/8/osdmaptool.rst +++ b/ceph/doc/man/8/osdmaptool.rst @@ -131,6 +131,10 @@ Options clears pg_temp and primary_temp variables. +.. option:: --clean-temps + + clean pg_temps. + .. option:: --health dump health checks diff --git a/ceph/doc/man/8/rbd.rst b/ceph/doc/man/8/rbd.rst index 52a684fcd..cc920a1af 100644 --- a/ceph/doc/man/8/rbd.rst +++ b/ceph/doc/man/8/rbd.rst @@ -762,7 +762,7 @@ Per client instance `rbd device map` options: Per mapping (block device) `rbd device map` options: -* rw - Map the image read-write (default). +* rw - Map the image read-write (default). Overridden by --read-only. * ro - Map the image read-only. Equivalent to --read-only. @@ -772,6 +772,7 @@ Per mapping (block device) `rbd device map` options: discards (since 4.9). * exclusive - Disable automatic exclusive lock transitions (since 4.12). + Equivalent to --exclusive. * lock_timeout=x - A timeout on waiting for the acquisition of exclusive lock (since 4.17, default is 0 seconds, meaning no timeout). @@ -843,12 +844,25 @@ Per mapping (block device) `rbd device map` options: backend that the data is incompressible, disabling compression in aggressive mode (since 5.8). +* udev - Wait for udev device manager to finish executing all matching + "add" rules and release the device before exiting (default). This option + is not passed to the kernel. + +* noudev - Don't wait for udev device manager. When enabled, the device may + not be fully usable immediately on exit. + `rbd device unmap` options: * force - Force the unmapping of a block device that is open (since 4.9). The driver will wait for running requests to complete and then unmap; requests sent to the driver after initiating the unmap will be failed. +* udev - Wait for udev device manager to finish executing all matching + "remove" rules and clean up after the device before exiting (default). + This option is not passed to the kernel. + +* noudev - Don't wait for udev device manager. + Examples ======== diff --git a/ceph/doc/mgr/dashboard.rst b/ceph/doc/mgr/dashboard.rst index be298a569..7d815d8b9 100644 --- a/ceph/doc/mgr/dashboard.rst +++ b/ceph/doc/mgr/dashboard.rst @@ -468,6 +468,14 @@ More details can be found in the documentation of the :ref:`mgr-prometheus`. [security] allow_embedding = true +Enabling RBD-Image monitoring +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Due to performance reasons, monitoring of RBD images is disabled by default. 
For +more information please see :ref:`prometheus-rbd-io-statistics`. If disabled, +the overview and details dashboards will stay empty in Grafana and the metrics +will not be visible in Prometheus. + After you have set up Grafana and Prometheus, you will need to configure the connection information that the Ceph Dashboard will use to access Grafana. @@ -1086,6 +1094,16 @@ When configuring the Ceph Dashboard with multiple NFS-Ganesha clusters, the Web UI will automatically allow to choose to which cluster an export belongs. +Support for NFS-Ganesha Clusters Deployed by the Orchestrator +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Ceph Dashboard can be used to manage NFS-Ganesha clusters deployed by the +Orchestrator. It can detect the clusters automatically. For more details +on deploying NFS-Ganesha clusters with the Orchestrator, please see :ref:`orchestrator-cli-stateless-services`. +Or particularly, see :ref:`deploy-cephadm-nfs-ganesha` for how to deploy +NFS-Ganesha clusters with the Cephadm backend. + + Plug-ins -------- diff --git a/ceph/doc/mgr/orchestrator.rst b/ceph/doc/mgr/orchestrator.rst index 5da26429f..fba1b5ce0 100644 --- a/ceph/doc/mgr/orchestrator.rst +++ b/ceph/doc/mgr/orchestrator.rst @@ -39,15 +39,14 @@ for the orchestrator module is needed. The relation between the names is the following: -* A *service* has a specfic *service type* +* A *service* has a specific *service type* * A *daemon* is a physical instance of a *service type* .. note:: Orchestrator modules may only implement a subset of the commands listed below. - Also, the implementation of the commands are orchestrator module dependent and will - differ between implementations. + Also, the implementation of the commands may differ between modules. Status ====== @@ -56,8 +55,8 @@ Status ceph orch status -Show current orchestrator mode and high-level status (whether the module able -to talk to it) +Show current orchestrator mode and high-level status (whether the orchestrator +plugin is available and operational) Host Management =============== @@ -97,7 +96,7 @@ Many hosts can be added at once using addr: node-02 hostname: node-02 -This can be combined with service specifications (below) to create a cluster spec file to deploy a whole cluster in one command. see ``cephadm bootstrap --apply-spec`` also to do this during bootstrap. Cluster SSH Keys must be copied to hosts prior. +This can be combined with service specifications (below) to create a cluster spec file to deploy a whole cluster in one command. see ``cephadm bootstrap --apply-spec`` also to do this during bootstrap. Cluster SSH Keys must be copied to hosts prior to adding them. OSD Management ============== @@ -132,7 +131,7 @@ Example:: Erase Devices (Zap Devices) --------------------------- -Erase (zap) a device so that it can be resued. ``zap`` calls ``ceph-volume zap`` on the remote host. +Erase (zap) a device so that it can be reused. ``zap`` calls ``ceph-volume zap`` on the remote host. :: @@ -144,7 +143,7 @@ Example command:: .. note:: Cephadm orchestrator will automatically deploy drives that match the DriveGroup in your OSDSpec if the unmanaged flag is unset. - For example, if you use the ``all-available-devices`` option when creating OSD's, when you ``zap`` a device the cephadm orchestrator will automatically create a new OSD in the device . 
+ For example, if you use the ``all-available-devices`` option when creating OSDs, when you ``zap`` a device the cephadm orchestrator will automatically create a new OSD in the device . To disable this behavior, see :ref:`orchestrator-cli-create-osds`. .. _orchestrator-cli-create-osds: @@ -160,11 +159,11 @@ Another way of doing it is using ``apply`` interface:: ceph orch apply osd -i [--dry-run] -Where the ``json_file/yaml_file`` is a DriveGroup specification. +where the ``json_file/yaml_file`` is a DriveGroup specification. For a more in-depth guide to DriveGroups please refer to :ref:`drivegroups` -Along with ``apply`` interface if ``dry-run`` option is used, it will present a -preview of what will happen. +``dry-run`` will cause the orchestrator to present a preview of what will happen +without actually creating the OSDs. Example:: @@ -174,23 +173,19 @@ Example:: all-available-devices node2 /dev/vdc - - all-available-devices node3 /dev/vdd - - -.. note:: - Example output from cephadm orchestrator - When the parameter ``all-available-devices`` or a DriveGroup specification is used, a cephadm service is created. -This service guarantees that all available devices or devices included in the DriveGroup will be used for OSD's. -Take into account the implications of this behavior, which is automatic and enabled by default. - -For example: +This service guarantees that all available devices or devices included in the DriveGroup will be used for OSDs. +Note that the effect of ``--all-available-devices`` is persistent; that is, drives which are added to the system +or become available (say, by zapping) after the command is complete will be automatically found and added to the cluster. -After using:: +That is, after using:: ceph orch apply osd --all-available-devices -* If you add new disks to the cluster they will automatically be used to create new OSD's. +* If you add new disks to the cluster they will automatically be used to create new OSDs. * A new OSD will be created automatically if you remove an OSD and clean the LVM physical volume. -If you want to avoid this behavior (disable automatic creation of OSD in available devices), use the ``unmanaged`` parameter:: +If you want to avoid this behavior (disable automatic creation of OSD on available devices), use the ``unmanaged`` parameter:: ceph orch apply osd --all-available-devices --unmanaged=true @@ -198,7 +193,7 @@ Remove an OSD ------------- :: - ceph orch osd rm [--replace] [--force] + ceph orch osd rm [--replace] [--force] Evacuates PGs from an OSD and removes it from the cluster. @@ -219,7 +214,7 @@ You can query the state of the operation with:: 4 cephadm-dev started 42 False True 2020-07-17 13:01:45.162158 -When no PGs are left on the osd, it will be decommissioned and removed from the cluster. +When no PGs are left on the OSD, it will be decommissioned and removed from the cluster. .. note:: After removing an OSD, if you wipe the LVM physical volume in the device used by the removed OSD, a new OSD will be created. @@ -228,7 +223,7 @@ When no PGs are left on the osd, it will be decommissioned and removed from the Stopping OSD Removal -------------------- -You can stop the operation with +You can stop the queued OSD removal operation with :: @@ -239,7 +234,7 @@ Example:: # ceph orch osd rm stop 4 Stopped OSD(s) removal -This will reset the initial state of the OSD and remove it from the queue. +This will reset the initial state of the OSD and take it off the removal queue. 
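A minimal sketch of the complete removal workflow described above, assuming the example OSD id 4 used in this section and that the ``ceph orch osd rm status`` subcommand implied by the status output shown earlier is available in this release::

    # queue OSD 4 for draining and removal
    ceph orch osd rm 4

    # check the progress of the drain; repeat until the OSD no longer appears
    ceph orch osd rm status

    # abort the queued removal if the OSD should be kept after all
    ceph orch osd rm stop 4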
Replace an OSD @@ -255,16 +250,16 @@ Example:: This follows the same procedure as the "Remove OSD" part with the exception that the OSD is not permanently removed -from the crush hierarchy, but is assigned a 'destroyed' flag. +from the CRUSH hierarchy, but is assigned a 'destroyed' flag. **Preserving the OSD ID** -The previously set the 'destroyed' flag is used to determined osd ids that will be reused in the next osd deployment. +The previously-set 'destroyed' flag is used to determine OSD ids that will be reused in the next OSD deployment. -If you use OSDSpecs for osd deployment, your newly added disks will be assigned with the osd ids of their replaced -counterpart, granted the new disk still match the OSDSpecs. +If you use OSDSpecs for OSD deployment, your newly added disks will be assigned the OSD ids of their replaced +counterparts, assuming the new disks still match the OSDSpecs. -For assistance in this process you can use the '--dry-run' feature: +For assistance in this process you can use the '--dry-run' feature. Tip: The name of your OSDSpec can be retrieved from **ceph orch ls** @@ -279,8 +274,8 @@ If this matches your anticipated behavior, just omit the --dry-run flag to execu .. - Blink Device Lights - ^^^^^^^^^^^^^^^^^^^ + Turn On Device Lights + ^^^^^^^^^^^^^^^^^^^^^ :: ceph orch device ident-on @@ -302,8 +297,8 @@ If this matches your anticipated behavior, just omit the --dry-run flag to execu ceph orch osd fault-on {primary,journal,db,wal,all} ceph orch osd fault-off {primary,journal,db,wal,all} - Where ``journal`` is the filestore journal, ``wal`` is the write ahead log of - bluestore and ``all`` stands for all devices associated with the osd + where ``journal`` is the filestore journal device, ``wal`` is the bluestore + write ahead log device, and ``all`` stands for all devices associated with the OSD Monitor and manager management @@ -314,13 +309,17 @@ error if it doesn't know how to do this transition. Update the number of monitor hosts:: - ceph orch apply mon [host, host:network...] [--dry-run] + ceph orch apply mon --placement= [--dry-run] + +Where ``placement`` is a :ref:`orchestrator-cli-placement-spec`. Each host can optionally specify a network for the monitor to listen on. Update the number of manager hosts:: - ceph orch apply mgr [host...] [--dry-run] + ceph orch apply mgr --placement= [--dry-run] + +Where ``placement`` is a :ref:`orchestrator-cli-placement-spec`. .. .. note:: @@ -370,24 +369,27 @@ the id is the numeric OSD ID, for MDS services it is the file system name:: .. _orchestrator-cli-cephfs: -Depoying CephFS -=============== +Deploying CephFS +================ In order to set up a :term:`CephFS`, execute:: ceph fs volume create -Where ``name`` is the name of the CephFS, ``placement`` is a +where ``name`` is the name of the CephFS and ``placement`` is a :ref:`orchestrator-cli-placement-spec`. This command will create the required Ceph pools, create the new CephFS, and deploy mds servers. + +.. _orchestrator-cli-stateless-services: + Stateless services (MDS/RGW/NFS/rbd-mirror/iSCSI) ================================================= -The orchestrator is not responsible for configuring the services. Please look into the corresponding -documentation for details. +(Please note: The orchestrator will not configure the services. Please look into the corresponding +documentation for service configuration details.) 
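Before creating, growing, or shrinking any of these services, it can be useful to check what the orchestrator is already running. A small sketch, assuming a cephadm-managed cluster; the exact output columns may vary between releases::

    # list the services the orchestrator currently manages
    ceph orch ls

    # list the individual daemons backing those services
    ceph orch ps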
The ``name`` parameter is an identifier of the group of instances: @@ -401,7 +403,7 @@ Creating/growing/shrinking/removing services:: ceph orch apply nfs [--namespace=] [--placement=] [--dry-run] ceph orch rm [--force] -Where ``placement`` is a :ref:`orchestrator-cli-placement-spec`. +where ``placement`` is a :ref:`orchestrator-cli-placement-spec`. e.g., ``ceph orch apply mds myfs --placement="3 host1 host2 host3"`` @@ -409,13 +411,91 @@ Service Commands:: ceph orch +Deploying custom containers +=========================== + +The orchestrator enables custom containers to be deployed using a YAML file. +A corresponding :ref:`orchestrator-cli-service-spec` must look like: + +.. code-block:: yaml + + service_type: container + service_id: foo + placement: + ... + image: docker.io/library/foo:latest + entrypoint: /usr/bin/foo + uid: 1000 + gid: 1000 + args: + - "--net=host" + - "--cpus=2" + ports: + - 8080 + - 8443 + envs: + - SECRET=mypassword + - PORT=8080 + - PUID=1000 + - PGID=1000 + volume_mounts: + CONFIG_DIR: /etc/foo + bind_mounts: + - ['type=bind', 'source=lib/modules', 'destination=/lib/modules', 'ro=true'] + dirs: + - CONFIG_DIR + files: + CONFIG_DIR/foo.conf: + - refresh=true + - username=xyz + - "port: 1234" + +where the properties of a service specification are: + +* ``service_id`` + A unique name of the service. +* ``image`` + The name of the Docker image. +* ``uid`` + The UID to use when creating directories and files in the host system. +* ``gid`` + The GID to use when creating directories and files in the host system. +* ``entrypoint`` + Overwrite the default ENTRYPOINT of the image. +* ``args`` + A list of additional Podman/Docker command line arguments. +* ``ports`` + A list of TCP ports to open in the host firewall. +* ``envs`` + A list of environment variables. +* ``bind_mounts`` + When you use a bind mount, a file or directory on the host machine + is mounted into the container. Relative `source=...` paths will be + located below `/var/lib/ceph//`. +* ``volume_mounts`` + When you use a volume mount, a new directory is created within + Docker’s storage directory on the host machine, and Docker manages + that directory’s contents. Relative source paths will be located below + `/var/lib/ceph//`. +* ``dirs`` + A list of directories that are created below + `/var/lib/ceph//`. +* ``files`` + A dictionary, where the key is the relative path of the file and the + value the file content. The content must be double quoted when using + a string. Use '\\n' for line breaks in that case. Otherwise define + multi-line content as list of strings. The given files will be created + below the directory `/var/lib/ceph//`. + The absolute path of the directory where the file will be created must + exist. Use the `dirs` property to create them if necessary. + .. _orchestrator-cli-service-spec: Service Specification ===================== -As *Service Specification* is a data structure often represented as YAML -to specify the deployment of services. For example: +A *Service Specification* is a data structure represented as YAML +to specify the deployment of services. For example: .. code-block:: yaml @@ -426,30 +506,33 @@ to specify the deployment of services. For example: - host1 - host2 - host3 - spec: ... unmanaged: false - -Where the properties of a service specification are the following: - -* ``service_type`` is the type of the service. 
Needs to be either a Ceph - service (``mon``, ``crash``, ``mds``, ``mgr``, ``osd`` or - ``rbd-mirror``), a gateway (``nfs`` or ``rgw``), or part of the - monitoring stack (``alertmanager``, ``grafana``, ``node-exporter`` or - ``prometheus``). -* ``service_id`` is the name of the service. Omit the service time -* ``placement`` is a :ref:`orchestrator-cli-placement-spec` -* ``spec``: additional specifications for a specific service. -* ``unmanaged``: If set to ``true``, the orchestrator will not deploy nor - remove any daemon associated with this service. Placement and all other - properties will be ignored. This is useful, if this service should not - be managed temporarily. - -Each service type can have different requirements for the spec. + ... + +where the properties of a service specification are: + +* ``service_type`` + The type of the service. Needs to be either a Ceph + service (``mon``, ``crash``, ``mds``, ``mgr``, ``osd`` or + ``rbd-mirror``), a gateway (``nfs`` or ``rgw``), part of the + monitoring stack (``alertmanager``, ``grafana``, ``node-exporter`` or + ``prometheus``) or (``container``) for custom containers. +* ``service_id`` + The name of the service. +* ``placement`` + See :ref:`orchestrator-cli-placement-spec`. +* ``unmanaged`` + If set to ``true``, the orchestrator will not deploy nor + remove any daemon associated with this service. Placement and all other + properties will be ignored. This is useful, if this service should not + be managed temporarily. + +Each service type can have additional service specific properties. Service specifications of type ``mon``, ``mgr``, and the monitoring -types do not require a ``service_id`` +types do not require a ``service_id``. -A service of type ``nfs`` requires a pool name and contain +A service of type ``nfs`` requires a pool name and may contain an optional namespace: .. code-block:: yaml @@ -464,13 +547,13 @@ an optional namespace: pool: mypool namespace: mynamespace -Where ``pool`` is a RADOS pool where NFS client recovery data is stored +where ``pool`` is a RADOS pool where NFS client recovery data is stored and ``namespace`` is a RADOS namespace where NFS client recovery data is stored in the pool. -A service of type ``osd`` is in detail described in :ref:`drivegroups` +A service of type ``osd`` is described in :ref:`drivegroups` -Many service specifications can then be applied at once using +Many service specifications can be applied at once using ``ceph orch apply -i`` by submitting a multi-document YAML file:: cat <| Cluster Map | +--------+ +---------------+ @@ -217,7 +217,8 @@ these capabilities. The following diagram provides a high-level flow for the initial connection. -.. ditaa:: +---------+ +---------+ +.. ditaa:: + +---------+ +---------+ | Client | | Monitor | +---------+ +---------+ | | @@ -521,7 +522,8 @@ functionality includes: - Snapshot pools, list snapshots, etc. -.. ditaa:: +---------+ +---------+ +---------+ +.. ditaa:: + +---------+ +---------+ +---------+ | Client | | Monitor | | OSD | +---------+ +---------+ +---------+ | | | diff --git a/ceph/doc/rados/configuration/ceph-conf.rst b/ceph/doc/rados/configuration/ceph-conf.rst index 8f0a32d0c..6a8d47723 100644 --- a/ceph/doc/rados/configuration/ceph-conf.rst +++ b/ceph/doc/rados/configuration/ceph-conf.rst @@ -67,6 +67,11 @@ configuration, they may need to be stored locally on the node and set in a local configuration file. 
These options include: - ``mon_host``, the list of monitors for the cluster + - ``mon_host_override``, the list of monitors for the cluster to + **initially** contact when beginning a new instance of communication with the + Ceph cluster. This overrides the known monitor list derived from MonMap + updates sent to older Ceph instances (like librados cluster handles). It is + expected this option is primarily useful for debugging. - ``mon_dns_serv_name`` (default: `ceph-mon`), the name of the DNS SRV record to check to identify the cluster monitors via DNS - ``mon_data``, ``osd_data``, ``mds_data``, ``mgr_data``, and @@ -326,6 +331,8 @@ like secret = "i love \# and \[" +.. _ceph-conf-database: + Monitor configuration database ============================== diff --git a/ceph/doc/rados/configuration/mon-config-ref.rst b/ceph/doc/rados/configuration/mon-config-ref.rst index dbfc20b90..e93cd28b7 100644 --- a/ceph/doc/rados/configuration/mon-config-ref.rst +++ b/ceph/doc/rados/configuration/mon-config-ref.rst @@ -34,8 +34,7 @@ Monitors can query the most recent version of the cluster map during sync operations. Ceph Monitors leverage the key/value store's snapshots and iterators (using leveldb) to perform store-wide synchronization. -.. ditaa:: - +.. ditaa:: /-------------\ /-------------\ | Monitor | Write Changes | Paxos | | cCCC +-------------->+ cCCC | @@ -505,7 +504,6 @@ Ceph Clients to read and write data. So the Ceph Storage Cluster's operating capacity is 95TB, not 99TB. .. ditaa:: - +--------+ +--------+ +--------+ +--------+ +--------+ +--------+ | Rack 1 | | Rack 2 | | Rack 3 | | Rack 4 | | Rack 5 | | Rack 6 | | cCCC | | cF00 | | cCCC | | cCCC | | cCCC | | cCCC | @@ -636,7 +634,8 @@ fallen behind the other monitors. The requester asks the leader to synchronize, and the leader tells the requester to synchronize with a provider. -.. ditaa:: +-----------+ +---------+ +----------+ +.. ditaa:: + +-----------+ +---------+ +----------+ | Requester | | Leader | | Provider | +-----------+ +---------+ +----------+ | | | diff --git a/ceph/doc/rados/configuration/mon-osd-interaction.rst b/ceph/doc/rados/configuration/mon-osd-interaction.rst index a7324ebb0..6ef662655 100644 --- a/ceph/doc/rados/configuration/mon-osd-interaction.rst +++ b/ceph/doc/rados/configuration/mon-osd-interaction.rst @@ -34,7 +34,8 @@ and ``[osd]`` or ``[global]`` section of your Ceph configuration file, or by setting the value at runtime. -.. ditaa:: +---------+ +---------+ +.. ditaa:: + +---------+ +---------+ | OSD 1 | | OSD 2 | +---------+ +---------+ | | @@ -89,7 +90,9 @@ and ``mon osd reporter subtree level`` settings under the ``[mon]`` section of your Ceph configuration file, or by setting the value at runtime. -.. ditaa:: +---------+ +---------+ +---------+ +.. ditaa:: + + +---------+ +---------+ +---------+ | OSD 1 | | OSD 2 | | Monitor | +---------+ +---------+ +---------+ | | | @@ -118,7 +121,9 @@ Ceph Monitor heartbeat interval by adding an ``osd mon heartbeat interval`` setting under the ``[osd]`` section of your Ceph configuration file, or by setting the value at runtime. -.. ditaa:: +---------+ +---------+ +-------+ +---------+ +.. ditaa:: + + +---------+ +---------+ +-------+ +---------+ | OSD 1 | | OSD 2 | | OSD 3 | | Monitor | +---------+ +---------+ +-------+ +---------+ | | | | @@ -161,7 +166,9 @@ interval max`` setting under the ``[osd]`` section of your Ceph configuration file, or by setting the value at runtime. -.. ditaa:: +---------+ +---------+ +.. 
ditaa:: + + +---------+ +---------+ | OSD 1 | | Monitor | +---------+ +---------+ | | diff --git a/ceph/doc/rados/configuration/network-config-ref.rst b/ceph/doc/rados/configuration/network-config-ref.rst index 41da0a175..bd49a87b3 100644 --- a/ceph/doc/rados/configuration/network-config-ref.rst +++ b/ceph/doc/rados/configuration/network-config-ref.rst @@ -112,7 +112,7 @@ Each Ceph OSD Daemon on a Ceph Node may use up to four ports: #. One for sending data to other OSDs. #. Two for heartbeating on each interface. -.. ditaa:: +.. ditaa:: /---------------\ | OSD | | +---+----------------+-----------+ diff --git a/ceph/doc/rados/operations/cache-tiering.rst b/ceph/doc/rados/operations/cache-tiering.rst index c825c22c3..237b6e3c9 100644 --- a/ceph/doc/rados/operations/cache-tiering.rst +++ b/ceph/doc/rados/operations/cache-tiering.rst @@ -13,7 +13,7 @@ tier. So the cache tier and the backing storage tier are completely transparent to Ceph clients. -.. ditaa:: +.. ditaa:: +-------------+ | Ceph Client | +------+------+ diff --git a/ceph/doc/rados/operations/devices.rst b/ceph/doc/rados/operations/devices.rst index 98691a043..5e35a3a67 100644 --- a/ceph/doc/rados/operations/devices.rst +++ b/ceph/doc/rados/operations/devices.rst @@ -47,6 +47,23 @@ By default, the `identification` light is used. ceph orch status +The command used behind the scenes to blink the drive LEDs is `lsmcli`. If you need +to customize this command, you can configure it via a Jinja2 template:: + + ceph config-key set mgr/cephadm/blink_device_light_cmd "