From a4b75251e677cd644c8d7771f62d4819aabe7d6c Mon Sep 17 00:00:00 2001
From: Thomas Lamprecht
Date: Wed, 8 Dec 2021 13:07:09 +0100
Subject: [PATCH] import ceph 16.2.7

Signed-off-by: Thomas Lamprecht
---
 ceph/.github/labeler.yml | 25 +
 ceph/.github/workflows/pr-triage.yml | 13 +-
 ceph/CMakeLists.txt | 2 +-
 ceph/PendingReleaseNotes | 47 +
 ceph/admin/build-doc | 7 +-
 ceph/admin/doc-requirements.txt | 2 +-
 ceph/ceph.spec | 69 +-
 ceph/ceph.spec.in | 63 +-
 ceph/changelog.upstream | 6 +
 ceph/cmake/modules/Buildpmem.cmake | 46 +-
 ceph/cmake/modules/Findpmem.cmake | 10 +
 ceph/debian/ceph-base.install | 1 +
 ceph/debian/ceph-osd.install | 1 -
 ceph/debian/control | 8 +-
 ceph/debian/rules | 2 +-
 ceph/doc/ceph-volume/lvm/activate.rst | 8 +
 ceph/doc/cephadm/adoption.rst | 6 +
 ceph/doc/cephadm/host-management.rst | 31 +-
 ceph/doc/cephadm/index.rst | 12 +-
 ceph/doc/cephadm/install.rst | 2 +-
 ceph/doc/cephadm/operations.rst | 83 +-
 .../{ => services}/custom-container.rst | 35 +-
 .../index.rst} | 202 +-
 ceph/doc/cephadm/{ => services}/iscsi.rst | 13 +-
 ceph/doc/cephadm/{ => services}/mds.rst | 3 +
 ceph/doc/cephadm/services/mgr.rst | 37 +
 ceph/doc/cephadm/{ => services}/mon.rst | 10 +-
 .../doc/cephadm/{ => services}/monitoring.rst | 190 +-
 ceph/doc/cephadm/{ => services}/nfs.rst | 9 +-
 ceph/doc/cephadm/{ => services}/osd.rst | 173 +-
 ceph/doc/cephadm/{ => services}/rgw.rst | 41 +-
 ceph/doc/cephadm/troubleshooting.rst | 25 +
 ceph/doc/cephadm/upgrade.rst | 10 +-
 ceph/doc/cephfs/cephfs-mirroring.rst | 2 +-
 ceph/doc/cephfs/cephfs-shell.rst | 2 +-
 ceph/doc/cephfs/fs-nfs-exports.rst | 390 -
 ceph/doc/cephfs/fs-volumes.rst | 34 +
 ceph/doc/cephfs/index.rst | 1 -
 ceph/doc/cephfs/multimds.rst | 4 +
 ceph/doc/cephfs/nfs.rst | 22 +-
 ceph/doc/dev/cephfs-mirroring.rst | 4 +-
 ceph/doc/dev/developer_guide/dash-devel.rst | 13 +-
 .../dev/developer_guide/tests-unit-tests.rst | 1 -
 ceph/doc/dev/vstart-ganesha.rst | 45 +
 ceph/doc/man/8/cephfs-top.rst | 24 +
 ceph/doc/mgr/dashboard.rst | 93 +-
 ceph/doc/mgr/index.rst | 1 +
 ceph/doc/mgr/nfs.rst | 606 +
 ceph/doc/mgr/orchestrator_modules.rst | 4 +
 ceph/doc/mgr/prometheus.rst | 17 +-
 ceph/doc/mgr/rook.rst | 8 +-
 .../rados/configuration/mclock-config-ref.rst | 239 +-
 ceph/doc/rados/index.rst | 2 +
 ceph/doc/rados/operations/control.rst | 2 +
 ceph/doc/rados/operations/index.rst | 2 +
 .../doc/rados/operations/placement-groups.rst | 18 +-
 ceph/doc/rados/operations/stretch-mode.rst | 19 +-
 .../troubleshooting/troubleshooting-mon.rst | 2 +
 ceph/doc/radosgw/config-ref.rst | 7 +
 ceph/doc/radosgw/nfs.rst | 35 +-
 ceph/doc/radosgw/notifications.rst | 4 +-
 .../radosgw/s3-notification-compatibility.rst | 4 +
 ceph/doc/start/documenting-ceph.rst | 16 +-
 ceph/doc_deps.deb.txt | 3 +-
 ceph/examples/boto3/README.md | 5 -
 ceph/install-deps.sh | 3 +-
 ceph/make-dist | 21 +
 .../grafana/dashboards/ceph-cluster.json | 25 +-
 .../grafana/dashboards/cephfs-overview.json | 608 +-
 .../grafana/dashboards/host-details.json | 2366 ++--
 .../grafana/dashboards/hosts-overview.json | 1658 ++-
 .../jsonnet/grafana_dashboards.jsonnet | 718 +-
 .../dashboards/osd-device-details.json | 1590 +--
 .../grafana/dashboards/osds-overview.json | 1675 ++-
 .../grafana/dashboards/pool-detail.json | 1300 +-
 .../grafana/dashboards/pool-overview.json | 3009 +++--
 .../grafana/dashboards/radosgw-detail.json | 964 +-
 .../grafana/dashboards/radosgw-overview.json | 1265 +-
 .../dashboards/radosgw-sync-overview.json | 9 +-
 .../grafana/dashboards/rbd-details.json | 800 +-
 .../grafana/dashboards/rbd-overview.json | 1345 +-
 .../prometheus/alerts/ceph_default_alerts.yml | 2 +-
 .../prometheus/alerts/test_alerts.yml | 20 +-
 .../overrides/{frag_enable.yaml => frag.yaml} | 4 +-
 .../centos_8.stream_container_tools.yaml | 14 +
 .../distros/podman/rhel_8.3_kubic_stable.yaml | 18 -
 .../podman/rhel_8.4_container_tools_3.0.yaml | 13 +
 .../rhel_8.4_container_tools_rhel8.yaml | 13 +
 .../podman/ubuntu_18.04_kubic_stable.yaml | 13 -
 .../podman/ubuntu_20.04_kubic_stable.yaml | 13 -
 .../podman/ubuntu_20.04_kubic_testing.yaml | 13 -
 ceph/qa/standalone/scrub/osd-scrub-repair.sh | 8 +-
 .../fs/32bits/overrides/frag_enable.yaml | 1 -
 .../overrides/frag_enable.yaml | 1 -
 .../suites/fs/full/overrides/frag_enable.yaml | 1 -
 .../fs/functional/overrides/frag_enable.yaml | 1 -
 .../fs/libcephfs/overrides/frag_enable.yaml | 1 -
 ceph/qa/suites/fs/libcephfs/tasks/libcephfs/+ | 0
 .../suites/fs/libcephfs/tasks/libcephfs/.qa | 1 +
 .../fs/libcephfs/tasks/libcephfs/frag.yaml | 1 +
 .../{libcephfs.yaml => libcephfs/test.yaml} | 0
 ceph/qa/suites/fs/mirror-ha/% | 0
 ceph/qa/suites/fs/mirror-ha/.qa | 1 +
 ceph/qa/suites/fs/mirror-ha/begin.yaml | 1 +
 .../cephfs-mirror/three-per-cluster.yaml | 12 +
 ceph/qa/suites/fs/mirror-ha/clients/+ | 0
 ceph/qa/suites/fs/mirror-ha/clients/.qa | 1 +
 .../suites/fs/mirror-ha/clients/mirror.yaml | 32 +
 ceph/qa/suites/fs/mirror-ha/cluster/+ | 0
 .../suites/fs/mirror-ha/cluster/1-node.yaml | 20 +
 ceph/qa/suites/fs/mirror-ha/objectstore/.qa | 1 +
 .../objectstore/bluestore-bitmap.yaml | 1 +
 ceph/qa/suites/fs/mirror-ha/overrides/+ | 0
 ceph/qa/suites/fs/mirror-ha/overrides/.qa | 1 +
 .../mirror-ha/overrides/whitelist_health.yaml | 14 +
 .../fs/mirror-ha/supported-random-distro$ | 1 +
 ceph/qa/suites/fs/mirror-ha/workloads/.qa | 1 +
 .../workloads/cephfs-mirror-ha-workunit.yaml | 37 +
 ceph/qa/suites/fs/mirror/tasks/mirror.yaml | 2 +-
 .../mixed-clients/overrides/frag_enable.yaml | 1 -
 .../fs/multiclient/overrides/frag_enable.yaml | 1 -
 .../fs/multifs/overrides/frag_enable.yaml | 1 -
 ceph/qa/suites/fs/multifs/tasks/failover.yaml | 2 +
 .../fs/permission/overrides/frag_enable.yaml | 1 -
 .../fs/shell/overrides/frag_enable.yaml | 1 -
 .../fs/snaps/overrides/frag_enable.yaml | 1 -
 .../fs/thrash/multifs/overrides/frag.yaml | 1 +
 .../thrash/multifs/overrides/frag_enable.yaml | 1 -
 .../fs/thrash/workloads/overrides/frag.yaml | 1 +
 .../workloads/overrides/frag_enable.yaml | 1 -
 .../suites/fs/traceless/overrides/frag.yaml | 1 +
 .../fs/traceless/overrides/frag_enable.yaml | 1 -
 .../old_client/overrides/frag_enable.yaml | 1 -
 .../overrides/frag_enable.yaml | 1 -
 .../suites/fs/upgrade/mds_upgrade_sequence/% | 0
 .../fs/upgrade/mds_upgrade_sequence/.qa | 1 +
 .../bluestore-bitmap.yaml | 1 +
 .../centos_8.stream_container_tools.yaml | 1 +
 .../fs/upgrade/mds_upgrade_sequence/conf | 1 +
 .../upgrade/mds_upgrade_sequence/overrides/% | 0
 .../mds_upgrade_sequence/overrides/.qa | 1 +
 .../overrides/pg-warn.yaml | 5 +
 .../overrides/whitelist_health.yaml | 1 +
 .../whitelist_wrongly_marked_down.yaml | 1 +
 .../upgrade/mds_upgrade_sequence/roles.yaml | 11 +
 .../fs/upgrade/mds_upgrade_sequence/tasks/% | 0
 .../fs/upgrade/mds_upgrade_sequence/tasks/.qa | 1 +
 .../mds_upgrade_sequence/tasks/0-from/.qa | 1 +
 .../tasks/0-from/v16.2.4.yaml | 30 +
 .../mds_upgrade_sequence/tasks/1-volume/% | 0
 .../mds_upgrade_sequence/tasks/1-volume/.qa | 1 +
 .../tasks/1-volume/0-create.yaml | 5 +
 .../tasks/1-volume/1-ranks/.qa | 1 +
 .../tasks/1-volume/1-ranks/1.yaml | 4 +
 .../tasks/1-volume/1-ranks/2.yaml | 4 +
 .../tasks/1-volume/2-allow_standby_replay/.qa | 1 +
 .../1-volume/2-allow_standby_replay/no.yaml | 4 +
 .../1-volume/2-allow_standby_replay/yes.yaml | 4 +
 .../tasks/1-volume/3-verify.yaml | 7 +
 .../mds_upgrade_sequence/tasks/2-client.yaml | 3 +
 .../tasks/3-upgrade-with-workload.yaml | 33 +
 .../mds_upgrade_sequence/tasks/4-verify.yaml | 5 +
 .../import-legacy/overrides/frag_enable.yaml | 1 -
 .../fs/valgrind/mirror/tasks/mirror.yaml | 2 +-
 .../fs/verify/overrides/frag_enable.yaml | 1 -
 .../fs/volumes/overrides/frag_enable.yaml | 1 -
 .../qa/suites/fs/workload/overrides/frag.yaml | 1 +
 .../fs/workload/overrides/frag_enable.yaml | 1 -
 .../centos_8.3_container_tools_3.0.yaml | 1 +
 .../suites/orch/cephadm/mds_upgrade_sequence | 1 +
 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/% | 0
 .../suites/orch/cephadm/mgr-nfs-upgrade/.qa | 1 +
 .../orch/cephadm/mgr-nfs-upgrade/0-distro/.qa | 1 +
 .../centos_8.3_container_tools_3.0.yaml | 1 +
 .../centos_8.stream_container_tools.yaml | 1 +
 .../mgr-nfs-upgrade/1-bootstrap/16.2.4.yaml | 8 +
 .../mgr-nfs-upgrade/1-bootstrap/16.2.5.yaml | 6 +
 .../mgr-nfs-upgrade/1-bootstrap/octopus.yaml | 8 +
 .../orch/cephadm/mgr-nfs-upgrade/1-start.yaml | 29 +
 .../orch/cephadm/mgr-nfs-upgrade/2-nfs.yaml | 29 +
 .../3-upgrade-with-workload.yaml | 41 +
 .../orch/cephadm/mgr-nfs-upgrade/4-final.yaml | 10 +
 .../orch/cephadm/osds/2-ops/rm-zap-flag.yaml | 15 +
 .../2-services/nfs-ingress-rgw-bucket.yaml | 89 +
 .../2-services/nfs-ingress-rgw-user.yaml | 90 +
 .../2-services/nfs-ingress.yaml | 2 +-
 .../2-services/nfs-ingress2.yaml | 26 +-
 .../centos_8.2_container_tools_3.0.yaml | 0
 .../centos_8.stream_container_tools.yaml | 1 +
 .../distro/rhel_8.4_container_tools_3.0.yaml | 1 +
 .../rhel_8.4_container_tools_rhel8.yaml | 1 +
 ...tart-centos_8.stream_container-tools.yaml} | 21 +-
 .../orch/cephadm/upgrade/5-upgrade-ls.yaml | 6 +
 .../centos_8.stream_container_tools.yaml | 1 +
 .../orch/cephadm/workunits/task/test_nfs.yaml | 17 +
 .../cephadm/workunits/task/test_orch_cli.yaml | 1 -
 ceph/qa/suites/perf-basic/ubuntu_18.04.yaml | 1 -
 ceph/qa/suites/perf-basic/ubuntu_latest.yaml | 1 +
 .../workloads/cosbench_64K_write.yaml | 30 -
 .../rados/dashboard/tasks/dashboard.yaml | 1 +
 .../rados/mgr/tasks/module_selftest.yaml | 1 +
 ceph/qa/suites/rados/mgr/tasks/progress.yaml | 1 +
 ceph/qa/suites/rados/perf/ceph.yaml | 1 -
 ceph/qa/suites/rados/perf/ubuntu_18.04.yaml | 1 -
 ceph/qa/suites/rados/perf/ubuntu_latest.yaml | 1 +
 .../workloads/cosbench_64K_read_write.yaml | 25 -
 .../perf/workloads/cosbench_64K_write.yaml | 25 -
 .../rados/singleton/all/backfill-toofull.yaml | 37 +
 .../singleton/all/ec-inconsistent-hinfo.yaml | 36 +
 .../rados/singleton/all/rebuild-mondb.yaml | 3 +
 ...bd-mirror-journal-bootstrap-workunit.yaml} | 2 +
 ...bd-mirror-snapshot-bootstrap-workunit.yaml | 13 +
 .../4-pool/big-cache.yaml | 15 +
 .../suites/rgw/verify/tasks/s3tests-java.yaml | 2 +-
 .../point-to-point-upgrade.yaml | 16 +-
 .../{pacific..yaml => pacific.yaml} | 6 +-
 .../6-final-workload/rbd-python.yaml | 2 +-
 ceph/qa/tasks/backfill_toofull.py | 193 +
 ceph/qa/tasks/barbican.py | 7 +-
 ceph/qa/tasks/ceph.py | 30 +-
 ceph/qa/tasks/ceph_manager.py | 20 +-
 ceph/qa/tasks/ceph_test_case.py | 14 +-
 ceph/qa/tasks/cephfs/filesystem.py | 12 +
 ceph/qa/tasks/cephfs/mount.py | 1 +
 ceph/qa/tasks/cephfs/test_failover.py | 16 +
 ceph/qa/tasks/cephfs/test_mirroring.py | 10 +-
 ceph/qa/tasks/cephfs/test_nfs.py | 149 +-
 ceph/qa/tasks/cephfs/test_snapshots.py | 2 +-
 ceph/qa/tasks/cephfs/test_volumes.py | 35 +-
 ceph/qa/tasks/cephfs_mirror.py | 2 +
 ceph/qa/tasks/cephfs_mirror_thrash.py | 219 +
 ceph/qa/tasks/cram.py | 2 +-
 ceph/qa/tasks/ec_inconsistent_hinfo.py | 225 +
 ceph/qa/tasks/fs.py | 68 +
 ceph/qa/tasks/kubeadm.py | 26 +
 ceph/qa/tasks/mgr/dashboard/__init__.py | 2 +-
 ceph/qa/tasks/mgr/dashboard/helper.py | 23 +-
 ceph/qa/tasks/mgr/dashboard/test_api.py | 4 +-
 ceph/qa/tasks/mgr/dashboard/test_auth.py | 42 +-
 ceph/qa/tasks/mgr/dashboard/test_cluster.py | 23 +
 ceph/qa/tasks/mgr/dashboard/test_ganesha.py | 208 -
 ceph/qa/tasks/mgr/dashboard/test_host.py | 14 +-
 ceph/qa/tasks/mgr/dashboard/test_pool.py | 29 +-
 ceph/qa/tasks/mgr/dashboard/test_requests.py | 6 +-
 ceph/qa/tasks/mgr/dashboard/test_rgw.py | 12 +-
 ceph/qa/tasks/mgr/dashboard/test_user.py | 3 +
 ceph/qa/tasks/mgr/test_module_selftest.py | 10 +-
 ceph/qa/tasks/mgr/test_orchestrator_cli.py | 3 +-
 ceph/qa/tasks/mgr/test_progress.py | 152 +-
 ceph/qa/tasks/pykmip.py | 2 +-
 ceph/qa/tasks/python.py | 45 +
 ceph/qa/tasks/s3a_hadoop.py | 4 +-
 ceph/qa/tasks/tox.py | 14 +-
 ceph/qa/tasks/vip.py | 1 +
 ceph/qa/tasks/vstart_runner.py | 6 +
 .../workunits/cephadm/test_dashboard_e2e.sh | 13 -
 ceph/qa/workunits/fs/cephfs_mirror_ha_gen.sh | 69 +
 .../workunits/fs/cephfs_mirror_ha_verify.sh | 40 +
 ceph/qa/workunits/fs/cephfs_mirror_helpers.sh | 66 +
 ceph/qa/workunits/mon/pg_autoscaler.sh | 50 +
 ceph/qa/workunits/rbd/rbd_mirror_bootstrap.sh | 2 +-
 ceph/qa/workunits/rbd/rbd_mirror_helpers.sh | 70 +
 ceph/qa/workunits/rbd/rbd_mirror_journal.sh | 39 +
 ceph/qa/workunits/rbd/rbd_mirror_snapshot.sh | 38 +
 ceph/qa/workunits/rbd/rbd_mirror_stress.sh | 5 +
 ceph/src/.git_version | 4 +-
 ceph/src/CMakeLists.txt | 1 +
 ceph/src/auth/cephx/CephxKeyServer.cc | 56 +-
 ceph/src/auth/cephx/CephxKeyServer.h | 7 +-
 ceph/src/ceph-volume/CMakeLists.txt | 2 +-
 ceph/src/ceph-volume/ceph_volume/api/lvm.py | 52 +-
 .../ceph_volume/devices/lvm/activate.py | 2 +-
 .../ceph_volume/devices/lvm/batch.py | 75 +-
 .../ceph_volume/devices/lvm/common.py | 3 +
 .../ceph_volume/devices/lvm/migrate.py | 7 +-
 .../ceph_volume/devices/lvm/prepare.py | 18 +-
 .../ceph_volume/devices/lvm/zap.py | 8 +-
 .../ceph_volume/devices/simple/scan.py | 2 +-
 .../ceph_volume/tests/api/test_lvm.py | 227 +-
 .../ceph-volume/ceph_volume/tests/conftest.py | 8 +-
 .../tests/devices/lvm/test_activate.py | 16 +-
 .../tests/devices/lvm/test_batch.py | 4 +
 .../tests/devices/lvm/test_listing.py | 4 +-
 .../tests/devices/lvm/test_migrate.py | 6 +-
 .../tests/devices/lvm/test_prepare.py | 16 +-
 .../ceph_volume/tests/devices/lvm/test_zap.py | 5 +
 .../ceph_volume/tests/functional/Vagrantfile | 2 +-
 .../batch/playbooks/setup_mixed_type.yml | 6 +-
 .../ceph_volume/tests/util/test_prepare.py | 11 +-
 .../ceph-volume/ceph_volume/util/device.py | 6 +-
 .../ceph-volume/ceph_volume/util/prepare.py | 3 +-
 ceph/src/cephadm/cephadm | 214 +-
 ceph/src/cephadm/tests/test_cephadm.py | 277 +-
 ceph/src/client/Client.cc | 26 +-
 ceph/src/client/Client.h | 16 +
 ceph/src/cls/rbd/cls_rbd.cc | 18 +-
 ceph/src/cls/rbd/cls_rbd_client.cc | 14 +
 ceph/src/cls/rbd/cls_rbd_client.h | 4 +
 ceph/src/common/Timer.cc | 48 +-
 ceph/src/common/Timer.h | 28 +-
 ceph/src/common/config.cc | 10 +
 ceph/src/common/config.h | 3 +
 ceph/src/common/config_proxy.h | 3 +
 ceph/src/common/fair_mutex.h | 80 +
 ceph/src/common/legacy_config_opts.h | 2 +
 ceph/src/common/options.cc | 39 +-
 ceph/src/include/CompatSet.h | 1 +
 ceph/src/include/cephfs/metrics/Types.h | 97 +-
 ceph/src/journal/JournalMetadata.h | 3 +-
 ceph/src/journal/JournalPlayer.h | 3 +-
 ceph/src/journal/JournalRecorder.h | 3 +-
 ceph/src/journal/Journaler.h | 2 +-
 ceph/src/journal/ObjectPlayer.h | 3 +-
 ceph/src/journal/ObjectRecorder.h | 3 +-
 ceph/src/librbd/ImageCtx.cc | 2 +-
 ceph/src/librbd/ImageCtx.h | 3 +-
 ceph/src/librbd/Journal.h | 2 +-
 ceph/src/librbd/api/Pool.cc | 5 +-
 ceph/src/librbd/cache/pwl/AbstractWriteLog.cc | 160 +-
 ceph/src/librbd/cache/pwl/AbstractWriteLog.h | 40 +-
 ceph/src/librbd/cache/pwl/InitRequest.cc | 4 +-
 ceph/src/librbd/cache/pwl/LogEntry.cc | 8 -
 ceph/src/librbd/cache/pwl/LogEntry.h | 5 +-
 ceph/src/librbd/cache/pwl/LogOperation.cc | 21 +-
 ceph/src/librbd/cache/pwl/LogOperation.h | 10 +-
 ceph/src/librbd/cache/pwl/Request.h | 2 +-
 ceph/src/librbd/cache/pwl/Types.h | 15 +-
 ceph/src/librbd/cache/pwl/rwl/LogEntry.cc | 8 +
 ceph/src/librbd/cache/pwl/rwl/LogEntry.h | 1 +
 ceph/src/librbd/cache/pwl/rwl/Request.cc | 1 +
 ceph/src/librbd/cache/pwl/rwl/WriteLog.cc | 113 +-
 ceph/src/librbd/cache/pwl/rwl/WriteLog.h | 18 +-
 ceph/src/librbd/cache/pwl/ssd/LogEntry.cc | 6 +-
 ceph/src/librbd/cache/pwl/ssd/LogEntry.h | 6 +
 ceph/src/librbd/cache/pwl/ssd/Request.cc | 5 +-
 ceph/src/librbd/cache/pwl/ssd/WriteLog.cc | 734 +-
 ceph/src/librbd/cache/pwl/ssd/WriteLog.h | 32 +-
 ceph/src/librbd/image/CreateRequest.cc | 3 +-
 ceph/src/librbd/image/RemoveRequest.h | 2 +-
 ceph/src/librbd/image/ValidatePoolRequest.cc | 23 +-
 ceph/src/librbd/image/ValidatePoolRequest.h | 7 +-
 ceph/src/librbd/io/TypeTraits.h | 2 +-
 ceph/src/librbd/journal/CreateRequest.h | 2 +-
 ceph/src/librbd/journal/RemoveRequest.h | 2 +-
 ceph/src/librbd/journal/ResetRequest.h | 2 +-
 .../librbd/mirror/snapshot/PromoteRequest.h | 2 +-
 ceph/src/librbd/object_map/DiffRequest.cc | 4 +-
 ceph/src/librbd/plugin/Api.h | 3 +-
 ceph/src/mds/FSMap.cc | 31 +-
 ceph/src/mds/FSMap.h | 11 +-
 ceph/src/mds/MDCache.cc | 8 +-
 ceph/src/mds/MDSDaemon.h | 5 +-
 ceph/src/mds/MDSMap.cc | 19 +
 ceph/src/mds/MDSMap.h | 10 +-
 ceph/src/mds/MDSPerfMetricTypes.h | 70 +-
 ceph/src/mds/MDSRank.cc | 53 +-
 ceph/src/mds/MDSRank.h | 20 +-
 ceph/src/mds/MetricAggregator.cc | 12 +
 ceph/src/mds/MetricsHandler.cc | 36 +
 ceph/src/mds/MetricsHandler.h | 2 +
 ceph/src/mds/Server.cc | 8 +-
 ceph/src/mds/cephfs_features.h | 2 +
 ceph/src/messages/MClientSession.h | 13 +-
 ceph/src/mgr/ActivePyModules.cc | 25 +-
 ceph/src/mgr/BaseMgrModule.cc | 2 +
 ceph/src/mgr/MDSPerfMetricTypes.cc | 10 +
 ceph/src/mgr/MDSPerfMetricTypes.h | 4 +
 ceph/src/mgr/MetricCollector.h | 4 +-
 ceph/src/mon/AuthMonitor.cc | 40 +-
 ceph/src/mon/AuthMonitor.h | 2 -
 ceph/src/mon/KVMonitor.cc | 2 +-
 ceph/src/mon/MDSMonitor.cc | 134 +-
 ceph/src/mon/MonCap.cc | 3 +
 ceph/src/mon/MonCommands.h | 5 +
 ceph/src/mon/MonMap.cc | 12 +-
 ceph/src/mon/MonmapMonitor.cc | 92 +-
 ceph/src/msg/async/AsyncMessenger.cc | 11 +-
 ceph/src/msg/async/AsyncMessenger.h | 3 +-
 ceph/src/msg/async/ProtocolV2.cc | 8 +-
 ceph/src/mypy.ini | 3 +
 ceph/src/os/bluestore/BlueFS.cc | 5 +-
 ceph/src/os/bluestore/BlueStore.cc | 180 +-
 ceph/src/os/bluestore/BlueStore.h | 6 +
 ceph/src/osd/ECBackend.cc | 28 +-
 ceph/src/osd/ECBackend.h | 2 +-
 ceph/src/osd/OSD.cc | 405 +-
 ceph/src/osd/OSD.h | 9 +
 ceph/src/osd/OSDMap.cc | 20 +-
 ceph/src/osd/PGLog.cc | 3 +-
 ceph/src/osd/PeeringState.cc | 5 +-
 ceph/src/osd/PrimaryLogPG.cc | 12 +-
 ceph/src/osd/scheduler/OpScheduler.h | 7 +
 ceph/src/osd/scheduler/mClockScheduler.cc | 35 +-
 ceph/src/osd/scheduler/mClockScheduler.h | 3 +
 ceph/src/pmdk/.cirrus.yml | 10 +
 ceph/src/pmdk/.codecov.yml | 18 +
 ceph/src/pmdk/.gitattributes | 7 +
 ceph/src/pmdk/.github/ISSUE_TEMPLATE.md | 28 +
 .../pmdk/.github/ISSUE_TEMPLATE/bug_report.md | 49 +
 .../pmdk/.github/ISSUE_TEMPLATE/feature.md | 26 +
 .../pmdk/.github/ISSUE_TEMPLATE/question.md | 15 +
 ceph/src/pmdk/.github/workflows/coverity.yml | 41 +
 ceph/src/pmdk/.github/workflows/gha.yml | 155 +
 ceph/src/pmdk/.mailmap | 29 +
 ceph/src/pmdk/.skip-doc | 0
 ceph/src/pmdk/.travis.yml | 42 +
 ceph/src/pmdk/CODING_STYLE.md | 140 +
 ceph/src/pmdk/CONTRIBUTING.md | 153 +
 ceph/src/pmdk/ChangeLog | 866 ++
 ceph/src/pmdk/LICENSE | 39 +
 ceph/src/pmdk/Makefile | 136 +
 ceph/src/pmdk/README.md | 386 +
 ceph/src/pmdk/VERSION | 1 +
 ceph/src/pmdk/appveyor.yml | 95 +
 ceph/src/pmdk/res/PMDK.ico | Bin 0 -> 53067 bytes
 ceph/src/pmdk/src/.clang-format | 33 +
 ceph/src/pmdk/src/LongPath.manifest | 7 +
 ceph/src/pmdk/src/LongPathSupport.props | 10 +
 ceph/src/pmdk/src/Makefile | 216 +
 ceph/src/pmdk/src/Makefile.inc | 318 +
 ceph/src/pmdk/src/PMDK.sln | 2240 ++++
 ceph/src/pmdk/src/README | 16 +
 ceph/src/pmdk/src/common.inc | 400 +
 ceph/src/pmdk/src/common/.cstyleignore | 1 +
 ceph/src/pmdk/src/common/Makefile | 15 +
 ceph/src/pmdk/src/common/bad_blocks.c | 264 +
 ceph/src/pmdk/src/common/badblocks.h | 77 +
 ceph/src/pmdk/src/common/common.rc | 80 +
 ceph/src/pmdk/src/common/ctl.c | 578 +
 ceph/src/pmdk/src/common/ctl.h | 202 +
 ceph/src/pmdk/src/common/ctl_cow.c | 51 +
 ceph/src/pmdk/src/common/ctl_fallocate.c | 46 +
 ceph/src/pmdk/src/common/ctl_global.h | 33 +
 ceph/src/pmdk/src/common/ctl_prefault.c | 69 +
 ceph/src/pmdk/src/common/ctl_sds.c | 46 +
 ceph/src/pmdk/src/common/dlsym.h | 103 +
 ceph/src/pmdk/src/common/file.c | 618 +
 ceph/src/pmdk/src/common/file.h | 115 +
 ceph/src/pmdk/src/common/file_posix.c | 264 +
 ceph/src/pmdk/src/common/file_windows.c | 196 +
 .../src/pmdk/src/common/libpmemcommon.vcxproj | 163 +
 .../src/common/libpmemcommon.vcxproj.filters | 149 +
 ceph/src/pmdk/src/common/mmap.c | 504 +
 ceph/src/pmdk/src/common/mmap.h | 142 +
 ceph/src/pmdk/src/common/mmap_posix.c | 193 +
 ceph/src/pmdk/src/common/mmap_windows.c | 150 +
 ceph/src/pmdk/src/common/os_deep.h | 27 +
 ceph/src/pmdk/src/common/os_deep_linux.c | 177 +
 ceph/src/pmdk/src/common/os_deep_windows.c | 75 +
 ceph/src/pmdk/src/common/page_size.h | 22 +
 ceph/src/pmdk/src/common/pmemcommon.h | 39 +
 ceph/src/pmdk/src/common/pmemcommon.inc | 55 +
 ceph/src/pmdk/src/common/pool_hdr.c | 345 +
 ceph/src/pmdk/src/common/pool_hdr.h | 259 +
 ceph/src/pmdk/src/common/queue.h | 634 +
 ceph/src/pmdk/src/common/rand.c | 124 +
 ceph/src/pmdk/src/common/rand.h | 29 +
 ceph/src/pmdk/src/common/ravl.c | 577 +
 ceph/src/pmdk/src/common/ravl.h | 54 +
 ceph/src/pmdk/src/common/set.c | 4439 +++++++
 ceph/src/pmdk/src/common/set.h | 440 +
 ceph/src/pmdk/src/common/set_badblocks.c | 254 +
 ceph/src/pmdk/src/common/set_badblocks.h | 28 +
 ceph/src/pmdk/src/common/shutdown_state.c | 234 +
 ceph/src/pmdk/src/common/shutdown_state.h | 41 +
 ceph/src/pmdk/src/common/sys_util.h | 315 +
 ceph/src/pmdk/src/common/util_pmem.h | 47 +
 ceph/src/pmdk/src/common/uuid.c | 83 +
 ceph/src/pmdk/src/common/uuid.h | 55 +
 ceph/src/pmdk/src/common/uuid_freebsd.c | 24 +
 ceph/src/pmdk/src/common/uuid_linux.c | 49 +
 ceph/src/pmdk/src/common/uuid_windows.c | 23 +
 ceph/src/pmdk/src/common/vec.h | 157 +
 ceph/src/pmdk/src/common/vecq.h | 128 +
 ceph/src/pmdk/src/core/Makefile | 12 +
 ceph/src/pmdk/src/core/alloc.c | 119 +
 ceph/src/pmdk/src/core/alloc.h | 49 +
 ceph/src/pmdk/src/core/errno_freebsd.h | 19 +
 ceph/src/pmdk/src/core/fault_injection.h | 39 +
 ceph/src/pmdk/src/core/fs.h | 51 +
 ceph/src/pmdk/src/core/fs_posix.c | 84 +
 ceph/src/pmdk/src/core/fs_windows.c | 123 +
 ceph/src/pmdk/src/core/libpmemcore.vcxproj | 135 +
 .../pmdk/src/core/libpmemcore.vcxproj.filters | 71 +
 ceph/src/pmdk/src/core/os.h | 115 +
 ceph/src/pmdk/src/core/os_posix.c | 353 +
 ceph/src/pmdk/src/core/os_thread.h | 181 +
 ceph/src/pmdk/src/core/os_thread_posix.c | 436 +
 ceph/src/pmdk/src/core/os_thread_windows.c | 655 +
 ceph/src/pmdk/src/core/os_windows.c | 741 ++
 ceph/src/pmdk/src/core/out.c | 592 +
 ceph/src/pmdk/src/core/out.h | 231 +
 ceph/src/pmdk/src/core/pmemcore.h | 44 +
 ceph/src/pmdk/src/core/pmemcore.inc | 41 +
 ceph/src/pmdk/src/core/util.c | 494 +
 ceph/src/pmdk/src/core/util.h | 541 +
 ceph/src/pmdk/src/core/util_posix.c | 126 +
 ceph/src/pmdk/src/core/util_windows.c | 320 +
 ceph/src/pmdk/src/core/valgrind/.cstyleignore | 5 +
 ceph/src/pmdk/src/core/valgrind/README | 2 +
 ceph/src/pmdk/src/core/valgrind/drd.h | 571 +
 ceph/src/pmdk/src/core/valgrind/helgrind.h | 841 ++
 ceph/src/pmdk/src/core/valgrind/memcheck.h | 320 +
 ceph/src/pmdk/src/core/valgrind/pmemcheck.h | 186 +
 ceph/src/pmdk/src/core/valgrind/valgrind.h | 6647 ++++++++++
 ceph/src/pmdk/src/core/valgrind_internal.h | 478 +
 ceph/src/pmdk/src/freebsd/README | 13 +
 ceph/src/pmdk/src/freebsd/include/endian.h | 8 +
 ceph/src/pmdk/src/freebsd/include/features.h | 6 +
 .../pmdk/src/freebsd/include/linux/kdev_t.h | 6 +
 .../pmdk/src/freebsd/include/linux/limits.h | 6 +
 .../pmdk/src/freebsd/include/sys/sysmacros.h | 6 +
 ceph/src/pmdk/src/include/.cstyleignore | 1 +
 ceph/src/pmdk/src/include/README | 27 +
 ceph/src/pmdk/src/include/libpmem.h | 131 +
 ceph/src/pmdk/src/include/libpmem2.h | 272 +
 ceph/src/pmdk/src/include/libpmemblk.h | 164 +
 ceph/src/pmdk/src/include/libpmemlog.h | 152 +
 .../pmdk/src/include/libpmemobj++/README.md | 2 +
 .../src/include/libpmemobj++/detail/README.md | 2 +
 ceph/src/pmdk/src/include/libpmemobj.h | 26 +
 ceph/src/pmdk/src/include/libpmemobj/action.h | 33 +
 .../pmdk/src/include/libpmemobj/action_base.h | 74 +
 ceph/src/pmdk/src/include/libpmemobj/atomic.h | 45 +
 .../pmdk/src/include/libpmemobj/atomic_base.h | 93 +
 ceph/src/pmdk/src/include/libpmemobj/base.h | 299 +
 ceph/src/pmdk/src/include/libpmemobj/ctl.h | 175 +
 .../pmdk/src/include/libpmemobj/iterator.h | 82 +
 .../src/include/libpmemobj/iterator_base.h | 39 +
 .../src/include/libpmemobj/lists_atomic.h | 164 +
 .../include/libpmemobj/lists_atomic_base.h | 39 +
 ceph/src/pmdk/src/include/libpmemobj/pool.h | 17 +
 .../pmdk/src/include/libpmemobj/pool_base.h | 105 +
 ceph/src/pmdk/src/include/libpmemobj/thread.h | 71 +
 ceph/src/pmdk/src/include/libpmemobj/tx.h | 185 +
 .../src/pmdk/src/include/libpmemobj/tx_base.h | 450 +
 ceph/src/pmdk/src/include/libpmemobj/types.h | 205 +
 ceph/src/pmdk/src/include/libpmempool.h | 334 +
 ceph/src/pmdk/src/include/librpmem.h | 98 +
 ceph/src/pmdk/src/include/pmemcompat.h | 63 +
 ceph/src/pmdk/src/libpmem/Makefile | 68 +
 ceph/src/pmdk/src/libpmem/libpmem.c | 125 +
 ceph/src/pmdk/src/libpmem/libpmem.def | 66 +
 ceph/src/pmdk/src/libpmem/libpmem.link.in | 35 +
 ceph/src/pmdk/src/libpmem/libpmem.rc | 12 +
 ceph/src/pmdk/src/libpmem/libpmem.vcxproj | 162 +
 .../pmdk/src/libpmem/libpmem.vcxproj.filters | 243 +
 ceph/src/pmdk/src/libpmem/libpmem_main.c | 36 +
 ceph/src/pmdk/src/libpmem/pmem.c | 957 ++
 ceph/src/pmdk/src/libpmem/pmem.h | 58 +
 ceph/src/pmdk/src/libpmem/pmem_posix.c | 81 +
 ceph/src/pmdk/src/libpmem/pmem_windows.c | 215 +
 ceph/src/pmdk/src/libpmem2/Makefile | 66 +
 .../pmdk/src/libpmem2/aarch64/arm_cacheops.h | 62 +
 ceph/src/pmdk/src/libpmem2/aarch64/flags.inc | 11 +
 ceph/src/pmdk/src/libpmem2/aarch64/flush.h | 31 +
 ceph/src/pmdk/src/libpmem2/aarch64/init.c | 47 +
 .../src/pmdk/src/libpmem2/aarch64/sources.inc | 8 +
 ceph/src/pmdk/src/libpmem2/auto_flush.h | 21 +
 ceph/src/pmdk/src/libpmem2/auto_flush_linux.c | 184 +
 ceph/src/pmdk/src/libpmem2/auto_flush_none.c | 16 +
 .../pmdk/src/libpmem2/auto_flush_windows.c | 197 +
 .../pmdk/src/libpmem2/auto_flush_windows.h | 50 +
 ceph/src/pmdk/src/libpmem2/badblocks.c | 41 +
 ceph/src/pmdk/src/libpmem2/badblocks_ndctl.c | 771 ++
 ceph/src/pmdk/src/libpmem2/badblocks_none.c | 50 +
 ceph/src/pmdk/src/libpmem2/config.c | 218 +
 ceph/src/pmdk/src/libpmem2/config.h | 34 +
 ceph/src/pmdk/src/libpmem2/deep_flush.c | 42 +
 ceph/src/pmdk/src/libpmem2/deep_flush.h | 27 +
 ceph/src/pmdk/src/libpmem2/deep_flush_linux.c | 124 +
 ceph/src/pmdk/src/libpmem2/deep_flush_other.c | 47 +
 .../pmdk/src/libpmem2/deep_flush_windows.c | 47 +
 ceph/src/pmdk/src/libpmem2/errormsg.c | 97 +
 ceph/src/pmdk/src/libpmem2/extent.h | 37 +
 ceph/src/pmdk/src/libpmem2/extent_linux.c | 164 +
 ceph/src/pmdk/src/libpmem2/extent_none.c | 31 +
 ceph/src/pmdk/src/libpmem2/libpmem2.c | 48 +
 ceph/src/pmdk/src/libpmem2/libpmem2.def | 55 +
 ceph/src/pmdk/src/libpmem2/libpmem2.link.in | 51 +
 ceph/src/pmdk/src/libpmem2/libpmem2.rc | 12 +
 ceph/src/pmdk/src/libpmem2/libpmem2.vcxproj | 144 +
 .../src/libpmem2/libpmem2.vcxproj.filters | 223 +
 ceph/src/pmdk/src/libpmem2/libpmem2_main.c | 28 +
 ceph/src/pmdk/src/libpmem2/map.c | 294 +
 ceph/src/pmdk/src/libpmem2/map.h | 63 +
 ceph/src/pmdk/src/libpmem2/map_posix.c | 609 +
 ceph/src/pmdk/src/libpmem2/map_windows.c | 590 +
 ceph/src/pmdk/src/libpmem2/memops_generic.c | 339 +
 ceph/src/pmdk/src/libpmem2/persist.c | 610 +
 ceph/src/pmdk/src/libpmem2/persist.h | 29 +
 ceph/src/pmdk/src/libpmem2/persist_posix.c | 50 +
 ceph/src/pmdk/src/libpmem2/persist_windows.c | 36 +
 ceph/src/pmdk/src/libpmem2/pmem2.h | 27 +
 ceph/src/pmdk/src/libpmem2/pmem2_arch.h | 59 +
 ceph/src/pmdk/src/libpmem2/pmem2_utils.c | 95 +
 ceph/src/pmdk/src/libpmem2/pmem2_utils.h | 55 +
 .../src/pmdk/src/libpmem2/pmem2_utils_linux.c | 70 +
 .../src/pmdk/src/libpmem2/pmem2_utils_ndctl.c | 91 +
 ceph/src/pmdk/src/libpmem2/pmem2_utils_none.c | 33 +
 .../src/pmdk/src/libpmem2/pmem2_utils_other.c | 59 +
 .../src/pmdk/src/libpmem2/ppc64/.cstyleignore | 1 +
 ceph/src/pmdk/src/libpmem2/ppc64/flags.inc | 9 +
 ceph/src/pmdk/src/libpmem2/ppc64/init.c | 66 +
 ceph/src/pmdk/src/libpmem2/ppc64/sources.inc | 4 +
 ceph/src/pmdk/src/libpmem2/ravl_interval.c | 260 +
 ceph/src/pmdk/src/libpmem2/ravl_interval.h | 36 +
 ceph/src/pmdk/src/libpmem2/region_namespace.h | 26 +
 .../src/libpmem2/region_namespace_ndctl.c | 258 +
 .../src/libpmem2/region_namespace_ndctl.h | 32 +
 .../pmdk/src/libpmem2/region_namespace_none.c | 16 +
 ceph/src/pmdk/src/libpmem2/source.c | 37 +
 ceph/src/pmdk/src/libpmem2/source.h | 49 +
 ceph/src/pmdk/src/libpmem2/source_posix.c | 196 +
 ceph/src/pmdk/src/libpmem2/source_windows.c | 183 +
 ceph/src/pmdk/src/libpmem2/usc_ndctl.c | 149 +
 ceph/src/pmdk/src/libpmem2/usc_none.c | 23 +
 ceph/src/pmdk/src/libpmem2/usc_windows.c | 230 +
 ceph/src/pmdk/src/libpmem2/vm_reservation.c | 294 +
 ceph/src/pmdk/src/libpmem2/vm_reservation.h | 25 +
 .../pmdk/src/libpmem2/vm_reservation_posix.c | 85 +
 .../src/libpmem2/vm_reservation_windows.c | 120 +
 ceph/src/pmdk/src/libpmem2/x86_64/avx.h | 86 +
 ceph/src/pmdk/src/libpmem2/x86_64/cpu.c | 174 +
 ceph/src/pmdk/src/libpmem2/x86_64/cpu.h | 18 +
 ceph/src/pmdk/src/libpmem2/x86_64/flags.inc | 31 +
 ceph/src/pmdk/src/libpmem2/x86_64/flush.h | 118 +
 ceph/src/pmdk/src/libpmem2/x86_64/init.c | 528 +
 .../src/libpmem2/x86_64/memcpy/memcpy_avx.h | 100 +
 .../libpmem2/x86_64/memcpy/memcpy_avx512f.h | 18 +
 .../libpmem2/x86_64/memcpy/memcpy_nt_avx.c | 443 +
 .../x86_64/memcpy/memcpy_nt_avx512f.c | 459 +
 .../libpmem2/x86_64/memcpy/memcpy_nt_sse2.c | 428 +
 .../src/libpmem2/x86_64/memcpy/memcpy_sse2.h | 116 +
 .../src/libpmem2/x86_64/memcpy/memcpy_t_avx.c | 281 +
 .../libpmem2/x86_64/memcpy/memcpy_t_avx512f.c | 438 +
 .../libpmem2/x86_64/memcpy/memcpy_t_sse2.c | 246 +
 .../pmdk/src/libpmem2/x86_64/memcpy_memset.h | 273 +
 .../src/libpmem2/x86_64/memset/memset_avx.h | 97 +
 .../libpmem2/x86_64/memset/memset_avx512f.h | 18 +
 .../libpmem2/x86_64/memset/memset_nt_avx.c | 286 +
 .../x86_64/memset/memset_nt_avx512f.c | 282 +
 .../libpmem2/x86_64/memset/memset_nt_sse2.c | 273 +
 .../src/libpmem2/x86_64/memset/memset_sse2.h | 104 +
 .../src/libpmem2/x86_64/memset/memset_t_avx.c | 178 +
 .../libpmem2/x86_64/memset/memset_t_avx512f.c | 285 +
 .../libpmem2/x86_64/memset/memset_t_sse2.c | 153 +
 ceph/src/pmdk/src/libpmem2/x86_64/sources.inc | 30 +
 ceph/src/pmdk/src/libpmemblk/Makefile | 22 +
 ceph/src/pmdk/src/libpmemblk/blk.c | 948 ++
 ceph/src/pmdk/src/libpmemblk/blk.h | 102 +
 ceph/src/pmdk/src/libpmemblk/btt.c | 2051 +++
 ceph/src/pmdk/src/libpmemblk/btt.h | 59 +
 ceph/src/pmdk/src/libpmemblk/btt_layout.h | 107 +
 ceph/src/pmdk/src/libpmemblk/libpmemblk.c | 200 +
 ceph/src/pmdk/src/libpmemblk/libpmemblk.def | 36 +
 .../pmdk/src/libpmemblk/libpmemblk.link.in | 28 +
 ceph/src/pmdk/src/libpmemblk/libpmemblk.rc | 12 +
 .../pmdk/src/libpmemblk/libpmemblk.vcxproj | 133 +
 .../src/libpmemblk/libpmemblk.vcxproj.filters | 217 +
 .../src/pmdk/src/libpmemblk/libpmemblk_main.c | 32 +
 ceph/src/pmdk/src/libpmemlog/Makefile | 23 +
 ceph/src/pmdk/src/libpmemlog/libpmemlog.c | 201 +
 ceph/src/pmdk/src/libpmemlog/libpmemlog.def | 36 +
 .../pmdk/src/libpmemlog/libpmemlog.link.in | 28 +
 ceph/src/pmdk/src/libpmemlog/libpmemlog.rc | 12 +
 .../pmdk/src/libpmemlog/libpmemlog.vcxproj | 130 +
 .../src/libpmemlog/libpmemlog.vcxproj.filters | 208 +
 .../src/pmdk/src/libpmemlog/libpmemlog_main.c | 32 +
 ceph/src/pmdk/src/libpmemlog/log.c | 895 ++
 ceph/src/pmdk/src/libpmemlog/log.h | 115 +
 ceph/src/pmdk/src/libpmemobj/Makefile | 41 +
 ceph/src/pmdk/src/libpmemobj/alloc_class.c | 636 +
 ceph/src/pmdk/src/libpmemobj/alloc_class.h | 79 +
 ceph/src/pmdk/src/libpmemobj/bucket.c | 99 +
 ceph/src/pmdk/src/libpmemobj/bucket.h | 50 +
 ceph/src/pmdk/src/libpmemobj/container.h | 48 +
 ceph/src/pmdk/src/libpmemobj/container_ravl.c | 188 +
 ceph/src/pmdk/src/libpmemobj/container_ravl.h | 23 +
 .../pmdk/src/libpmemobj/container_seglists.c | 171 +
 .../pmdk/src/libpmemobj/container_seglists.h | 24 +
 ceph/src/pmdk/src/libpmemobj/critnib.c | 651 +
 ceph/src/pmdk/src/libpmemobj/critnib.h | 31 +
 ceph/src/pmdk/src/libpmemobj/ctl_debug.c | 61 +
 ceph/src/pmdk/src/libpmemobj/ctl_debug.h | 22 +
 ceph/src/pmdk/src/libpmemobj/heap.c | 1893 +++
 ceph/src/pmdk/src/libpmemobj/heap.h | 132 +
 ceph/src/pmdk/src/libpmemobj/heap_layout.h | 206 +
 ceph/src/pmdk/src/libpmemobj/lane.c | 572 +
 ceph/src/pmdk/src/libpmemobj/lane.h | 149 +
 ceph/src/pmdk/src/libpmemobj/libpmemobj.c | 136 +
 ceph/src/pmdk/src/libpmemobj/libpmemobj.def | 124 +
 .../pmdk/src/libpmemobj/libpmemobj.link.in | 121 +
 ceph/src/pmdk/src/libpmemobj/libpmemobj.rc | 12 +
 .../pmdk/src/libpmemobj/libpmemobj.vcxproj | 187 +
 .../src/libpmemobj/libpmemobj.vcxproj.filters | 384 +
 .../src/pmdk/src/libpmemobj/libpmemobj_main.c | 32 +
 ceph/src/pmdk/src/libpmemobj/list.c | 939 ++
 ceph/src/pmdk/src/libpmemobj/list.h | 64 +
 ceph/src/pmdk/src/libpmemobj/memblock.c | 1520 +++
 ceph/src/pmdk/src/libpmemobj/memblock.h | 306 +
 ceph/src/pmdk/src/libpmemobj/memops.c | 837 ++
 ceph/src/pmdk/src/libpmemobj/memops.h | 84 +
 ceph/src/pmdk/src/libpmemobj/obj.c | 3447 +++++
 ceph/src/pmdk/src/libpmemobj/obj.h | 289 +
 ceph/src/pmdk/src/libpmemobj/palloc.c | 1336 ++
 ceph/src/pmdk/src/libpmemobj/palloc.h | 113 +
 ceph/src/pmdk/src/libpmemobj/pmalloc.c | 797 ++
 ceph/src/pmdk/src/libpmemobj/pmalloc.h | 50 +
 ceph/src/pmdk/src/libpmemobj/pmemops.h | 104 +
 ceph/src/pmdk/src/libpmemobj/recycler.c | 303 +
 ceph/src/pmdk/src/libpmemobj/recycler.h | 52 +
 ceph/src/pmdk/src/libpmemobj/stats.c | 151 +
 ceph/src/pmdk/src/libpmemobj/stats.h | 108 +
 ceph/src/pmdk/src/libpmemobj/sync.c | 642 +
 ceph/src/pmdk/src/libpmemobj/sync.h | 112 +
 ceph/src/pmdk/src/libpmemobj/tx.c | 2375 ++++
 ceph/src/pmdk/src/libpmemobj/tx.h | 54 +
 ceph/src/pmdk/src/libpmemobj/ulog.c | 883 ++
 ceph/src/pmdk/src/libpmemobj/ulog.h | 166 +
 ceph/src/pmdk/src/libpmempool/Makefile | 61 +
 ceph/src/pmdk/src/libpmempool/check.c | 232 +
 ceph/src/pmdk/src/libpmempool/check.h | 30 +
 ceph/src/pmdk/src/libpmempool/check_backup.c | 367 +
 .../pmdk/src/libpmempool/check_bad_blocks.c | 60 +
 ceph/src/pmdk/src/libpmempool/check_blk.c | 237 +
 .../src/pmdk/src/libpmempool/check_btt_info.c | 509 +
 .../pmdk/src/libpmempool/check_btt_map_flog.c | 685 +
 ceph/src/pmdk/src/libpmempool/check_log.c | 209 +
 .../src/pmdk/src/libpmempool/check_pool_hdr.c | 1010 ++
 ceph/src/pmdk/src/libpmempool/check_sds.c | 289 +
 ceph/src/pmdk/src/libpmempool/check_util.c | 669 +
 ceph/src/pmdk/src/libpmempool/check_util.h | 196 +
 ceph/src/pmdk/src/libpmempool/check_write.c | 246 +
 ceph/src/pmdk/src/libpmempool/feature.c | 789 ++
 ceph/src/pmdk/src/libpmempool/libpmempool.c | 417 +
 ceph/src/pmdk/src/libpmempool/libpmempool.def | 32 +
 .../pmdk/src/libpmempool/libpmempool.link.in | 23 +
 ceph/src/pmdk/src/libpmempool/libpmempool.rc | 12 +
 .../pmdk/src/libpmempool/libpmempool.vcxproj | 162 +
 .../libpmempool/libpmempool.vcxproj.filters | 253 +
 .../pmdk/src/libpmempool/libpmempool_main.c | 34 +
 ceph/src/pmdk/src/libpmempool/pmempool.h | 48 +
 ceph/src/pmdk/src/libpmempool/pool.c | 1123 ++
 ceph/src/pmdk/src/libpmempool/pool.h | 163 +
 ceph/src/pmdk/src/libpmempool/replica.c | 2503 ++++
 ceph/src/pmdk/src/libpmempool/replica.h | 211 +
 ceph/src/pmdk/src/libpmempool/rm.c | 251 +
 ceph/src/pmdk/src/libpmempool/sync.c | 1646 +++
 ceph/src/pmdk/src/libpmempool/transform.c | 1017 ++
 ceph/src/pmdk/src/librpmem/Makefile | 43 +
 ceph/src/pmdk/src/librpmem/README | 7 +
 ceph/src/pmdk/src/librpmem/librpmem.c | 84 +
 ceph/src/pmdk/src/librpmem/librpmem.link.in | 24 +
 ceph/src/pmdk/src/librpmem/rpmem.c | 914 ++
 ceph/src/pmdk/src/librpmem/rpmem.h | 34 +
 ceph/src/pmdk/src/librpmem/rpmem_cmd.c | 239 +
 ceph/src/pmdk/src/librpmem/rpmem_cmd.h | 39 +
 ceph/src/pmdk/src/librpmem/rpmem_fip.c | 1987 +++
 ceph/src/pmdk/src/librpmem/rpmem_fip.h | 61 +
 ceph/src/pmdk/src/librpmem/rpmem_obc.c | 677 +
 ceph/src/pmdk/src/librpmem/rpmem_obc.h | 47 +
 ceph/src/pmdk/src/librpmem/rpmem_ssh.c | 442 +
 ceph/src/pmdk/src/librpmem/rpmem_ssh.h | 34 +
 ceph/src/pmdk/src/librpmem/rpmem_util.c | 239 +
 ceph/src/pmdk/src/librpmem/rpmem_util.h | 47 +
 ceph/src/pmdk/src/libvmem/README.md | 2 +
 ceph/src/pmdk/src/libvmmalloc/README.md | 2 +
 ceph/src/pmdk/src/rpmem_common/Makefile | 33 +
 ceph/src/pmdk/src/rpmem_common/rpmem_common.c | 314 +
 ceph/src/pmdk/src/rpmem_common/rpmem_common.h | 139 +
 .../pmdk/src/rpmem_common/rpmem_common_log.h | 38 +
 .../pmdk/src/rpmem_common/rpmem_fip_common.c | 332 +
 .../pmdk/src/rpmem_common/rpmem_fip_common.h | 89 +
 .../pmdk/src/rpmem_common/rpmem_fip_lane.h | 127 +
 .../src/pmdk/src/rpmem_common/rpmem_fip_msg.h | 146 +
 ceph/src/pmdk/src/rpmem_common/rpmem_proto.h | 545 +
 ceph/src/pmdk/src/tools/Makefile | 45 +
 ceph/src/pmdk/src/tools/Makefile.inc | 342 +
 ceph/src/pmdk/src/tools/daxio/Makefile | 36 +
 ceph/src/pmdk/src/tools/daxio/README | 47 +
 ceph/src/pmdk/src/tools/daxio/daxio.c | 607 +
 ceph/src/pmdk/src/tools/pmempool/Makefile | 58 +
 ceph/src/pmdk/src/tools/pmempool/README | 306 +
 .../tools/pmempool/bash_completion/pmempool | 168 +
 ceph/src/pmdk/src/tools/pmempool/check.c | 315 +
 ceph/src/pmdk/src/tools/pmempool/check.h | 9 +
 ceph/src/pmdk/src/tools/pmempool/common.c | 1382 ++
 ceph/src/pmdk/src/tools/pmempool/common.h | 203 +
 ceph/src/pmdk/src/tools/pmempool/convert.c | 111 +
 ceph/src/pmdk/src/tools/pmempool/convert.h | 11 +
 ceph/src/pmdk/src/tools/pmempool/create.c | 668 +
 ceph/src/pmdk/src/tools/pmempool/create.h | 9 +
 ceph/src/pmdk/src/tools/pmempool/dump.c | 391 +
 ceph/src/pmdk/src/tools/pmempool/dump.h | 9 +
 ceph/src/pmdk/src/tools/pmempool/feature.c | 207 +
 ceph/src/pmdk/src/tools/pmempool/feature.h | 9 +
 ceph/src/pmdk/src/tools/pmempool/info.c | 1034 ++
 ceph/src/pmdk/src/tools/pmempool/info.h | 166 +
 ceph/src/pmdk/src/tools/pmempool/info_blk.c | 567 +
 ceph/src/pmdk/src/tools/pmempool/info_log.c | 160 +
 ceph/src/pmdk/src/tools/pmempool/info_obj.c | 962 ++
 ceph/src/pmdk/src/tools/pmempool/output.c | 844 ++
 ceph/src/pmdk/src/tools/pmempool/output.h | 48 +
 ceph/src/pmdk/src/tools/pmempool/pmempool.c | 302 +
 ceph/src/pmdk/src/tools/pmempool/pmempool.rc | Bin 0 -> 3722 bytes
 .../pmdk/src/tools/pmempool/pmempool.vcxproj | 178 +
 .../tools/pmempool/pmempool.vcxproj.filters | 157 +
 ceph/src/pmdk/src/tools/pmempool/rm.c | 372 +
 ceph/src/pmdk/src/tools/pmempool/rm.h | 9 +
 .../src/pmdk/src/tools/pmempool/synchronize.c | 157 +
 .../src/pmdk/src/tools/pmempool/synchronize.h | 9 +
 ceph/src/pmdk/src/tools/pmempool/transform.c | 160 +
 ceph/src/pmdk/src/tools/pmempool/transform.h | 9 +
 ceph/src/pmdk/src/tools/pmreorder/Makefile | 16 +
 .../tools/pmreorder/binaryoutputhandler.py | 218 +
 .../tools/pmreorder/consistencycheckwrap.py | 112 +
 .../src/tools/pmreorder/loggingfacility.py | 77 +
 .../pmdk/src/tools/pmreorder/markerparser.py | 52 +
 .../src/tools/pmreorder/memoryoperations.py | 413 +
 .../src/tools/pmreorder/operationfactory.py | 145 +
 .../pmdk/src/tools/pmreorder/opscontext.py | 68 +
 .../src/pmdk/src/tools/pmreorder/pmreorder.py | 88 +
 .../src/tools/pmreorder/reorderengines.py | 341 +
 .../src/tools/pmreorder/reorderexceptions.py | 10 +
 .../pmdk/src/tools/pmreorder/statemachine.py | 364 +
 ceph/src/pmdk/src/tools/pmreorder/utils.py | 102 +
 ceph/src/pmdk/src/tools/rpmemd/Makefile | 47 +
 ceph/src/pmdk/src/tools/rpmemd/README | 8 +
 ceph/src/pmdk/src/tools/rpmemd/rpmemd.c | 803 ++
 ceph/src/pmdk/src/tools/rpmemd/rpmemd.h | 8 +
 .../src/pmdk/src/tools/rpmemd/rpmemd_config.c | 640 +
 .../src/pmdk/src/tools/rpmemd/rpmemd_config.h | 45 +
 ceph/src/pmdk/src/tools/rpmemd/rpmemd_db.c | 635 +
 ceph/src/pmdk/src/tools/rpmemd/rpmemd_db.h | 33 +
 ceph/src/pmdk/src/tools/rpmemd/rpmemd_fip.c | 1216 ++
 ceph/src/pmdk/src/tools/rpmemd/rpmemd_fip.h | 37 +
 ceph/src/pmdk/src/tools/rpmemd/rpmemd_log.c | 250 +
 ceph/src/pmdk/src/tools/rpmemd/rpmemd_log.h | 75 +
 ceph/src/pmdk/src/tools/rpmemd/rpmemd_obc.c | 548 +
 ceph/src/pmdk/src/tools/rpmemd/rpmemd_obc.h | 39 +
 ceph/src/pmdk/src/tools/rpmemd/rpmemd_util.c | 119 +
 ceph/src/pmdk/src/tools/rpmemd/rpmemd_util.h | 13 +
 ceph/src/pmdk/src/windows/README | 19 +
 .../src/pmdk/src/windows/getopt/.cstyleignore | 2 +
 ceph/src/pmdk/src/windows/getopt/LICENSE.txt | 24 +
 ceph/src/pmdk/src/windows/getopt/README | 9 +
 ceph/src/pmdk/src/windows/getopt/getopt.c | 293 +
 ceph/src/pmdk/src/windows/getopt/getopt.h | 58 +
 .../pmdk/src/windows/getopt/getopt.vcxproj | 88 +
 .../src/windows/getopt/getopt.vcxproj.filters | 23 +
 .../pmdk/src/windows/include/.cstyleignore | 1 +
 ceph/src/pmdk/src/windows/include/dirent.h | 6 +
 ceph/src/pmdk/src/windows/include/endian.h | 32 +
 ceph/src/pmdk/src/windows/include/err.h | 41 +
 ceph/src/pmdk/src/windows/include/features.h | 6 +
 ceph/src/pmdk/src/windows/include/libgen.h | 6 +
 .../pmdk/src/windows/include/linux/limits.h | 15 +
 ceph/src/pmdk/src/windows/include/platform.h | 226 +
 ceph/src/pmdk/src/windows/include/sched.h | 6 +
 ceph/src/pmdk/src/windows/include/strings.h | 6 +
 ceph/src/pmdk/src/windows/include/sys/file.h | 37 +
 ceph/src/pmdk/src/windows/include/sys/mman.h | 46 +
 ceph/src/pmdk/src/windows/include/sys/mount.h | 6 +
 ceph/src/pmdk/src/windows/include/sys/param.h | 23 +
 .../pmdk/src/windows/include/sys/resource.h | 6 +
 .../pmdk/src/windows/include/sys/statvfs.h | 6 +
 ceph/src/pmdk/src/windows/include/sys/uio.h | 23 +
 ceph/src/pmdk/src/windows/include/sys/wait.h | 6 +
 ceph/src/pmdk/src/windows/include/unistd.h | 136 +
 ceph/src/pmdk/src/windows/include/win_mmap.h | 81 +
 ceph/src/pmdk/src/windows/libs_debug.props | 34 +
 ceph/src/pmdk/src/windows/libs_release.props | 36 +
 .../src/windows/srcversion/srcversion.vcxproj | 108 +
 ceph/src/pmdk/src/windows/win_mmap.c | 1132 ++
 ceph/src/pmdk/utils/CHECK_WHITESPACE.PS1 | 20 +
 ceph/src/pmdk/utils/CREATE-ZIP.PS1 | 83 +
 ceph/src/pmdk/utils/CSTYLE.ps1 | 37 +
 ceph/src/pmdk/utils/Makefile | 12 +
 ceph/src/pmdk/utils/README | 5 +
 ceph/src/pmdk/utils/SRCVERSION.ps1 | 158 +
 ceph/src/pmdk/utils/build-dpkg.sh | 836 ++
 ceph/src/pmdk/utils/build-rpm.sh | 263 +
 ceph/src/pmdk/utils/check-area.sh | 74 +
 ceph/src/pmdk/utils/check-commit.sh | 50 +
 ceph/src/pmdk/utils/check-commits.sh | 43 +
 ceph/src/pmdk/utils/check-manpage | 62 +
 ceph/src/pmdk/utils/check-manpages | 16 +
 ceph/src/pmdk/utils/check-os.sh | 30 +
 ceph/src/pmdk/utils/check-shebang.sh | 30 +
 .../pmdk/utils/check_license/check-headers.sh | 192 +
 .../utils/check_license/check-ms-license.pl | 62 +
 .../utils/check_license/file-exceptions.sh | 7 +
 ceph/src/pmdk/utils/check_sdk_version.py | 76 +
 ceph/src/pmdk/utils/check_whitespace | 210 +
 ceph/src/pmdk/utils/copy-source.sh | 36 +
 ceph/src/pmdk/utils/cstyle | 1037 ++
 ...-fix-travisci_build_coverity_scan.sh.patch | 27 +
 ceph/src/pmdk/utils/docker/README | 19 +
 ceph/src/pmdk/utils/docker/build-CI.sh | 143 +
 ceph/src/pmdk/utils/docker/build-local.sh | 111 +
 ceph/src/pmdk/utils/docker/configure-tests.sh | 105 +
 ...-gcov-files-and-turn-off-verbose-log.patch | 37 +
 .../utils/docker/images/Dockerfile.fedora-31 | 120 +
 .../docker/images/Dockerfile.ubuntu-19.10 | 121 +
 ceph/src/pmdk/utils/docker/images/README | 6 +
 .../pmdk/utils/docker/images/build-image.sh | 53 +
 .../utils/docker/images/download-scripts.sh | 32 +
 .../utils/docker/images/install-libfabric.sh | 40 +
 .../utils/docker/images/install-libndctl.sh | 60 +
 .../utils/docker/images/install-valgrind.sh | 52 +
 .../pmdk/utils/docker/images/push-image.sh | 51 +
 ceph/src/pmdk/utils/docker/ppc64le.blacklist | 19 +
 .../pmdk/utils/docker/prepare-for-build.sh | 25 +
 .../utils/docker/pull-or-rebuild-image.sh | 112 +
 .../pmdk/utils/docker/run-build-package.sh | 47 +
 ceph/src/pmdk/utils/docker/run-build.sh | 34 +
 ceph/src/pmdk/utils/docker/run-coverage.sh | 52 +
 ceph/src/pmdk/utils/docker/run-coverity.sh | 71 +
 ceph/src/pmdk/utils/docker/run-doc-update.sh | 76 +
 ceph/src/pmdk/utils/docker/set-ci-vars.sh | 96 +
 ceph/src/pmdk/utils/docker/set-vars.sh | 12 +
 .../pmdk/utils/docker/test_package/Makefile | 21 +
 .../src/pmdk/utils/docker/test_package/README | 6 +
 .../utils/docker/test_package/test_package.c | 41 +
 ceph/src/pmdk/utils/docker/valid-branches.sh | 12 +
 ceph/src/pmdk/utils/get_aliases.sh | 110 +
 ceph/src/pmdk/utils/git-years | 8 +
 ceph/src/pmdk/utils/libpmem.pc.in | 9 +
 ceph/src/pmdk/utils/libpmem2.pc.in | 9 +
 ceph/src/pmdk/utils/libpmemblk.pc.in | 9 +
 ceph/src/pmdk/utils/libpmemlog.pc.in | 9 +
 ceph/src/pmdk/utils/libpmemobj.pc.in | 10 +
 ceph/src/pmdk/utils/libpmempool.pc.in | 10 +
 ceph/src/pmdk/utils/librpmem.pc.in | 9 +
 ceph/src/pmdk/utils/magic-install.sh | 15 +
 ceph/src/pmdk/utils/magic-uninstall.sh | 21 +
 ceph/src/pmdk/utils/md2man.sh | 67 +
 ceph/src/pmdk/utils/os-banned | 63 +
 ceph/src/pmdk/utils/pkg-common.sh | 56 +
 ceph/src/pmdk/utils/pkg-config.sh | 17 +
 ceph/src/pmdk/utils/pmdk.magic | 15 +
 ceph/src/pmdk/utils/pmdk.spec.in | 710 ++
 ceph/src/pmdk/utils/ps_analyze.ps1 | 31 +
 ceph/src/pmdk/utils/sort_solution | 128 +
 ceph/src/pmdk/utils/style_check.sh | 137 +
 ceph/src/pmdk/utils/version.sh | 62 +
 ceph/src/pybind/ceph_argparse.py | 8 +-
 ceph/src/pybind/mgr/CMakeLists.txt | 10 +-
 ceph/src/pybind/mgr/cephadm/inventory.py | 37 +-
 ceph/src/pybind/mgr/cephadm/migrations.py | 124 +-
 ceph/src/pybind/mgr/cephadm/module.py | 212 +-
 ceph/src/pybind/mgr/cephadm/registry.py | 61 +
 ceph/src/pybind/mgr/cephadm/schedule.py | 8 +-
 ceph/src/pybind/mgr/cephadm/serve.py | 141 +-
 .../mgr/cephadm/services/cephadmservice.py | 30 +-
 .../pybind/mgr/cephadm/services/ingress.py | 2 +-
 ceph/src/pybind/mgr/cephadm/services/iscsi.py | 12 +-
 .../pybind/mgr/cephadm/services/monitoring.py | 37 +-
 ceph/src/pybind/mgr/cephadm/services/nfs.py | 44 +-
 ceph/src/pybind/mgr/cephadm/services/osd.py | 90 +-
 .../services/alertmanager/alertmanager.yml.j2 | 2 +-
 .../services/grafana/ceph-dashboard.yml.j2 | 6 +-
 .../services/iscsi/iscsi-gateway.cfg.j2 | 2 +-
 .../templates/services/nfs/ganesha.conf.j2 | 8 -
 ceph/src/pybind/mgr/cephadm/tests/fixtures.py | 10 +-
 .../pybind/mgr/cephadm/tests/test_cephadm.py | 340 +-
 .../pybind/mgr/cephadm/tests/test_facts.py | 12 +
 .../mgr/cephadm/tests/test_migration.py | 72 +-
 .../mgr/cephadm/tests/test_osd_removal.py | 35 +-
 .../mgr/cephadm/tests/test_scheduling.py | 3 +-
 .../pybind/mgr/cephadm/tests/test_services.py | 239 +-
 .../src/pybind/mgr/cephadm/tests/test_spec.py | 1 +
 ceph/src/pybind/mgr/cephadm/upgrade.py | 204 +-
 ceph/src/pybind/mgr/cephadm/utils.py | 3 +
 ceph/src/pybind/mgr/dashboard/CMakeLists.txt | 5 +-
 ceph/src/pybind/mgr/dashboard/__init__.py | 2 -
 .../dashboard/ci/cephadm/bootstrap-cluster.sh | 7 +-
 .../mgr/dashboard/controllers/__init__.py | 1102 +-
 .../mgr/dashboard/controllers/_api_router.py | 13 +
 .../pybind/mgr/dashboard/controllers/_auth.py | 18 +
 .../dashboard/controllers/_base_controller.py | 314 +
 .../pybind/mgr/dashboard/controllers/_docs.py | 128 +
 .../mgr/dashboard/controllers/_endpoint.py | 82 +
 .../mgr/dashboard/controllers/_helpers.py | 127 +
 .../mgr/dashboard/controllers/_permissions.py | 60 +
 .../dashboard/controllers/_rest_controller.py | 249 +
 .../mgr/dashboard/controllers/_router.py | 69 +
 .../pybind/mgr/dashboard/controllers/_task.py | 79 +
 .../mgr/dashboard/controllers/_ui_router.py | 13 +
 .../mgr/dashboard/controllers/_version.py | 75 +
 .../pybind/mgr/dashboard/controllers/auth.py | 9 +-
 .../mgr/dashboard/controllers/cephfs.py | 13 +-
 .../mgr/dashboard/controllers/cluster.py | 21 +
 .../controllers/cluster_configuration.py | 6 +-
 .../mgr/dashboard/controllers/crush_rule.py | 12 +-
 .../pybind/mgr/dashboard/controllers/docs.py | 39 +-
 .../controllers/erasure_code_profile.py | 11 +-
 .../dashboard/controllers/frontend_logging.py | 4 +-
 .../mgr/dashboard/controllers/grafana.py | 8 +-
 .../mgr/dashboard/controllers/health.py | 8 +-
 .../pybind/mgr/dashboard/controllers/home.py | 8 +-
 .../pybind/mgr/dashboard/controllers/host.py | 68 +-
 .../pybind/mgr/dashboard/controllers/iscsi.py | 15 +-
 .../pybind/mgr/dashboard/controllers/logs.py | 8 +-
 .../mgr/dashboard/controllers/mgr_modules.py | 6 +-
 .../mgr/dashboard/controllers/monitor.py | 6 +-
 .../controllers/{nfsganesha.py => nfs.py} | 244 +-
 .../mgr/dashboard/controllers/orchestrator.py | 6 +-
 .../pybind/mgr/dashboard/controllers/osd.py | 14 +-
 .../dashboard/controllers/perf_counters.py | 34 +-
 .../pybind/mgr/dashboard/controllers/pool.py | 12 +-
 .../mgr/dashboard/controllers/prometheus.py | 12 +-
 .../pybind/mgr/dashboard/controllers/rbd.py | 21 +-
 .../dashboard/controllers/rbd_mirroring.py | 26 +-
 .../pybind/mgr/dashboard/controllers/rgw.py | 39 +-
 .../pybind/mgr/dashboard/controllers/role.py | 9 +-
 .../pybind/mgr/dashboard/controllers/saml2.py | 4 +-
 .../mgr/dashboard/controllers/service.py | 8 +-
 .../mgr/dashboard/controllers/settings.py | 8 +-
 .../mgr/dashboard/controllers/summary.py | 6 +-
 .../pybind/mgr/dashboard/controllers/task.py | 6 +-
 .../mgr/dashboard/controllers/telemetry.py | 6 +-
 .../pybind/mgr/dashboard/controllers/user.py | 16 +-
 .../mgr/dashboard/frontend/angular.json | 1 -
 .../dashboard/frontend/applitools.config.js | 14 +
 .../integration/block/images.e2e-spec.ts | 4 +-
 .../integration/block/mirroring.e2e-spec.ts | 2 +-
 .../cluster/configuration.e2e-spec.ts | 26 +-
 .../integration/cluster/create-cluster.po.ts | 70 +
 .../cypress/integration/cluster/hosts.po.ts | 47 +-
 .../integration/cluster/logs.e2e-spec.ts | 2 +-
 .../integration/cluster/services.po.ts | 83 +-
 .../01-hosts-force-maintenance.e2e-spec.ts | 49 -
 .../orchestrator/01-hosts.e2e-spec.ts | 30 +-
 .../02-hosts-inventory.e2e-spec.ts | 2 +-
 .../orchestrator/03-inventory.e2e-spec.ts | 2 +-
 .../orchestrator/05-services.e2e-spec.ts | 13 +-
 ...01-create-cluster-welcome-page.e2e-spec.ts | 19 +
 .../workflow/01-hosts.e2e-spec.ts | 57 -
 .../02-create-cluster-add-host.e2e-spec.ts | 71 +
 .../03-create-cluster-create-osds.e2e-spec.ts | 40 +
 ...create-cluster-create-services.e2e-spec.ts | 46 +
 .../05-create-cluster-review.e2e-spec.ts | 76 +
 .../workflow/06-cluster-check.e2e-spec.ts | 90 +
 .../workflow/07-nfs-exports.e2e-spec.ts | 81 +
 .../workflow/nfs/nfs-export.po.ts | 50 +
 .../cypress/integration/page-helper.po.ts | 41 +-
 .../integration/pools/pools.e2e-spec.ts | 8 +-
 .../cypress/integration/pools/pools.po.ts | 6 -
 .../cypress/integration/rgw/users.po.ts | 2 +-
 .../visualTests/dashboard.vrt-spec.ts | 22 +
 .../integration/visualTests/login.vrt-spec.ts | 19 +
 .../frontend/cypress/plugins/index.js | 2 +
 .../frontend/cypress/support/commands.ts | 3 +-
 .../frontend/cypress/support/eyes-index.d.ts | 1 +
 .../frontend/cypress/support/index.ts | 2 +
 .../dashboard/frontend/cypress/tsconfig.json | 3 +-
 .../dist/en-US/1.6da7b376fa1a8a3df154.js | 1 -
 .../dist/en-US/1.e0a29c1d4fcf893cf11d.js | 1 +
 .../frontend/dist/en-US/3rdpartylicenses.txt | 326 +-
 .../dist/en-US/5.0a363eda73eafe0c0332.js | 1 -
 .../dist/en-US/5.f6931a7617fe7f8fd244.js | 1 +
 .../dist/en-US/6.115992dc55f8e1abedbc.js | 1 -
 .../dist/en-US/6.e0fafffe422f8212d682.js | 1 +
 .../dashboard/frontend/dist/en-US/index.html | 4 +-
 .../dist/en-US/main.b78c1bf5c30e15315e18.js | 3 -
 .../dist/en-US/main.d269a7c492a93e2ebedb.js | 3 +
 .../en-US/polyfills.69188bf73a1e0d939338.js | 1 +
 .../en-US/polyfills.b66d1515aae6fe3887b1.js | 1 -
 .../en-US/runtime.fc090c2eb8af922526a6.js | 1 +
 .../en-US/runtime.fcd694c3eff5ef104b53.js | 1 -
 .../en-US/scripts.6bda3fa7e09a87cd4228.js | 2 +-
 .../en-US/styles.0520b6222fe6dab25bab.css | 19 +
 .../en-US/styles.7918cb8dc788b3eedc95.css | 19 -
 .../frontend/dist/en-US/swagger-ui-bundle.js | 3 -
 .../frontend/dist/en-US/swagger-ui.css | 4 -
 .../mgr/dashboard/frontend/package-lock.json | 10571 +++++++++-------
 .../mgr/dashboard/frontend/package.json | 36 +-
 .../frontend/src/app/app-routing.module.ts | 34 +-
 .../iscsi-setting/iscsi-setting.component.ts | 4 +-
 .../iscsi-target-form.component.html | 6 +-
 .../block/rbd-form/rbd-form.component.html | 2 +-
 .../block/rbd-list/rbd-list.component.html | 20 +
 .../block/rbd-list/rbd-list.component.spec.ts | 37 +
 .../ceph/block/rbd-list/rbd-list.component.ts | 10 +-
 .../rbd-snapshot-form-modal.component.html | 2 +-
 .../src/app/ceph/cluster/cluster.module.ts | 11 +-
 .../create-cluster-review.component.html | 50 +
 .../create-cluster-review.component.scss | 5 +
 .../create-cluster-review.component.spec.ts | 55 +
 .../create-cluster-review.component.ts | 117 +
 .../create-cluster.component.html | 95 +
 .../create-cluster.component.scss | 30 +
 .../create-cluster.component.spec.ts | 153 +
 .../create-cluster.component.ts | 183 +
 .../hosts/host-form/host-form.component.html | 172 +-
 .../host-form/host-form.component.spec.ts | 94 +-
 .../hosts/host-form/host-form.component.ts | 119 +-
 .../ceph/cluster/hosts/hosts.component.html | 21 +-
 .../cluster/hosts/hosts.component.spec.ts | 141 +-
 .../app/ceph/cluster/hosts/hosts.component.ts | 205 +-
 .../inventory-devices.component.html | 2 +-
 .../inventory-devices.component.spec.ts | 12 +
 .../inventory-devices.component.ts | 17 +
 .../inventory/inventory.component.spec.ts | 7 +-
 ...sd-devices-selection-groups.component.html | 2 +-
 ...devices-selection-groups.component.spec.ts | 6 +-
 .../osd-devices-selection-groups.component.ts | 34 +-
 ...osd-devices-selection-modal.component.html | 1 +
 .../osd-devices-selection-modal.component.ts | 4 +-
 .../osd-flags-indiv-modal.component.spec.ts | 4 +-
 .../osd/osd-form/osd-form.component.html | 6 +-
 .../osd/osd-form/osd-form.component.ts | 16 +-
 .../osd-pg-scrub-modal.component.html | 2 +-
 .../rules-list/rules-list.component.html | 5 +-
 .../silence-form/silence-form.component.html | 4 +-
 .../service-daemon-list.component.html | 27 +-
 .../service-daemon-list.component.scss | 5 +-
 .../service-form/service-form.component.html | 73 +-
 .../service-form.component.spec.ts | 39 +-
 .../service-form/service-form.component.ts | 147 +-
 .../cluster/services/services.component.html | 3 +-
 .../cluster/services/services.component.ts | 69 +-
 .../src/app/ceph/nfs/models/nfs.fsal.ts | 5 +
 .../src/app/ceph/nfs/nfs-cluster-type.enum.ts | 4 -
 .../nfs-details/nfs-details.component.spec.ts | 19 +-
 .../nfs/nfs-details/nfs-details.component.ts | 3 +-
 .../nfs-form-client.component.html | 18 +-
 .../nfs-form-client.component.ts | 19 +-
 .../ceph/nfs/nfs-form/nfs-form.component.html | 266 +-
 .../ceph/nfs/nfs-form/nfs-form.component.scss | 8 +
 .../nfs/nfs-form/nfs-form.component.spec.ts | 156 +-
 .../ceph/nfs/nfs-form/nfs-form.component.ts | 385 +-
 .../nfs/nfs-list/nfs-list.component.spec.ts | 4 -
 .../ceph/nfs/nfs-list/nfs-list.component.ts | 39 +-
 .../crush-rule-form-modal.component.html | 2 +-
 ...ure-code-profile-form-modal.component.html | 2 +-
 .../src/app/ceph/rgw/models/rgw-daemon.ts | 1 +
 .../rgw-bucket-form.component.html | 8 +-
 .../rgw-bucket-form.component.spec.ts | 88 +-
 .../rgw-bucket-form.component.ts | 115 +-
 .../rgw-bucket-list.component.ts | 2 +-
 .../rgw-daemon-list.component.spec.ts | 31 +-
 .../rgw-daemon-list.component.ts | 29 +-
 .../rgw-user-capability-modal.component.html | 2 +-
 .../rgw-user-details.component.html | 5 +
 .../rgw-user-details.component.spec.ts | 20 +
 .../rgw-user-form.component.html | 2 +-
 .../rgw-user-list/rgw-user-list.component.ts | 1 -
 .../rgw-user-s3-key-modal.component.html | 2 +-
 .../rgw-user-subuser-modal.component.html | 2 +-
 .../rgw-user-swift-key-modal.component.html | 5 +-
 .../core/auth/login/login.component.spec.ts | 26 +
 .../app/core/auth/login/login.component.ts | 8 +-
 .../auth/role-form/role-form.component.html | 2 +-
 .../auth/user-form/user-form.component.html | 2 +-
 .../user-password-form.component.html | 2 +-
 .../src/app/shared/api/api-client.spec.ts | 11 +
 .../frontend/src/app/shared/api/api-client.ts | 5 +
 .../app/shared/api/cluster.service.spec.ts | 42 +
 .../src/app/shared/api/cluster.service.ts | 27 +
 .../src/app/shared/api/host.service.spec.ts | 4 +-
 .../src/app/shared/api/host.service.ts | 21 +-
 .../src/app/shared/api/nfs.service.spec.ts | 36 +-
 .../src/app/shared/api/nfs.service.ts | 77 +-
 .../src/app/shared/api/osd.service.ts | 2 +
 .../app/shared/api/rgw-bucket.service.spec.ts | 10 +-
 .../src/app/shared/api/rgw-bucket.service.ts | 19 +-
 .../app/shared/api/rgw-daemon.service.spec.ts | 2 +-
 .../src/app/shared/api/rgw-daemon.service.ts | 16 +-
 .../src/app/shared/api/rgw-site.service.ts | 14 +
 .../src/app/shared/classes/cd-helper.class.ts | 4 +
 .../shared/components/components.module.ts | 7 +-
 .../components/modal/modal.component.html | 30 +-
 .../components/modal/modal.component.spec.ts | 15 +-
 .../components/modal/modal.component.ts | 9 +-
 .../components/select/select.component.html | 1 +
 .../components/wizard/wizard.component.html | 19 +
 .../components/wizard/wizard.component.scss | 30 +
 .../wizard/wizard.component.spec.ts | 25 +
 .../components/wizard/wizard.component.ts | 39 +
 .../src/app/shared/constants/app.constants.ts | 1 +
 .../src/app/shared/forms/cd-form-builder.ts | 4 +-
 .../app/shared/forms/cd-validators.spec.ts | 152 +-
 .../src/app/shared/forms/cd-validators.ts | 119 +-
 .../models/inventory-device-type.model.ts | 9 +
 .../app/shared/models/orchestrator.enum.ts | 6 +-
 .../app/shared/models/service.interface.ts | 28 +
 .../src/app/shared/models/wizard-steps.ts | 4 +
 .../services/api-interceptor.service.ts | 4 +-
 .../module-status-guard.service.spec.ts | 29 +-
 .../services/module-status-guard.service.ts | 23 +-
 .../shared/services/task-message.service.ts | 9 +-
 .../services/wizard-steps.service.spec.ts | 16 +
 .../shared/services/wizard-steps.service.ts | 58 +
 .../src/styles/ceph-custom/_forms.scss | 11 +
 ceph/src/pybind/mgr/dashboard/module.py | 8 +-
 ceph/src/pybind/mgr/dashboard/openapi.yaml | 254 +-
 .../mgr/dashboard/plugins/feature_toggles.py | 10 +-
 ceph/src/pybind/mgr/dashboard/plugins/motd.py | 4 +-
 .../mgr/dashboard/requirements-lint.txt | 6 +-
 .../mgr/dashboard/requirements-test.txt | 2 +-
 .../mgr/dashboard/run-backend-api-tests.sh | 4 +-
 .../mgr/dashboard/services/ceph_service.py | 6 +-
 .../pybind/mgr/dashboard/services/cephx.py | 29 -
 .../pybind/mgr/dashboard/services/cluster.py | 26 +
 .../pybind/mgr/dashboard/services/ganesha.py | 1129 --
 .../mgr/dashboard/services/orchestrator.py | 9 +-
 .../mgr/dashboard/services/rgw_client.py | 13 +
 .../pybind/mgr/dashboard/tests/__init__.py | 34 +-
 .../dashboard/tests/test_access_control.py | 2 +-
 .../mgr/dashboard/tests/test_api_auditing.py | 6 +-
 .../pybind/mgr/dashboard/tests/test_auth.py | 46 +
 .../mgr/dashboard/tests/test_ceph_service.py | 5 +-
 .../pybind/mgr/dashboard/tests/test_cephfs.py | 2 +-
 .../mgr/dashboard/tests/test_controllers.py | 10 +-
 .../pybind/mgr/dashboard/tests/test_docs.py | 24 +-
 .../tests/test_erasure_code_profile.py | 4 +-
 .../mgr/dashboard/tests/test_exceptions.py | 6 +-
 .../dashboard/tests/test_feature_toggles.py | 6 +-
 .../mgr/dashboard/tests/test_ganesha.py | 1058 --
 .../mgr/dashboard/tests/test_grafana.py | 4 +-
 .../pybind/mgr/dashboard/tests/test_home.py | 2 +-
 .../pybind/mgr/dashboard/tests/test_host.py | 146 +-
 .../pybind/mgr/dashboard/tests/test_iscsi.py | 8 +-
 .../pybind/mgr/dashboard/tests/test_nfs.py | 229 +
 .../mgr/dashboard/tests/test_orchestrator.py | 4 +-
 .../pybind/mgr/dashboard/tests/test_osd.py | 3 +-
 .../mgr/dashboard/tests/test_plugin_debug.py | 2 +-
 .../pybind/mgr/dashboard/tests/test_pool.py | 4 +-
 .../mgr/dashboard/tests/test_prometheus.py | 4 +-
 .../mgr/dashboard/tests/test_rbd_mirroring.py | 15 +-
 .../mgr/dashboard/tests/test_rest_tasks.py | 8 +-
 .../pybind/mgr/dashboard/tests/test_rgw.py | 9 +-
 .../mgr/dashboard/tests/test_rgw_client.py | 2 +-
 .../mgr/dashboard/tests/test_settings.py | 5 +-
 .../pybind/mgr/dashboard/tests/test_sso.py | 2 +-
 .../pybind/mgr/dashboard/tests/test_tools.py | 39 +-
 .../mgr/dashboard/tests/test_versioning.py | 49 +-
 ceph/src/pybind/mgr/dashboard/tools.py | 10 +
 ceph/src/pybind/mgr/influx/module.py | 6 +-
 ceph/src/pybind/mgr/k8sevents/module.py | 4 +-
 ceph/src/pybind/mgr/mgr_module.py | 33 +-
 .../pybind/mgr/mirroring/fs/dir_map/policy.py | 7 +-
 .../mgr/mirroring/fs/snapshot_mirror.py | 7 +-
 ceph/src/pybind/mgr/mirroring/module.py | 5 +-
 ceph/src/pybind/mgr/nfs/__init__.py | 6 +
 ceph/src/pybind/mgr/nfs/cluster.py | 192 +-
 ceph/src/pybind/mgr/nfs/exception.py | 17 +-
 ceph/src/pybind/mgr/nfs/export.py | 824 +-
 ceph/src/pybind/mgr/nfs/export_utils.py | 439 +-
 ceph/src/pybind/mgr/nfs/module.py | 135 +-
 ceph/src/pybind/mgr/nfs/tests/__init__.py | 0
 ceph/src/pybind/mgr/nfs/tests/test_nfs.py | 1020 ++
 ceph/src/pybind/mgr/nfs/utils.py | 14 +-
 .../src/pybind/mgr/orchestrator/_interface.py | 28 +-
 ceph/src/pybind/mgr/orchestrator/module.py | 89 +-
 .../orchestrator/tests/test_orchestrator.py | 19 +-
 ceph/src/pybind/mgr/pg_autoscaler/module.py | 8 +-
 ceph/src/pybind/mgr/progress/module.py | 155 +-
 ceph/src/pybind/mgr/progress/test_progress.py | 6 +-
 ceph/src/pybind/mgr/prometheus/module.py | 28 +-
 ceph/src/pybind/mgr/rook/module.py | 4 +-
 ceph/src/pybind/mgr/rook/rook_cluster.py | 8 +-
 ceph/src/pybind/mgr/selftest/module.py | 20 +-
 .../mgr/snap_schedule/fs/schedule_client.py | 7 +-
 .../tests/fs/test_schedule_client.py | 17 +
 ceph/src/pybind/mgr/stats/fs/perf_stats.py | 6 +-
 .../mgr/test_orchestrator/dummy_data.json | 2 -
 .../pybind/mgr/test_orchestrator/module.py | 2 +-
 ceph/src/pybind/mgr/tests/__init__.py | 11 +-
 ceph/src/pybind/mgr/tox.ini | 8 +-
 ceph/src/pybind/mgr/volumes/fs/fs_util.py | 12 +
 .../pybind/mgr/volumes/fs/operations/group.py | 8 +-
 .../fs/operations/versions/subvolume_v1.py | 4 +-
 .../fs/operations/versions/subvolume_v2.py | 4 +-
 ceph/src/pybind/mgr/volumes/fs/vol_spec.py | 2 +
 .../ceph/deployment/service_spec.py | 89 +-
 .../ceph/tests/test_service_spec.py | 2 +-
 ceph/src/python-common/ceph/utils.py | 16 +-
 ceph/src/rgw/librgw.cc | 19 +-
ceph/src/rgw/rgw_bucket.cc | 6 +- ceph/src/rgw/rgw_common.cc | 5 + ceph/src/rgw/rgw_dencoder.cc | 19 +- ceph/src/rgw/rgw_lib.h | 4 +- ceph/src/rgw/rgw_log.cc | 222 +- ceph/src/rgw/rgw_log.h | 69 +- ceph/src/rgw/rgw_lua_request.cc | 20 +- ceph/src/rgw/rgw_lua_request.h | 4 +- ceph/src/rgw/rgw_main.cc | 16 +- ceph/src/rgw/rgw_multi.cc | 38 +- ceph/src/rgw/rgw_multi.h | 24 +- ceph/src/rgw/rgw_notify.cc | 2 +- ceph/src/rgw/rgw_notify_event_type.cc | 4 + ceph/src/rgw/rgw_notify_event_type.h | 2 + ceph/src/rgw/rgw_op.cc | 273 +- ceph/src/rgw/rgw_process.cc | 4 +- ceph/src/rgw/rgw_process.h | 6 +- ceph/src/rgw/rgw_pubsub.cc | 2 +- ceph/src/rgw/rgw_pubsub.h | 2 +- ceph/src/rgw/rgw_rados.h | 7 +- ceph/src/rgw/rgw_rest_bucket.cc | 2 +- ceph/src/rgw/rgw_rest_s3.cc | 26 +- ceph/src/rgw/rgw_sal.h | 2 +- ceph/src/rgw/rgw_sal_rados.cc | 11 +- ceph/src/rgw/rgw_sal_rados.h | 2 +- ceph/src/rgw/rgw_sts.cc | 2 +- ceph/src/rgw/rgw_sync_module_pubsub.cc | 2 +- ceph/src/rgw/rgw_user.cc | 3 +- ceph/src/script/ceph-release-notes | 2 +- ceph/src/script/run_tox.sh | 20 +- ceph/src/test/cls_rbd/test_cls_rbd.cc | 5 + ceph/src/test/common/CMakeLists.txt | 5 + ceph/src/test/common/test_fair_mutex.cc | 68 + ceph/src/test/fio/fio_librgw.cc | 2 +- ceph/src/test/libcephfs/test.cc | 8 +- ceph/src/test/librados/tier_cxx.cc | 20 +- .../image/test_mock_ValidatePoolRequest.cc | 24 +- .../object_map/test_mock_DiffRequest.cc | 4 +- ceph/src/test/librbd/test_librbd.cc | 29 + ceph/src/test/objectstore/store_test.cc | 523 + .../test/objectstore/test_bluestore_types.cc | 49 + ceph/src/test/osd/TestOSDMap.cc | 156 +- .../test_mock_TrashMoveRequest.cc | 59 +- .../test_mock_PrepareLocalImageRequest.cc | 100 + .../src/test/rbd_mirror/test_ImageReplayer.cc | 2 +- .../rbd_mirror/test_mock_ImageReplayer.cc | 72 +- .../test_mock_MirrorStatusUpdater.cc | 138 +- ceph/src/test/rgw/rgw_multi/tests_ps.py | 33 +- ceph/src/test/rgw/rgw_multi/zone_ps.py | 15 +- ceph/src/test/run-cli-tests | 7 +- ceph/src/tools/CMakeLists.txt | 1 + ceph/src/tools/ceph_monstore_tool.cc | 14 + ceph/src/tools/cephfs/top/cephfs-top | 131 +- .../src/tools/cephfs_mirror/ClusterWatcher.cc | 10 +- ceph/src/tools/cephfs_mirror/ClusterWatcher.h | 1 + ceph/src/tools/cephfs_mirror/Mirror.cc | 4 +- ceph/src/tools/cephfs_mirror/ServiceDaemon.h | 3 +- .../ObjectCacheStore.cc | 4 +- .../immutable_object_cache/ObjectCacheStore.h | 1 + ceph/src/tools/rbd_mirror/ImageDeleter.h | 2 +- ceph/src/tools/rbd_mirror/ImageMap.cc | 2 +- ceph/src/tools/rbd_mirror/ImageReplayer.cc | 68 +- ceph/src/tools/rbd_mirror/ImageReplayer.h | 5 + .../tools/rbd_mirror/MirrorStatusUpdater.cc | 38 +- .../tools/rbd_mirror/MirrorStatusUpdater.h | 5 +- ceph/src/tools/rbd_mirror/RemotePoolPoller.cc | 4 +- ceph/src/tools/rbd_mirror/Threads.h | 2 +- ceph/src/tools/rbd_mirror/Types.cc | 2 +- .../image_deleter/TrashMoveRequest.cc | 23 +- .../tools/rbd_mirror/image_map/LoadRequest.cc | 78 +- .../tools/rbd_mirror/image_map/LoadRequest.h | 13 + ceph/src/tools/rbd_mirror/image_map/Policy.cc | 1 + .../image_replayer/BootstrapRequest.cc | 8 +- .../image_replayer/BootstrapRequest.h | 2 +- .../PrepareLocalImageRequest.cc | 32 +- .../image_replayer/PrepareLocalImageRequest.h | 7 + .../PrepareRemoteImageRequest.cc | 2 +- .../rbd_mirror/image_replayer/StateBuilder.h | 4 +- ceph/src/tools/setup-virtualenv.sh | 11 +- ceph/src/tracing/CMakeLists.txt | 4 +- ceph/src/vstart.sh | 87 +- .../{ceph-osd-smartctl => ceph-smartctl} | 2 +- 1364 files changed, 159110 insertions(+), 22674 deletions(-) rename ceph/doc/cephadm/{ => 
services}/custom-container.rst (83%) rename ceph/doc/cephadm/{service-management.rst => services/index.rst} (81%) rename ceph/doc/cephadm/{ => services}/iscsi.rst (88%) rename ceph/doc/cephadm/{ => services}/mds.rst (94%) create mode 100644 ceph/doc/cephadm/services/mgr.rst rename ceph/doc/cephadm/{ => services}/mon.rst (96%) rename ceph/doc/cephadm/{ => services}/monitoring.rst (90%) rename ceph/doc/cephadm/{ => services}/nfs.rst (94%) rename ceph/doc/cephadm/{ => services}/osd.rst (87%) rename ceph/doc/cephadm/{ => services}/rgw.rst (92%) delete mode 100644 ceph/doc/cephfs/fs-nfs-exports.rst create mode 100644 ceph/doc/dev/vstart-ganesha.rst create mode 100644 ceph/doc/mgr/nfs.rst rename ceph/qa/cephfs/overrides/{frag_enable.yaml => frag.yaml} (86%) create mode 100644 ceph/qa/distros/podman/centos_8.stream_container_tools.yaml delete mode 100644 ceph/qa/distros/podman/rhel_8.3_kubic_stable.yaml create mode 100644 ceph/qa/distros/podman/rhel_8.4_container_tools_3.0.yaml create mode 100644 ceph/qa/distros/podman/rhel_8.4_container_tools_rhel8.yaml delete mode 100644 ceph/qa/distros/podman/ubuntu_18.04_kubic_stable.yaml delete mode 100644 ceph/qa/distros/podman/ubuntu_20.04_kubic_stable.yaml delete mode 100644 ceph/qa/distros/podman/ubuntu_20.04_kubic_testing.yaml delete mode 120000 ceph/qa/suites/fs/32bits/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/bugs/client_trim_caps/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/full/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/functional/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/libcephfs/overrides/frag_enable.yaml create mode 100644 ceph/qa/suites/fs/libcephfs/tasks/libcephfs/+ create mode 120000 ceph/qa/suites/fs/libcephfs/tasks/libcephfs/.qa create mode 120000 ceph/qa/suites/fs/libcephfs/tasks/libcephfs/frag.yaml rename ceph/qa/suites/fs/libcephfs/tasks/{libcephfs.yaml => libcephfs/test.yaml} (100%) create mode 100644 ceph/qa/suites/fs/mirror-ha/% create mode 120000 ceph/qa/suites/fs/mirror-ha/.qa create mode 120000 ceph/qa/suites/fs/mirror-ha/begin.yaml create mode 100644 ceph/qa/suites/fs/mirror-ha/cephfs-mirror/three-per-cluster.yaml create mode 100644 ceph/qa/suites/fs/mirror-ha/clients/+ create mode 120000 ceph/qa/suites/fs/mirror-ha/clients/.qa create mode 100644 ceph/qa/suites/fs/mirror-ha/clients/mirror.yaml create mode 100644 ceph/qa/suites/fs/mirror-ha/cluster/+ create mode 100644 ceph/qa/suites/fs/mirror-ha/cluster/1-node.yaml create mode 120000 ceph/qa/suites/fs/mirror-ha/objectstore/.qa create mode 120000 ceph/qa/suites/fs/mirror-ha/objectstore/bluestore-bitmap.yaml create mode 100644 ceph/qa/suites/fs/mirror-ha/overrides/+ create mode 120000 ceph/qa/suites/fs/mirror-ha/overrides/.qa create mode 100644 ceph/qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml create mode 120000 ceph/qa/suites/fs/mirror-ha/supported-random-distro$ create mode 120000 ceph/qa/suites/fs/mirror-ha/workloads/.qa create mode 100644 ceph/qa/suites/fs/mirror-ha/workloads/cephfs-mirror-ha-workunit.yaml delete mode 120000 ceph/qa/suites/fs/mixed-clients/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/multiclient/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/multifs/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/permission/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/shell/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/snaps/overrides/frag_enable.yaml create mode 120000 
ceph/qa/suites/fs/thrash/multifs/overrides/frag.yaml delete mode 120000 ceph/qa/suites/fs/thrash/multifs/overrides/frag_enable.yaml create mode 120000 ceph/qa/suites/fs/thrash/workloads/overrides/frag.yaml delete mode 120000 ceph/qa/suites/fs/thrash/workloads/overrides/frag_enable.yaml create mode 120000 ceph/qa/suites/fs/traceless/overrides/frag.yaml delete mode 120000 ceph/qa/suites/fs/traceless/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/upgrade/featureful_client/old_client/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/frag_enable.yaml create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/% create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/.qa create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/bluestore-bitmap.yaml create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/centos_8.stream_container_tools.yaml create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/conf create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/% create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/.qa create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/pg-warn.yaml create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/whitelist_health.yaml create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/whitelist_wrongly_marked_down.yaml create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/roles.yaml create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/% create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/.qa create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/.qa create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/v16.2.4.yaml create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/% create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/.qa create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/0-create.yaml create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/.qa create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/1.yaml create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/2.yaml create mode 120000 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/.qa create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/no.yaml create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/yes.yaml create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/3-verify.yaml create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client.yaml create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/3-upgrade-with-workload.yaml create mode 100644 ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/4-verify.yaml delete mode 120000 ceph/qa/suites/fs/upgrade/volumes/import-legacy/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/verify/overrides/frag_enable.yaml delete mode 120000 ceph/qa/suites/fs/volumes/overrides/frag_enable.yaml create mode 120000 ceph/qa/suites/fs/workload/overrides/frag.yaml delete mode 120000 ceph/qa/suites/fs/workload/overrides/frag_enable.yaml create mode 120000 
ceph/qa/suites/orch/cephadm/dashboard/0-distro/centos_8.3_container_tools_3.0.yaml create mode 120000 ceph/qa/suites/orch/cephadm/mds_upgrade_sequence create mode 100644 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/% create mode 120000 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/.qa create mode 120000 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/.qa create mode 120000 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/centos_8.3_container_tools_3.0.yaml create mode 120000 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/centos_8.stream_container_tools.yaml create mode 100644 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/16.2.4.yaml create mode 100644 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/16.2.5.yaml create mode 100644 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/octopus.yaml create mode 100644 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-start.yaml create mode 100644 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/2-nfs.yaml create mode 100644 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/3-upgrade-with-workload.yaml create mode 100644 ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/4-final.yaml create mode 100644 ceph/qa/suites/orch/cephadm/osds/2-ops/rm-zap-flag.yaml create mode 100644 ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml create mode 100644 ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml rename ceph/qa/suites/orch/cephadm/{dashboard/0-distro => smoke/distro}/centos_8.2_container_tools_3.0.yaml (100%) create mode 120000 ceph/qa/suites/orch/cephadm/smoke/distro/centos_8.stream_container_tools.yaml create mode 120000 ceph/qa/suites/orch/cephadm/smoke/distro/rhel_8.4_container_tools_3.0.yaml create mode 120000 ceph/qa/suites/orch/cephadm/smoke/distro/rhel_8.4_container_tools_rhel8.yaml rename ceph/qa/suites/orch/cephadm/upgrade/1-start-distro/{1-start-centos_8.3-octopus.yaml => 1-start-centos_8.stream_container-tools.yaml} (52%) create mode 100644 ceph/qa/suites/orch/cephadm/upgrade/5-upgrade-ls.yaml create mode 120000 ceph/qa/suites/orch/cephadm/workunits/0-distro/centos_8.stream_container_tools.yaml create mode 100644 ceph/qa/suites/orch/cephadm/workunits/task/test_nfs.yaml delete mode 120000 ceph/qa/suites/perf-basic/ubuntu_18.04.yaml create mode 120000 ceph/qa/suites/perf-basic/ubuntu_latest.yaml delete mode 100644 ceph/qa/suites/perf-basic/workloads/cosbench_64K_write.yaml delete mode 120000 ceph/qa/suites/rados/perf/ubuntu_18.04.yaml create mode 120000 ceph/qa/suites/rados/perf/ubuntu_latest.yaml delete mode 100644 ceph/qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml delete mode 100644 ceph/qa/suites/rados/perf/workloads/cosbench_64K_write.yaml create mode 100644 ceph/qa/suites/rados/singleton/all/backfill-toofull.yaml create mode 100644 ceph/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml rename ceph/qa/suites/rbd/mirror/workloads/{rbd-mirror-bootstrap-workunit.yaml => rbd-mirror-journal-bootstrap-workunit.yaml} (83%) create mode 100644 ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-snapshot-bootstrap-workunit.yaml create mode 100644 ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/big-cache.yaml rename ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1-ceph-install/{pacific..yaml => pacific.yaml} (81%) create mode 100644 ceph/qa/tasks/backfill_toofull.py create mode 100644 ceph/qa/tasks/cephfs_mirror_thrash.py create mode 100644 ceph/qa/tasks/ec_inconsistent_hinfo.py create mode 100644 ceph/qa/tasks/mgr/dashboard/test_cluster.py 
delete mode 100644 ceph/qa/tasks/mgr/dashboard/test_ganesha.py create mode 100644 ceph/qa/tasks/python.py create mode 100755 ceph/qa/workunits/fs/cephfs_mirror_ha_gen.sh create mode 100755 ceph/qa/workunits/fs/cephfs_mirror_ha_verify.sh create mode 100644 ceph/qa/workunits/fs/cephfs_mirror_helpers.sh create mode 100644 ceph/src/common/fair_mutex.h create mode 100644 ceph/src/pmdk/.cirrus.yml create mode 100644 ceph/src/pmdk/.codecov.yml create mode 100644 ceph/src/pmdk/.gitattributes create mode 100644 ceph/src/pmdk/.github/ISSUE_TEMPLATE.md create mode 100644 ceph/src/pmdk/.github/ISSUE_TEMPLATE/bug_report.md create mode 100644 ceph/src/pmdk/.github/ISSUE_TEMPLATE/feature.md create mode 100644 ceph/src/pmdk/.github/ISSUE_TEMPLATE/question.md create mode 100644 ceph/src/pmdk/.github/workflows/coverity.yml create mode 100644 ceph/src/pmdk/.github/workflows/gha.yml create mode 100644 ceph/src/pmdk/.mailmap create mode 100644 ceph/src/pmdk/.skip-doc create mode 100644 ceph/src/pmdk/.travis.yml create mode 100644 ceph/src/pmdk/CODING_STYLE.md create mode 100644 ceph/src/pmdk/CONTRIBUTING.md create mode 100644 ceph/src/pmdk/ChangeLog create mode 100644 ceph/src/pmdk/LICENSE create mode 100644 ceph/src/pmdk/Makefile create mode 100644 ceph/src/pmdk/README.md create mode 100644 ceph/src/pmdk/VERSION create mode 100644 ceph/src/pmdk/appveyor.yml create mode 100644 ceph/src/pmdk/res/PMDK.ico create mode 100644 ceph/src/pmdk/src/.clang-format create mode 100644 ceph/src/pmdk/src/LongPath.manifest create mode 100644 ceph/src/pmdk/src/LongPathSupport.props create mode 100644 ceph/src/pmdk/src/Makefile create mode 100644 ceph/src/pmdk/src/Makefile.inc create mode 100644 ceph/src/pmdk/src/PMDK.sln create mode 100644 ceph/src/pmdk/src/README create mode 100644 ceph/src/pmdk/src/common.inc create mode 100644 ceph/src/pmdk/src/common/.cstyleignore create mode 100644 ceph/src/pmdk/src/common/Makefile create mode 100644 ceph/src/pmdk/src/common/bad_blocks.c create mode 100644 ceph/src/pmdk/src/common/badblocks.h create mode 100644 ceph/src/pmdk/src/common/common.rc create mode 100644 ceph/src/pmdk/src/common/ctl.c create mode 100644 ceph/src/pmdk/src/common/ctl.h create mode 100644 ceph/src/pmdk/src/common/ctl_cow.c create mode 100644 ceph/src/pmdk/src/common/ctl_fallocate.c create mode 100644 ceph/src/pmdk/src/common/ctl_global.h create mode 100644 ceph/src/pmdk/src/common/ctl_prefault.c create mode 100644 ceph/src/pmdk/src/common/ctl_sds.c create mode 100644 ceph/src/pmdk/src/common/dlsym.h create mode 100644 ceph/src/pmdk/src/common/file.c create mode 100644 ceph/src/pmdk/src/common/file.h create mode 100644 ceph/src/pmdk/src/common/file_posix.c create mode 100644 ceph/src/pmdk/src/common/file_windows.c create mode 100644 ceph/src/pmdk/src/common/libpmemcommon.vcxproj create mode 100644 ceph/src/pmdk/src/common/libpmemcommon.vcxproj.filters create mode 100644 ceph/src/pmdk/src/common/mmap.c create mode 100644 ceph/src/pmdk/src/common/mmap.h create mode 100644 ceph/src/pmdk/src/common/mmap_posix.c create mode 100644 ceph/src/pmdk/src/common/mmap_windows.c create mode 100644 ceph/src/pmdk/src/common/os_deep.h create mode 100644 ceph/src/pmdk/src/common/os_deep_linux.c create mode 100644 ceph/src/pmdk/src/common/os_deep_windows.c create mode 100644 ceph/src/pmdk/src/common/page_size.h create mode 100644 ceph/src/pmdk/src/common/pmemcommon.h create mode 100644 ceph/src/pmdk/src/common/pmemcommon.inc create mode 100644 ceph/src/pmdk/src/common/pool_hdr.c create mode 100644 ceph/src/pmdk/src/common/pool_hdr.h 
create mode 100644 ceph/src/pmdk/src/common/queue.h create mode 100644 ceph/src/pmdk/src/common/rand.c create mode 100644 ceph/src/pmdk/src/common/rand.h create mode 100644 ceph/src/pmdk/src/common/ravl.c create mode 100644 ceph/src/pmdk/src/common/ravl.h create mode 100644 ceph/src/pmdk/src/common/set.c create mode 100644 ceph/src/pmdk/src/common/set.h create mode 100644 ceph/src/pmdk/src/common/set_badblocks.c create mode 100644 ceph/src/pmdk/src/common/set_badblocks.h create mode 100644 ceph/src/pmdk/src/common/shutdown_state.c create mode 100644 ceph/src/pmdk/src/common/shutdown_state.h create mode 100644 ceph/src/pmdk/src/common/sys_util.h create mode 100644 ceph/src/pmdk/src/common/util_pmem.h create mode 100644 ceph/src/pmdk/src/common/uuid.c create mode 100644 ceph/src/pmdk/src/common/uuid.h create mode 100644 ceph/src/pmdk/src/common/uuid_freebsd.c create mode 100644 ceph/src/pmdk/src/common/uuid_linux.c create mode 100644 ceph/src/pmdk/src/common/uuid_windows.c create mode 100644 ceph/src/pmdk/src/common/vec.h create mode 100644 ceph/src/pmdk/src/common/vecq.h create mode 100644 ceph/src/pmdk/src/core/Makefile create mode 100644 ceph/src/pmdk/src/core/alloc.c create mode 100644 ceph/src/pmdk/src/core/alloc.h create mode 100644 ceph/src/pmdk/src/core/errno_freebsd.h create mode 100644 ceph/src/pmdk/src/core/fault_injection.h create mode 100644 ceph/src/pmdk/src/core/fs.h create mode 100644 ceph/src/pmdk/src/core/fs_posix.c create mode 100644 ceph/src/pmdk/src/core/fs_windows.c create mode 100644 ceph/src/pmdk/src/core/libpmemcore.vcxproj create mode 100644 ceph/src/pmdk/src/core/libpmemcore.vcxproj.filters create mode 100644 ceph/src/pmdk/src/core/os.h create mode 100644 ceph/src/pmdk/src/core/os_posix.c create mode 100644 ceph/src/pmdk/src/core/os_thread.h create mode 100644 ceph/src/pmdk/src/core/os_thread_posix.c create mode 100644 ceph/src/pmdk/src/core/os_thread_windows.c create mode 100644 ceph/src/pmdk/src/core/os_windows.c create mode 100644 ceph/src/pmdk/src/core/out.c create mode 100644 ceph/src/pmdk/src/core/out.h create mode 100644 ceph/src/pmdk/src/core/pmemcore.h create mode 100644 ceph/src/pmdk/src/core/pmemcore.inc create mode 100644 ceph/src/pmdk/src/core/util.c create mode 100644 ceph/src/pmdk/src/core/util.h create mode 100644 ceph/src/pmdk/src/core/util_posix.c create mode 100644 ceph/src/pmdk/src/core/util_windows.c create mode 100644 ceph/src/pmdk/src/core/valgrind/.cstyleignore create mode 100644 ceph/src/pmdk/src/core/valgrind/README create mode 100644 ceph/src/pmdk/src/core/valgrind/drd.h create mode 100644 ceph/src/pmdk/src/core/valgrind/helgrind.h create mode 100644 ceph/src/pmdk/src/core/valgrind/memcheck.h create mode 100644 ceph/src/pmdk/src/core/valgrind/pmemcheck.h create mode 100644 ceph/src/pmdk/src/core/valgrind/valgrind.h create mode 100644 ceph/src/pmdk/src/core/valgrind_internal.h create mode 100644 ceph/src/pmdk/src/freebsd/README create mode 100644 ceph/src/pmdk/src/freebsd/include/endian.h create mode 100644 ceph/src/pmdk/src/freebsd/include/features.h create mode 100644 ceph/src/pmdk/src/freebsd/include/linux/kdev_t.h create mode 100644 ceph/src/pmdk/src/freebsd/include/linux/limits.h create mode 100644 ceph/src/pmdk/src/freebsd/include/sys/sysmacros.h create mode 100644 ceph/src/pmdk/src/include/.cstyleignore create mode 100644 ceph/src/pmdk/src/include/README create mode 100644 ceph/src/pmdk/src/include/libpmem.h create mode 100644 ceph/src/pmdk/src/include/libpmem2.h create mode 100644 ceph/src/pmdk/src/include/libpmemblk.h create mode 
100644 ceph/src/pmdk/src/include/libpmemlog.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj++/README.md create mode 100644 ceph/src/pmdk/src/include/libpmemobj++/detail/README.md create mode 100644 ceph/src/pmdk/src/include/libpmemobj.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/action.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/action_base.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/atomic.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/atomic_base.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/base.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/ctl.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/iterator.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/iterator_base.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/lists_atomic.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/lists_atomic_base.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/pool.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/pool_base.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/thread.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/tx.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/tx_base.h create mode 100644 ceph/src/pmdk/src/include/libpmemobj/types.h create mode 100644 ceph/src/pmdk/src/include/libpmempool.h create mode 100644 ceph/src/pmdk/src/include/librpmem.h create mode 100644 ceph/src/pmdk/src/include/pmemcompat.h create mode 100644 ceph/src/pmdk/src/libpmem/Makefile create mode 100644 ceph/src/pmdk/src/libpmem/libpmem.c create mode 100644 ceph/src/pmdk/src/libpmem/libpmem.def create mode 100644 ceph/src/pmdk/src/libpmem/libpmem.link.in create mode 100644 ceph/src/pmdk/src/libpmem/libpmem.rc create mode 100644 ceph/src/pmdk/src/libpmem/libpmem.vcxproj create mode 100644 ceph/src/pmdk/src/libpmem/libpmem.vcxproj.filters create mode 100644 ceph/src/pmdk/src/libpmem/libpmem_main.c create mode 100644 ceph/src/pmdk/src/libpmem/pmem.c create mode 100644 ceph/src/pmdk/src/libpmem/pmem.h create mode 100644 ceph/src/pmdk/src/libpmem/pmem_posix.c create mode 100644 ceph/src/pmdk/src/libpmem/pmem_windows.c create mode 100644 ceph/src/pmdk/src/libpmem2/Makefile create mode 100644 ceph/src/pmdk/src/libpmem2/aarch64/arm_cacheops.h create mode 100644 ceph/src/pmdk/src/libpmem2/aarch64/flags.inc create mode 100644 ceph/src/pmdk/src/libpmem2/aarch64/flush.h create mode 100644 ceph/src/pmdk/src/libpmem2/aarch64/init.c create mode 100644 ceph/src/pmdk/src/libpmem2/aarch64/sources.inc create mode 100644 ceph/src/pmdk/src/libpmem2/auto_flush.h create mode 100644 ceph/src/pmdk/src/libpmem2/auto_flush_linux.c create mode 100644 ceph/src/pmdk/src/libpmem2/auto_flush_none.c create mode 100644 ceph/src/pmdk/src/libpmem2/auto_flush_windows.c create mode 100644 ceph/src/pmdk/src/libpmem2/auto_flush_windows.h create mode 100644 ceph/src/pmdk/src/libpmem2/badblocks.c create mode 100644 ceph/src/pmdk/src/libpmem2/badblocks_ndctl.c create mode 100644 ceph/src/pmdk/src/libpmem2/badblocks_none.c create mode 100644 ceph/src/pmdk/src/libpmem2/config.c create mode 100644 ceph/src/pmdk/src/libpmem2/config.h create mode 100644 ceph/src/pmdk/src/libpmem2/deep_flush.c create mode 100644 ceph/src/pmdk/src/libpmem2/deep_flush.h create mode 100644 ceph/src/pmdk/src/libpmem2/deep_flush_linux.c create mode 100644 ceph/src/pmdk/src/libpmem2/deep_flush_other.c create mode 100644 ceph/src/pmdk/src/libpmem2/deep_flush_windows.c create mode 100644 ceph/src/pmdk/src/libpmem2/errormsg.c 
create mode 100644 ceph/src/pmdk/src/libpmem2/extent.h create mode 100644 ceph/src/pmdk/src/libpmem2/extent_linux.c create mode 100644 ceph/src/pmdk/src/libpmem2/extent_none.c create mode 100644 ceph/src/pmdk/src/libpmem2/libpmem2.c create mode 100644 ceph/src/pmdk/src/libpmem2/libpmem2.def create mode 100644 ceph/src/pmdk/src/libpmem2/libpmem2.link.in create mode 100644 ceph/src/pmdk/src/libpmem2/libpmem2.rc create mode 100644 ceph/src/pmdk/src/libpmem2/libpmem2.vcxproj create mode 100644 ceph/src/pmdk/src/libpmem2/libpmem2.vcxproj.filters create mode 100644 ceph/src/pmdk/src/libpmem2/libpmem2_main.c create mode 100644 ceph/src/pmdk/src/libpmem2/map.c create mode 100644 ceph/src/pmdk/src/libpmem2/map.h create mode 100644 ceph/src/pmdk/src/libpmem2/map_posix.c create mode 100644 ceph/src/pmdk/src/libpmem2/map_windows.c create mode 100644 ceph/src/pmdk/src/libpmem2/memops_generic.c create mode 100644 ceph/src/pmdk/src/libpmem2/persist.c create mode 100644 ceph/src/pmdk/src/libpmem2/persist.h create mode 100644 ceph/src/pmdk/src/libpmem2/persist_posix.c create mode 100644 ceph/src/pmdk/src/libpmem2/persist_windows.c create mode 100644 ceph/src/pmdk/src/libpmem2/pmem2.h create mode 100644 ceph/src/pmdk/src/libpmem2/pmem2_arch.h create mode 100644 ceph/src/pmdk/src/libpmem2/pmem2_utils.c create mode 100644 ceph/src/pmdk/src/libpmem2/pmem2_utils.h create mode 100644 ceph/src/pmdk/src/libpmem2/pmem2_utils_linux.c create mode 100644 ceph/src/pmdk/src/libpmem2/pmem2_utils_ndctl.c create mode 100644 ceph/src/pmdk/src/libpmem2/pmem2_utils_none.c create mode 100644 ceph/src/pmdk/src/libpmem2/pmem2_utils_other.c create mode 100644 ceph/src/pmdk/src/libpmem2/ppc64/.cstyleignore create mode 100644 ceph/src/pmdk/src/libpmem2/ppc64/flags.inc create mode 100644 ceph/src/pmdk/src/libpmem2/ppc64/init.c create mode 100644 ceph/src/pmdk/src/libpmem2/ppc64/sources.inc create mode 100644 ceph/src/pmdk/src/libpmem2/ravl_interval.c create mode 100644 ceph/src/pmdk/src/libpmem2/ravl_interval.h create mode 100644 ceph/src/pmdk/src/libpmem2/region_namespace.h create mode 100644 ceph/src/pmdk/src/libpmem2/region_namespace_ndctl.c create mode 100644 ceph/src/pmdk/src/libpmem2/region_namespace_ndctl.h create mode 100644 ceph/src/pmdk/src/libpmem2/region_namespace_none.c create mode 100644 ceph/src/pmdk/src/libpmem2/source.c create mode 100644 ceph/src/pmdk/src/libpmem2/source.h create mode 100644 ceph/src/pmdk/src/libpmem2/source_posix.c create mode 100644 ceph/src/pmdk/src/libpmem2/source_windows.c create mode 100644 ceph/src/pmdk/src/libpmem2/usc_ndctl.c create mode 100644 ceph/src/pmdk/src/libpmem2/usc_none.c create mode 100644 ceph/src/pmdk/src/libpmem2/usc_windows.c create mode 100644 ceph/src/pmdk/src/libpmem2/vm_reservation.c create mode 100644 ceph/src/pmdk/src/libpmem2/vm_reservation.h create mode 100644 ceph/src/pmdk/src/libpmem2/vm_reservation_posix.c create mode 100644 ceph/src/pmdk/src/libpmem2/vm_reservation_windows.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/avx.h create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/cpu.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/cpu.h create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/flags.inc create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/flush.h create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/init.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memcpy/memcpy_avx.h create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memcpy/memcpy_avx512f.h create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memcpy/memcpy_nt_avx.c create mode 
100644 ceph/src/pmdk/src/libpmem2/x86_64/memcpy/memcpy_nt_avx512f.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memcpy/memcpy_nt_sse2.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memcpy/memcpy_sse2.h create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memcpy/memcpy_t_avx.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memcpy/memcpy_t_avx512f.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memcpy/memcpy_t_sse2.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memcpy_memset.h create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memset/memset_avx.h create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memset/memset_avx512f.h create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memset/memset_nt_avx.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memset/memset_nt_avx512f.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memset/memset_nt_sse2.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memset/memset_sse2.h create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memset/memset_t_avx.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memset/memset_t_avx512f.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/memset/memset_t_sse2.c create mode 100644 ceph/src/pmdk/src/libpmem2/x86_64/sources.inc create mode 100644 ceph/src/pmdk/src/libpmemblk/Makefile create mode 100644 ceph/src/pmdk/src/libpmemblk/blk.c create mode 100644 ceph/src/pmdk/src/libpmemblk/blk.h create mode 100644 ceph/src/pmdk/src/libpmemblk/btt.c create mode 100644 ceph/src/pmdk/src/libpmemblk/btt.h create mode 100644 ceph/src/pmdk/src/libpmemblk/btt_layout.h create mode 100644 ceph/src/pmdk/src/libpmemblk/libpmemblk.c create mode 100644 ceph/src/pmdk/src/libpmemblk/libpmemblk.def create mode 100644 ceph/src/pmdk/src/libpmemblk/libpmemblk.link.in create mode 100644 ceph/src/pmdk/src/libpmemblk/libpmemblk.rc create mode 100644 ceph/src/pmdk/src/libpmemblk/libpmemblk.vcxproj create mode 100644 ceph/src/pmdk/src/libpmemblk/libpmemblk.vcxproj.filters create mode 100644 ceph/src/pmdk/src/libpmemblk/libpmemblk_main.c create mode 100644 ceph/src/pmdk/src/libpmemlog/Makefile create mode 100644 ceph/src/pmdk/src/libpmemlog/libpmemlog.c create mode 100644 ceph/src/pmdk/src/libpmemlog/libpmemlog.def create mode 100644 ceph/src/pmdk/src/libpmemlog/libpmemlog.link.in create mode 100644 ceph/src/pmdk/src/libpmemlog/libpmemlog.rc create mode 100644 ceph/src/pmdk/src/libpmemlog/libpmemlog.vcxproj create mode 100644 ceph/src/pmdk/src/libpmemlog/libpmemlog.vcxproj.filters create mode 100644 ceph/src/pmdk/src/libpmemlog/libpmemlog_main.c create mode 100644 ceph/src/pmdk/src/libpmemlog/log.c create mode 100644 ceph/src/pmdk/src/libpmemlog/log.h create mode 100644 ceph/src/pmdk/src/libpmemobj/Makefile create mode 100644 ceph/src/pmdk/src/libpmemobj/alloc_class.c create mode 100644 ceph/src/pmdk/src/libpmemobj/alloc_class.h create mode 100644 ceph/src/pmdk/src/libpmemobj/bucket.c create mode 100644 ceph/src/pmdk/src/libpmemobj/bucket.h create mode 100644 ceph/src/pmdk/src/libpmemobj/container.h create mode 100644 ceph/src/pmdk/src/libpmemobj/container_ravl.c create mode 100644 ceph/src/pmdk/src/libpmemobj/container_ravl.h create mode 100644 ceph/src/pmdk/src/libpmemobj/container_seglists.c create mode 100644 ceph/src/pmdk/src/libpmemobj/container_seglists.h create mode 100644 ceph/src/pmdk/src/libpmemobj/critnib.c create mode 100644 ceph/src/pmdk/src/libpmemobj/critnib.h create mode 100644 ceph/src/pmdk/src/libpmemobj/ctl_debug.c create mode 100644 ceph/src/pmdk/src/libpmemobj/ctl_debug.h 
create mode 100644 ceph/src/pmdk/src/libpmemobj/heap.c create mode 100644 ceph/src/pmdk/src/libpmemobj/heap.h create mode 100644 ceph/src/pmdk/src/libpmemobj/heap_layout.h create mode 100644 ceph/src/pmdk/src/libpmemobj/lane.c create mode 100644 ceph/src/pmdk/src/libpmemobj/lane.h create mode 100644 ceph/src/pmdk/src/libpmemobj/libpmemobj.c create mode 100644 ceph/src/pmdk/src/libpmemobj/libpmemobj.def create mode 100644 ceph/src/pmdk/src/libpmemobj/libpmemobj.link.in create mode 100644 ceph/src/pmdk/src/libpmemobj/libpmemobj.rc create mode 100644 ceph/src/pmdk/src/libpmemobj/libpmemobj.vcxproj create mode 100644 ceph/src/pmdk/src/libpmemobj/libpmemobj.vcxproj.filters create mode 100644 ceph/src/pmdk/src/libpmemobj/libpmemobj_main.c create mode 100644 ceph/src/pmdk/src/libpmemobj/list.c create mode 100644 ceph/src/pmdk/src/libpmemobj/list.h create mode 100644 ceph/src/pmdk/src/libpmemobj/memblock.c create mode 100644 ceph/src/pmdk/src/libpmemobj/memblock.h create mode 100644 ceph/src/pmdk/src/libpmemobj/memops.c create mode 100644 ceph/src/pmdk/src/libpmemobj/memops.h create mode 100644 ceph/src/pmdk/src/libpmemobj/obj.c create mode 100644 ceph/src/pmdk/src/libpmemobj/obj.h create mode 100644 ceph/src/pmdk/src/libpmemobj/palloc.c create mode 100644 ceph/src/pmdk/src/libpmemobj/palloc.h create mode 100644 ceph/src/pmdk/src/libpmemobj/pmalloc.c create mode 100644 ceph/src/pmdk/src/libpmemobj/pmalloc.h create mode 100644 ceph/src/pmdk/src/libpmemobj/pmemops.h create mode 100644 ceph/src/pmdk/src/libpmemobj/recycler.c create mode 100644 ceph/src/pmdk/src/libpmemobj/recycler.h create mode 100644 ceph/src/pmdk/src/libpmemobj/stats.c create mode 100644 ceph/src/pmdk/src/libpmemobj/stats.h create mode 100644 ceph/src/pmdk/src/libpmemobj/sync.c create mode 100644 ceph/src/pmdk/src/libpmemobj/sync.h create mode 100644 ceph/src/pmdk/src/libpmemobj/tx.c create mode 100644 ceph/src/pmdk/src/libpmemobj/tx.h create mode 100644 ceph/src/pmdk/src/libpmemobj/ulog.c create mode 100644 ceph/src/pmdk/src/libpmemobj/ulog.h create mode 100644 ceph/src/pmdk/src/libpmempool/Makefile create mode 100644 ceph/src/pmdk/src/libpmempool/check.c create mode 100644 ceph/src/pmdk/src/libpmempool/check.h create mode 100644 ceph/src/pmdk/src/libpmempool/check_backup.c create mode 100644 ceph/src/pmdk/src/libpmempool/check_bad_blocks.c create mode 100644 ceph/src/pmdk/src/libpmempool/check_blk.c create mode 100644 ceph/src/pmdk/src/libpmempool/check_btt_info.c create mode 100644 ceph/src/pmdk/src/libpmempool/check_btt_map_flog.c create mode 100644 ceph/src/pmdk/src/libpmempool/check_log.c create mode 100644 ceph/src/pmdk/src/libpmempool/check_pool_hdr.c create mode 100644 ceph/src/pmdk/src/libpmempool/check_sds.c create mode 100644 ceph/src/pmdk/src/libpmempool/check_util.c create mode 100644 ceph/src/pmdk/src/libpmempool/check_util.h create mode 100644 ceph/src/pmdk/src/libpmempool/check_write.c create mode 100644 ceph/src/pmdk/src/libpmempool/feature.c create mode 100644 ceph/src/pmdk/src/libpmempool/libpmempool.c create mode 100644 ceph/src/pmdk/src/libpmempool/libpmempool.def create mode 100644 ceph/src/pmdk/src/libpmempool/libpmempool.link.in create mode 100644 ceph/src/pmdk/src/libpmempool/libpmempool.rc create mode 100644 ceph/src/pmdk/src/libpmempool/libpmempool.vcxproj create mode 100644 ceph/src/pmdk/src/libpmempool/libpmempool.vcxproj.filters create mode 100644 ceph/src/pmdk/src/libpmempool/libpmempool_main.c create mode 100644 ceph/src/pmdk/src/libpmempool/pmempool.h create mode 100644 
ceph/src/pmdk/src/libpmempool/pool.c create mode 100644 ceph/src/pmdk/src/libpmempool/pool.h create mode 100644 ceph/src/pmdk/src/libpmempool/replica.c create mode 100644 ceph/src/pmdk/src/libpmempool/replica.h create mode 100644 ceph/src/pmdk/src/libpmempool/rm.c create mode 100644 ceph/src/pmdk/src/libpmempool/sync.c create mode 100644 ceph/src/pmdk/src/libpmempool/transform.c create mode 100644 ceph/src/pmdk/src/librpmem/Makefile create mode 100644 ceph/src/pmdk/src/librpmem/README create mode 100644 ceph/src/pmdk/src/librpmem/librpmem.c create mode 100644 ceph/src/pmdk/src/librpmem/librpmem.link.in create mode 100644 ceph/src/pmdk/src/librpmem/rpmem.c create mode 100644 ceph/src/pmdk/src/librpmem/rpmem.h create mode 100644 ceph/src/pmdk/src/librpmem/rpmem_cmd.c create mode 100644 ceph/src/pmdk/src/librpmem/rpmem_cmd.h create mode 100644 ceph/src/pmdk/src/librpmem/rpmem_fip.c create mode 100644 ceph/src/pmdk/src/librpmem/rpmem_fip.h create mode 100644 ceph/src/pmdk/src/librpmem/rpmem_obc.c create mode 100644 ceph/src/pmdk/src/librpmem/rpmem_obc.h create mode 100644 ceph/src/pmdk/src/librpmem/rpmem_ssh.c create mode 100644 ceph/src/pmdk/src/librpmem/rpmem_ssh.h create mode 100644 ceph/src/pmdk/src/librpmem/rpmem_util.c create mode 100644 ceph/src/pmdk/src/librpmem/rpmem_util.h create mode 100644 ceph/src/pmdk/src/libvmem/README.md create mode 100644 ceph/src/pmdk/src/libvmmalloc/README.md create mode 100644 ceph/src/pmdk/src/rpmem_common/Makefile create mode 100644 ceph/src/pmdk/src/rpmem_common/rpmem_common.c create mode 100644 ceph/src/pmdk/src/rpmem_common/rpmem_common.h create mode 100644 ceph/src/pmdk/src/rpmem_common/rpmem_common_log.h create mode 100644 ceph/src/pmdk/src/rpmem_common/rpmem_fip_common.c create mode 100644 ceph/src/pmdk/src/rpmem_common/rpmem_fip_common.h create mode 100644 ceph/src/pmdk/src/rpmem_common/rpmem_fip_lane.h create mode 100644 ceph/src/pmdk/src/rpmem_common/rpmem_fip_msg.h create mode 100644 ceph/src/pmdk/src/rpmem_common/rpmem_proto.h create mode 100644 ceph/src/pmdk/src/tools/Makefile create mode 100644 ceph/src/pmdk/src/tools/Makefile.inc create mode 100644 ceph/src/pmdk/src/tools/daxio/Makefile create mode 100644 ceph/src/pmdk/src/tools/daxio/README create mode 100644 ceph/src/pmdk/src/tools/daxio/daxio.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/Makefile create mode 100644 ceph/src/pmdk/src/tools/pmempool/README create mode 100644 ceph/src/pmdk/src/tools/pmempool/bash_completion/pmempool create mode 100644 ceph/src/pmdk/src/tools/pmempool/check.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/check.h create mode 100644 ceph/src/pmdk/src/tools/pmempool/common.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/common.h create mode 100644 ceph/src/pmdk/src/tools/pmempool/convert.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/convert.h create mode 100644 ceph/src/pmdk/src/tools/pmempool/create.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/create.h create mode 100644 ceph/src/pmdk/src/tools/pmempool/dump.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/dump.h create mode 100644 ceph/src/pmdk/src/tools/pmempool/feature.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/feature.h create mode 100644 ceph/src/pmdk/src/tools/pmempool/info.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/info.h create mode 100644 ceph/src/pmdk/src/tools/pmempool/info_blk.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/info_log.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/info_obj.c create mode 100644 
ceph/src/pmdk/src/tools/pmempool/output.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/output.h create mode 100644 ceph/src/pmdk/src/tools/pmempool/pmempool.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/pmempool.rc create mode 100644 ceph/src/pmdk/src/tools/pmempool/pmempool.vcxproj create mode 100644 ceph/src/pmdk/src/tools/pmempool/pmempool.vcxproj.filters create mode 100644 ceph/src/pmdk/src/tools/pmempool/rm.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/rm.h create mode 100644 ceph/src/pmdk/src/tools/pmempool/synchronize.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/synchronize.h create mode 100644 ceph/src/pmdk/src/tools/pmempool/transform.c create mode 100644 ceph/src/pmdk/src/tools/pmempool/transform.h create mode 100644 ceph/src/pmdk/src/tools/pmreorder/Makefile create mode 100644 ceph/src/pmdk/src/tools/pmreorder/binaryoutputhandler.py create mode 100644 ceph/src/pmdk/src/tools/pmreorder/consistencycheckwrap.py create mode 100644 ceph/src/pmdk/src/tools/pmreorder/loggingfacility.py create mode 100644 ceph/src/pmdk/src/tools/pmreorder/markerparser.py create mode 100644 ceph/src/pmdk/src/tools/pmreorder/memoryoperations.py create mode 100644 ceph/src/pmdk/src/tools/pmreorder/operationfactory.py create mode 100644 ceph/src/pmdk/src/tools/pmreorder/opscontext.py create mode 100644 ceph/src/pmdk/src/tools/pmreorder/pmreorder.py create mode 100644 ceph/src/pmdk/src/tools/pmreorder/reorderengines.py create mode 100644 ceph/src/pmdk/src/tools/pmreorder/reorderexceptions.py create mode 100644 ceph/src/pmdk/src/tools/pmreorder/statemachine.py create mode 100644 ceph/src/pmdk/src/tools/pmreorder/utils.py create mode 100644 ceph/src/pmdk/src/tools/rpmemd/Makefile create mode 100644 ceph/src/pmdk/src/tools/rpmemd/README create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd.c create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd.h create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_config.c create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_config.h create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_db.c create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_db.h create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_fip.c create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_fip.h create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_log.c create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_log.h create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_obc.c create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_obc.h create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_util.c create mode 100644 ceph/src/pmdk/src/tools/rpmemd/rpmemd_util.h create mode 100644 ceph/src/pmdk/src/windows/README create mode 100644 ceph/src/pmdk/src/windows/getopt/.cstyleignore create mode 100644 ceph/src/pmdk/src/windows/getopt/LICENSE.txt create mode 100644 ceph/src/pmdk/src/windows/getopt/README create mode 100644 ceph/src/pmdk/src/windows/getopt/getopt.c create mode 100644 ceph/src/pmdk/src/windows/getopt/getopt.h create mode 100644 ceph/src/pmdk/src/windows/getopt/getopt.vcxproj create mode 100644 ceph/src/pmdk/src/windows/getopt/getopt.vcxproj.filters create mode 100644 ceph/src/pmdk/src/windows/include/.cstyleignore create mode 100644 ceph/src/pmdk/src/windows/include/dirent.h create mode 100644 ceph/src/pmdk/src/windows/include/endian.h create mode 100644 ceph/src/pmdk/src/windows/include/err.h create mode 100644 ceph/src/pmdk/src/windows/include/features.h create mode 100644 ceph/src/pmdk/src/windows/include/libgen.h create mode 
100644 ceph/src/pmdk/src/windows/include/linux/limits.h create mode 100644 ceph/src/pmdk/src/windows/include/platform.h create mode 100644 ceph/src/pmdk/src/windows/include/sched.h create mode 100644 ceph/src/pmdk/src/windows/include/strings.h create mode 100644 ceph/src/pmdk/src/windows/include/sys/file.h create mode 100644 ceph/src/pmdk/src/windows/include/sys/mman.h create mode 100644 ceph/src/pmdk/src/windows/include/sys/mount.h create mode 100644 ceph/src/pmdk/src/windows/include/sys/param.h create mode 100644 ceph/src/pmdk/src/windows/include/sys/resource.h create mode 100644 ceph/src/pmdk/src/windows/include/sys/statvfs.h create mode 100644 ceph/src/pmdk/src/windows/include/sys/uio.h create mode 100644 ceph/src/pmdk/src/windows/include/sys/wait.h create mode 100644 ceph/src/pmdk/src/windows/include/unistd.h create mode 100644 ceph/src/pmdk/src/windows/include/win_mmap.h create mode 100644 ceph/src/pmdk/src/windows/libs_debug.props create mode 100644 ceph/src/pmdk/src/windows/libs_release.props create mode 100644 ceph/src/pmdk/src/windows/srcversion/srcversion.vcxproj create mode 100644 ceph/src/pmdk/src/windows/win_mmap.c create mode 100644 ceph/src/pmdk/utils/CHECK_WHITESPACE.PS1 create mode 100644 ceph/src/pmdk/utils/CREATE-ZIP.PS1 create mode 100644 ceph/src/pmdk/utils/CSTYLE.ps1 create mode 100644 ceph/src/pmdk/utils/Makefile create mode 100644 ceph/src/pmdk/utils/README create mode 100644 ceph/src/pmdk/utils/SRCVERSION.ps1 create mode 100755 ceph/src/pmdk/utils/build-dpkg.sh create mode 100755 ceph/src/pmdk/utils/build-rpm.sh create mode 100755 ceph/src/pmdk/utils/check-area.sh create mode 100755 ceph/src/pmdk/utils/check-commit.sh create mode 100755 ceph/src/pmdk/utils/check-commits.sh create mode 100755 ceph/src/pmdk/utils/check-manpage create mode 100755 ceph/src/pmdk/utils/check-manpages create mode 100755 ceph/src/pmdk/utils/check-os.sh create mode 100755 ceph/src/pmdk/utils/check-shebang.sh create mode 100755 ceph/src/pmdk/utils/check_license/check-headers.sh create mode 100755 ceph/src/pmdk/utils/check_license/check-ms-license.pl create mode 100755 ceph/src/pmdk/utils/check_license/file-exceptions.sh create mode 100755 ceph/src/pmdk/utils/check_sdk_version.py create mode 100755 ceph/src/pmdk/utils/check_whitespace create mode 100755 ceph/src/pmdk/utils/copy-source.sh create mode 100755 ceph/src/pmdk/utils/cstyle create mode 100644 ceph/src/pmdk/utils/docker/0001-travis-fix-travisci_build_coverity_scan.sh.patch create mode 100644 ceph/src/pmdk/utils/docker/README create mode 100755 ceph/src/pmdk/utils/docker/build-CI.sh create mode 100755 ceph/src/pmdk/utils/docker/build-local.sh create mode 100755 ceph/src/pmdk/utils/docker/configure-tests.sh create mode 100644 ceph/src/pmdk/utils/docker/images/0001-fix-generating-gcov-files-and-turn-off-verbose-log.patch create mode 100644 ceph/src/pmdk/utils/docker/images/Dockerfile.fedora-31 create mode 100644 ceph/src/pmdk/utils/docker/images/Dockerfile.ubuntu-19.10 create mode 100644 ceph/src/pmdk/utils/docker/images/README create mode 100755 ceph/src/pmdk/utils/docker/images/build-image.sh create mode 100755 ceph/src/pmdk/utils/docker/images/download-scripts.sh create mode 100755 ceph/src/pmdk/utils/docker/images/install-libfabric.sh create mode 100755 ceph/src/pmdk/utils/docker/images/install-libndctl.sh create mode 100755 ceph/src/pmdk/utils/docker/images/install-valgrind.sh create mode 100755 ceph/src/pmdk/utils/docker/images/push-image.sh create mode 100644 ceph/src/pmdk/utils/docker/ppc64le.blacklist create mode 100755 
ceph/src/pmdk/utils/docker/prepare-for-build.sh create mode 100755 ceph/src/pmdk/utils/docker/pull-or-rebuild-image.sh create mode 100755 ceph/src/pmdk/utils/docker/run-build-package.sh create mode 100755 ceph/src/pmdk/utils/docker/run-build.sh create mode 100755 ceph/src/pmdk/utils/docker/run-coverage.sh create mode 100755 ceph/src/pmdk/utils/docker/run-coverity.sh create mode 100755 ceph/src/pmdk/utils/docker/run-doc-update.sh create mode 100755 ceph/src/pmdk/utils/docker/set-ci-vars.sh create mode 100755 ceph/src/pmdk/utils/docker/set-vars.sh create mode 100644 ceph/src/pmdk/utils/docker/test_package/Makefile create mode 100644 ceph/src/pmdk/utils/docker/test_package/README create mode 100644 ceph/src/pmdk/utils/docker/test_package/test_package.c create mode 100755 ceph/src/pmdk/utils/docker/valid-branches.sh create mode 100755 ceph/src/pmdk/utils/get_aliases.sh create mode 100755 ceph/src/pmdk/utils/git-years create mode 100644 ceph/src/pmdk/utils/libpmem.pc.in create mode 100644 ceph/src/pmdk/utils/libpmem2.pc.in create mode 100644 ceph/src/pmdk/utils/libpmemblk.pc.in create mode 100644 ceph/src/pmdk/utils/libpmemlog.pc.in create mode 100644 ceph/src/pmdk/utils/libpmemobj.pc.in create mode 100644 ceph/src/pmdk/utils/libpmempool.pc.in create mode 100644 ceph/src/pmdk/utils/librpmem.pc.in create mode 100644 ceph/src/pmdk/utils/magic-install.sh create mode 100644 ceph/src/pmdk/utils/magic-uninstall.sh create mode 100755 ceph/src/pmdk/utils/md2man.sh create mode 100644 ceph/src/pmdk/utils/os-banned create mode 100644 ceph/src/pmdk/utils/pkg-common.sh create mode 100644 ceph/src/pmdk/utils/pkg-config.sh create mode 100644 ceph/src/pmdk/utils/pmdk.magic create mode 100644 ceph/src/pmdk/utils/pmdk.spec.in create mode 100644 ceph/src/pmdk/utils/ps_analyze.ps1 create mode 100755 ceph/src/pmdk/utils/sort_solution create mode 100755 ceph/src/pmdk/utils/style_check.sh create mode 100755 ceph/src/pmdk/utils/version.sh create mode 100644 ceph/src/pybind/mgr/cephadm/registry.py create mode 100644 ceph/src/pybind/mgr/cephadm/tests/test_facts.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_api_router.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_auth.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_base_controller.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_docs.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_endpoint.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_helpers.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_permissions.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_rest_controller.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_router.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_task.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_ui_router.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/_version.py create mode 100644 ceph/src/pybind/mgr/dashboard/controllers/cluster.py rename ceph/src/pybind/mgr/dashboard/controllers/{nfsganesha.py => nfs.py} (52%) create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/applitools.config.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/create-cluster.po.ts delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/01-hosts-force-maintenance.e2e-spec.ts create mode 100644 
ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome-page.e2e-spec.ts delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-hosts.e2e-spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.e2e-spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/03-create-cluster-create-osds.e2e-spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/04-create-cluster-create-services.e2e-spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/05-create-cluster-review.e2e-spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/06-cluster-check.e2e-spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/07-nfs-exports.e2e-spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/nfs/nfs-export.po.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/visualTests/dashboard.vrt-spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/visualTests/login.vrt-spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/support/eyes-index.d.ts delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/1.6da7b376fa1a8a3df154.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/1.e0a29c1d4fcf893cf11d.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/5.0a363eda73eafe0c0332.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/5.f6931a7617fe7f8fd244.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/6.115992dc55f8e1abedbc.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/6.e0fafffe422f8212d682.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.b78c1bf5c30e15315e18.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.d269a7c492a93e2ebedb.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/polyfills.69188bf73a1e0d939338.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/polyfills.b66d1515aae6fe3887b1.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.fc090c2eb8af922526a6.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.fcd694c3eff5ef104b53.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/styles.0520b6222fe6dab25bab.css delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/styles.7918cb8dc788b3eedc95.css delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/swagger-ui-bundle.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/swagger-ui.css create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster-review.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster-review.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster-review.component.spec.ts create mode 100644 
ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster-review.component.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster.component.html
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster.component.scss
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster.component.spec.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/create-cluster/create-cluster.component.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/models/nfs.fsal.ts
 delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-cluster-type.enum.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/api-client.spec.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/api-client.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cluster.service.spec.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cluster.service.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/wizard/wizard.component.html
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/wizard/wizard.component.scss
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/wizard/wizard.component.spec.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/wizard/wizard.component.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/models/inventory-device-type.model.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/models/wizard-steps.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/services/wizard-steps.service.spec.ts
 create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/services/wizard-steps.service.ts
 delete mode 100644 ceph/src/pybind/mgr/dashboard/services/cephx.py
 create mode 100644 ceph/src/pybind/mgr/dashboard/services/cluster.py
 delete mode 100644 ceph/src/pybind/mgr/dashboard/services/ganesha.py
 delete mode 100644 ceph/src/pybind/mgr/dashboard/tests/test_ganesha.py
 create mode 100644 ceph/src/pybind/mgr/dashboard/tests/test_nfs.py
 create mode 100644 ceph/src/pybind/mgr/nfs/tests/__init__.py
 create mode 100644 ceph/src/pybind/mgr/nfs/tests/test_nfs.py
 create mode 100644 ceph/src/test/common/test_fair_mutex.cc
 rename ceph/sudoers.d/{ceph-osd-smartctl => ceph-smartctl} (60%)

diff --git a/ceph/.github/labeler.yml b/ceph/.github/labeler.yml
index 5719e113f..a9c6a1179 100644
--- a/ceph/.github/labeler.yml
+++ b/ceph/.github/labeler.yml
@@ -225,3 +225,28 @@ tests:
   - qa/tasks/**
   - qa/workunits/**
   - src/test/**
+
+nfs:
+  - src/pybind/mgr/nfs/**
+  - src/pybind/mgr/cephadm/services/nfs.py
+  - src/pybind/mgr/cephadm/templates/services/nfs/ganesha.conf.j2
+  - src/pybind/mgr/dashboard/services/ganesha.py
+  - src/pybind/mgr/dashboard/tests/test_ganesha.py
+  - qa/tasks/cephfs/test_nfs.py
+  - qa/tasks/mgr/dashboard/test_ganesha.py
+  - doc/mgr/nfs.rst
+  - doc/cephfs/nfs.rst
+  - doc/cephadm/nfs.rst
+  - doc/radosgw/nfs.rst
+  - doc/dev/vstart-ganesha.rst
+
+monitoring:
+  - doc/cephadm/monitoring.rst
+  - src/pybind/mgr/cephadm/services/monitoring.py
+  - src/pybind/mgr/cephadm/templates/services/alertmanager/**
+  - src/pybind/mgr/cephadm/templates/services/grafana/**
+  - src/pybind/mgr/cephadm/templates/services/prometheus/**
+  - src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py
+  - src/pybind/mgr/prometheus/**
+  - monitoring/**
+
diff --git a/ceph/.github/workflows/pr-triage.yml b/ceph/.github/workflows/pr-triage.yml
index 31791d4ae..77fcff462 100644
--- a/ceph/.github/workflows/pr-triage.yml
+++ b/ceph/.github/workflows/pr-triage.yml
@@ -1,15 +1,24 @@
 ---
 name: "Pull Request Triage"
 on: pull_request_target
+env:
+  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 jobs:
   pr-triage:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/labeler@9794b1493b6f1fa7b006c5f8635a19c76c98be95
+      - name: Assign labels based on modified files
+        uses: actions/labeler@9794b1493b6f1fa7b006c5f8635a19c76c98be95
         with:
           sync-labels: ''
           repo-token: "${{ secrets.GITHUB_TOKEN }}"
-      - uses: iyu/actions-milestone@dbf7e5348844c9ddc6b803a5721b85fa70fe3bb9
+      - name: Assign milestone based on target branch name
+        uses: iyu/actions-milestone@dbf7e5348844c9ddc6b803a5721b85fa70fe3bb9
         with:
           configuration-path: .github/milestone.yml
           repo-token: "${{ secrets.GITHUB_TOKEN }}"
+      - name: Assign to Dashboard project
+        uses: srggrs/assign-one-project-github-action@65a8ddab497df42ef268001e67bbf976f8fd39e1
+        if: contains(github.event.pull_request.labels.*.name, 'dashboard')
+        with:
+          project: https://github.com/ceph/ceph/projects/6
diff --git a/ceph/CMakeLists.txt b/ceph/CMakeLists.txt
index f7056c209..5253399df 100644
--- a/ceph/CMakeLists.txt
+++ b/ceph/CMakeLists.txt
@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.10.2)
 # remove cmake/modules/FindPython* once 3.12 is required
 project(ceph
-  VERSION 16.2.6
+  VERSION 16.2.7
   LANGUAGES CXX C ASM)
 foreach(policy
diff --git a/ceph/PendingReleaseNotes b/ceph/PendingReleaseNotes
index f5ac346a4..12b47c132 100644
--- a/ceph/PendingReleaseNotes
+++ b/ceph/PendingReleaseNotes
@@ -20,6 +20,9 @@
 * MDS upgrades no longer require stopping all standby MDS daemons before
   upgrading the sole active MDS for a file system.
 
+* CephFS: Failure to replay the journal by a standby-replay daemon will now
+  cause the rank to be marked damaged.
+
 * RGW: It is possible to specify ssl options and ciphers for the beast frontend now.
   The default ssl options setting is "no_sslv2:no_sslv3:no_tlsv1:no_tlsv1_1".
   If you want to restore the old behavior, add 'ssl_options=' (empty) to
@@ -29,6 +32,22 @@
   in certain recovery scenarios, e.g., monitor database lost and rebuilt, and
   the restored file system is expected to have the same ID as before.
 
+>=16.2.7
+--------
+
+* A critical bug in the OMAP format upgrade has been fixed. It could cause
+  data corruption (improperly formatted OMAP keys) after a pre-Pacific cluster
+  upgrade if the bluestore-quick-fix-on-mount parameter is set to true or
+  ceph-bluestore-tool's quick-fix/repair commands are invoked.
+  Relevant tracker: https://tracker.ceph.com/issues/53062
+
+* MGR: The pg_autoscaler will use the 'scale-up' profile as the default profile.
+  16.2.6 changed the default profile to 'scale-down', but we ran into issues
+  with the device_health_metrics pool consuming too many PGs, which is not ideal
+  for performance. So we will continue to use the 'scale-up' profile by default,
+  until we implement a limit on the number of PGs default pools should consume,
+  in combination with the 'scale-down' profile.
+
 >=16.2.6
 --------
 
@@ -42,6 +61,9 @@
 >=16.0.0
 --------
 
+* RGW: S3 bucket notification events now contain an `eTag` key instead of `etag`,
+  and eventName values no longer carry the `s3:` prefix, fixing deviations from
+  the message format observed on AWS.
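For consumers parsing these notification records, the practical effect is one key
rename and one eventName prefix change. A minimal sketch with jq, assuming the
usual AWS-style Records[].s3.object record layout (the layout itself is not
spelled out in this patch; event.json stands for any received record):

    # eventName no longer carries the "s3:" prefix, e.g. "ObjectCreated:Put":
    jq '.Records[0].eventName' event.json
    # the object entity now uses "eTag" (previously "etag"):
    jq '.Records[0].s3.object.eTag' event.json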
* `ceph-mgr-modules-core` debian package does not recommend `ceph-mgr-rook` anymore. As the latter depends on `python3-numpy` which cannot be imported in different Python sub-interpreters multi-times if the version of @@ -72,6 +94,21 @@ deprecated and will be removed in a future release. Please use ``nfs cluster rm`` and ``nfs export rm`` instead. +* The ``nfs export create`` CLI arguments have changed, with the + *fsname* or *bucket-name* argument position moving to the right of + the *cluster-id* and *pseudo-path*. Consider transitioning to + using named arguments instead of positional arguments (e.g., ``ceph + nfs export create cephfs --cluster-id mycluster --pseudo-path /foo + --fsname myfs`` instead of ``ceph nfs export create cephfs + mycluster /foo myfs``) to ensure correct behavior with any + version. + +* mgr-pg_autoscaler: Autoscaler will now start out by scaling each + pool to have a full complement of PGs from the start and will only + decrease it when other pools need more PGs due to increased usage. + This improves out of the box performance of Ceph by allowing more PGs + to be created for a given pool. + * CephFS: Disabling allow_standby_replay on a file system will also stop all standby-replay daemons for that file system. @@ -374,3 +411,13 @@ zone are provided, the user is now responsible for setting up the multisite configuration beforehand--cephadm no longer attempts to create missing realms or zones. + +* The cephadm NFS support has been simplified to no longer allow the + pool and namespace where configuration is stored to be customized. + As a result, the ``ceph orch apply nfs`` command no longer has + ``--pool`` or ``--namespace`` arguments. + + Existing cephadm NFS deployments (from earlier versions of Pacific or + from Octopus) will be automatically migrated when the cluster is + upgraded. Note that the NFS ganesha daemons will be redeployed and + it is possible that their IPs will change. diff --git a/ceph/admin/build-doc b/ceph/admin/build-doc index bb2a46282..345324a14 100755 --- a/ceph/admin/build-doc +++ b/ceph/admin/build-doc @@ -20,7 +20,7 @@ if command -v dpkg >/dev/null; then exit 1 fi elif command -v yum >/dev/null; then - for package in ant ditaa doxygen libxslt-devel libxml2-devel graphviz python3-devel python3-pip python3-virtualenv python3-Cython; do + for package in ant ditaa doxygen libxslt-devel libxml2-devel graphviz python3-devel python3-pip python3-Cython; do if ! rpm -q --whatprovides $package >/dev/null ; then missing="${missing:+$missing }$package" fi @@ -31,7 +31,7 @@ elif command -v yum >/dev/null; then exit 1 fi else - for command in dot virtualenv doxygen ant ditaa cython; do + for command in dot doxygen ant ditaa cython; do if ! command -v "$command" > /dev/null; then # add a space after old values missing="${missing:+$missing }$command" @@ -51,9 +51,10 @@ set -e [ -z "$vdir" ] && vdir="$TOPDIR/build-doc/virtualenv" if [ !
-e $vdir ]; then - virtualenv --python=python3 $vdir + python3 -m venv $vdir fi +$vdir/bin/pip install --quiet wheel $vdir/bin/pip install --quiet \ -r $TOPDIR/admin/doc-requirements.txt \ -r $TOPDIR/admin/doc-python-common-requirements.txt diff --git a/ceph/admin/doc-requirements.txt b/ceph/admin/doc-requirements.txt index 3ad2f6edb..4a9090436 100644 --- a/ceph/admin/doc-requirements.txt +++ b/ceph/admin/doc-requirements.txt @@ -1,4 +1,4 @@ -Sphinx == 3.2.1 +Sphinx == 3.5.4 git+https://github.com/ceph/sphinx-ditaa.git@py3#egg=sphinx-ditaa breathe >= 4.20.0 Jinja2 diff --git a/ceph/ceph.spec b/ceph/ceph.spec index e456f9b13..c4ac0356d 100644 --- a/ceph/ceph.spec +++ b/ceph/ceph.spec @@ -29,12 +29,12 @@ %else %bcond_without tcmalloc %endif +%bcond_with system_pmdk %if 0%{?fedora} || 0%{?rhel} %bcond_without selinux %ifarch x86_64 ppc64le %bcond_without rbd_rwl_cache %bcond_without rbd_ssd_cache -%global _system_pmdk 1 %else %bcond_with rbd_rwl_cache %bcond_with rbd_ssd_cache @@ -60,12 +60,10 @@ %bcond_with libradosstriper %ifarch x86_64 aarch64 ppc64le %bcond_without lttng -%global _system_pmdk 1 %bcond_without rbd_rwl_cache %bcond_without rbd_ssd_cache %else %bcond_with lttng -%global _system_pmdk 0 %bcond_with rbd_rwl_cache %bcond_with rbd_ssd_cache %endif @@ -126,7 +124,7 @@ # main package definition ################################################################################# Name: ceph -Version: 16.2.6 +Version: 16.2.7 Release: 0%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 @@ -142,7 +140,7 @@ License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD- Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-16.2.6.tar.bz2 +Source0: %{?_remote_tarball_prefix}ceph-16.2.7.tar.bz2 %if 0%{?suse_version} # _insert_obs_source_lines_here ExclusiveArch: x86_64 aarch64 ppc64le s390x @@ -235,7 +233,6 @@ BuildRequires: python%{python3_pkgversion}-nose BuildRequires: python%{python3_pkgversion}-pecan BuildRequires: python%{python3_pkgversion}-requests BuildRequires: python%{python3_pkgversion}-dateutil -BuildRequires: python%{python3_pkgversion}-virtualenv BuildRequires: python%{python3_pkgversion}-coverage BuildRequires: python%{python3_pkgversion}-pyOpenSSL BuildRequires: socat @@ -255,7 +252,7 @@ BuildRequires: nlohmann_json-devel BuildRequires: libevent-devel BuildRequires: yaml-cpp-devel %endif -%if 0%{?_system_pmdk} +%if 0%{with system_pmdk} BuildRequires: libpmem-devel BuildRequires: libpmemobj-devel %endif @@ -439,6 +436,12 @@ Requires: gperftools-libs >= 2.6.1 %endif %if 0%{?weak_deps} Recommends: chrony +Recommends: nvme-cli +%if 0%{?suse_version} +Requires: smartmontools +%else +Recommends: smartmontools +%endif %endif %description base Base is the package that includes all the files shared amongst ceph servers @@ -507,14 +510,6 @@ Group: System/Filesystems %endif Provides: ceph-test:/usr/bin/ceph-monstore-tool Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} -%if 0%{?weak_deps} -Recommends: nvme-cli -%if 0%{?suse_version} -Requires: smartmontools -%else -Recommends: smartmontools -%endif -%endif %if 0%{with jaeger} Requires: libjaeger = %{_epoch_prefix}%{version}-%{release} %endif @@ -785,14 +780,6 @@ Requires: lvm2 Requires: sudo Requires: libstoragemgmt Requires: python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release} -%if 0%{?weak_deps} -Recommends: nvme-cli -%if 0%{?suse_version} -Requires: smartmontools -%else -Recommends: smartmontools -%endif -%endif %description osd ceph-osd is 
the object storage daemon for the Ceph distributed file system. It is responsible for storing objects on a local file system @@ -1208,7 +1195,7 @@ This package provides Ceph default alerts for Prometheus. # common ################################################################################# %prep -%autosetup -p1 -n ceph-16.2.6 +%autosetup -p1 -n ceph-16.2.7 %build # LTO can be enabled as soon as the following GCC bug is fixed: @@ -1334,7 +1321,7 @@ ${CMAKE} .. \ %if 0%{with rbd_ssd_cache} -DWITH_RBD_SSD_CACHE=ON \ %endif -%if 0%{?_system_pmdk} +%if 0%{with system_pmdk} -DWITH_SYSTEM_PMDK:BOOL=ON \ %endif -DBOOST_J=$CEPH_SMP_NCPUS \ @@ -1405,7 +1392,7 @@ ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules # sudoers.d -install -m 0440 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl +install -m 0440 -D sudoers.d/ceph-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-smartctl %if 0%{?rhel} >= 8 pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/* @@ -1506,6 +1493,7 @@ rm -rf %{buildroot} %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mgr %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd-mirror +%{_sysconfdir}/sudoers.d/ceph-smartctl %post base /sbin/ldconfig @@ -2087,7 +2075,6 @@ fi %{_unitdir}/ceph-volume@.service %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/osd %config(noreplace) %{_sysctldir}/90-ceph-osd.conf -%{_sysconfdir}/sudoers.d/ceph-osd-smartctl %post osd %if 0%{?suse_version} @@ -2388,13 +2375,21 @@ if diff ${FILE_CONTEXT} ${FILE_CONTEXT}.pre > /dev/null 2>&1; then exit 0 fi +# Stop ceph.target while relabeling if CEPH_AUTO_RESTART_ON_UPGRADE=yes +SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph +if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH +fi + # Check whether the daemons are running /usr/bin/systemctl status ceph.target > /dev/null 2>&1 STATUS=$? # Stop the daemons if they were running if test $STATUS -eq 0; then - /usr/bin/systemctl stop ceph.target > /dev/null 2>&1 + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl stop ceph.target > /dev/null 2>&1 + fi fi # Relabel the files fix for first package install @@ -2406,7 +2401,9 @@ rm -f ${FILE_CONTEXT}.pre # Start the daemons iff they were running before if test $STATUS -eq 0; then - /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || : + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || : + fi fi exit 0 @@ -2426,13 +2423,21 @@ if [ $1 -eq 0 ]; then exit 0 fi + # Stop ceph.target while relabeling if CEPH_AUTO_RESTART_ON_UPGRADE=yes + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + # Check whether the daemons are running /usr/bin/systemctl status ceph.target > /dev/null 2>&1 STATUS=$? 
# Stop the daemons if they were running if test $STATUS -eq 0; then - /usr/bin/systemctl stop ceph.target > /dev/null 2>&1 + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl stop ceph.target > /dev/null 2>&1 + fi fi /usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null @@ -2442,7 +2447,9 @@ if [ $1 -eq 0 ]; then # Start the daemons if they were running before if test $STATUS -eq 0; then - /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || : + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || : + fi fi fi exit 0 diff --git a/ceph/ceph.spec.in b/ceph/ceph.spec.in index 14fdadaed..ebc2893cb 100644 --- a/ceph/ceph.spec.in +++ b/ceph/ceph.spec.in @@ -29,12 +29,12 @@ %else %bcond_without tcmalloc %endif +%bcond_with system_pmdk %if 0%{?fedora} || 0%{?rhel} %bcond_without selinux %ifarch x86_64 ppc64le %bcond_without rbd_rwl_cache %bcond_without rbd_ssd_cache -%global _system_pmdk 1 %else %bcond_with rbd_rwl_cache %bcond_with rbd_ssd_cache @@ -60,12 +60,10 @@ %bcond_with libradosstriper %ifarch x86_64 aarch64 ppc64le %bcond_without lttng -%global _system_pmdk 1 %bcond_without rbd_rwl_cache %bcond_without rbd_ssd_cache %else %bcond_with lttng -%global _system_pmdk 0 %bcond_with rbd_rwl_cache %bcond_with rbd_ssd_cache %endif @@ -235,7 +233,6 @@ BuildRequires: python%{python3_pkgversion}-nose BuildRequires: python%{python3_pkgversion}-pecan BuildRequires: python%{python3_pkgversion}-requests BuildRequires: python%{python3_pkgversion}-dateutil -BuildRequires: python%{python3_pkgversion}-virtualenv BuildRequires: python%{python3_pkgversion}-coverage BuildRequires: python%{python3_pkgversion}-pyOpenSSL BuildRequires: socat @@ -255,7 +252,7 @@ BuildRequires: nlohmann_json-devel BuildRequires: libevent-devel BuildRequires: yaml-cpp-devel %endif -%if 0%{?_system_pmdk} +%if 0%{with system_pmdk} BuildRequires: libpmem-devel BuildRequires: libpmemobj-devel %endif @@ -439,6 +436,12 @@ Requires: gperftools-libs >= 2.6.1 %endif %if 0%{?weak_deps} Recommends: chrony +Recommends: nvme-cli +%if 0%{?suse_version} +Requires: smartmontools +%else +Recommends: smartmontools +%endif %endif %description base Base is the package that includes all the files shared amongst ceph servers @@ -507,14 +510,6 @@ Group: System/Filesystems %endif Provides: ceph-test:/usr/bin/ceph-monstore-tool Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} -%if 0%{?weak_deps} -Recommends: nvme-cli -%if 0%{?suse_version} -Requires: smartmontools -%else -Recommends: smartmontools -%endif -%endif %if 0%{with jaeger} Requires: libjaeger = %{_epoch_prefix}%{version}-%{release} %endif @@ -785,14 +780,6 @@ Requires: lvm2 Requires: sudo Requires: libstoragemgmt Requires: python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release} -%if 0%{?weak_deps} -Recommends: nvme-cli -%if 0%{?suse_version} -Requires: smartmontools -%else -Recommends: smartmontools -%endif -%endif %description osd ceph-osd is the object storage daemon for the Ceph distributed file system. It is responsible for storing objects on a local file system @@ -1334,7 +1321,7 @@ ${CMAKE} .. 
\ %if 0%{with rbd_ssd_cache} -DWITH_RBD_SSD_CACHE=ON \ %endif -%if 0%{?_system_pmdk} +%if 0%{with system_pmdk} -DWITH_SYSTEM_PMDK:BOOL=ON \ %endif -DBOOST_J=$CEPH_SMP_NCPUS \ @@ -1405,7 +1392,7 @@ ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules # sudoers.d -install -m 0440 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl +install -m 0440 -D sudoers.d/ceph-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-smartctl %if 0%{?rhel} >= 8 pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/* @@ -1506,6 +1493,7 @@ rm -rf %{buildroot} %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mgr %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd-mirror +%{_sysconfdir}/sudoers.d/ceph-smartctl %post base /sbin/ldconfig @@ -2087,7 +2075,6 @@ fi %{_unitdir}/ceph-volume@.service %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/osd %config(noreplace) %{_sysctldir}/90-ceph-osd.conf -%{_sysconfdir}/sudoers.d/ceph-osd-smartctl %post osd %if 0%{?suse_version} @@ -2388,13 +2375,21 @@ if diff ${FILE_CONTEXT} ${FILE_CONTEXT}.pre > /dev/null 2>&1; then exit 0 fi +# Stop ceph.target while relabeling if CEPH_AUTO_RESTART_ON_UPGRADE=yes +SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph +if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH +fi + # Check whether the daemons are running /usr/bin/systemctl status ceph.target > /dev/null 2>&1 STATUS=$? # Stop the daemons if they were running if test $STATUS -eq 0; then - /usr/bin/systemctl stop ceph.target > /dev/null 2>&1 + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl stop ceph.target > /dev/null 2>&1 + fi fi # Relabel the files fix for first package install @@ -2406,7 +2401,9 @@ rm -f ${FILE_CONTEXT}.pre # Start the daemons iff they were running before if test $STATUS -eq 0; then - /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || : + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || : + fi fi exit 0 @@ -2426,13 +2423,21 @@ if [ $1 -eq 0 ]; then exit 0 fi + # Stop ceph.target while relabeling if CEPH_AUTO_RESTART_ON_UPGRADE=yes + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + # Check whether the daemons are running /usr/bin/systemctl status ceph.target > /dev/null 2>&1 STATUS=$? 
# Stop the daemons if they were running if test $STATUS -eq 0; then - /usr/bin/systemctl stop ceph.target > /dev/null 2>&1 + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl stop ceph.target > /dev/null 2>&1 + fi fi /usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null @@ -2442,7 +2447,9 @@ if [ $1 -eq 0 ]; then # Start the daemons if they were running before if test $STATUS -eq 0; then - /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || : + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || : + fi fi fi exit 0 diff --git a/ceph/changelog.upstream b/ceph/changelog.upstream index 51595d76d..e886fd13e 100644 --- a/ceph/changelog.upstream +++ b/ceph/changelog.upstream @@ -1,3 +1,9 @@ +ceph (16.2.7-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Tue, 07 Dec 2021 16:15:46 +0000 + ceph (16.2.6-1) stable; urgency=medium * New upstream release diff --git a/ceph/cmake/modules/Buildpmem.cmake b/ceph/cmake/modules/Buildpmem.cmake index 2439ac0fd..ead5c80ae 100644 --- a/ceph/cmake/modules/Buildpmem.cmake +++ b/ceph/cmake/modules/Buildpmem.cmake @@ -1,7 +1,18 @@ function(build_pmem) - include(ExternalProject) - set(PMDK_SRC "${CMAKE_BINARY_DIR}/src/pmdk/src") - set(PMDK_INCLUDE "${PMDK_SRC}/include") + include(FindMake) + find_make("MAKE_EXECUTABLE" "make_cmd") + + if(EXISTS "${PROJECT_SOURCE_DIR}/src/pmdk/Makefile") + set(source_dir_args + SOURCE_DIR "${PROJECT_SOURCE_DIR}/src/pmdk") + else() + set(source_dir_args + SOURCE_DIR ${CMAKE_BINARY_DIR}/src/pmdk + GIT_REPOSITORY https://github.com/ceph/pmdk.git + GIT_TAG "1.10" + GIT_SHALLOW TRUE + GIT_CONFIG advice.detachedHead=false) + endif() # Use debug PMDK libs in debug lib/rbd builds if(CMAKE_BUILD_TYPE STREQUAL Debug) @@ -9,37 +20,38 @@ function(build_pmem) else() set(PMDK_LIB_DIR "nondebug") endif() - set(PMDK_LIB "${PMDK_SRC}/${PMDK_LIB_DIR}") - - include(FindMake) - find_make("MAKE_EXECUTABLE" "make_cmd") + include(ExternalProject) ExternalProject_Add(pmdk_ext - GIT_REPOSITORY "https://github.com/ceph/pmdk.git" - GIT_TAG "1.7" - GIT_SHALLOW TRUE - SOURCE_DIR ${CMAKE_BINARY_DIR}/src/pmdk + ${source_dir_args} CONFIGURE_COMMAND "" # Explicitly built w/o NDCTL, otherwise if ndtcl is present on the # build system tests statically linking to librbd (which uses # libpmemobj) will not link (because we don't build the ndctl # static library here). 
- BUILD_COMMAND ${make_cmd} CC=${CMAKE_C_COMPILER} NDCTL_ENABLE=n + BUILD_COMMAND ${make_cmd} CC=${CMAKE_C_COMPILER} NDCTL_ENABLE=n BUILD_EXAMPLES=n BUILD_BENCHMARKS=n DOC=n BUILD_IN_SOURCE 1 - BUILD_BYPRODUCTS "${PMDK_LIB}/libpmem.a" "${PMDK_LIB}/libpmemobj.a" - INSTALL_COMMAND "true") + BUILD_BYPRODUCTS "/src/${PMDK_LIB_DIR}/libpmem.a" "/src/${PMDK_LIB_DIR}/libpmemobj.a" + INSTALL_COMMAND "") + unset(make_cmd) + + ExternalProject_Get_Property(pmdk_ext source_dir) + set(PMDK_SRC "${source_dir}/src") + set(PMDK_INCLUDE "${source_dir}/src/include") + set(PMDK_LIB "${source_dir}/src/${PMDK_LIB_DIR}") # libpmem - add_library(pmem::pmem STATIC IMPORTED) + add_library(pmem::pmem STATIC IMPORTED GLOBAL) add_dependencies(pmem::pmem pmdk_ext) file(MAKE_DIRECTORY ${PMDK_INCLUDE}) + find_package(Threads) set_target_properties(pmem::pmem PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${PMDK_INCLUDE} IMPORTED_LOCATION "${PMDK_LIB}/libpmem.a" - INTERFACE_LINK_LIBRARIES ${CMAKE_THREAD_LIBS_INIT}) + INTERFACE_LINK_LIBRARIES Threads::Threads) # libpmemobj - add_library(pmem::pmemobj STATIC IMPORTED) + add_library(pmem::pmemobj STATIC IMPORTED GLOBAL) add_dependencies(pmem::pmemobj pmdk_ext) set_target_properties(pmem::pmemobj PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${PMDK_INCLUDE} diff --git a/ceph/cmake/modules/Findpmem.cmake b/ceph/cmake/modules/Findpmem.cmake index b30d9ae6b..9c8e21b91 100644 --- a/ceph/cmake/modules/Findpmem.cmake +++ b/ceph/cmake/modules/Findpmem.cmake @@ -14,6 +14,16 @@ foreach(component pmem ${pmem_FIND_COMPONENTS}) else() message(FATAL_ERROR "unknown libpmem component: ${component}") endif() + pkg_check_modules(PKG_${component} QUIET "lib${component}") + if(NOT pmem_VERSION_STRING OR PKG_${component}_VERSION VERSION_LESS pmem_VERSION_STRING) + set(pmem_VERSION_STRING ${PKG_${component}_VERSION}) + endif() + find_path(pmem_${component}_INCLUDE_DIR + NAMES lib${component}.h + HINTS ${PKG_${component}_INCLUDE_DIRS}) + find_library(pmem_${component}_LIBRARY + NAMES ${component} + HINTS ${PKG_${component}_LIBRARY_DIRS}) mark_as_advanced( pmem_${component}_INCLUDE_DIR pmem_${component}_LIBRARY) diff --git a/ceph/debian/ceph-base.install b/ceph/debian/ceph-base.install index 8ee22ebe5..3c3195126 100644 --- a/ceph/debian/ceph-base.install +++ b/ceph/debian/ceph-base.install @@ -20,3 +20,4 @@ usr/share/man/man8/crushtool.8 usr/share/man/man8/monmaptool.8 usr/share/man/man8/osdmaptool.8 usr/share/man/man8/ceph-kvstore-tool.8 +etc/sudoers.d/ceph-smartctl diff --git a/ceph/debian/ceph-osd.install b/ceph/debian/ceph-osd.install index 3a72447a8..0db4460da 100755 --- a/ceph/debian/ceph-osd.install +++ b/ceph/debian/ceph-osd.install @@ -23,4 +23,3 @@ usr/share/man/man8/ceph-volume-systemd.8 usr/share/man/man8/ceph-osd.8 usr/share/man/man8/ceph-bluestore-tool.8 etc/sysctl.d/30-ceph-osd.conf -etc/sudoers.d/ceph-osd-smartctl diff --git a/ceph/debian/control b/ceph/debian/control index 753c5266d..cb31919af 100644 --- a/ceph/debian/control +++ b/ceph/debian/control @@ -96,13 +96,13 @@ Build-Depends: automake, python3-sphinx, # Make-Check python3-werkzeug, python3-setuptools, + python3-venv, # Crimson ragel, # Make-Check socat, # Crimson systemtap-sdt-dev, # Make-Check uuid-dev, uuid-runtime, valgrind, - virtualenv, xfslibs-dev, # Make-Check xfsprogs, # Make-Check xmlstarlet, @@ -144,6 +144,8 @@ Recommends: btrfs-tools, libradosstriper1 (= ${binary:Version}), librbd1 (= ${binary:Version}), ntp | time-daemon, + nvme-cli, + smartmontools, Replaces: ceph (<< 10), ceph-common (<< 0.78-500), ceph-test (<< 
12.2.2-14), @@ -376,8 +378,6 @@ Depends: ceph-base (= ${binary:Version}), ${shlibs:Depends}, Replaces: ceph (<< 10), ceph-test (<< 12.2.2-14) Breaks: ceph (<< 10), ceph-test (<< 12.2.2-14) -Recommends: nvme-cli, - smartmontools, Description: monitor server for the ceph storage system Ceph is a massively scalable, open-source, distributed storage system that runs on commodity hardware and delivers object, @@ -411,8 +411,6 @@ Depends: ceph-base (= ${binary:Version}), ${shlibs:Depends}, Replaces: ceph (<< 10), ceph-test (<< 12.2.2-14) Breaks: ceph (<< 10), ceph-test (<< 12.2.2-14) -Recommends: nvme-cli, - smartmontools, Description: OSD server for the ceph storage system Ceph is a massively scalable, open-source, distributed storage system that runs on commodity hardware and delivers object, diff --git a/ceph/debian/rules b/ceph/debian/rules index c8c6f3e80..1c215a76e 100755 --- a/ceph/debian/rules +++ b/ceph/debian/rules @@ -56,7 +56,7 @@ override_dh_auto_install: install -D -m 644 udev/50-rbd.rules $(DESTDIR)/lib/udev/rules.d/50-rbd.rules install -D -m 644 src/etc-rbdmap $(DESTDIR)/etc/ceph/rbdmap install -D -m 644 etc/sysctl/90-ceph-osd.conf $(DESTDIR)/etc/sysctl.d/30-ceph-osd.conf - install -D -m 440 sudoers.d/ceph-osd-smartctl $(DESTDIR)/etc/sudoers.d/ceph-osd-smartctl + install -D -m 440 sudoers.d/ceph-smartctl $(DESTDIR)/etc/sudoers.d/ceph-smartctl install -D -m 755 src/tools/rbd_nbd/rbd-nbd_quiesce $(DESTDIR)/usr/libexec/rbd-nbd/rbd-nbd_quiesce install -m 755 src/cephadm/cephadm $(DESTDIR)/usr/sbin/cephadm diff --git a/ceph/doc/ceph-volume/lvm/activate.rst b/ceph/doc/ceph-volume/lvm/activate.rst index 5e43d4cb5..eef5a0101 100644 --- a/ceph/doc/ceph-volume/lvm/activate.rst +++ b/ceph/doc/ceph-volume/lvm/activate.rst @@ -2,6 +2,7 @@ ``activate`` ============ + Once :ref:`ceph-volume-lvm-prepare` is completed, and all the various steps that entails are done, the volume is ready to get "activated". @@ -12,6 +13,9 @@ understand what OSD is enabled and needs to be mounted. .. note:: The execution of this call is fully idempotent, and there is no side-effects when running multiple times +For OSDs deployed by cephadm, please refer to :ref:`cephadm-osd-activate` +instead. + New OSDs -------- To activate newly prepared OSDs both the :term:`OSD id` and :term:`OSD uuid` @@ -24,6 +28,10 @@ need to be supplied. For example:: Activating all OSDs ------------------- + +.. note:: For OSDs deployed by cephadm, please refer to :ref:`cephadm-osd-activate` + instead. + It is possible to activate all existing OSDs at once by using the ``--all`` flag. For example:: diff --git a/ceph/doc/cephadm/adoption.rst b/ceph/doc/cephadm/adoption.rst index db4fded39..e06422a71 100644 --- a/ceph/doc/cephadm/adoption.rst +++ b/ceph/doc/cephadm/adoption.rst @@ -118,6 +118,12 @@ document for instructions that describe how to import existing ssh keys. + .. note:: + It is also possible to have cephadm use a non-root user to ssh + into cluster hosts. This user needs to have passwordless sudo access. + Use ``ceph cephadm set-user <user>`` and copy the ssh key to that user. + See :ref:`cephadm-ssh-user`. + #. Tell cephadm which hosts to manage: ..
prompt:: bash # diff --git a/ceph/doc/cephadm/host-management.rst b/ceph/doc/cephadm/host-management.rst index 621f2a753..c109e2dc7 100644 --- a/ceph/doc/cephadm/host-management.rst +++ b/ceph/doc/cephadm/host-management.rst @@ -169,13 +169,14 @@ Where the force flag when entering maintenance allows the user to bypass warning See also :ref:`cephadm-fqdn` -Host Specification -================== +Creating many hosts at once +=========================== Many hosts can be added at once using ``ceph orch apply -i`` by submitting a multi-document YAML file: + +.. code-block:: yaml - --- service_type: host hostname: node-00 addr: 192.168.0.10 @@ -197,6 +198,26 @@ This can be combined with service specifications (below) to create a cluster spe file to deploy a whole cluster in one command. see ``cephadm bootstrap --apply-spec`` also to do this during bootstrap. Cluster SSH Keys must be copied to hosts prior to adding them. +Setting the initial CRUSH location of a host +============================================ + +Hosts can contain a ``location`` identifier which will instruct cephadm to +create a new CRUSH host located in the specified hierarchy. + +.. code-block:: yaml + + service_type: host + hostname: node-00 + addr: 192.168.0.10 + location: + rack: rack1 + +.. note:: + + The ``location`` attribute will only affect the initial CRUSH location. Subsequent + changes of the ``location`` property will be ignored. Also, removing a host will not remove + any CRUSH buckets. + SSH Configuration ================= @@ -233,6 +254,8 @@ You will then need to restart the mgr daemon to reload the configuration with:: ceph mgr fail +.. _cephadm-ssh-user: + Configuring a different SSH user ---------------------------------- diff --git a/ceph/doc/cephadm/index.rst b/ceph/doc/cephadm/index.rst index 394f7b1cc..d57fe288a 100644 --- a/ceph/doc/cephadm/index.rst +++ b/ceph/doc/cephadm/index.rst @@ -21,21 +21,13 @@ either via the Ceph command-line interface (CLI) or via the dashboard (GUI). versions of Ceph. .. toctree:: - :maxdepth: 1 + :maxdepth: 2 compatibility install adoption host-management - mon - osd - rgw - mds - nfs - iscsi - custom-container - monitoring - service-management + Service Management upgrade Cephadm operations Client Setup diff --git a/ceph/doc/cephadm/install.rst b/ceph/doc/cephadm/install.rst index 25907296c..269bdfbca 100644 --- a/ceph/doc/cephadm/install.rst +++ b/ceph/doc/cephadm/install.rst @@ -176,7 +176,7 @@ available options. * By default, Ceph daemons send their log output to stdout/stderr, which is picked up by the container runtime (docker or podman) and (on most systems) sent to journald. If you want Ceph to write traditional log files to ``/var/log/ceph/$fsid``, - use ``--log-to-file`` option during bootstrap. + use the ``--log-to-file`` option during bootstrap. * Larger Ceph clusters perform better when (external to the Ceph cluster) public network traffic is separated from (internal to the Ceph cluster) diff --git a/ceph/doc/cephadm/operations.rst b/ceph/doc/cephadm/operations.rst index 45ae1575b..08e493bd7 100644 --- a/ceph/doc/cephadm/operations.rst +++ b/ceph/doc/cephadm/operations.rst @@ -178,30 +178,30 @@ running, but are not registered as hosts managed by *cephadm*. This means that those services cannot currently be managed by cephadm (e.g., restarted, upgraded, included in `ceph orch ps`).
-You can manage the host(s) by running the following command: +* You can manage the host(s) by running the following command: -.. prompt:: bash # + .. prompt:: bash # - ceph orch host add *<hostname>* + ceph orch host add *<hostname>* -.. note:: + .. note:: - You might need to configure SSH access to the remote host - before this will work. + You might need to configure SSH access to the remote host + before this will work. -Alternatively, you can manually connect to the host and ensure that -services on that host are removed or migrated to a host that is -managed by *cephadm*. +* See :ref:`cephadm-fqdn` for more information about host names and + domain names. -This warning can be disabled entirely by running the following -command: +* Alternatively, you can manually connect to the host and ensure that + services on that host are removed or migrated to a host that is + managed by *cephadm*. -.. prompt:: bash # +* This warning can be disabled entirely by running the following + command: - ceph config set mgr mgr/cephadm/warn_on_stray_hosts false + .. prompt:: bash # -See :ref:`cephadm-fqdn` for more information about host names and -domain names. + ceph config set mgr mgr/cephadm/warn_on_stray_hosts false CEPHADM_STRAY_DAEMON ~~~~~~~~~~~~~~~~~~~~ @@ -212,16 +212,30 @@ tool, or because they were started manually. Those services cannot currently be managed by cephadm (e.g., restarted, upgraded, or included in `ceph orch ps`). -If the daemon is a stateful one (monitor or OSD), it should be adopted -by cephadm; see :ref:`cephadm-adoption`. For stateless daemons, it is -usually easiest to provision a new daemon with the ``ceph orch apply`` -command and then stop the unmanaged daemon. +* If the daemon is a stateful one (monitor or OSD), it should be adopted + by cephadm; see :ref:`cephadm-adoption`. For stateless daemons, it is + usually easiest to provision a new daemon with the ``ceph orch apply`` + command and then stop the unmanaged daemon. -This warning can be disabled entirely by running the following command: +* If the stray daemon(s) are running on hosts not managed by cephadm, you can manage the host(s) by running the following command: -.. prompt:: bash # + .. prompt:: bash # + + ceph orch host add *<hostname>* + + .. note:: - ceph config set mgr mgr/cephadm/warn_on_stray_daemons false + You might need to configure SSH access to the remote host + before this will work. + +* See :ref:`cephadm-fqdn` for more information about host names and + domain names. + +* This warning can be disabled entirely by running the following command: + + .. prompt:: bash # + + ceph config set mgr mgr/cephadm/warn_on_stray_daemons false CEPHADM_HOST_CHECK_FAILED ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -504,3 +518,28 @@ For example, to distribute configs to hosts with the ``bare_config`` label, run ceph config set mgr mgr/cephadm/manage_etc_ceph_ceph_conf_hosts label:bare_config (See :ref:`orchestrator-cli-placement-spec` for more information about placement specs.) + +Purging a cluster +================= + +.. danger:: THIS OPERATION WILL DESTROY ALL DATA STORED IN THIS CLUSTER + +In order to destroy a cluster and delete all data stored in this cluster, pause +cephadm to avoid deploying new daemons. + +.. prompt:: bash # + + ceph orch pause + +Then verify the FSID of the cluster: + +.. prompt:: bash # + + ceph fsid + +Purge ceph daemons from all hosts in the cluster: + +..
prompt:: bash # + + # For each host: + cephadm rm-cluster --force --zap-osds --fsid diff --git a/ceph/doc/cephadm/custom-container.rst b/ceph/doc/cephadm/services/custom-container.rst similarity index 83% rename from ceph/doc/cephadm/custom-container.rst rename to ceph/doc/cephadm/services/custom-container.rst index 542fcf162..3ece248c5 100644 --- a/ceph/doc/cephadm/custom-container.rst +++ b/ceph/doc/cephadm/services/custom-container.rst @@ -11,32 +11,33 @@ A corresponding :ref:`orchestrator-cli-service-spec` must look like: service_id: foo placement: ... - image: docker.io/library/foo:latest - entrypoint: /usr/bin/foo - uid: 1000 - gid: 1000 - args: + spec: + image: docker.io/library/foo:latest + entrypoint: /usr/bin/foo + uid: 1000 + gid: 1000 + args: - "--net=host" - "--cpus=2" - ports: + ports: - 8080 - 8443 - envs: + envs: - SECRET=mypassword - PORT=8080 - PUID=1000 - PGID=1000 - volume_mounts: + volume_mounts: CONFIG_DIR: /etc/foo - bind_mounts: - - ['type=bind', 'source=lib/modules', 'destination=/lib/modules', 'ro=true'] - dirs: - - CONFIG_DIR - files: - CONFIG_DIR/foo.conf: - - refresh=true - - username=xyz - - "port: 1234" + bind_mounts: + - ['type=bind', 'source=lib/modules', 'destination=/lib/modules', 'ro=true'] + dirs: + - CONFIG_DIR + files: + CONFIG_DIR/foo.conf: + - refresh=true + - username=xyz + - "port: 1234" where the properties of a service specification are: diff --git a/ceph/doc/cephadm/service-management.rst b/ceph/doc/cephadm/services/index.rst similarity index 81% rename from ceph/doc/cephadm/service-management.rst rename to ceph/doc/cephadm/services/index.rst index c8f47d9ed..f34180eb2 100644 --- a/ceph/doc/cephadm/service-management.rst +++ b/ceph/doc/cephadm/services/index.rst @@ -2,10 +2,27 @@ Service Management ================== +A service is a group of daemons configured together. See these chapters +for details on individual services: + +.. toctree:: + :maxdepth: 1 + + mon + mgr + osd + rgw + mds + nfs + iscsi + custom-container + monitoring + Service Status ============== -A service is a group of daemons configured together. To see the status of one + +To see the status of one of the services running in the Ceph cluster, do the following: #. Use the command line to print a list of services. @@ -80,24 +97,17 @@ deployment of services. Here is an example of a service specification in YAML: - host2 - host3 unmanaged: false - ... + networks: + - 192.169.142.0/24 + spec: + # Additional service specific attributes. In this example, the properties of this service specification are: -* ``service_type`` - The type of the service. Needs to be either a Ceph - service (``mon``, ``crash``, ``mds``, ``mgr``, ``osd`` or - ``rbd-mirror``), a gateway (``nfs`` or ``rgw``), part of the - monitoring stack (``alertmanager``, ``grafana``, ``node-exporter`` or - ``prometheus``) or (``container``) for custom containers. -* ``service_id`` - The name of the service. -* ``placement`` - See :ref:`orchestrator-cli-placement-spec`. -* ``unmanaged`` If set to ``true``, the orchestrator will not deploy nor remove - any daemon associated with this service. Placement and all other properties - will be ignored. This is useful, if you do not want this service to be - managed temporarily. For cephadm, See :ref:`cephadm-spec-unmanaged` +.. py:currentmodule:: ceph.deployment.service_spec + +.. autoclass:: ServiceSpec + :members: Each service type can have additional service-specific properties. 
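To make the spec structure described above concrete, here is a minimal, hedged sketch of a complete service specification; the RGW service id, host names, network, and port are illustrative assumptions, not values taken from this patch:

.. code-block:: yaml

    service_type: rgw
    service_id: realm.zone        # assumed name, only for illustration
    placement:
      hosts:
        - host1
        - host2
    unmanaged: false
    networks:
      - 192.169.142.0/24
    spec:
      # service-specific attributes; for rgw this can include the frontend port
      port: 8000

Such a file would be applied with ``ceph orch apply -i``, as shown in the sections that follow.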
@@ -144,10 +154,36 @@ following these instructions: The Specification can then be changed and re-applied as above. +Updating Service Specifications +------------------------------- + +The Ceph Orchestrator maintains a declarative state of each +service in a ``ServiceSpec``. For certain operations, like updating +the RGW HTTP port, we need to update the existing +specification. + +1. List the current ``ServiceSpec``: + + .. prompt:: bash # + + ceph orch ls --service_name= --export > myservice.yaml + +2. Update the yaml file: + + .. prompt:: bash # + + vi myservice.yaml + +3. Apply the new ``ServiceSpec``: + + .. prompt:: bash # + + ceph orch apply -i myservice.yaml [--dry-run] + .. _orchestrator-cli-placement-spec: -Placement Specification -======================= +Daemon Placement +================ For the orchestrator to deploy a *service*, it needs to know where to deploy *daemons*, and how many to deploy. This is the role of a placement @@ -158,53 +194,55 @@ or in a YAML files. cephadm will not deploy daemons on hosts with the ``_no_schedule`` label; see :ref:`cephadm-special-host-labels`. - .. note:: - The **apply** command can be confusing. For this reason, we recommend using - YAML specifications. +.. note:: + The **apply** command can be confusing. For this reason, we recommend using + YAML specifications. - Each ``ceph orch apply `` command supersedes the one before it. - If you do not use the proper syntax, you will clobber your work - as you go. + Each ``ceph orch apply `` command supersedes the one before it. + If you do not use the proper syntax, you will clobber your work + as you go. - For example: + For example: - .. prompt:: bash # + .. prompt:: bash # - ceph orch apply mon host1 - ceph orch apply mon host2 - ceph orch apply mon host3 + ceph orch apply mon host1 + ceph orch apply mon host2 + ceph orch apply mon host3 - This results in only one host having a monitor applied to it: host 3. + This results in only one host having a monitor applied to it: host 3. - (The first command creates a monitor on host1. Then the second command - clobbers the monitor on host1 and creates a monitor on host2. Then the - third command clobbers the monitor on host2 and creates a monitor on - host3. In this scenario, at this point, there is a monitor ONLY on - host3.) + (The first command creates a monitor on host1. Then the second command + clobbers the monitor on host1 and creates a monitor on host2. Then the + third command clobbers the monitor on host2 and creates a monitor on + host3. In this scenario, at this point, there is a monitor ONLY on + host3.) - To make certain that a monitor is applied to each of these three hosts, - run a command like this: + To make certain that a monitor is applied to each of these three hosts, + run a command like this: - .. prompt:: bash # + .. prompt:: bash # - ceph orch apply mon "host1,host2,host3" + ceph orch apply mon "host1,host2,host3" - There is another way to apply monitors to multiple hosts: a ``yaml`` file - can be used. Instead of using the "ceph orch apply mon" commands, run a - command of this form: + There is another way to apply monitors to multiple hosts: a ``yaml`` file + can be used. Instead of using the "ceph orch apply mon" commands, run a + command of this form: - .. prompt:: bash # + .. prompt:: bash # + + ceph orch apply -i file.yaml - ceph orch apply -i file.yaml + Here is a sample **file.yaml** file - Here is a sample **file.yaml** file:: + .. 
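Hosts can also be selected indirectly. As a sketch, assuming the three hosts above have been given a ``mon`` label (for example with ``ceph orch host label add host1 mon``), an equivalent specification could use label-based placement instead of naming hosts explicitly:

.. code-block:: yaml

    service_type: mon
    placement:
      label: mon   # assumed label; any host carrying it becomes a candidate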
code-block:: yaml - service_type: mon - placement: - hosts: - - host1 - - host2 - - host3 + service_type: mon + placement: + hosts: + - host1 + - host2 + - host3 Explicit placements ------------------- @@ -320,8 +358,8 @@ Or in YAML: host_pattern: "*" -Changing the number of monitors -------------------------------- +Changing the number of daemons +------------------------------ By specifying ``count``, only the number of daemons specified will be created: @@ -363,38 +401,11 @@ YAML can also be used to specify limits on hosts: - host2 - host3 -Updating Service Specifications -=============================== - -The Ceph Orchestrator maintains a declarative state of each -service in a ``ServiceSpec``. For certain operations, like updating -the RGW HTTP port, we need to update the existing -specification. - -1. List the current ``ServiceSpec``: - - .. prompt:: bash # - - ceph orch ls --service_name= --export > myservice.yaml - -2. Update the yaml file: - - .. prompt:: bash # - - vi myservice.yaml - -3. Apply the new ``ServiceSpec``: - - .. prompt:: bash # +Algorithm description +--------------------- - ceph orch apply -i myservice.yaml [--dry-run] - -Deployment of Daemons -===================== - -Cephadm uses a declarative state to define the layout of the cluster. This -state consists of a list of service specifications containing placement -specifications (See :ref:`orchestrator-cli-service-spec` ). +Cephadm's declarative state consists of a list of service specifications +containing placement specifications. Cephadm continually compares a list of daemons actually running in the cluster against the list in the service specifications. Cephadm adds new daemons and @@ -437,12 +448,29 @@ Finally, cephadm removes daemons on hosts that are outside of the list of candidate hosts. .. note:: - + There is a special case that cephadm must consider. - If there are fewer hosts selected by the placement specification than + If there are fewer hosts selected by the placement specification than demanded by ``count``, cephadm will deploy only on the selected hosts. +.. _orch-rm: + +Removing a Service +================== + +In order to remove a service including the removal +of all daemons of that service, run + +.. prompt:: bash + + ceph orch rm + +For example: + +.. prompt:: bash + + ceph orch rm rgw.myrgw .. _cephadm-spec-unmanaged: @@ -452,6 +480,8 @@ Disabling automatic deployment of daemons Cephadm supports disabling the automated deployment and removal of daemons on a per service basis. The CLI supports two commands for this. +In order to fully remove a service, see :ref:`orch-rm`. + Disabling automatic management of daemons ----------------------------------------- diff --git a/ceph/doc/cephadm/iscsi.rst b/ceph/doc/cephadm/services/iscsi.rst similarity index 88% rename from ceph/doc/cephadm/iscsi.rst rename to ceph/doc/cephadm/services/iscsi.rst index d34ff6abf..e039e8d9a 100644 --- a/ceph/doc/cephadm/iscsi.rst +++ b/ceph/doc/cephadm/services/iscsi.rst @@ -44,7 +44,6 @@ For example: trusted_ip_list: "IP_ADDRESS_1,IP_ADDRESS_2,IP_ADDRESS_3,..." api_user: API_USERNAME api_password: API_PASSWORD - api_secure: true ssl_cert: | -----BEGIN CERTIFICATE----- MIIDtTCCAp2gAwIBAgIYMC4xNzc1NDQxNjEzMzc2MjMyXzxvQ7EcMA0GCSqGSIb3 @@ -58,6 +57,11 @@ For example: [...] -----END PRIVATE KEY----- +.. py:currentmodule:: ceph.deployment.service_spec + +.. 
autoclass:: IscsiServiceSpec + :members: + The specification can then be applied using: @@ -67,3 +71,10 @@ The specification can then be applied using: See :ref:`orchestrator-cli-placement-spec` for details of the placement specification. + +See also: :ref:`orchestrator-cli-service-spec`. + +Further Reading +=============== + +* RBD: :ref:`ceph-iscsi` diff --git a/ceph/doc/cephadm/mds.rst b/ceph/doc/cephadm/services/mds.rst similarity index 94% rename from ceph/doc/cephadm/mds.rst rename to ceph/doc/cephadm/services/mds.rst index fbee8d6bd..949a0fa5d 100644 --- a/ceph/doc/cephadm/mds.rst +++ b/ceph/doc/cephadm/services/mds.rst @@ -41,6 +41,9 @@ The specification can then be applied using: See :ref:`orchestrator-cli-stateless-services` for manually deploying MDS daemons on the CLI. +Further Reading +=============== +* :ref:`ceph-file-system` diff --git a/ceph/doc/cephadm/services/mgr.rst b/ceph/doc/cephadm/services/mgr.rst new file mode 100644 index 000000000..98a54398b --- /dev/null +++ b/ceph/doc/cephadm/services/mgr.rst @@ -0,0 +1,37 @@ +.. _mgr-cephadm-mgr: + +=========== +MGR Service +=========== + +The cephadm MGR service hosts different modules, like the :ref:`mgr-dashboard` +and the cephadm manager module. + +.. _cephadm-mgr-networks: + +Specifying Networks +------------------- + +The MGR service supports binding only to a specific IP within a network. + +Example spec file (leveraging a default placement): + +.. code-block:: yaml + + service_type: mgr + networks: + - 192.169.142.0/24 + +Allow co-location of MGR daemons +================================ + +In deployment scenarios with just a single host, cephadm still needs +to deploy at least two MGR daemons. See ``mgr_standby_modules`` in +the :ref:`mgr-administrator-guide` for further details. + +Further Reading +=============== + +* :ref:`ceph-manager-daemon` +* :ref:`cephadm-manually-deploy-mgr` + diff --git a/ceph/doc/cephadm/mon.rst b/ceph/doc/cephadm/services/mon.rst similarity index 96% rename from ceph/doc/cephadm/mon.rst rename to ceph/doc/cephadm/services/mon.rst index e66df6171..6326b73f4 100644 --- a/ceph/doc/cephadm/mon.rst +++ b/ceph/doc/cephadm/services/mon.rst @@ -5,7 +5,7 @@ MON Service .. _deploy_additional_monitors: Deploying additional monitors ------------------------------ +============================= A typical Ceph cluster has three or five monitor daemons that are spread across different hosts. We recommend deploying five monitors if there are @@ -169,3 +169,11 @@ network ``10.1.2.0/24``, run the following commands: .. prompt:: bash # ceph orch apply mon --placement="newhost1,newhost2,newhost3" + +Further Reading +=============== + +* :ref:`rados-operations` +* :ref:`rados-troubleshooting-mon` +* :ref:`cephadm-restore-quorum` + diff --git a/ceph/doc/cephadm/monitoring.rst b/ceph/doc/cephadm/services/monitoring.rst similarity index 90% rename from ceph/doc/cephadm/monitoring.rst rename to ceph/doc/cephadm/services/monitoring.rst index 38f4b4bb4..91b8742f3 100644 --- a/ceph/doc/cephadm/monitoring.rst +++ b/ceph/doc/cephadm/services/monitoring.rst @@ -56,87 +56,34 @@ steps below: .. prompt:: bash # - ceph orch apply node-exporter '*' + ceph orch apply node-exporter #. Deploy alertmanager: .. prompt:: bash # - ceph orch apply alertmanager 1 + ceph orch apply alertmanager #. Deploy Prometheus. A single Prometheus instance is sufficient, but for high availability (HA) you might want to deploy two: .. prompt:: bash # - ceph orch apply prometheus 1 + ceph orch apply prometheus or ..
prompt:: bash # - ceph orch apply prometheus 2 + ceph orch apply prometheus --placement 'count:2' #. Deploy grafana: .. prompt:: bash # - ceph orch apply grafana 1 + ceph orch apply grafana -Manually setting the Grafana URL -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cephadm automatically configures Prometheus, Grafana, and Alertmanager in -all cases except one. - -In a some setups, the Dashboard user's browser might not be able to access the -Grafana URL that is configured in Ceph Dashboard. This can happen when the -cluster and the accessing user are in different DNS zones. - -If this is the case, you can use a configuration option for Ceph Dashboard -to set the URL that the user's browser will use to access Grafana. This -value will never be altered by cephadm. To set this configuration option, -issue the following command: - - .. prompt:: bash $ - - ceph dashboard set-grafana-frontend-api-url - -It might take a minute or two for services to be deployed. After the -services have been deployed, you should see something like this when you issue the command ``ceph orch ls``: - -.. code-block:: console - - $ ceph orch ls - NAME RUNNING REFRESHED IMAGE NAME IMAGE ID SPEC - alertmanager 1/1 6s ago docker.io/prom/alertmanager:latest 0881eb8f169f present - crash 2/2 6s ago docker.io/ceph/daemon-base:latest-master-devel mix present - grafana 1/1 0s ago docker.io/pcuzner/ceph-grafana-el8:latest f77afcf0bcf6 absent - node-exporter 2/2 6s ago docker.io/prom/node-exporter:latest e5a616e4b9cf present - prometheus 1/1 6s ago docker.io/prom/prometheus:latest e935122ab143 present - -Configuring SSL/TLS for Grafana -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``cephadm`` deploys Grafana using the certificate defined in the ceph -key/value store. If no certificate is specified, ``cephadm`` generates a -self-signed certificate during the deployment of the Grafana service. - -A custom certificate can be configured using the following commands: - -.. prompt:: bash # - - ceph config-key set mgr/cephadm/grafana_key -i $PWD/key.pem - ceph config-key set mgr/cephadm/grafana_crt -i $PWD/certificate.pem - -If you have already deployed Grafana, run ``reconfig`` on the service to -update its configuration: - -.. prompt:: bash # - - ceph orch reconfig grafana - -The ``reconfig`` command also sets the proper URL for Ceph Dashboard. +.. _cephadm-monitoring-networks-ports: Networks and Ports ~~~~~~~~~~~~~~~~~~ @@ -291,6 +238,26 @@ Example # reconfig the prometheus service ceph orch reconfig prometheus +Deploying monitoring without cephadm +------------------------------------ + +If you have an existing prometheus monitoring infrastructure, or would like +to manage it yourself, you need to configure it to integrate with your Ceph +cluster. + +* Enable the prometheus module in the ceph-mgr daemon + + .. code-block:: bash + + ceph mgr module enable prometheus + + By default, ceph-mgr presents prometheus metrics on port 9283 on each host + running a ceph-mgr daemon. Configure prometheus to scrape these. + +* To enable the dashboard's prometheus-based alerting, see :ref:`dashboard-alerting`. + +* To enable dashboard integration with Grafana, see :ref:`dashboard-grafana`. + Disabling monitoring -------------------- @@ -304,30 +271,101 @@ To disable monitoring and remove the software that supports it, run the followin $ ceph orch rm alertmanager $ ceph mgr module disable prometheus +See also :ref:`orch-rm`. 
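For the "Deploying monitoring without cephadm" case described above, the scrape configuration on the external Prometheus server might look like the following sketch; the mgr host names are placeholders, and ``honor_labels`` preserves the instance labels that the ceph-mgr exporter attaches to its metrics:

.. code-block:: yaml

    # prometheus.yml on the external Prometheus server (illustrative sketch)
    scrape_configs:
      - job_name: 'ceph'
        honor_labels: true          # keep labels set by the ceph-mgr exporter
        static_configs:
          - targets:
              - 'mgr-host-1:9283'   # each host running a ceph-mgr daemon
              - 'mgr-host-2:9283'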
-Deploying monitoring manually ------------------------------ +Setting up RBD-Image monitoring +------------------------------- -If you have an existing prometheus monitoring infrastructure, or would like -to manage it yourself, you need to configure it to integrate with your Ceph -cluster. +For performance reasons, monitoring of RBD images is disabled by default. For more information please see +:ref:`prometheus-rbd-io-statistics`. If disabled, the overview and details dashboards will stay empty in Grafana +and the metrics will not be visible in Prometheus. -* Enable the prometheus module in the ceph-mgr daemon +Setting up Grafana +------------------ - .. code-block:: bash +Manually setting the Grafana URL +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - ceph mgr module enable prometheus +Cephadm automatically configures Prometheus, Grafana, and Alertmanager in +all cases except one. - By default, ceph-mgr presents prometheus metrics on port 9283 on each host - running a ceph-mgr daemon. Configure prometheus to scrape these. +In some setups, the Dashboard user's browser might not be able to access the +Grafana URL that is configured in Ceph Dashboard. This can happen when the +cluster and the accessing user are in different DNS zones. -* To enable the dashboard's prometheus-based alerting, see :ref:`dashboard-alerting`. +If this is the case, you can use a configuration option for Ceph Dashboard +to set the URL that the user's browser will use to access Grafana. This +value will never be altered by cephadm. To set this configuration option, +issue the following command: -* To enable dashboard integration with Grafana, see :ref:`dashboard-grafana`. + .. prompt:: bash $ -Enabling RBD-Image monitoring ---------------------------------- + ceph dashboard set-grafana-frontend-api-url -Due to performance reasons, monitoring of RBD images is disabled by default. For more information please see -:ref:`prometheus-rbd-io-statistics`. If disabled, the overview and details dashboards will stay empty in Grafana -and the metrics will not be visible in Prometheus. +It might take a minute or two for services to be deployed. After the +services have been deployed, you should see something like this when you issue the command ``ceph orch ls``: + +.. code-block:: console + + $ ceph orch ls + NAME RUNNING REFRESHED IMAGE NAME IMAGE ID SPEC + alertmanager 1/1 6s ago docker.io/prom/alertmanager:latest 0881eb8f169f present + crash 2/2 6s ago docker.io/ceph/daemon-base:latest-master-devel mix present + grafana 1/1 0s ago docker.io/pcuzner/ceph-grafana-el8:latest f77afcf0bcf6 absent + node-exporter 2/2 6s ago docker.io/prom/node-exporter:latest e5a616e4b9cf present + prometheus 1/1 6s ago docker.io/prom/prometheus:latest e935122ab143 present + +Configuring SSL/TLS for Grafana +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``cephadm`` deploys Grafana using the certificate defined in the ceph +key/value store. If no certificate is specified, ``cephadm`` generates a +self-signed certificate during the deployment of the Grafana service. + +A custom certificate can be configured using the following commands: + +.. prompt:: bash # + + ceph config-key set mgr/cephadm/grafana_key -i $PWD/key.pem + ceph config-key set mgr/cephadm/grafana_crt -i $PWD/certificate.pem + +If you have already deployed Grafana, run ``reconfig`` on the service to +update its configuration: + +.. prompt:: bash # + + ceph orch reconfig grafana + +The ``reconfig`` command also sets the proper URL for Ceph Dashboard.
+ +Setting up Alertmanager +----------------------- + +Adding Alertmanager webhooks +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To add new webhooks to the Alertmanager configuration, add additional +webhook urls like so: + +.. code-block:: yaml + + service_type: alertmanager + spec: + user_data: + default_webhook_urls: + - "https://foo" + - "https://bar" + +Where ``default_webhook_urls`` is a list of additional URLs that are +added to the default receivers' ```` configuration. + +Run ``reconfig`` on the service to update its configuration: + +.. prompt:: bash # + + ceph orch reconfig alertmanager + +Further Reading +--------------- + +* :ref:`mgr-prometheus` diff --git a/ceph/doc/cephadm/nfs.rst b/ceph/doc/cephadm/services/nfs.rst similarity index 94% rename from ceph/doc/cephadm/nfs.rst rename to ceph/doc/cephadm/services/nfs.rst index 631fe51bd..c48d0f765 100644 --- a/ceph/doc/cephadm/nfs.rst +++ b/ceph/doc/cephadm/services/nfs.rst @@ -7,7 +7,7 @@ NFS Service .. note:: Only the NFSv4 protocol is supported. The simplest way to manage NFS is via the ``ceph nfs cluster ...`` -commands; see :ref:`cephfs-nfs`. This document covers how to manage the +commands; see :ref:`mgr-nfs`. This document covers how to manage the cephadm services directly, which should only be necessary for unusual NFS configurations. @@ -59,6 +59,7 @@ The specification can then be applied by running the following command: ceph orch apply -i nfs.yaml +.. _cephadm-ha-nfs: High-availability NFS ===================== @@ -111,3 +112,9 @@ A few notes: * The backend service (``nfs.mynfs`` in this example) should include a *port* property that is not 2049 to avoid conflicting with the ingress service, which could be placed on the same host(s). + +Further Reading +=============== + +* CephFS: :ref:`cephfs-nfs` +* MGR: :ref:`mgr-nfs` diff --git a/ceph/doc/cephadm/osd.rst b/ceph/doc/cephadm/services/osd.rst similarity index 87% rename from ceph/doc/cephadm/osd.rst rename to ceph/doc/cephadm/services/osd.rst index f0bf47cfe..08cf7669b 100644 --- a/ceph/doc/cephadm/osd.rst +++ b/ceph/doc/cephadm/services/osd.rst @@ -305,6 +305,10 @@ This follows the same procedure as the procedure in the "Remove OSD" section, wi one exception: the OSD is not permanently removed from the CRUSH hierarchy, but is instead assigned a 'destroyed' flag. +.. note:: + The new OSD that will replace the removed OSD must be created on the same host + as the OSD that was removed. + **Preserving the OSD ID** The 'destroyed' flag is used to determine which OSD ids will be reused in the @@ -358,7 +362,7 @@ Example command: .. note:: If the unmanaged flag is unset, cephadm automatically deploys drives that - match the DriveGroup in your OSDSpec. For example, if you use the + match the OSDSpec. For example, if you use the ``all-available-devices`` option when creating OSDs, when you ``zap`` a device the cephadm orchestrator automatically creates a new OSD in the device. To disable this behavior, see :ref:`cephadm-osd-declarative`. @@ -437,11 +441,12 @@ Create a file called (for example) ``osd_spec.yml``: .. 
code-block:: yaml service_type: osd - service_id: default_drive_group <- name of the drive_group (name can be custom) + service_id: default_drive_group # custom name of the osd spec placement: - host_pattern: '*' <- which hosts to target, currently only supports globs - data_devices: <- the type of devices you are applying specs to - all: true <- a filter, check below for a full list + host_pattern: '*' # which hosts to target + spec: + data_devices: # the type of devices you are applying specs to + all: true # a filter, check below for a full list This means : @@ -579,7 +584,7 @@ All This will take all disks that are 'available' -Note: This is exclusive for the data_devices section. +.. note:: This is exclusive for the data_devices section. .. code-block:: yaml @@ -604,14 +609,14 @@ but want to use only the first two, you could use `limit`: vendor: VendorA limit: 2 -Note: `limit` is a last resort and shouldn't be used if it can be avoided. +.. note:: `limit` is a last resort and shouldn't be used if it can be avoided. Additional Options ------------------ There are multiple optional settings you can use to change the way OSDs are deployed. -You can add these options to the base level of a DriveGroup for it to take effect. +You can add these options to the base level of an OSD spec for it to take effect. This example would deploy all OSDs with encryption enabled. @@ -621,9 +626,10 @@ This example would deploy all OSDs with encryption enabled. service_id: example_osd_spec placement: host_pattern: '*' - data_devices: - all: true - encrypted: true + spec: + data_devices: + all: true + encrypted: true See a full list in the DriveGroupSpecs @@ -634,10 +640,10 @@ See a full list in the DriveGroupSpecs :exclude-members: from_json Examples --------- +======== The simple case -^^^^^^^^^^^^^^^ +--------------- All nodes with the same setup @@ -661,10 +667,11 @@ This is a common setup and can be described quite easily: service_id: osd_spec_default placement: host_pattern: '*' - data_devices: - model: HDD-123-foo <- note that HDD-123 would also be valid - db_devices: - model: MC-55-44-XZ <- same here, MC-55-44 is valid + spec: + data_devices: + model: HDD-123-foo # Note, HDD-123 would also be valid + db_devices: + model: MC-55-44-XZ # Same here, MC-55-44 is valid However, we can improve it by reducing the filters on core properties of the drives: @@ -674,10 +681,11 @@ However, we can improve it by reducing the filters on core properties of the dri service_id: osd_spec_default placement: host_pattern: '*' - data_devices: - rotational: 1 - db_devices: - rotational: 0 + spec: + data_devices: + rotational: 1 + db_devices: + rotational: 0 Now, we enforce all rotating devices to be declared as 'data devices' and all non-rotating devices will be used as shared_devices (wal, db) @@ -689,16 +697,17 @@ If you know that drives with more than 2TB will always be the slower data device service_id: osd_spec_default placement: host_pattern: '*' - data_devices: - size: '2TB:' - db_devices: - size: ':2TB' + spec: + data_devices: + size: '2TB:' + db_devices: + size: ':2TB' -Note: All of the above DriveGroups are equally valid. Which of those you want to use depends on taste and on how much you expect your node layout to change. +.. note:: All of the above OSD specs are equally valid. Which of those you want to use depends on taste and on how much you expect your node layout to change. 
-The advanced case
-^^^^^^^^^^^^^^^^^
+Multiple OSD specs for a single host
+------------------------------------

Here we have two distinct setups

@@ -731,28 +740,38 @@ This can be described with two layouts.
    service_id: osd_spec_hdd
    placement:
      host_pattern: '*'
-  data_devices:
-    rotational: 0
-  db_devices:
-    model: MC-55-44-XZ
-    limit: 2 (db_slots is actually to be favoured here, but it's not implemented yet)
+  spec:
+    data_devices:
+      rotational: 0
+    db_devices:
+      model: MC-55-44-XZ
+      limit: 2 # db_slots is actually to be favoured here, but it's not implemented yet
 ---
 service_type: osd
 service_id: osd_spec_ssd
    placement:
      host_pattern: '*'
-  data_devices:
-    model: MC-55-44-XZ
-  db_devices:
-    vendor: VendorC
+  spec:
+    data_devices:
+      model: MC-55-44-XZ
+    db_devices:
+      vendor: VendorC

This would create the desired layout by using all HDDs as data_devices with two SSDs assigned as dedicated db/wal devices.
The remaining SSDs (8) will be data_devices that have the 'VendorC' NVMEs assigned as
dedicated db/wal devices.

-The advanced case (with non-uniform nodes)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Multiple hosts with the same disk layout
+----------------------------------------
+
+If the cluster has several kinds of hosts, where each kind shares the same
+disk layout, it is recommended to apply a separate OSD spec that matches
+only one set of hosts. Typically you will have one spec for all hosts with
+the same layout.

-The examples above assumed that all nodes have the same drives. That's however not always the case.
+The service id acts as the unique key: if a new OSD spec with an already
+applied service id is applied, the existing OSD spec will be superseded.
+cephadm will then create new OSD daemons based on the new spec
+definition. Existing OSD daemons will not be affected. See :ref:`cephadm-osd-declarative`.

Node1-5

@@ -780,33 +799,41 @@ Node6-10

    Model: MC-55-44-ZX
    Size: 512GB

-You can use the 'host_pattern' key in the layout to target certain nodes. Salt target notation helps to keep things easy.
-
+You can use the 'placement' key in the layout to target certain nodes.

.. code-block:: yaml

    service_type: osd
-    service_id: osd_spec_node_one_to_five
+    service_id: disk_layout_a
    placement:
-      host_pattern: 'node[1-5]'
-    data_devices:
-      rotational: 1
-    db_devices:
-      rotational: 0
+      label: disk_layout_a
+    spec:
+      data_devices:
+        rotational: 1
+      db_devices:
+        rotational: 0
    ---
    service_type: osd
-    service_id: osd_spec_six_to_ten
+    service_id: disk_layout_b
    placement:
-      host_pattern: 'node[6-10]'
-    data_devices:
-      model: MC-55-44-XZ
-    db_devices:
-      model: SSD-123-foo
+      label: disk_layout_b
+    spec:
+      data_devices:
+        model: MC-55-44-XZ
+      db_devices:
+        model: SSD-123-foo
+
+This applies different OSD specs to different hosts, depending on the `placement` key.
+See :ref:`orchestrator-cli-placement-spec`.
+
+.. note::
+
+   Assuming each host has a unique disk layout, each OSD
+   spec needs to have a different service id.

-This applies different OSD specs to different hosts depending on the `host_pattern` key.

Dedicated wal + db
-^^^^^^^^^^^^^^^^^^
+------------------

All previous cases co-located the WALs with the DBs.
It's however possible to deploy the WAL on a dedicated device as well, if it makes sense.
@@ -837,12 +864,13 @@ The OSD spec for this case would look like the following (using the `model` filter):
    service_id: osd_spec_default
    placement:
      host_pattern: '*'
-  data_devices:
-    model: MC-55-44-XZ
-  db_devices:
-    model: SSD-123-foo
-  wal_devices:
-    model: NVME-QQQQ-987
+  spec:
+    data_devices:
+      model: MC-55-44-XZ
+    db_devices:
+      model: SSD-123-foo
+    wal_devices:
+      model: NVME-QQQQ-987

It is also possible to specify device paths directly for specific hosts, like the following:

@@ -855,19 +883,22 @@ It is also possible to specify directly device paths in specific hosts like the
      hosts:
        - Node01
        - Node02
-  data_devices:
-    paths:
+  spec:
+    data_devices:
+      paths:
      - /dev/sdb
-  db_devices:
-    paths:
+    db_devices:
+      paths:
      - /dev/sdc
-  wal_devices:
-    paths:
+    wal_devices:
+      paths:
      - /dev/sdd

This can easily be done with other filters, like `size` or `vendor` as well.

+.. _cephadm-osd-activate:
+
Activate existing OSDs
======================

@@ -880,3 +911,9 @@ activates all existing OSDs on a host.

    ceph cephadm osd activate ...

This will scan all existing disks for OSDs and deploy corresponding daemons.
+
+Further Reading
+===============
+
+* :ref:`ceph-volume`
+* :ref:`rados-index`
diff --git a/ceph/doc/cephadm/rgw.rst b/ceph/doc/cephadm/services/rgw.rst
similarity index 92%
rename from ceph/doc/cephadm/rgw.rst
rename to ceph/doc/cephadm/services/rgw.rst
index 3283fdbdf..47017139b 100644
--- a/ceph/doc/cephadm/rgw.rst
+++ b/ceph/doc/cephadm/services/rgw.rst
@@ -49,6 +49,28 @@ ports 8000 and 8001:
    ceph orch host label add gwhost2 rgw
    ceph orch apply rgw foo '--placement=label:rgw count-per-host:2' --port=8000

+.. _cephadm-rgw-networks:
+
+Specifying Networks
+-------------------
+
+The network that the RGW service binds to can be configured with a YAML service specification.
+
+Example spec file:
+
+.. code-block:: yaml
+
+    service_type: rgw
+    service_name: foo
+    placement:
+      label: rgw
+      count-per-host: 2
+    networks:
+    - 192.169.142.0/24
+    spec:
+      port: 8000
+
+
Multisite zones
---------------

@@ -82,6 +104,8 @@ something like:
See :ref:`orchestrator-cli-placement-spec` for details of the placement
specification.  See :ref:`multisite` for more information on setting up multisite RGW.

Setting up HTTPS
----------------

@@ -116,7 +140,15 @@ Then apply this yaml document:
    ceph orch apply -i myrgw.yaml

Note the value of ``rgw_frontend_ssl_certificate`` is a literal string as
-indicated by a ``|`` character preserving newline characters.
+indicated by a ``|`` character preserving newline characters.
+
+Service specification
+---------------------
+
+.. py:currentmodule:: ceph.deployment.service_spec
+
+.. autoclass:: RGWSpec
+   :members:

.. _orchestrator-haproxy-service-spec:

@@ -131,7 +163,7 @@ balancing on a floating virtual IP.
If SSL is used, then SSL must be configured and terminated by the ingress service
and not RGW itself.

-.. image:: ../images/HAProxy_for_RGW.svg
+.. image:: ../../images/HAProxy_for_RGW.svg

There are N hosts where the ingress service is deployed.  Each host
has a haproxy daemon and a keepalived daemon.  A virtual IP is
@@ -250,3 +282,8 @@ Useful hints for ingress

* It is good to have at least 3 RGW daemons.
* We recommend at least 3 hosts for the ingress service.
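+
+As a sketch, a complete ingress specification for the ``rgw.foo`` service
+described above might look like the following; the virtual IP, port numbers,
+and placement count are assumptions to adapt to your environment:
+
+.. code-block:: yaml
+
+    service_type: ingress
+    service_id: rgw.foo
+    placement:
+      count: 2
+    spec:
+      backend_service: rgw.foo
+      virtual_ip: 192.168.10.100/24
+      frontend_port: 8080
+      monitor_port: 1967
+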
+
+Further Reading
+===============
+
+* :ref:`object-gateway`
diff --git a/ceph/doc/cephadm/troubleshooting.rst b/ceph/doc/cephadm/troubleshooting.rst
index 1b6764dd7..4ce76ee81 100644
--- a/ceph/doc/cephadm/troubleshooting.rst
+++ b/ceph/doc/cephadm/troubleshooting.rst
@@ -252,6 +252,30 @@ To access the admin socket, first enter the daemon container on the host::

    [root@mon1 ~]# cephadm enter --name 
    [ceph: root@mon1 /]# ceph --admin-daemon /var/run/ceph/ceph-.asok config show

+Calling miscellaneous ceph tools
+--------------------------------
+
+To call miscellaneous tools like ``ceph-objectstore-tool`` or
+``ceph-monstore-tool``, you can run them by calling
+``cephadm shell --name `` like so::
+
+  root@myhostname # cephadm unit --name mon.myhostname stop
+  root@myhostname # cephadm shell --name mon.myhostname
+  [ceph: root@myhostname /]# ceph-monstore-tool /var/lib/ceph/mon/ceph-myhostname get monmap > monmap
+  [ceph: root@myhostname /]# monmaptool --print monmap
+  monmaptool: monmap file monmap
+  epoch 1
+  fsid 28596f44-3b56-11ec-9034-482ae35a5fbb
+  last_changed 2021-11-01T20:57:19.755111+0000
+  created 2021-11-01T20:57:19.755111+0000
+  min_mon_release 17 (quincy)
+  election_strategy: 1
+  0: [v2:127.0.0.1:3300/0,v1:127.0.0.1:6789/0] mon.myhostname
+
+This command sets up the environment in a way that is suitable
+for extended daemon maintenance and running the daemon interactively.
+
+.. _cephadm-restore-quorum:

Restoring the MON quorum
------------------------

@@ -275,6 +299,7 @@ form the monmap by following these steps:

3. Follow the steps in :ref:`rados-mon-remove-from-unhealthy`

+.. _cephadm-manually-deploy-mgr:

Manually deploying a MGR daemon
-------------------------------
diff --git a/ceph/doc/cephadm/upgrade.rst b/ceph/doc/cephadm/upgrade.rst
index dd6bd8759..429f4f280 100644
--- a/ceph/doc/cephadm/upgrade.rst
+++ b/ceph/doc/cephadm/upgrade.rst
@@ -37,12 +37,20 @@ To upgrade (or downgrade) to a specific release, run the following command:

    ceph orch upgrade start --ceph-version 

-For example, to upgrade to v15.2.1, run the following command:
+For example, to upgrade to v16.2.6, run the following command:

.. prompt:: bash #

    ceph orch upgrade start --ceph-version 16.2.6

+.. note::
+
+    Starting with version v16.2.6, the Docker Hub registry is no longer used, so if you use Docker you have to point it at the image in the quay.io registry:
+
+.. prompt:: bash #
+
+    ceph orch upgrade start --image quay.io/ceph/ceph:v16.2.6
+
Monitoring the upgrade
======================
diff --git a/ceph/doc/cephfs/cephfs-mirroring.rst b/ceph/doc/cephfs/cephfs-mirroring.rst
index e485ea3fe..3dbaa5d1a 100644
--- a/ceph/doc/cephfs/cephfs-mirroring.rst
+++ b/ceph/doc/cephfs/cephfs-mirroring.rst
@@ -165,7 +165,7 @@ Mirroring Status
CephFS mirroring module provides `mirror daemon status` interface to check
mirror daemon status::

-  $ ceph fs snapshot mirror daemon status 
+  $ ceph fs snapshot mirror daemon status

  [
    {
      "daemon_id": 284167,
diff --git a/ceph/doc/cephfs/cephfs-shell.rst b/ceph/doc/cephfs/cephfs-shell.rst
index 244d78bda..a24fc56b0 100644
--- a/ceph/doc/cephfs/cephfs-shell.rst
+++ b/ceph/doc/cephfs/cephfs-shell.rst
@@ -36,7 +36,7 @@ Options :

..
code:: bash - [build]$ virtualenv -p python3 venv && source venv/bin/activate && pip3 install cmd2 + [build]$ python3 -m venv venv && source venv/bin/activate && pip3 install cmd2 [build]$ source vstart_environment.sh && source venv/bin/activate && python3 ../src/tools/cephfs/cephfs-shell Commands diff --git a/ceph/doc/cephfs/fs-nfs-exports.rst b/ceph/doc/cephfs/fs-nfs-exports.rst deleted file mode 100644 index 1a95a1cb0..000000000 --- a/ceph/doc/cephfs/fs-nfs-exports.rst +++ /dev/null @@ -1,390 +0,0 @@ -.. _cephfs-nfs: - -======================= -CephFS Exports over NFS -======================= - -CephFS namespaces can be exported over NFS protocol using the `NFS-Ganesha NFS server`_ - -Requirements -============ - -- Latest Ceph file system with mgr enabled -- ``nfs-ganesha``, ``nfs-ganesha-ceph``, ``nfs-ganesha-rados-grace`` and - ``nfs-ganesha-rados-urls`` packages (version 3.3 and above) - -.. note:: From Pacific, the nfs mgr module must be enabled prior to use. - -Ganesha Configuration Hierarchy -=============================== - -Cephadm and rook starts nfs-ganesha daemon with `bootstrap configuration` -containing minimal ganesha configuration, creates empty rados `common config` -object in `nfs-ganesha` pool and watches this config object. The `mgr/nfs` -module adds rados export object urls to the common config object. If cluster -config is set, it creates `user config` object containing custom ganesha -configuration and adds it url to common config object. - -.. ditaa:: - - - rados://$pool/$namespace/export-$i rados://$pool/$namespace/userconf-nfs.$cluster_id - (export config) (user config) - - +----------+ +----------+ +----------+ +---------------------------+ - | | | | | | | | - | export-1 | | export-2 | | export-3 | | userconf-nfs.$cluster_id | - | | | | | | | | - +----+-----+ +----+-----+ +-----+----+ +-------------+-------------+ - ^ ^ ^ ^ - | | | | - +--------------------------------+-------------------------+ - %url | - | - +--------+--------+ - | | rados://$pool/$namespace/conf-nfs.$svc - | conf+nfs.$svc | (common config) - | | - +--------+--------+ - ^ - | - watch_url | - +----------------------------------------------+ - | | | - | | | RADOS - +----------------------------------------------------------------------------------+ - | | | CONTAINER - watch_url | watch_url | watch_url | - | | | - +--------+-------+ +--------+-------+ +-------+--------+ - | | | | | | /etc/ganesha/ganesha.conf - | nfs.$svc.a | | nfs.$svc.b | | nfs.$svc.c | (bootstrap config) - | | | | | | - +----------------+ +----------------+ +----------------+ - -Create NFS Ganesha Cluster -========================== - -.. code:: bash - - $ ceph nfs cluster create [] [--ingress --virtual-ip ] - -This creates a common recovery pool for all NFS Ganesha daemons, new user based on -``clusterid``, and a common NFS Ganesha config RADOS object. - -.. note:: Since this command also brings up NFS Ganesha daemons using a ceph-mgr - orchestrator module (see :doc:`/mgr/orchestrator`) such as "mgr/cephadm", at - least one such module must be enabled for it to work. - - Currently, NFS Ganesha daemon deployed by cephadm listens on the standard - port. So only one daemon will be deployed on a host. - -```` is an arbitrary string by which this NFS Ganesha cluster will be -known. 
- -```` is an optional string signifying which hosts should have NFS Ganesha -daemon containers running on them and, optionally, the total number of NFS -Ganesha daemons on the cluster (should you want to have more than one NFS Ganesha -daemon running per node). For example, the following placement string means -"deploy NFS Ganesha daemons on nodes host1 and host2 (one daemon per host):: - - "host1,host2" - -and this placement specification says to deploy single NFS Ganesha daemon each -on nodes host1 and host2 (for a total of two NFS Ganesha daemons in the -cluster):: - - "2 host1,host2" - -To deploy NFS with an HA front-end (virtual IP and load balancer), add the -``--ingress`` flag and specify a virtual IP address. This will deploy a combination -of keepalived and haproxy to provide an high-availability NFS frontend for the NFS -service. - -For more details, refer :ref:`orchestrator-cli-placement-spec` but keep -in mind that specifying the placement via a YAML file is not supported. - -Delete NFS Ganesha Cluster -========================== - -.. code:: bash - - $ ceph nfs cluster rm - -This deletes the deployed cluster. - -List NFS Ganesha Cluster -======================== - -.. code:: bash - - $ ceph nfs cluster ls - -This lists deployed clusters. - -Show NFS Ganesha Cluster Information -==================================== - -.. code:: bash - - $ ceph nfs cluster info [] - -This displays ip and port of deployed cluster. - -.. note:: This will not work with rook backend. Instead expose port with - kubectl patch command and fetch the port details with kubectl get services - command:: - - $ kubectl patch service -n rook-ceph -p '{"spec":{"type": "NodePort"}}' rook-ceph-nfs-- - $ kubectl get services -n rook-ceph rook-ceph-nfs-- - -Set Customized NFS Ganesha Configuration -======================================== - -.. code:: bash - - $ ceph nfs cluster config set -i - -With this the nfs cluster will use the specified config and it will have -precedence over default config blocks. - -Example use cases - -1) Changing log level - - It can be done by adding LOG block in the following way:: - - LOG { - COMPONENTS { - ALL = FULL_DEBUG; - } - } - -2) Adding custom export block - - The following sample block creates a single export. This export will not be - managed by `ceph nfs export` interface:: - - EXPORT { - Export_Id = 100; - Transports = TCP; - Path = /; - Pseudo = /ceph/; - Protocols = 4; - Access_Type = RW; - Attr_Expiration_Time = 0; - Squash = None; - FSAL { - Name = CEPH; - Filesystem = "filesystem name"; - User_Id = "user id"; - Secret_Access_Key = "secret key"; - } - } - -.. note:: User specified in FSAL block should have proper caps for NFS-Ganesha - daemons to access ceph cluster. User can be created in following way using - `auth get-or-create`:: - - # ceph auth get-or-create client. mon 'allow r' osd 'allow rw pool=nfs-ganesha namespace=, allow rw tag cephfs data=' mds 'allow rw path=' - -Reset NFS Ganesha Configuration -=============================== - -.. code:: bash - - $ ceph nfs cluster config reset - -This removes the user defined configuration. - -.. note:: With a rook deployment, ganesha pods must be explicitly restarted - for the new config blocks to be effective. - -Create CephFS Export -==================== - -.. warning:: Currently, the nfs interface is not integrated with dashboard. Both - dashboard and nfs interface have different export requirements and - create exports differently. Management of dashboard created exports is not - supported. - -.. 
code:: bash - - $ ceph nfs export create cephfs [--readonly] [--path=/path/in/cephfs] - -This creates export RADOS objects containing the export block, where - -```` is the name of the FS volume used by the NFS Ganesha cluster -that will serve this export. - -```` is the NFS Ganesha cluster ID. - -```` is the pseudo root path (must be an absolute path and unique). -It specifies the export position within the NFS v4 Pseudo Filesystem. - -```` is the path within cephfs. Valid path should be given and default -path is '/'. It need not be unique. Subvolume path can be fetched using: - -.. code:: - - $ ceph fs subvolume getpath [--group_name ] - -.. note:: Export creation is supported only for NFS Ganesha clusters deployed using nfs interface. - -Delete CephFS Export -==================== - -.. code:: bash - - $ ceph nfs export rm - -This deletes an export in an NFS Ganesha cluster, where: - -```` is the NFS Ganesha cluster ID. - -```` is the pseudo root path (must be an absolute path). - -List CephFS Exports -=================== - -.. code:: bash - - $ ceph nfs export ls [--detailed] - -It lists exports for a cluster, where: - -```` is the NFS Ganesha cluster ID. - -With the ``--detailed`` option enabled it shows entire export block. - -Get CephFS Export -================= - -.. code:: bash - - $ ceph nfs export get - -This displays export block for a cluster based on pseudo root name (binding), -where: - -```` is the NFS Ganesha cluster ID. - -```` is the pseudo root path (must be an absolute path). - - -Update CephFS Export -==================== - -.. code:: bash - - $ ceph nfs export update -i - -This updates the cephfs export specified in the json file. Export in json -format can be fetched with above get command. For example:: - - $ ceph nfs export get vstart /cephfs > update_cephfs_export.json - $ cat update_cephfs_export.json - { - "export_id": 1, - "path": "/", - "cluster_id": "vstart", - "pseudo": "/cephfs", - "access_type": "RW", - "squash": "no_root_squash", - "security_label": true, - "protocols": [ - 4 - ], - "transports": [ - "TCP" - ], - "fsal": { - "name": "CEPH", - "user_id": "vstart1", - "fs_name": "a", - "sec_label_xattr": "" - }, - "clients": [] - } - # Here in the fetched export, pseudo and access_type is modified. Then the modified file is passed to update interface - $ ceph nfs export update -i update_cephfs_export.json - $ cat update_cephfs_export.json - { - "export_id": 1, - "path": "/", - "cluster_id": "vstart", - "pseudo": "/cephfs_testing", - "access_type": "RO", - "squash": "no_root_squash", - "security_label": true, - "protocols": [ - 4 - ], - "transports": [ - "TCP" - ], - "fsal": { - "name": "CEPH", - "user_id": "vstart1", - "fs_name": "a", - "sec_label_xattr": "" - }, - "clients": [] - } - - -Configuring NFS Ganesha to export CephFS with vstart -==================================================== - -1) Using ``cephadm`` - - .. code:: bash - - $ MDS=1 MON=1 OSD=3 NFS=1 ../src/vstart.sh -n -d --cephadm - - This will deploy a single NFS Ganesha daemon using ``vstart.sh``, where - the daemon will listen on the default NFS Ganesha port. - -2) Using test orchestrator - - .. code:: bash - - $ MDS=1 MON=1 OSD=3 NFS=1 ../src/vstart.sh -n -d - - Environment variable ``NFS`` is the number of NFS Ganesha daemons to be - deployed, each listening on a random port. - - .. note:: NFS Ganesha packages must be pre-installed for this to work. - -Mount -===== - -After the exports are successfully created and NFS Ganesha daemons are no longer in -grace period. 
The exports can be mounted by - -.. code:: bash - - $ mount -t nfs -o port= : - -.. note:: Only NFS v4.0+ is supported. - -Troubleshooting -=============== - -Checking NFS-Ganesha logs with - -1) ``cephadm`` - - .. code:: bash - - $ cephadm logs --fsid --name nfs..hostname - -2) ``rook`` - - .. code:: bash - - $ kubectl logs -n rook-ceph rook-ceph-nfs-- nfs-ganesha - -Log level can be changed using `nfs cluster config set` command. - -.. _NFS-Ganesha NFS Server: https://github.com/nfs-ganesha/nfs-ganesha/wiki diff --git a/ceph/doc/cephfs/fs-volumes.rst b/ceph/doc/cephfs/fs-volumes.rst index 203509f83..6d59ab119 100644 --- a/ceph/doc/cephfs/fs-volumes.rst +++ b/ceph/doc/cephfs/fs-volumes.rst @@ -384,5 +384,39 @@ On successful cancelation, the cloned subvolume is moved to `canceled` state:: .. note:: The canceled cloned can be deleted by using --force option in `fs subvolume rm` command. + +.. _subvol-pinning: + +Pinning Subvolumes and Subvolume Groups +--------------------------------------- + + +Subvolumes and subvolume groups can be automatically pinned to ranks according +to policies. This can help distribute load across MDS ranks in predictable and +stable ways. Review :ref:`cephfs-pinning` and :ref:`cephfs-ephemeral-pinning` +for details on how pinning works. + +Pinning is configured by:: + + $ ceph fs subvolumegroup pin + +or for subvolumes:: + + $ ceph fs subvolume pin + +Typically you will want to set subvolume group pins. The ``pin_type`` may be +one of ``export``, ``distributed``, or ``random``. The ``pin_setting`` +corresponds to the extended attributed "value" as in the pinning documentation +referenced above. + +So, for example, setting a distributed pinning strategy on a subvolume group:: + + $ ceph fs subvolumegroup pin cephfilesystem-a csi distributed 1 + +Will enable distributed subtree partitioning policy for the "csi" subvolume +group. This will cause every subvolume within the group to be automatically +pinned to one of the available ranks on the file system. + + .. _manila: https://github.com/openstack/manila .. _CSI: https://github.com/ceph/ceph-csi diff --git a/ceph/doc/cephfs/index.rst b/ceph/doc/cephfs/index.rst index 02ae3e9b2..1726b2f5f 100644 --- a/ceph/doc/cephfs/index.rst +++ b/ceph/doc/cephfs/index.rst @@ -84,7 +84,6 @@ Administration MDS Configuration Settings Manual: ceph-mds <../../man/8/ceph-mds> Export over NFS - Export over NFS with volume nfs interface Application best practices FS volume and subvolumes CephFS Quotas diff --git a/ceph/doc/cephfs/multimds.rst b/ceph/doc/cephfs/multimds.rst index dcbbfc51e..db9b52f3a 100644 --- a/ceph/doc/cephfs/multimds.rst +++ b/ceph/doc/cephfs/multimds.rst @@ -100,6 +100,8 @@ When a daemon finishes stopping, it will respawn itself and go back to being a standby. +.. _cephfs-pinning: + Manually pinning directory trees to a particular rank ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -136,6 +138,8 @@ directory's export pin. For example: # a/b is now pinned to rank 0 and a/ and the rest of its children are still pinned to rank 1 +.. _cephfs-ephemeral-pinning: + Setting subtree partitioning policies ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/ceph/doc/cephfs/nfs.rst b/ceph/doc/cephfs/nfs.rst index 17bf4303e..6c44b8650 100644 --- a/ceph/doc/cephfs/nfs.rst +++ b/ceph/doc/cephfs/nfs.rst @@ -1,9 +1,15 @@ +.. _cephfs-nfs: + === NFS === -CephFS namespaces can be exported over NFS protocol using the -`NFS-Ganesha NFS server `_. 
+CephFS namespaces can be exported over NFS protocol using the NFS-Ganesha NFS
+server. This document provides information on configuring NFS-Ganesha
+clusters manually. The simplest and preferred way of managing NFS-Ganesha
+clusters and CephFS exports is using ``ceph nfs ...`` commands; see
+:doc:`/mgr/nfs` for more details, as that deployment is done using cephadm or
+rook.

Requirements
============

@@ -14,6 +20,10 @@ Requirements
   ganesha v2.5 stable or higher versions)
- NFS-Ganesha server host connected to the Ceph public network

+.. note::
+   It is recommended to use the 3.5 or later stable version of NFS-Ganesha
+   packages with the pacific (16.2.x) or later stable version of Ceph packages.
+
Configuring NFS-Ganesha to export CephFS
========================================

@@ -70,12 +80,8 @@ to get the benefit of sessions.

Conventions for mounting NFS resources are platform-specific. The
following conventions work on Linux and some Unix platforms:

-From the command line::
+.. code:: bash

-    mount -t nfs -o nfsvers=4.1,proto=tcp :
+    mount -t nfs -o nfsvers=4.1,proto=tcp :

-Current limitations
-===================
-- Per running ganesha daemon, FSAL_CEPH can only export one Ceph file system
-  although multiple directories in a Ceph file system may be exported.
diff --git a/ceph/doc/dev/cephfs-mirroring.rst b/ceph/doc/dev/cephfs-mirroring.rst
index 7ce6874db..3ca487c03 100644
--- a/ceph/doc/dev/cephfs-mirroring.rst
+++ b/ceph/doc/dev/cephfs-mirroring.rst
@@ -215,11 +215,11 @@ and/or peer updates.
CephFS mirroring module provides `mirror daemon status` interface to check
mirror daemon status::

-  $ ceph fs snapshot mirror daemon status 
+  $ ceph fs snapshot mirror daemon status

E.g::

-  $ ceph fs snapshot mirror daemon status a | jq
+  $ ceph fs snapshot mirror daemon status | jq

  [
    {
      "daemon_id": 284167,
diff --git a/ceph/doc/dev/developer_guide/dash-devel.rst b/ceph/doc/dev/developer_guide/dash-devel.rst
index f892d29da..5b449e099 100644
--- a/ceph/doc/dev/developer_guide/dash-devel.rst
+++ b/ceph/doc/dev/developer_guide/dash-devel.rst
@@ -1148,7 +1148,7 @@ Unit tests based on tox
~~~~~~~~~~~~~~~~~~~~~~~~

We included a ``tox`` configuration file that will run the unit tests under
-Python 2 or 3, as well as linting tools to guarantee the uniformity of code.
+Python 3, as well as linting tools to guarantee the uniformity of code.

You need to install ``tox`` and ``coverage`` before running it. To install the
packages in your system, either install it via your operating system's package
@@ -1163,9 +1163,6 @@ Alternatively, you can use Python's native package installation method::

To run the tests, run ``src/script/run_tox.sh`` in the dashboard directory (where
``tox.ini`` is located)::

-  ## Run Python 2+3 tests+lint commands:
-  $ ../../../script/run_tox.sh --tox-env py27,py3,lint,check
-
  ## Run Python 3 tests+lint commands:
  $ ../../../script/run_tox.sh --tox-env py3,lint,check

@@ -1662,8 +1659,8 @@ If we want to write a unit test for the above ``Ping`` controller, create a
    class PingTest(ControllerTestCase):
        @classmethod
        def setup_test(cls):
-            Ping._cp_config['tools.authenticate.on'] = False
-            cls.setup_controllers([Ping])
+            cp_config = {'tools.authenticate.on': True}
+            cls.setup_controllers([Ping], cp_config=cp_config)

        def test_ping(self):
            self._get("/api/ping")
@@ -1673,8 +1670,8 @@ If we want to write a unit test for the above ``Ping`` controller, create a
The ``ControllerTestCase`` class starts by initializing a CherryPy webserver.
Then it will call the ``setup_test()`` class method where we can explicitly load the controllers that we want to test. In the above example we are only -loading the ``Ping`` controller. We can also disable authentication of a -controller at this stage, as depicted in the example. +loading the ``Ping`` controller. We can also provide ``cp_config`` in order to +update the controller's cherrypy config (e.g. enable authentication as shown in the example). How to update or create new dashboards in grafana? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/ceph/doc/dev/developer_guide/tests-unit-tests.rst b/ceph/doc/dev/developer_guide/tests-unit-tests.rst index de9d6de6f..21db9bf9f 100644 --- a/ceph/doc/dev/developer_guide/tests-unit-tests.rst +++ b/ceph/doc/dev/developer_guide/tests-unit-tests.rst @@ -145,7 +145,6 @@ environments and run options:: $ tox -e py3,lint,check ## To run it as Jenkins would: - $ ../../../script/run_tox.sh --tox-env py27,py3,lint,check $ ../../../script/run_tox.sh --tox-env py3,lint,check Manager core unit tests diff --git a/ceph/doc/dev/vstart-ganesha.rst b/ceph/doc/dev/vstart-ganesha.rst new file mode 100644 index 000000000..4e77deb8b --- /dev/null +++ b/ceph/doc/dev/vstart-ganesha.rst @@ -0,0 +1,45 @@ +============================== +NFS CephFS-RGW Developer Guide +============================== + +CephFS exports are supported since Octopus and RGW exports are supported since +Quincy. + +Configuring NFS Ganesha to export CephFS with vstart +==================================================== + +1) Using ``cephadm`` + + .. code:: bash + + $ MDS=1 MON=1 OSD=3 NFS=1 ../src/vstart.sh -n -d --cephadm + + This will deploy a single NFS Ganesha daemon using ``vstart.sh``, where the + daemon will listen on the default NFS Ganesha port. Also cephfs export is + created. + +2) Using test orchestrator + + .. code:: bash + + $ MDS=1 MON=1 OSD=3 NFS=1 ../src/vstart.sh -n -d + + Environment variable ``NFS`` is the number of NFS Ganesha daemons to be + deployed, each listening on a random port. + + .. note:: NFS Ganesha packages must be pre-installed for this to work. + +Configuring NFS Ganesha to export RGW with vstart +================================================= + +1) Using ``cephadm`` + + .. code:: bash + + $ MON=1 OSD=3 RGW=1 NFS=1 ../src/vstart.sh -n -d --cephadm + + This will deploy a single NFS Ganesha daemon using ``vstart.sh``, where the + daemon will listen on the default NFS Ganesha port. Also rgw export is + created. + + .. note:: boto python module must be pre-installed for this to work. diff --git a/ceph/doc/man/8/cephfs-top.rst b/ceph/doc/man/8/cephfs-top.rst index bad687f9a..654633c75 100644 --- a/ceph/doc/man/8/cephfs-top.rst +++ b/ceph/doc/man/8/cephfs-top.rst @@ -71,6 +71,30 @@ Descriptions of fields number of opened inodes +.. describe:: rtio + + total size of read IOs + +.. describe:: wtio + + total size of write IOs + +.. describe:: raio + + average size of read IOs + +.. describe:: waio + + average size of write IOs + +.. describe:: rsp + + speed of read IOs compared with the last refresh + +.. 
describe:: wsp + + speed of write IOs compared with the last refresh + Availability ============ diff --git a/ceph/doc/mgr/dashboard.rst b/ceph/doc/mgr/dashboard.rst index 3ac0e0333..7acd0695e 100644 --- a/ceph/doc/mgr/dashboard.rst +++ b/ceph/doc/mgr/dashboard.rst @@ -1179,97 +1179,8 @@ A log entry may look like this:: NFS-Ganesha Management ---------------------- -Support for NFS-Ganesha Clusters Deployed by the Orchestrator -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The Ceph Dashboard can be used to manage NFS-Ganesha clusters deployed by the -Orchestrator and will detect them automatically. For more details -on deploying NFS-Ganesha clusters with the Orchestrator, please see: - -- Cephadm backend: :ref:`orchestrator-cli-stateless-services`. Or particularly, see - :ref:`deploy-cephadm-nfs-ganesha`. -- Rook backend: `Ceph NFS Gateway CRD `_. - -Support for NFS-Ganesha Clusters Defined by the User -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. note:: - - This configuration only applies for user-defined clusters, - NOT for Orchestrator-deployed clusters. - -The Ceph Dashboard can manage `NFS Ganesha `_ exports that use -CephFS or RGW as their backstore. - -To enable this feature in Ceph Dashboard there are some assumptions that need -to be met regarding the way NFS-Ganesha services are configured. - -The dashboard manages NFS-Ganesha config files stored in RADOS objects on the Ceph Cluster. -NFS-Ganesha must store part of their configuration in the Ceph cluster. - -These configuration files follow the below conventions. -Each export block must be stored in its own RADOS object named -``export-``, where ```` must match the ``Export_ID`` attribute of the -export configuration. Then, for each NFS-Ganesha service daemon there should -exist a RADOS object named ``conf-``, where ```` is an -arbitrary string that should uniquely identify the daemon instance (e.g., the -hostname where the daemon is running). -Each ``conf-`` object contains the RADOS URLs to the exports that -the NFS-Ganesha daemon should serve. These URLs are of the form:: - - %url rados://[/]/export- - -Both the ``conf-`` and ``export-`` objects must be stored in the -same RADOS pool/namespace. - - -Configuring NFS-Ganesha in the Dashboard -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To enable management of NFS-Ganesha exports in the Ceph Dashboard, we -need to tell the Dashboard the RADOS pool and namespace in which -configuration objects are stored. The Ceph Dashboard can then access them -by following the naming convention described above. - -The Dashboard command to configure the NFS-Ganesha configuration objects -location is:: - - $ ceph dashboard set-ganesha-clusters-rados-pool-namespace [/] - -After running the above command, the Ceph Dashboard is able to find the NFS-Ganesha -configuration objects and we can manage exports through the Web UI. - -.. note:: - - A dedicated pool for the NFS shares should be used. Otherwise it can cause the - `known issue `_ with listing of shares - if the NFS objects are stored together with a lot of other objects in a single - pool. - - -Support for Multiple NFS-Ganesha Clusters -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The Ceph Dashboard also supports management of NFS-Ganesha exports belonging -to other NFS-Ganesha clusters. An NFS-Ganesha cluster is a group of -NFS-Ganesha service daemons sharing the same exports. NFS-Ganesha -clusters are independent and don't share the exports configuration among each -other. 
- -Each NFS-Ganesha cluster should store its configuration objects in a -unique RADOS pool/namespace to isolate the configuration. - -To specify the the configuration location of each NFS-Ganesha cluster we -can use the same command as above but with a different value pattern:: - - $ ceph dashboard set-ganesha-clusters-rados-pool-namespace :[/](,:[/])* - -The ```` is an arbitrary string that should uniquely identify the -NFS-Ganesha cluster. - -When configuring the Ceph Dashboard with multiple NFS-Ganesha clusters, the -Web UI will allow you to choose to which cluster an export belongs. - +The dashboard requires enabling the NFS module which will be used to manage +NFS clusters and NFS exports. For more information check :ref:`mgr-nfs`. Plug-ins -------- diff --git a/ceph/doc/mgr/index.rst b/ceph/doc/mgr/index.rst index 0b03acfb9..50754a5e2 100644 --- a/ceph/doc/mgr/index.rst +++ b/ceph/doc/mgr/index.rst @@ -46,3 +46,4 @@ sensible. Orchestrator module Rook module MDS Autoscaler module + NFS module diff --git a/ceph/doc/mgr/nfs.rst b/ceph/doc/mgr/nfs.rst new file mode 100644 index 000000000..79e6c29be --- /dev/null +++ b/ceph/doc/mgr/nfs.rst @@ -0,0 +1,606 @@ +.. _mgr-nfs: + +============================= +CephFS & RGW Exports over NFS +============================= + +CephFS namespaces and RGW buckets can be exported over NFS protocol +using the `NFS-Ganesha NFS server`_. + +The ``nfs`` manager module provides a general interface for managing +NFS exports of either CephFS directories or RGW buckets. Exports can +be managed either via the CLI ``ceph nfs export ...`` commands +or via the dashboard. + +The deployment of the nfs-ganesha daemons can also be managed +automatically if either the :ref:`cephadm` or :ref:`mgr-rook` +orchestrators are enabled. If neither are in use (e.g., Ceph is +deployed via an external orchestrator like Ansible or Puppet), the +nfs-ganesha daemons must be manually deployed; for more information, +see :ref:`nfs-ganesha-config`. + +.. note:: Starting with Ceph Pacific, the ``nfs`` mgr module must be enabled. + +NFS Cluster management +====================== + +Create NFS Ganesha Cluster +-------------------------- + +.. code:: bash + + $ ceph nfs cluster create [] [--port ] [--ingress --virtual-ip ] + +This creates a common recovery pool for all NFS Ganesha daemons, new user based on +``cluster_id``, and a common NFS Ganesha config RADOS object. + +.. note:: Since this command also brings up NFS Ganesha daemons using a ceph-mgr + orchestrator module (see :doc:`/mgr/orchestrator`) such as cephadm or rook, at + least one such module must be enabled for it to work. + + Currently, NFS Ganesha daemon deployed by cephadm listens on the standard + port. So only one daemon will be deployed on a host. + +```` is an arbitrary string by which this NFS Ganesha cluster will be +known (e.g., ``mynfs``). + +```` is an optional string signifying which hosts should have NFS Ganesha +daemon containers running on them and, optionally, the total number of NFS +Ganesha daemons on the cluster (should you want to have more than one NFS Ganesha +daemon running per node). For example, the following placement string means +"deploy NFS Ganesha daemons on nodes host1 and host2 (one daemon per host):: + + "host1,host2" + +and this placement specification says to deploy single NFS Ganesha daemon each +on nodes host1 and host2 (for a total of two NFS Ganesha daemons in the +cluster):: + + "2 host1,host2" + +NFS can be deployed on a port other than 2049 (the default) with ``--port ``. 
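+
+For example, a minimal sketch (the cluster name, hosts, and port are
+placeholders) that creates a two-daemon cluster listening on port 12345:
+
+.. code:: bash
+
+   $ ceph nfs cluster create mynfs "2 host1,host2" --port 12345
+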
+
+To deploy NFS with a high-availability front-end (virtual IP and load balancer), add the
+``--ingress`` flag and specify a virtual IP address. This will deploy a combination
+of keepalived and haproxy to provide a high-availability NFS frontend for the NFS
+service.
+
+.. note:: The ingress implementation is not yet complete.  Enabling
+   ingress will deploy multiple ganesha instances and balance
+   load across them, but a host failure will not immediately
+   cause cephadm to deploy a replacement daemon before the NFS
+   grace period expires.  This high-availability functionality
+   is expected to be completed by the Quincy release (March
+   2022).
+
+For more details, refer to :ref:`orchestrator-cli-placement-spec` but keep
+in mind that specifying the placement via a YAML file is not supported.
+
+Ingress
+-------
+
+The core *nfs* service will deploy one or more nfs-ganesha daemons,
+each of which will provide a working NFS endpoint. The IP for each
+NFS endpoint will depend on which host the nfs-ganesha daemons are
+deployed on. By default, daemons are placed semi-randomly, but users can
+also explicitly control where daemons are placed; see
+:ref:`orchestrator-cli-placement-spec`.
+
+When a cluster is created with ``--ingress``, an *ingress* service is
+additionally deployed to provide load balancing and high-availability
+for the NFS servers. A virtual IP is used to provide a known, stable
+NFS endpoint that all clients can use to mount. Ceph will take care
+of the details of redirecting NFS traffic on the virtual IP to the
+appropriate backend NFS servers, and redeploying NFS servers when they
+fail.
+
+Enabling ingress via the ``ceph nfs cluster create`` command deploys a
+simple ingress configuration with the most common configuration
+options. Ingress can also be added to an existing NFS service (e.g.,
+one created without the ``--ingress`` flag), and the basic NFS service can
+also be modified after the fact to include non-default options, by modifying
+the services directly. For more information, see :ref:`cephadm-ha-nfs`.
+
+Show NFS Cluster IP(s)
+----------------------
+
+To examine an NFS cluster's IP endpoints, including the IPs for the individual NFS
+daemons, and the virtual IP (if any) for the ingress service, run:
+
+.. code:: bash
+
+    $ ceph nfs cluster info []
+
+.. note:: This will not work with the rook backend. Instead, expose the port with
+   the kubectl patch command and fetch the port details with kubectl get services
+   command::
+
+   $ kubectl patch service -n rook-ceph -p '{"spec":{"type": "NodePort"}}' rook-ceph-nfs--
+   $ kubectl get services -n rook-ceph rook-ceph-nfs--
+
+
+Delete NFS Ganesha Cluster
+--------------------------
+
+.. code:: bash
+
+    $ ceph nfs cluster rm 
+
+This deletes the deployed cluster.
+
+Updating an NFS Cluster
+-----------------------
+
+In order to modify cluster parameters (like the port or placement), you need to
+use the orchestrator interface to update the NFS service spec. The safest way to do
+that is to export the current spec, modify it, and then re-apply it. For example,
+to modify the ``nfs.foo`` service, run:
+
+.. code:: bash
+
+    $ ceph orch ls --service-name nfs.foo --export > nfs.foo.yaml
+    $ vi nfs.foo.yaml
+    $ ceph orch apply -i nfs.foo.yaml
+
+For more information about the NFS service spec, see :ref:`deploy-cephadm-nfs-ganesha`.
+
+List NFS Ganesha Clusters
+-------------------------
+
+.. code:: bash
+
+    $ ceph nfs cluster ls
+
+This lists deployed clusters.
+
+..
_nfs-cluster-set: + +Set Customized NFS Ganesha Configuration +---------------------------------------- + +.. code:: bash + + $ ceph nfs cluster config set -i + +With this the nfs cluster will use the specified config and it will have +precedence over default config blocks. + +Example use cases include: + +#. Changing log level. The logging level can be adjusted with the following config + fragment:: + + LOG { + COMPONENTS { + ALL = FULL_DEBUG; + } + } + +#. Adding custom export block. + + The following sample block creates a single export. This export will not be + managed by `ceph nfs export` interface:: + + EXPORT { + Export_Id = 100; + Transports = TCP; + Path = /; + Pseudo = /ceph/; + Protocols = 4; + Access_Type = RW; + Attr_Expiration_Time = 0; + Squash = None; + FSAL { + Name = CEPH; + Filesystem = "filesystem name"; + User_Id = "user id"; + Secret_Access_Key = "secret key"; + } + } + +.. note:: User specified in FSAL block should have proper caps for NFS-Ganesha + daemons to access ceph cluster. User can be created in following way using + `auth get-or-create`:: + + # ceph auth get-or-create client. mon 'allow r' osd 'allow rw pool=.nfs namespace=, allow rw tag cephfs data=' mds 'allow rw path=' + +View Customized NFS Ganesha Configuration +----------------------------------------- + +.. code:: bash + + $ ceph nfs cluster config get + +This will output the user defined configuration (if any). + +Reset NFS Ganesha Configuration +------------------------------- + +.. code:: bash + + $ ceph nfs cluster config reset + +This removes the user defined configuration. + +.. note:: With a rook deployment, ganesha pods must be explicitly restarted + for the new config blocks to be effective. + + +Export Management +================= + +.. warning:: Currently, the nfs interface is not integrated with dashboard. Both + dashboard and nfs interface have different export requirements and + create exports differently. Management of dashboard created exports is not + supported. + +Create CephFS Export +-------------------- + +.. code:: bash + + $ ceph nfs export create cephfs --cluster-id --pseudo-path --fsname [--readonly] [--path=/path/in/cephfs] [--client_addr ...] [--squash ] + +This creates export RADOS objects containing the export block, where + +```` is the NFS Ganesha cluster ID. + +```` is the export position within the NFS v4 Pseudo Filesystem where the export will be available on the server. It must be an absolute path and unique. + +```` is the name of the FS volume used by the NFS Ganesha cluster +that will serve this export. + +```` is the path within cephfs. Valid path should be given and default +path is '/'. It need not be unique. Subvolume path can be fetched using: + +.. code:: + + $ ceph fs subvolume getpath [--group_name ] + +```` is the list of client address for which these export +permissions will be applicable. By default all clients can access the export +according to specified export permissions. See the `NFS-Ganesha Export Sample`_ +for permissible values. + +```` defines the kind of user id squashing to be performed. The default +value is `no_root_squash`. See the `NFS-Ganesha Export Sample`_ for +permissible values. + +.. note:: Export creation is supported only for NFS Ganesha clusters deployed using nfs interface. + +Create RGW Export +----------------- + +There are two kinds of RGW exports: + +- a *user* export will export all buckets owned by an + RGW user, where the top-level directory of the export is a list of buckets. 
+- a *bucket* export will export a single bucket, where the top-level directory contains
+  the objects in the bucket.
+
+RGW bucket export
+^^^^^^^^^^^^^^^^^
+
+To export a *bucket*:
+
+.. code::
+
+   $ ceph nfs export create rgw --cluster-id  --pseudo-path  --bucket  [--user-id ] [--readonly] [--client_addr ...] [--squash ]
+
+For example, to export *mybucket* via NFS cluster *mynfs* at the pseudo-path */bucketdata* to any host in the ``192.168.10.0/24`` network:
+
+.. code::
+
+   $ ceph nfs export create rgw --cluster-id mynfs --pseudo-path /bucketdata --bucket mybucket --client_addr 192.168.10.0/24
+
+.. note:: Export creation is supported only for NFS Ganesha clusters deployed using the nfs interface.
+
+```` is the NFS Ganesha cluster ID.
+
+```` is the export position within the NFS v4 Pseudo Filesystem where the export will be available on the server. It must be an absolute path and unique.
+
+```` is the name of the bucket that will be exported.
+
+```` is optional, and specifies which RGW user will be used for read and write
+operations to the bucket. If it is not specified, the user who owns the bucket will be
+used.
+
+.. note:: Currently, if multi-site RGW is enabled, Ceph can only export RGW buckets in the default realm.
+
+```` is the list of client addresses for which these export
+permissions will be applicable. By default all clients can access the export
+according to specified export permissions. See the `NFS-Ganesha Export Sample`_
+for permissible values.
+
+```` defines the kind of user id squashing to be performed. The default
+value is `no_root_squash`. See the `NFS-Ganesha Export Sample`_ for
+permissible values.
+
+RGW user export
+^^^^^^^^^^^^^^^
+
+To export an RGW *user*:
+
+.. code::
+
+   $ ceph nfs export create rgw --cluster-id  --pseudo-path  --user-id  [--readonly] [--client_addr ...] [--squash ]
+
+For example, to export *myuser* via NFS cluster *mynfs* at the pseudo-path */myuser* to any host in the ``192.168.10.0/24`` network:
+
+.. code::
+
+   $ ceph nfs export create rgw --cluster-id mynfs --pseudo-path /myuser --user-id myuser --client_addr 192.168.10.0/24
+
+
+Delete Export
+-------------
+
+.. code:: bash
+
+   $ ceph nfs export rm  
+
+This deletes an export in an NFS Ganesha cluster, where:
+
+```` is the NFS Ganesha cluster ID.
+
+```` is the pseudo root path (must be an absolute path).
+
+List Exports
+------------
+
+.. code:: bash
+
+   $ ceph nfs export ls  [--detailed]
+
+It lists exports for a cluster, where:
+
+```` is the NFS Ganesha cluster ID.
+
+With the ``--detailed`` option enabled it shows the entire export block.
+
+Get Export
+----------
+
+.. code:: bash
+
+   $ ceph nfs export info  
+
+This displays the export block for a cluster based on the pseudo root name,
+where:
+
+```` is the NFS Ganesha cluster ID.
+
+```` is the pseudo root path (must be an absolute path).
+
+
+Create or update export via JSON specification
+----------------------------------------------
+
+An existing export can be dumped in JSON format with:
+
+..
prompt:: bash #

+   ceph nfs export info ** **
+
+An export can be created or modified by importing a JSON description in the
+same format:
+
+.. prompt:: bash #
+
+   ceph nfs export apply ** -i 
+
+For example,::
+
+   $ ceph nfs export info mynfs /cephfs > update_cephfs_export.json
+   $ cat update_cephfs_export.json
+   {
+     "export_id": 1,
+     "path": "/",
+     "cluster_id": "mynfs",
+     "pseudo": "/cephfs",
+     "access_type": "RW",
+     "squash": "no_root_squash",
+     "security_label": true,
+     "protocols": [
+       4
+     ],
+     "transports": [
+       "TCP"
+     ],
+     "fsal": {
+       "name": "CEPH",
+       "user_id": "nfs.mynfs.1",
+       "fs_name": "a",
+       "sec_label_xattr": ""
+     },
+     "clients": []
+   }
+
+The imported JSON can be a single dict describing a single export, or a JSON list
+containing multiple export dicts.
+
+The exported JSON can be modified and then reapplied.  Below, *pseudo*
+and *access_type* are modified.  When modifying an export, the
+provided JSON should fully describe the new state of the export (just
+as when creating a new export), with the exception of the
+authentication credentials, which will be carried over from the
+previous state of the export where possible.
+
+::
+
+   $ ceph nfs export apply mynfs -i update_cephfs_export.json
+   $ cat update_cephfs_export.json
+   {
+     "export_id": 1,
+     "path": "/",
+     "cluster_id": "mynfs",
+     "pseudo": "/cephfs_testing",
+     "access_type": "RO",
+     "squash": "no_root_squash",
+     "security_label": true,
+     "protocols": [
+       4
+     ],
+     "transports": [
+       "TCP"
+     ],
+     "fsal": {
+       "name": "CEPH",
+       "user_id": "nfs.mynfs.1",
+       "fs_name": "a",
+       "sec_label_xattr": ""
+     },
+     "clients": []
+   }
+
+An export can also be created or updated by injecting a Ganesha NFS EXPORT config
+fragment.  For example,::
+
+   $ ceph nfs export apply mynfs -i update_cephfs_export.conf
+   $ cat update_cephfs_export.conf
+   EXPORT {
+       FSAL {
+           name = "CEPH";
+           filesystem = "a";
+       }
+       export_id = 1;
+       path = "/";
+       pseudo = "/a";
+       access_type = "RW";
+       squash = "none";
+       attr_expiration_time = 0;
+       security_label = true;
+       protocols = 4;
+       transports = "TCP";
+   }
+
+
+Mounting
+========
+
+After the exports are successfully created and NFS Ganesha daemons are
+deployed, exports can be mounted with:
+
+.. code:: bash
+
+   $ mount -t nfs : 
+
+For example, if the NFS cluster was created with ``--ingress --virtual-ip 192.168.10.10``
+and the export's pseudo-path was ``/foo``, the export can be mounted at ``/mnt`` with:
+
+.. code:: bash
+
+   $ mount -t nfs 192.168.10.10:/foo /mnt
+
+If the NFS service is running on a non-standard port number:
+
+.. code:: bash
+
+   $ mount -t nfs -o port= : 
+
+.. note:: Only NFS v4.0+ is supported.
+
+Troubleshooting
+===============
+
+Checking NFS-Ganesha logs with:
+
+1) ``cephadm``: The NFS daemons can be listed with:
+
+   .. code:: bash
+
+      $ ceph orch ps --daemon-type nfs
+
+   You can view the logs for a specific daemon (e.g., ``nfs.mynfs.0.0.myhost.xkfzal``) on
+   the relevant host with:
+
+   .. code:: bash
+
+      # cephadm logs --fsid  --name nfs.mynfs.0.0.myhost.xkfzal
+
+2) ``rook``:
+
+   .. code:: bash
+
+      $ kubectl logs -n rook-ceph rook-ceph-nfs-- nfs-ganesha
+
+The NFS log level can be adjusted using the `nfs cluster config set` command (see :ref:`nfs-cluster-set`).
+
+
+.. _nfs-ganesha-config:
+
+
+Manual Ganesha deployment
+=========================
+
+It may be possible to deploy and manage the NFS ganesha daemons manually
+instead of allowing cephadm or rook to do so.
+
+.. note:: Manual configuration is not tested or fully documented; your
+   mileage may vary.  If you make this work, please help us by
+   updating this documentation.
+ +Known issues +------------ + +* The ``mgr/nfs`` module enumerates NFS clusters via the orchestrator API; if NFS is + not managed by the orchestrator (e.g., cephadm or rook) then this will not work. It + may be possible to create the cluster, mark the cephadm service as 'unmanaged', but this + is awkward and not ideal. + +Requirements +------------ + +The following packages are required to enable CephFS and RGW exports with nfs-ganesha: + +- ``nfs-ganesha``, ``nfs-ganesha-ceph``, ``nfs-ganesha-rados-grace`` and + ``nfs-ganesha-rados-urls`` packages (version 3.3 and above) + +Ganesha Configuration Hierarchy +------------------------------- + +Cephadm and rook start each nfs-ganesha daemon with a minimal +`bootstrap` configuration file that pulls from a shared `common` +configuration stored in the ``.nfs`` RADOS pool and watches the common +config for changes. Each export is written to a separate RADOS object +that is referenced by URL from the common config. + +.. ditaa:: + + rados://$pool/$namespace/export-$i rados://$pool/$namespace/userconf-nfs.$cluster_id + (export config) (user config) + + +----------+ +----------+ +----------+ +---------------------------+ + | | | | | | | | + | export-1 | | export-2 | | export-3 | | userconf-nfs.$cluster_id | + | | | | | | | | + +----+-----+ +----+-----+ +-----+----+ +-------------+-------------+ + ^ ^ ^ ^ + | | | | + +--------------------------------+-------------------------+ + %url | + | + +--------+--------+ + | | rados://$pool/$namespace/conf-nfs.$svc + | conf+nfs.$svc | (common config) + | | + +--------+--------+ + ^ + | + watch_url | + +----------------------------------------------+ + | | | + | | | RADOS + +----------------------------------------------------------------------------------+ + | | | CONTAINER + watch_url | watch_url | watch_url | + | | | + +--------+-------+ +--------+-------+ +-------+--------+ + | | | | | | /etc/ganesha/ganesha.conf + | nfs.$svc.a | | nfs.$svc.b | | nfs.$svc.c | (bootstrap config) + | | | | | | + +----------------+ +----------------+ +----------------+ + + +.. _NFS-Ganesha NFS Server: https://github.com/nfs-ganesha/nfs-ganesha/wiki +.. _NFS-Ganesha Export Sample: https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/export.txt diff --git a/ceph/doc/mgr/orchestrator_modules.rst b/ceph/doc/mgr/orchestrator_modules.rst index 65e2e4981..5991afe61 100644 --- a/ceph/doc/mgr/orchestrator_modules.rst +++ b/ceph/doc/mgr/orchestrator_modules.rst @@ -233,6 +233,9 @@ Services .. py:currentmodule:: ceph.deployment.service_spec .. autoclass:: ServiceSpec + :members: + :private-members: + :noindex: .. py:currentmodule:: orchestrator @@ -291,6 +294,7 @@ Services .. py:currentmodule:: ceph.deployment.service_spec .. autoclass:: RGWSpec + :noindex: .. py:currentmodule:: orchestrator diff --git a/ceph/doc/mgr/prometheus.rst b/ceph/doc/mgr/prometheus.rst index 2c0fff5dc..0f1caff23 100644 --- a/ceph/doc/mgr/prometheus.rst +++ b/ceph/doc/mgr/prometheus.rst @@ -60,13 +60,12 @@ To set a different scrape interval in the Prometheus module, set ceph config set mgr mgr/prometheus/scrape_interval 20 On large clusters (>1000 OSDs), the time to fetch the metrics may become -significant. Without the cache, the Prometheus manager module could, -especially in conjunction with multiple Prometheus instances, overload the -manager and lead to unresponsive or crashing Ceph manager instances. Hence, -the cache is enabled by default and cannot be disabled. This means that there -is a possibility that the cache becomes stale. 
The cache is considered stale -when the time to fetch the metrics from Ceph exceeds the configured -``scrape_interval``. +significant. Without the cache, the Prometheus manager module could, especially +in conjunction with multiple Prometheus instances, overload the manager and lead +to unresponsive or crashing Ceph manager instances. Hence, the cache is enabled +by default. This means that there is a possibility that the cache becomes +stale. The cache is considered stale when the time to fetch the metrics from +Ceph exceeds the configured :confval:``mgr/prometheus/scrape_interval``. If that is the case, **a warning will be logged** and the module will either @@ -85,6 +84,10 @@ To tell the module to respond with "service unavailable", set it to ``fail``:: ceph config set mgr mgr/prometheus/stale_cache_strategy fail +If you are confident that you don't require the cache, you can disable it:: + + ceph config set mgr mgr/prometheus/cache false + .. _prometheus-rbd-io-statistics: RBD IO statistics diff --git a/ceph/doc/mgr/rook.rst b/ceph/doc/mgr/rook.rst index 483772e49..1ae369623 100644 --- a/ceph/doc/mgr/rook.rst +++ b/ceph/doc/mgr/rook.rst @@ -1,7 +1,9 @@ -============================= -Rook orchestrator integration -============================= +.. _mgr-rook: + +==== +Rook +==== Rook (https://rook.io/) is an orchestration tool that can run Ceph inside a Kubernetes cluster. diff --git a/ceph/doc/rados/configuration/mclock-config-ref.rst b/ceph/doc/rados/configuration/mclock-config-ref.rst index 0f773698f..579056895 100644 --- a/ceph/doc/rados/configuration/mclock-config-ref.rst +++ b/ceph/doc/rados/configuration/mclock-config-ref.rst @@ -7,11 +7,12 @@ Mclock profiles mask the low level details from users, making it easier for them to configure mclock. -To use mclock, you must provide the following input parameters: +The following input parameters are required for a mclock profile to configure +the QoS related parameters: -* total capacity of each OSD +* total capacity (IOPS) of each OSD (determined automatically) -* an mclock profile to enable +* an mclock profile type to enable Using the settings in the specified profile, the OSD determines and applies the lower-level mclock and Ceph parameters. The parameters applied by the mclock @@ -31,11 +32,11 @@ Ceph cluster enables the throttling of the operations(IOPS) belonging to different client classes (background recovery, scrub, snaptrim, client op, osd subop)”*. -The mclock profile uses the capacity limits and the mclock profile selected by -the user to determine the low-level mclock resource control parameters. +The mclock profile uses the capacity limits and the mclock profile type selected +by the user to determine the low-level mclock resource control parameters. -Depending on the profile, lower-level mclock resource-control parameters and -some Ceph-configuration parameters are transparently applied. +Depending on the profile type, lower-level mclock resource-control parameters +and some Ceph-configuration parameters are transparently applied. The low-level mclock resource control parameters are the *reservation*, *limit*, and *weight* that provide control of the resource shares, as @@ -56,7 +57,7 @@ mclock profiles can be broadly classified into two types, as compared to background recoveries and other internal clients within Ceph. This profile is enabled by default. 
 - **high_recovery_ops**:
-    This profile allocates more reservation to background recoveries as 
+    This profile allocates more reservation to background recoveries as
     compared to external clients and other internal clients within Ceph. For
     example, an admin may enable this profile temporarily to speed up background
     recoveries during non-peak hours.
@@ -109,7 +110,8 @@ chunk of the bandwidth allocation goes to client ops. Background recovery ops
 are given lower allocation (and therefore take a longer time to complete). But
 there might be instances that necessitate giving higher allocations to either
 client ops or recovery ops. In order to deal with such a situation, you can
-enable one of the alternate built-in profiles mentioned above.
+enable one of the alternate built-in profiles by following the steps mentioned
+in the next section.
 
 If any mClock profile (including "custom") is active, the following Ceph config
 sleep options will be disabled,
@@ -139,20 +141,69 @@ all its clients.
 Steps to Enable mClock Profile
 ==============================
 
-The following sections outline the steps required to enable a mclock profile.
+As already mentioned, the default mclock profile is set to *high_client_ops*.
+The other values for the built-in profiles include *balanced* and
+*high_recovery_ops*.
+
+If there is a requirement to change the default profile, then the option
+``osd_mclock_profile`` may be set during runtime by using the following
+command:
+
+  .. prompt:: bash #
+
+     ceph config set osd.N osd_mclock_profile <value>
+
+For example, to change the profile to allow faster recoveries on "osd.0", the
+following command can be used to switch to the *high_recovery_ops* profile:
+
+  .. prompt:: bash #
+
+     ceph config set osd.0 osd_mclock_profile high_recovery_ops
+
+.. note:: The *custom* profile is not recommended unless you are an advanced
+   user.
+
+And that's it! You are ready to run workloads on the cluster and check if the
+QoS requirements are being met.
+
+
+OSD Capacity Determination (Automated)
+======================================
+
+The OSD capacity in terms of total IOPS is determined automatically during OSD
+initialization. This is achieved by running the OSD bench tool and overriding
+the default value of the ``osd_mclock_max_capacity_iops_[hdd, ssd]`` option
+depending on the device type. No other action/input is expected from the user
+to set the OSD capacity. You may verify the capacity of an OSD after the
+cluster is brought up by using the following command:
+
+  .. prompt:: bash #
+
+     ceph config show osd.N osd_mclock_max_capacity_iops_[hdd, ssd]
+
+For example, the following command shows the max capacity for "osd.0" on a Ceph
+node whose underlying device type is SSD:
+
+  .. prompt:: bash #
+
+     ceph config show osd.0 osd_mclock_max_capacity_iops_ssd
 
-Determining OSD Capacity Using Benchmark Tests
-----------------------------------------------
 
-To allow mclock to fulfill its QoS goals across its clients, it is most
-important to have a good understanding of each OSD's capacity in terms of its
-baseline throughputs (IOPS) across the Ceph nodes. To determine this capacity,
-you must perform appropriate benchmarking tests. The steps for performing these
-benchmarking tests are broadly outlined below.
+Steps to Manually Benchmark an OSD (Optional)
+=============================================
 
-Any existing benchmarking tool can be used for this purpose. The following
-steps use the *Ceph Benchmarking Tool* (cbt_). Regardless of the tool
-used, the steps described below remain the same.
+.. note:: These steps are only necessary if you want to override the OSD
+   capacity already determined automatically during OSD initialization.
+   Otherwise, you may skip this section entirely.
+
+.. tip:: If you have already determined the benchmark data and wish to manually
+   override the max osd capacity for an OSD, you may skip to section
+   `Specifying Max OSD Capacity`_.
+
+
+Any existing benchmarking tool can be used for this purpose. In this case, the
+steps use the *Ceph OSD Bench* command described in the next section. Regardless
+of the tool/command used, the steps outlined further below remain the same.
 
 As already described in the :ref:`dmclock-qos` section, the number of shards
 and the bluestore's throttle parameters have an impact on the mclock op
@@ -171,108 +222,93 @@ maximize the impact of the mclock scheduler.
 
   these parameters may also be determined during the benchmarking phase as
   described below.
 
-Benchmarking Test Steps Using CBT
-`````````````````````````````````
-
-The steps below use the default shards and detail the steps used to determine the
-correct bluestore throttle values.
-
-.. note:: These steps, although manual in April 2021, will be automated in the future.
-
-1. On the Ceph node hosting the OSDs, download cbt_ from git.
-2. Install cbt and all the dependencies mentioned on the cbt github page.
-3. Construct the Ceph configuration file and the cbt yaml file.
-4. Ensure that the bluestore throttle options ( i.e.
-   ``bluestore_throttle_bytes`` and ``bluestore_throttle_deferred_bytes``) are
-   set to the default values.
-5. Ensure that the test is performed on similar device types to get reliable
-   OSD capacity data.
-6. The OSDs can be grouped together with the desired replication factor for the
-   test to ensure reliability of OSD capacity data.
-7. After ensuring that the OSDs nodes are in the desired configuration, run a
-   simple 4KiB random write workload on the OSD(s) for 300 secs.
-8. Note the overall throughput(IOPS) obtained from the cbt output file. This
-   value is the baseline throughput(IOPS) when the default bluestore
-   throttle options are in effect.
-9. If the intent is to determine the bluestore throttle values for your
-   environment, then set the two options, ``bluestore_throttle_bytes`` and
-   ``bluestore_throttle_deferred_bytes`` to 32 KiB(32768 Bytes) each to begin
-   with. Otherwise, you may skip to the next section.
-10. Run the 4KiB random write workload as before on the OSD(s) for 300 secs.
-11. Note the overall throughput from the cbt log files and compare the value
-    against the baseline throughput in step 8.
-12. If the throughput doesn't match with the baseline, increment the bluestore
-    throttle options by 2x and repeat steps 9 through 11 until the obtained
-    throughput is very close to the baseline value.
-
-For example, during benchmarking on a machine with NVMe SSDs, a value of 256 KiB for
-both bluestore throttle and deferred bytes was determined to maximize the impact
-of mclock. For HDDs, the corresponding value was 40 MiB, where the overall
-throughput was roughly equal to the baseline throughput. Note that in general
-for HDDs, the bluestore throttle values are expected to be higher when compared
-to SSDs.
-
-.. _cbt: https://github.com/ceph/cbt
 
+OSD Bench Command Syntax
+````````````````````````
 
-Specifying Max OSD Capacity
-----------------------------
+The :ref:`osd-subsystem` section describes the OSD bench command. The syntax
+used for benchmarking is shown below:
 
-The steps in this section may be performed only if the max osd capacity is
-different from the default values (SSDs: 21500 IOPS and HDDs: 315 IOPS). The
-option ``osd_mclock_max_capacity_iops_[hdd, ssd]`` can be set by specifying it
-in either the **[global]** section or in a specific OSD section (**[osd.x]** of
-your Ceph configuration file).
+.. prompt:: bash #
 
-Alternatively, commands of the following form may be used:
+   ceph tell osd.N bench [TOTAL_BYTES] [BYTES_PER_WRITE] [OBJ_SIZE] [NUM_OBJS]
 
-  .. prompt:: bash #
+where,
 
-     ceph config set [global, osd] osd_mclock_max_capacity_iops_[hdd,ssd] <value>
+* ``TOTAL_BYTES``: Total number of bytes to write
+* ``BYTES_PER_WRITE``: Block size per write
+* ``OBJ_SIZE``: Bytes per object
+* ``NUM_OBJS``: Number of objects to write
 
-For example, the following command sets the max capacity for all the OSDs in a
-Ceph node whose underlying device type is SSDs:
+Benchmarking Test Steps Using OSD Bench
+```````````````````````````````````````
 
-  .. prompt:: bash #
+The steps below use the default shards and detail the steps used to determine
+the correct bluestore throttle values (optional).
 
-     ceph config set osd osd_mclock_max_capacity_iops_ssd 25000
+#. Bring up your Ceph cluster and log in to the Ceph node hosting the OSDs that
+   you wish to benchmark.
+#. Run a simple 4KiB random write workload on an OSD using the following
+   commands:
 
-To set the capacity for a specific OSD (for example "osd.0") whose underlying
-device type is HDD, use a command like this:
+   .. note:: Before running the test, caches must be cleared to get an
+      accurate measurement.
 
-  .. prompt:: bash #
+   For example, if you are running the benchmark test on osd.0, run the following
+   commands:
 
-     ceph config set osd.0 osd_mclock_max_capacity_iops_hdd 350
+   .. prompt:: bash #
 
+      ceph tell osd.0 cache drop
 
-Specifying Which mClock Profile to Enable
------------------------------------------
+   .. prompt:: bash #
 
-As already mentioned, the default mclock profile is set to *high_client_ops*.
-The other values for the built-in profiles include *balanced* and
-*high_recovery_ops*.
+      ceph tell osd.0 bench 12288000 4096 4194304 100
 
-If there is a requirement to change the default profile, then the option
-``osd_mclock_profile`` may be set in the **[global]** or **[osd]** section of
-your Ceph configuration file before bringing up your cluster.
+#. Note the overall throughput (IOPS) obtained from the output of the OSD bench
+   command. This value is the baseline throughput (IOPS) when the default
+   bluestore throttle options are in effect.
+#. If the intent is to determine the bluestore throttle values for your
+   environment, then set the two options, ``bluestore_throttle_bytes``
+   and ``bluestore_throttle_deferred_bytes`` to 32 KiB (32768 bytes) each
+   to begin with. Otherwise, you may skip to the next section.
+#. Run the 4KiB random write test as before using OSD bench.
+#. Note the overall throughput from the output and compare the value
+   against the baseline throughput recorded in step 3.
+#. If the throughput doesn't match with the baseline, increment the bluestore
+   throttle options by 2x and repeat steps 5 through 7 until the obtained
+   throughput is very close to the baseline value.
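The benchmarking loop added above is straightforward to script. The following
is a minimal sketch, not part of the patched documentation itself; it assumes
the bench command returns JSON containing an ``iops`` field, as recent releases
do, and that ``jq`` is installed:

.. code-block:: bash

   #!/usr/bin/env bash
   # Measure the baseline 4KiB random-write IOPS of osd.0.
   set -euo pipefail
   # Clear caches first so the measurement is not skewed by cached data.
   ceph tell osd.0 cache drop
   # 12288000 total bytes, 4096-byte writes, 4 MiB objects, 100 objects.
   iops=$(ceph tell osd.0 bench 12288000 4096 4194304 100 | jq -r '.iops')
   echo "baseline for osd.0: ${iops} IOPS"

The measured value could then be pinned with ``osd_mclock_max_capacity_iops_[hdd,ssd]``
if you want to override the automatically determined capacity, as described in
the following section.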
-Alternatively, to change the profile during runtime, use the following command:
+For example, during benchmarking on a machine with NVMe SSDs, a value of 256 KiB
+for both bluestore throttle and deferred bytes was determined to maximize the
+impact of mclock. For HDDs, the corresponding value was 40 MiB, where the
+overall throughput was roughly equal to the baseline throughput. Note that in
+general for HDDs, the bluestore throttle values are expected to be higher when
+compared to SSDs.
 
-  .. prompt:: bash #
 
-     ceph config set [global,osd] osd_mclock_profile <value>
+Specifying Max OSD Capacity
+````````````````````````````
 
-For example, to change the profile to allow faster recoveries, the following
-command can be used to switch to the *high_recovery_ops* profile:
+The steps in this section may be performed only if you want to override the
+max osd capacity automatically set during OSD initialization. The option
+``osd_mclock_max_capacity_iops_[hdd, ssd]`` for an OSD can be set by running the
+following command:
 
   .. prompt:: bash #
 
-     ceph config set osd osd_mclock_profile high_recovery_ops
+     ceph config set osd.N osd_mclock_max_capacity_iops_[hdd,ssd] <value>
 
-.. note:: The *custom* profile is not recommended unless you are an advanced user.
+For example, the following command sets the max capacity for a specific OSD
+(say "osd.0") whose underlying device type is HDD to 350 IOPS:
 
-And that's it! You are ready to run workloads on the cluster and check if the
-QoS requirements are being met.
+  .. prompt:: bash #
+
+     ceph config set osd.0 osd_mclock_max_capacity_iops_hdd 350
+
+Alternatively, you may specify the max capacity for OSDs within the Ceph
+configuration file under the respective [osd.N] section. See
+:ref:`ceph-conf-settings` for more details.
 
 .. index:: mclock; config settings
 
@@ -294,14 +330,6 @@ mClock Config Options
 :Valid Choices: high_client_ops, high_recovery_ops, balanced, custom
 :Default: ``high_client_ops``
 
-``osd_mclock_max_capacity_iops``
-
-:Description: Max IOPS capacity (at 4KiB block size) to consider per OSD
-              (overrides _ssd and _hdd if non-zero)
-
-:Type: Float
-:Default: ``0.0``
-
 ``osd_mclock_max_capacity_iops_hdd``
 
 :Description: Max IOPS capacity (at 4KiB block size) to consider per OSD (for
@@ -365,4 +393,3 @@ mClock Config Options
 
 :Type: Float
 :Default: ``0.011``
-
diff --git a/ceph/doc/rados/index.rst b/ceph/doc/rados/index.rst
index 27d1daad1..5f3f112e8 100644
--- a/ceph/doc/rados/index.rst
+++ b/ceph/doc/rados/index.rst
@@ -1,3 +1,5 @@
+.. _rados-index:
+
 ======================
  Ceph Storage Cluster
 ======================
diff --git a/ceph/doc/rados/operations/control.rst b/ceph/doc/rados/operations/control.rst
index 126f72bc6..c5f911f81 100644
--- a/ceph/doc/rados/operations/control.rst
+++ b/ceph/doc/rados/operations/control.rst
@@ -95,6 +95,8 @@ or delete them if they were just created. ::
 
 	ceph pg {pgid} mark_unfound_lost revert|delete
 
+.. _osd-subsystem:
+
 OSD Subsystem
 =============
 
diff --git a/ceph/doc/rados/operations/index.rst b/ceph/doc/rados/operations/index.rst
index c8dff51a1..2136918c7 100644
--- a/ceph/doc/rados/operations/index.rst
+++ b/ceph/doc/rados/operations/index.rst
@@ -1,3 +1,5 @@
+.. _rados-operations:
+
 ====================
  Cluster Operations
 ====================
 
diff --git a/ceph/doc/rados/operations/placement-groups.rst b/ceph/doc/rados/operations/placement-groups.rst
index 947fdb156..d494f4300 100644
--- a/ceph/doc/rados/operations/placement-groups.rst
+++ b/ceph/doc/rados/operations/placement-groups.rst
@@ -121,24 +121,24 @@ example, a pool that maps to OSDs of class `ssd` and a pool that maps to OSDs
 of class `hdd` will each have optimal PG counts that depend on the number of
 those respective device types.
 
-The autoscaler uses the `scale-down` profile by default,
-where each pool starts out with a full complements of PGs and only scales
-down when the usage ratio across the pools is not even. However, it also has
-a `scale-up` profile, where it starts out each pool with minimal PGs and scales
-up PGs when there is more usage in each pool.
+The autoscaler uses the `scale-up` profile by default,
+where it starts out each pool with minimal PGs and scales
+up PGs when there is more usage in each pool. However, it also has
+a `scale-down` profile, where each pool starts out with a full complement
+of PGs and only scales down when the usage ratio across the pools is not even.
 
 With only the `scale-down` profile, the autoscaler identifies
 any overlapping roots and prevents the pools with such roots
 from scaling because overlapping roots can cause problems
 with the scaling process.
 
-To use the `scale-up` profile::
+To use the `scale-down` profile::
 
-	ceph osd pool set autoscale-profile scale-up
+	ceph osd pool set autoscale-profile scale-down
 
-To switch back to the default `scale-down` profile::
+To switch back to the default `scale-up` profile::
 
-	ceph osd pool set autoscale-profile scale-down
+	ceph osd pool set autoscale-profile scale-up
 
 Existing clusters will continue to use the `scale-up` profile.
 To use the `scale-down` profile, users will need to set autoscale-profile `scale-down`,
diff --git a/ceph/doc/rados/operations/stretch-mode.rst b/ceph/doc/rados/operations/stretch-mode.rst
index 5c1207035..eaeaa403f 100644
--- a/ceph/doc/rados/operations/stretch-mode.rst
+++ b/ceph/doc/rados/operations/stretch-mode.rst
@@ -112,7 +112,8 @@ When stretch mode is enabled, the OSDs will only take PGs active when they
 peer across data centers (or whatever other CRUSH bucket type
 you specified), assuming both are alive. Pools will increase in size
 from the default 3 to 4, expecting 2 copies in each site. OSDs will only
-be allowed to connect to monitors in the same data center.
+be allowed to connect to monitors in the same data center. New monitors
+will not be allowed to join the cluster if they do not specify a location.
 
 If all the OSDs and monitors from a data center become inaccessible
 at once, the surviving data center will enter a degraded stretch mode. This
@@ -158,6 +159,22 @@ running with more than 2 full sites.
 Other commands
 ==============
 
+Starting in Pacific v16.2.8, if your tiebreaker monitor fails for some reason,
+you can replace it. Turn on a new monitor and run ::
+
+  $ ceph mon set_new_tiebreaker mon.<new_mon_name>
+
+This command will protest if the new monitor is in the same location as existing
+non-tiebreaker monitors. This command WILL NOT remove the previous tiebreaker
+monitor; you should do so yourself.
+
+Also in 16.2.7, if you are writing your own tooling for deploying Ceph, you can use a new
+``--set-crush-location`` option when booting monitors, instead of running
+``ceph mon set_location``. This option accepts only a single "bucket=loc" pair, e.g.
+``ceph-mon --set-crush-location 'datacenter=a'``, which must match the
+bucket type you specified when running ``enable_stretch_mode``.
+
+
 When in stretch degraded mode, the cluster will go into "recovery" mode automatically
 when the disconnected data center comes back. If that doesn't work, or you want
 to enable recovery mode early, you can invoke ::
diff --git a/ceph/doc/rados/troubleshooting/troubleshooting-mon.rst b/ceph/doc/rados/troubleshooting/troubleshooting-mon.rst
index 48e348391..7bbc72e13 100644
--- a/ceph/doc/rados/troubleshooting/troubleshooting-mon.rst
+++ b/ceph/doc/rados/troubleshooting/troubleshooting-mon.rst
@@ -1,3 +1,5 @@
+.. _rados-troubleshooting-mon:
+
 =================================
  Troubleshooting Monitors
 =================================
diff --git a/ceph/doc/radosgw/config-ref.rst b/ceph/doc/radosgw/config-ref.rst
index e9d419fd3..1ed6085a6 100644
--- a/ceph/doc/radosgw/config-ref.rst
+++ b/ceph/doc/radosgw/config-ref.rst
@@ -804,6 +804,13 @@ Logging Settings
 :Default: None
 
+``rgw_ops_log_file_path``
+
+:Description: The file for writing operations logs.
+:Type: String
+:Default: None
+
+
 ``rgw_ops_log_data_backlog``
 
 :Description: The maximum data backlog data size for operations logs written
diff --git a/ceph/doc/radosgw/nfs.rst b/ceph/doc/radosgw/nfs.rst
index 853d0ec6e..0a506599a 100644
--- a/ceph/doc/radosgw/nfs.rst
+++ b/ceph/doc/radosgw/nfs.rst
@@ -4,13 +4,18 @@ NFS
 
 .. versionadded:: Jewel
 
-Ceph Object Gateway namespaces can now be exported over file-based
-access protocols such as NFSv3 and NFSv4, alongside traditional HTTP access
+.. note:: Only the NFSv4 protocol is supported when using a cephadm or rook based deployment.
+
+Ceph Object Gateway namespaces can be exported over the file-based
+NFSv4 protocol, alongside traditional HTTP access
 protocols (S3 and Swift).
 
 In particular, the Ceph Object Gateway can now be configured to provide
 file-based access when embedded in the NFS-Ganesha NFS server.
 
+The simplest and preferred way of managing nfs-ganesha clusters and rgw exports
+is using ``ceph nfs ...`` commands. See :doc:`/mgr/nfs` for more details.
+
 librgw
 ======
 
@@ -61,22 +66,22 @@ Supported Operations
 The RGW NFS interface supports most operations on files and
 directories, with the following restrictions:
 
-- Links, including symlinks, are not supported
-- NFS ACLs are not supported
+- Links, including symlinks, are not supported.
+- NFS ACLs are not supported.
 
-  + Unix user and group ownership and permissions *are* supported
+  + Unix user and group ownership and permissions *are* supported.
 
-- Directories may not be moved/renamed
+- Directories may not be moved/renamed.
 
-  + files may be moved between directories
+  + Files may be moved between directories.
 
-- Only full, sequential *write* i/o is supported
+- Only full, sequential *write* I/O is supported
 
-  + i.e., write operations are constrained to be **uploads**
-  + many typical i/o operations such as editing files in place will necessarily fail as they perform non-sequential stores
-  + some file utilities *apparently* writing sequentially (e.g., some versions of GNU tar) may fail due to infrequent non-sequential stores
-  + When mounting via NFS, sequential application i/o can generally be constrained to be written sequentially to the NFS server via a synchronous mount option (e.g. -osync in Linux)
-  + NFS clients which cannot mount synchronously (e.g., MS Windows) will not be able to upload files
+  + i.e., write operations are constrained to be **uploads**.
+  + Many typical I/O operations such as editing files in place will necessarily fail as they perform non-sequential stores.
+  + Some file utilities *apparently* writing sequentially (e.g., some versions of GNU tar) may fail due to infrequent non-sequential stores.
+  + When mounting via NFS, sequential application I/O can generally be constrained to be written sequentially to the NFS server via a synchronous mount option (e.g. -osync in Linux).
+  + NFS clients which cannot mount synchronously (e.g., MS Windows) will not be able to upload files.
 
 Security
 ========
@@ -98,8 +103,8 @@ following characteristics:
 * additional RGW authentication types such as Keystone are not
   currently supported
 
-Configuring an NFS-Ganesha Instance
-===================================
+Manually configuring an NFS-Ganesha Instance
+============================================
 
 Each NFS RGW instance is an NFS-Ganesha server instance *embedding*
 a full Ceph RGW instance.
diff --git a/ceph/doc/radosgw/notifications.rst b/ceph/doc/radosgw/notifications.rst
index d4b5069a8..dc7520a1b 100644
--- a/ceph/doc/radosgw/notifications.rst
+++ b/ceph/doc/radosgw/notifications.rst
@@ -385,7 +385,7 @@ pushed or pulled using the pubsub sync module. For example:
            "eventSource":"ceph:s3",
            "awsRegion":"us-east-1",
            "eventTime":"2019-11-22T13:47:35.124724Z",
-           "eventName":"s3:ObjectCreated:Put",
+           "eventName":"ObjectCreated:Put",
            "userIdentity":{
                "principalId":"tester"
            },
@@ -424,7 +424,7 @@ pushed or pulled using the pubsub sync module. For example:
 - awsRegion: zonegroup
 - eventTime: timestamp indicating when the event was triggered
-- eventName: for list of supported events see: `S3 Notification Compatibility`_
+- eventName: for a list of supported events see: `S3 Notification Compatibility`_. Note that the eventName values do not start with the `s3:` prefix.
 - userIdentity.principalId: user that triggered the change
 - requestParameters.sourceIPAddress: not supported
 - responseElements.x-amz-request-id: request ID of the original change
diff --git a/ceph/doc/radosgw/s3-notification-compatibility.rst b/ceph/doc/radosgw/s3-notification-compatibility.rst
index 6a6538aec..008c33a59 100644
--- a/ceph/doc/radosgw/s3-notification-compatibility.rst
+++ b/ceph/doc/radosgw/s3-notification-compatibility.rst
@@ -105,6 +105,10 @@ Event Types
 | ``s3:ReducedRedundancyLostObject``           |                 | Not applicable to Ceph                    |
 +----------------------------------------------+-----------------+-------------------------------------------+
 
+.. note::
+
+   The ``s3:ObjectRemoved:DeleteMarkerCreated`` event presents information on the latest version of the object.
+
 Topic Configuration
 -------------------
 In the case of bucket notifications, the topics management API will be derived
 from `AWS Simple Notification Service API`_.
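The synchronous mount mentioned in the RGW NFS restrictions above would look
roughly like the following on a Linux client; this is a sketch only, and the
host name, export path, and mount point are placeholders rather than values
taken from the patch:

.. code-block:: bash

   # "sync" forces the client to issue writes sequentially, which the
   # RGW NFS export requires for uploads to succeed.
   mount -t nfs -o nfsvers=4.1,proto=tcp,sync ganesha.example.com:/mybucket /mnt/rgw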
diff --git a/ceph/doc/start/documenting-ceph.rst b/ceph/doc/start/documenting-ceph.rst
index fa528a647..735a20df3 100644
--- a/ceph/doc/start/documenting-ceph.rst
+++ b/ceph/doc/start/documenting-ceph.rst
@@ -271,10 +271,10 @@ the following packages are required:
MMMM YYYY H:mm [Auer]",LLLL:"dddd, D. MMMM YYYY H:mm [Auer]"},calendar:{sameDay:"[Haut um] LT",sameElse:"L",nextDay:"[Muer um] LT",nextWeek:"dddd [um] LT",lastDay:"[G\xebschter um] LT",lastWeek:function(){switch(this.day()){case 2:case 4:return"[Leschten] dddd [um] LT";default:return"[Leschte] dddd [um] LT"}}},relativeTime:{future:function(e){return n(e.substr(0,e.indexOf(" ")))?"a "+e:"an "+e},past:function(e){return n(e.substr(0,e.indexOf(" ")))?"viru "+e:"virun "+e},s:"e puer Sekonnen",ss:"%d Sekonnen",m:t,mm:"%d Minutten",h:t,hh:"%d Stonnen",d:t,dd:"%d Deeg",M:t,MM:"%d M\xe9int",y:t,yy:"%d Joer"},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})}(n("wd/R"))},RK3t:function(e,t,n){var i=n("0Dky"),r=n("xrYK"),s="".split;e.exports=i((function(){return!Object("z").propertyIsEnumerable(0)}))?function(e){return"String"==r(e)?s.call(e,""):Object(e)}:Object},RNIs:function(e,t,n){var i=n("tiKp"),r=n("fHMY"),s=n("m/L8"),o=i("unscopables"),a=Array.prototype;null==a[o]&&s.f(a,o,{configurable:!0,value:r(null)}),e.exports=function(e){a[o][e]=!0}},Rf2I:function(e,t,n){"use strict";n.d(t,"a",(function(){return A}));var i=n("s7LF"),r=n("LvDl"),s=n.n(r),o=n("2EZI"),a=n("Fgil"),c=n("aXbf"),l=n("8Y7J"),u=n("G0yt"),d=n("ajRT"),h=n("SVse"),f=n("NwgZ"),p=n("6+kj"),m=n("ANnk"),b=n("f69J"),g=n("EmSq"),_=n("ppaS");function y(e,t){if(1&e&&(l.Qb(0,10),l.Oc(1),l.Pb()),2&e){const e=l.ic();l.yb(1),l.Qc(" ",e.titleText," ")}}function v(e,t){if(1&e&&(l.Sb(0,"p"),l.Oc(1),l.Rb()),2&e){const e=l.ic();l.yb(1),l.Pc(e.message)}}const w=function(e){return{required:e}};function S(e,t){if(1&e&&(l.Sb(0,"label",18),l.Oc(1),l.Rb()),2&e){const e=l.ic().$implicit;l.pc("ngClass",l.uc(3,w,!0===(null==e?null:e.required)))("for",e.name),l.yb(1),l.Qc(" ",e.label," ")}}function M(e,t){if(1&e&&l.Nb(0,"input",19),2&e){const e=l.ic().$implicit;l.pc("type",e.type)("id",e.name)("name",e.name)("formControlName",e.name)}}function x(e,t){if(1&e&&l.Nb(0,"input",20),2&e){const e=l.ic().$implicit;l.pc("id",e.name)("name",e.name)("formControlName",e.name)}}function k(e,t){if(1&e&&(l.Sb(0,"option",24),l.Oc(1),l.Rb()),2&e){const e=l.ic(2).$implicit;l.pc("ngValue",null),l.yb(1),l.Qc(" ",null==e||null==e.typeConfig?null:e.typeConfig.placeholder," ")}}function D(e,t){if(1&e&&(l.Sb(0,"option",25),l.Oc(1),l.Rb()),2&e){const e=t.$implicit;l.pc("value",e.value),l.yb(1),l.Qc(" ",e.text," ")}}function T(e,t){if(1&e&&(l.Sb(0,"select",21),l.Mc(1,k,2,2,"option",22),l.Mc(2,D,2,2,"option",23),l.Rb()),2&e){const e=l.ic().$implicit;l.pc("id",e.name)("formControlName",e.name),l.yb(1),l.pc("ngIf",null==e||null==e.typeConfig?null:e.typeConfig.placeholder),l.yb(1),l.pc("ngForOf",null==e||null==e.typeConfig?null:e.typeConfig.options)}}function C(e,t){if(1&e&&l.Nb(0,"cd-select-badges",26),2&e){const e=l.ic().$implicit;l.pc("id",e.name)("data",e.value)("customBadges",null==e||null==e.typeConfig?null:e.typeConfig.customBadges)("options",null==e||null==e.typeConfig?null:e.typeConfig.options)("messages",null==e||null==e.typeConfig?null:e.typeConfig.messages)}}function O(e,t){if(1&e&&(l.Sb(0,"span",27),l.Oc(1),l.Rb()),2&e){const e=l.ic().$implicit,t=l.ic();l.yb(1),l.Qc(" ",t.getError(e)," ")}}const R=function(e,t){return{"cd-col-form-input":e,"col-sm-12":t}},L=function(){return["text","number"]};function E(e,t){if(1&e&&(l.Qb(0),l.Sb(1,"div"),l.Mc(2,S,2,5,"label",11),l.Sb(3,"div",12),l.Mc(4,M,1,4,"input",13),l.Mc(5,x,1,3,"input",14),l.Mc(6,T,3,4,"select",15),l.Mc(7,C,1,5,"cd-select-badges",16),l.Mc(8,O,2,1,"span",17),l.Rb(),l.Rb(),l.Pb()),2&e){const 
e=t.$implicit,n=l.ic(),i=l.Ac(4);l.yb(1),l.Bb("form-group row cd-",e.name,"-form-group"),l.yb(1),l.pc("ngIf",e.label),l.yb(1),l.pc("ngClass",l.vc(10,R,e.label,!e.label)),l.yb(1),l.pc("ngIf",l.tc(13,L).includes(e.type)),l.yb(1),l.pc("ngIf","binary"===e.type),l.yb(1),l.pc("ngIf","select"===e.type),l.yb(1),l.pc("ngIf","select-badges"===e.type),l.yb(1),l.pc("ngIf",n.formGroup.showError(e.name,i))}}let A=(()=>{class e{constructor(e,t,n,i){this.activeModal=e,this.formBuilder=t,this.formatter=n,this.dimlessBinaryPipe=i}ngOnInit(){this.createForm()}createForm(){const e={};this.fields.forEach(t=>{e[t.name]=this.createFormControl(t)}),this.formGroup=this.formBuilder.group(e)}createFormControl(e){let t=[];return s.a.isBoolean(e.required)&&e.required&&t.push(i.A.required),e.validators&&(t=t.concat(e.validators)),new i.h(s.a.defaultTo("binary"===e.type?this.dimlessBinaryPipe.transform(e.value):e.value,null),{validators:t})}getError(e){const t=this.formGroup.get(e.name).errors;return Object.keys(t).map(n=>this.getErrorMessage(n,t[n],e.errors)).join("
")}getErrorMessage(e,t,n){if(n){const t=n[e];if(t)return t}return["binaryMin","binaryMax"].includes(e)?t():"required"===e?"This field is required.":"An error occurred."}onSubmitForm(e){this.fields.filter(e=>"binary"===e.type).map(e=>e.name).forEach(t=>{const n=e[t];n&&(e[t]=this.formatter.toBytes(n))}),this.activeModal.close(),s.a.isFunction(this.onSubmit)&&this.onSubmit(e)}}return e.\u0275fac=function(t){return new(t||e)(l.Mb(u.a),l.Mb(o.a),l.Mb(c.a),l.Mb(a.a))},e.\u0275cmp=l.Gb({type:e,selectors:[["cd-form-modal"]],decls:10,vars:7,consts:[[3,"modalRef"],["class","modal-title",4,"ngIf"],[1,"modal-content"],["novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[4,"ngIf"],[4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"modal-title"],["class","cd-col-form-label",3,"ngClass","for",4,"ngIf"],[3,"ngClass"],["class","form-control",3,"type","id","name","formControlName",4,"ngIf"],["type","text","class","form-control","cdDimlessBinary","",3,"id","name","formControlName",4,"ngIf"],["class","form-control custom-select",3,"id","formControlName",4,"ngIf"],[3,"id","data","customBadges","options","messages",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],[1,"cd-col-form-label",3,"ngClass","for"],[1,"form-control",3,"type","id","name","formControlName"],["type","text","cdDimlessBinary","",1,"form-control",3,"id","name","formControlName"],[1,"form-control","custom-select",3,"id","formControlName"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],[3,"ngValue"],[3,"value"],[3,"id","data","customBadges","options","messages"],[1,"invalid-feedback"]],template:function(e,t){1&e&&(l.Sb(0,"cd-modal",0),l.Mc(1,y,2,1,"ng-container",1),l.Qb(2,2),l.Sb(3,"form",3,4),l.Sb(5,"div",5),l.Mc(6,v,2,1,"p",6),l.Mc(7,E,9,14,"ng-container",7),l.Rb(),l.Sb(8,"div",8),l.Sb(9,"cd-form-button-panel",9),l.gc("submitActionEvent",(function(){return t.onSubmitForm(t.formGroup.value)})),l.Rb(),l.Rb(),l.Rb(),l.Pb(),l.Rb()),2&e&&(l.pc("modalRef",t.activeModal),l.yb(1),l.pc("ngIf",t.titleText),l.yb(2),l.pc("formGroup",t.formGroup),l.yb(3),l.pc("ngIf",t.message),l.yb(1),l.pc("ngForOf",t.fields),l.yb(2),l.pc("form",t.formGroup)("submitText",t.submitButtonText))},directives:[d.a,h.r,i.C,i.r,i.k,f.a,h.q,p.a,h.p,m.a,i.d,b.a,i.q,i.i,g.a,i.z,i.u,i.B,_.a],styles:[""]}),e})()},Rm1S:function(e,t,n){"use strict";var i=n("14Sl"),r=n("glrk"),s=n("UMSQ"),o=n("HYAF"),a=n("iqWW"),c=n("FMNM");i("match",1,(function(e,t,n){return[function(t){var n=o(this),i=null==t?void 0:t[e];return void 0!==i?i.call(t,n):new RegExp(t)[e](String(n))},function(e){var i=n(t,e,this);if(i.done)return i.value;var o=r(e),l=String(this);if(!o.global)return c(o,l);var u=o.unicode;o.lastIndex=0;for(var d,h=[],f=0;null!==(d=c(o,l));){var p=String(d[0]);h[f]=p,""===p&&(o.lastIndex=a(l,s(o.lastIndex),u)),f++}return 0===f?null:h}]}))},RnhZ:function(e,t,n){var 
i={"./af":"K/tc","./af.js":"K/tc","./ar":"jnO4","./ar-dz":"o1bE","./ar-dz.js":"o1bE","./ar-kw":"Qj4J","./ar-kw.js":"Qj4J","./ar-ly":"HP3h","./ar-ly.js":"HP3h","./ar-ma":"CoRJ","./ar-ma.js":"CoRJ","./ar-sa":"gjCT","./ar-sa.js":"gjCT","./ar-tn":"bYM6","./ar-tn.js":"bYM6","./ar.js":"jnO4","./az":"SFxW","./az.js":"SFxW","./be":"H8ED","./be.js":"H8ED","./bg":"hKrs","./bg.js":"hKrs","./bm":"p/rL","./bm.js":"p/rL","./bn":"kEOa","./bn-bd":"loYQ","./bn-bd.js":"loYQ","./bn.js":"kEOa","./bo":"0mo+","./bo.js":"0mo+","./br":"aIdf","./br.js":"aIdf","./bs":"JVSJ","./bs.js":"JVSJ","./ca":"1xZ4","./ca.js":"1xZ4","./cs":"PA2r","./cs.js":"PA2r","./cv":"A+xa","./cv.js":"A+xa","./cy":"l5ep","./cy.js":"l5ep","./da":"DxQv","./da.js":"DxQv","./de":"tGlX","./de-at":"s+uk","./de-at.js":"s+uk","./de-ch":"u3GI","./de-ch.js":"u3GI","./de.js":"tGlX","./dv":"WYrj","./dv.js":"WYrj","./el":"jUeY","./el.js":"jUeY","./en-au":"Dmvi","./en-au.js":"Dmvi","./en-ca":"OIYi","./en-ca.js":"OIYi","./en-gb":"Oaa7","./en-gb.js":"Oaa7","./en-ie":"4dOw","./en-ie.js":"4dOw","./en-il":"czMo","./en-il.js":"czMo","./en-in":"7C5Q","./en-in.js":"7C5Q","./en-nz":"b1Dy","./en-nz.js":"b1Dy","./en-sg":"t+mt","./en-sg.js":"t+mt","./eo":"Zduo","./eo.js":"Zduo","./es":"iYuL","./es-do":"CjzT","./es-do.js":"CjzT","./es-mx":"tbfe","./es-mx.js":"tbfe","./es-us":"Vclq","./es-us.js":"Vclq","./es.js":"iYuL","./et":"7BjC","./et.js":"7BjC","./eu":"D/JM","./eu.js":"D/JM","./fa":"jfSC","./fa.js":"jfSC","./fi":"gekB","./fi.js":"gekB","./fil":"1ppg","./fil.js":"1ppg","./fo":"ByF4","./fo.js":"ByF4","./fr":"nyYc","./fr-ca":"2fjn","./fr-ca.js":"2fjn","./fr-ch":"Dkky","./fr-ch.js":"Dkky","./fr.js":"nyYc","./fy":"cRix","./fy.js":"cRix","./ga":"USCx","./ga.js":"USCx","./gd":"9rRi","./gd.js":"9rRi","./gl":"iEDd","./gl.js":"iEDd","./gom-deva":"qvJo","./gom-deva.js":"qvJo","./gom-latn":"DKr+","./gom-latn.js":"DKr+","./gu":"4MV3","./gu.js":"4MV3","./he":"x6pH","./he.js":"x6pH","./hi":"3E1r","./hi.js":"3E1r","./hr":"S6ln","./hr.js":"S6ln","./hu":"WxRl","./hu.js":"WxRl","./hy-am":"1rYy","./hy-am.js":"1rYy","./id":"UDhR","./id.js":"UDhR","./is":"BVg3","./is.js":"BVg3","./it":"bpih","./it-ch":"bxKX","./it-ch.js":"bxKX","./it.js":"bpih","./ja":"B55N","./ja.js":"B55N","./jv":"tUCv","./jv.js":"tUCv","./ka":"IBtZ","./ka.js":"IBtZ","./kk":"bXm7","./kk.js":"bXm7","./km":"6B0Y","./km.js":"6B0Y","./kn":"PpIw","./kn.js":"PpIw","./ko":"Ivi+","./ko.js":"Ivi+","./ku":"JCF/","./ku.js":"JCF/","./ky":"lgnt","./ky.js":"lgnt","./lb":"RAwQ","./lb.js":"RAwQ","./lo":"sp3z","./lo.js":"sp3z","./lt":"JvlW","./lt.js":"JvlW","./lv":"uXwI","./lv.js":"uXwI","./me":"KTz0","./me.js":"KTz0","./mi":"aIsn","./mi.js":"aIsn","./mk":"aQkU","./mk.js":"aQkU","./ml":"AvvY","./ml.js":"AvvY","./mn":"lYtQ","./mn.js":"lYtQ","./mr":"Ob0Z","./mr.js":"Ob0Z","./ms":"6+QB","./ms-my":"ZAMP","./ms-my.js":"ZAMP","./ms.js":"6+QB","./mt":"G0Uy","./mt.js":"G0Uy","./my":"honF","./my.js":"honF","./nb":"bOMt","./nb.js":"bOMt","./ne":"OjkT","./ne.js":"OjkT","./nl":"+s0g","./nl-be":"2ykv","./nl-be.js":"2ykv","./nl.js":"+s0g","./nn":"uEye","./nn.js":"uEye","./oc-lnc":"Fnuy","./oc-lnc.js":"Fnuy","./pa-in":"8/+R","./pa-in.js":"8/+R","./pl":"jVdC","./pl.js":"jVdC","./pt":"8mBD","./pt-br":"0tRk","./pt-br.js":"0tRk","./pt.js":"8mBD","./ro":"lyxo","./ro.js":"lyxo","./ru":"lXzo","./ru.js":"lXzo","./sd":"Z4QM","./sd.js":"Z4QM","./se":"//9w","./se.js":"//9w","./si":"7aV9","./si.js":"7aV9","./sk":"e+ae","./sk.js":"e+ae","./sl":"gVVK","./sl.js":"gVVK","./sq":"yPMs","./sq.js":"yPMs","./sr":"zx6S","./sr-cyrl":"E+lV","./sr-cyrl.js":"E+lV","./sr.js"
:"zx6S","./ss":"Ur1D","./ss.js":"Ur1D","./sv":"X709","./sv.js":"X709","./sw":"dNwA","./sw.js":"dNwA","./ta":"PeUW","./ta.js":"PeUW","./te":"XLvN","./te.js":"XLvN","./tet":"V2x9","./tet.js":"V2x9","./tg":"Oxv6","./tg.js":"Oxv6","./th":"EOgW","./th.js":"EOgW","./tk":"Wv91","./tk.js":"Wv91","./tl-ph":"Dzi0","./tl-ph.js":"Dzi0","./tlh":"z3Vd","./tlh.js":"z3Vd","./tr":"DoHr","./tr.js":"DoHr","./tzl":"z1FC","./tzl.js":"z1FC","./tzm":"wQk9","./tzm-latn":"tT3J","./tzm-latn.js":"tT3J","./tzm.js":"wQk9","./ug-cn":"YRex","./ug-cn.js":"YRex","./uk":"raLr","./uk.js":"raLr","./ur":"UpQW","./ur.js":"UpQW","./uz":"Loxo","./uz-latn":"AQ68","./uz-latn.js":"AQ68","./uz.js":"Loxo","./vi":"KSF8","./vi.js":"KSF8","./x-pseudo":"/X5v","./x-pseudo.js":"/X5v","./yo":"fzPg","./yo.js":"fzPg","./zh-cn":"XDpg","./zh-cn.js":"XDpg","./zh-hk":"SatO","./zh-hk.js":"SatO","./zh-mo":"OmwH","./zh-mo.js":"OmwH","./zh-tw":"kOpN","./zh-tw.js":"kOpN"};function r(e){var t=s(e);return n(t)}function s(e){if(!n.o(i,e)){var t=new Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}return i[e]}r.keys=function(){return Object.keys(i)},r.resolve=s,e.exports=r,r.id="RnhZ"},S6ln:function(e,t,n){!function(e){"use strict";function t(e,t,n){var i=e+" ";switch(n){case"ss":return i+(1===e?"sekunda":2===e||3===e||4===e?"sekunde":"sekundi");case"m":return t?"jedna minuta":"jedne minute";case"mm":return i+(1===e?"minuta":2===e||3===e||4===e?"minute":"minuta");case"h":return t?"jedan sat":"jednog sata";case"hh":return i+(1===e?"sat":2===e||3===e||4===e?"sata":"sati");case"dd":return i+(1===e?"dan":"dana");case"MM":return i+(1===e?"mjesec":2===e||3===e||4===e?"mjeseca":"mjeseci");case"yy":return i+(1===e?"godina":2===e||3===e||4===e?"godine":"godina")}}e.defineLocale("hr",{months:{format:"sije\u010dnja_velja\u010de_o\u017eujka_travnja_svibnja_lipnja_srpnja_kolovoza_rujna_listopada_studenoga_prosinca".split("_"),standalone:"sije\u010danj_velja\u010da_o\u017eujak_travanj_svibanj_lipanj_srpanj_kolovoz_rujan_listopad_studeni_prosinac".split("_")},monthsShort:"sij._velj._o\u017eu._tra._svi._lip._srp._kol._ruj._lis._stu._pro.".split("_"),monthsParseExact:!0,weekdays:"nedjelja_ponedjeljak_utorak_srijeda_\u010detvrtak_petak_subota".split("_"),weekdaysShort:"ned._pon._uto._sri._\u010det._pet._sub.".split("_"),weekdaysMin:"ne_po_ut_sr_\u010de_pe_su".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"Do MMMM YYYY",LLL:"Do MMMM YYYY H:mm",LLLL:"dddd, Do MMMM YYYY H:mm"},calendar:{sameDay:"[danas u] LT",nextDay:"[sutra u] LT",nextWeek:function(){switch(this.day()){case 0:return"[u] [nedjelju] [u] LT";case 3:return"[u] [srijedu] [u] LT";case 6:return"[u] [subotu] [u] LT";case 1:case 2:case 4:case 5:return"[u] dddd [u] LT"}},lastDay:"[ju\u010der u] LT",lastWeek:function(){switch(this.day()){case 0:return"[pro\u0161lu] [nedjelju] [u] LT";case 3:return"[pro\u0161lu] [srijedu] [u] LT";case 6:return"[pro\u0161le] [subote] [u] LT";case 1:case 2:case 4:case 5:return"[pro\u0161li] dddd [u] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"prije %s",s:"par sekundi",ss:t,m:t,mm:t,h:t,hh:t,d:"dan",dd:t,M:"mjesec",MM:t,y:"godinu",yy:t},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}})}(n("wd/R"))},S7zO:function(e,t,n){"use strict";n.d(t,"a",(function(){return g}));var i=n("LvDl"),r=n.n(i),s=n("oxzT"),o=(n("vCyI"),n("jKX/"),n("8Y7J")),a=n("SVse"),c=n("iInd"),l=n("G0yt");const u=function(e){return{disabled:e}},d=function(e){return[e]};function h(e,t){if(1&e){const 
e=o.Tb();o.Qb(0),o.Sb(1,"button",3),o.gc("click",(function(){o.Dc(e);const t=o.ic();return t.useClickAction(t.currentAction)})),o.Nb(2,"i",4),o.Sb(3,"span"),o.Oc(4),o.Rb(),o.Rb(),o.Pb()}if(2&e){const e=o.ic();o.yb(1),o.Bb("btn btn-",e.btnColor,""),o.qc("title",e.useDisableDesc(e.currentAction)),o.pc("ngClass",o.uc(9,u,e.disableSelectionAction(e.currentAction)))("routerLink",e.useRouterLink(e.currentAction))("preserveFragment",e.currentAction.preserveFragment?"":null),o.yb(1),o.pc("ngClass",o.uc(11,d,e.currentAction.icon)),o.yb(2),o.Pc(e.currentAction.name)}}function f(e,t){if(1&e&&(o.Qb(0),o.Oc(1),o.Pb()),2&e){const e=o.ic(2);o.yb(1),o.Qc("",e.dropDownOnly," ")}}function p(e,t){1&e&&o.Nb(0,"span",10)}function m(e,t){if(1&e){const e=o.Tb();o.Qb(0),o.Sb(1,"button",11),o.gc("click",(function(){o.Dc(e);const n=t.$implicit;return o.ic(2).useClickAction(n)})),o.Nb(2,"i",4),o.Sb(3,"span"),o.Oc(4),o.Rb(),o.Rb(),o.Pb()}if(2&e){const e=t.$implicit,n=o.ic(2);o.yb(1),o.Ab(n.toClassName(e)),o.qc("title",n.useDisableDesc(e)),o.pc("routerLink",n.useRouterLink(e))("preserveFragment",e.preserveFragment?"":null)("disabled",n.disableSelectionAction(e)),o.yb(1),o.pc("ngClass",o.uc(9,d,e.icon)),o.yb(2),o.Pc(e.name)}}function b(e,t){if(1&e&&(o.Sb(0,"div",5),o.Sb(1,"button",6),o.Mc(2,f,2,1,"ng-container",1),o.Mc(3,p,1,0,"span",7),o.Rb(),o.Sb(4,"div",8),o.Mc(5,m,5,11,"ng-container",9),o.Rb(),o.Rb()),2&e){const e=o.ic();o.yb(1),o.Bb("btn btn-",e.btnColor," dropdown-toggle-split"),o.yb(1),o.pc("ngIf",e.dropDownOnly),o.yb(1),o.pc("ngIf",!e.dropDownOnly),o.yb(2),o.pc("ngForOf",e.dropDownActions)}}let g=(()=>{class e{constructor(){this.btnColor="accent",this.dropDownActions=[],this.icons=s.a}ngOnInit(){this.removeActionsWithNoPermissions(),this.onSelectionChange()}ngOnChanges(e){e.selection&&this.onSelectionChange()}onSelectionChange(){this.updateDropDownActions(),this.updateCurrentAction()}toClassName(e){return e.name.replace(/ /g,"-").replace(/[^a-z-]/gi,"").toLowerCase()}removeActionsWithNoPermissions(){if(!this.permission)return void(this.tableActions=[]);const e=Object.keys(this.permission).filter(e=>this.permission[e]);this.tableActions=this.tableActions.filter(t=>e.includes(t.permission))}updateDropDownActions(){this.dropDownActions=this.tableActions.filter(e=>e.visible?e.visible(this.selection):e)}updateCurrentAction(){if(this.dropDownOnly)return void(this.currentAction=void 0);let e=this.dropDownActions.find(e=>this.showableAction(e));!e&&this.dropDownActions.length>0&&(e=this.dropDownActions[0]),this.currentAction=e}showableAction(e){const t=e.canBePrimary,n=this.selection.hasSingleSelection,i="create"===e.permission?!n:n;return t&&t(this.selection)||!t&&i}useRouterLink(e){if(e.routerLink&&!this.disableSelectionAction(e))return r.a.isString(e.routerLink)?e.routerLink:e.routerLink()}disableSelectionAction(e){const t=e.disable;if(t)return Boolean(t(this.selection));const n=e.permission,i=this.selection.hasSingleSelection&&this.selection.first();return Boolean(["update","delete"].includes(n)&&(!i||i.cdExecuting))}useClickAction(e){return!this.disableSelectionAction(e)&&e.click&&e.click()}useDisableDesc(e){if(e.disable){const t=e.disable(this.selection);return r.a.isString(t)?t:void 0}}}return e.\u0275fac=function(t){return 
new(t||e)},e.\u0275cmp=o.Gb({type:e,selectors:[["cd-table-actions"]],inputs:{permission:"permission",selection:"selection",tableActions:"tableActions",btnColor:"btnColor",dropDownOnly:"dropDownOnly"},features:[o.wb],decls:3,vars:2,consts:[[1,"btn-group"],[4,"ngIf"],["class","btn-group","ngbDropdown","","role","group","aria-label","Button group with nested dropdown",4,"ngIf"],["type","button",3,"title","ngClass","routerLink","preserveFragment","click"],[3,"ngClass"],["ngbDropdown","","role","group","aria-label","Button group with nested dropdown",1,"btn-group"],["ngbDropdownToggle",""],["class","sr-only",4,"ngIf"],["ngbDropdownMenu","",1,"dropdown-menu"],[4,"ngFor","ngForOf"],[1,"sr-only"],["ngbDropdownItem","",3,"title","routerLink","preserveFragment","disabled","click"]],template:function(e,t){1&e&&(o.Sb(0,"div",0),o.Mc(1,h,5,13,"ng-container",1),o.Mc(2,b,6,6,"div",2),o.Rb()),2&e&&(o.yb(1),o.pc("ngIf",t.currentAction),o.yb(1),o.pc("ngIf",t.dropDownActions.length>1))},directives:[a.r,a.p,c.f,l.i,l.m,l.k,a.q,l.j],styles:["button.disabled[_ngcontent-%COMP%]{cursor:default!important;pointer-events:auto}"]}),e})()},SFxW:function(e,t,n){!function(e){"use strict";var t={1:"-inci",5:"-inci",8:"-inci",70:"-inci",80:"-inci",2:"-nci",7:"-nci",20:"-nci",50:"-nci",3:"-\xfcnc\xfc",4:"-\xfcnc\xfc",100:"-\xfcnc\xfc",6:"-nc\u0131",9:"-uncu",10:"-uncu",30:"-uncu",60:"-\u0131nc\u0131",90:"-\u0131nc\u0131"};e.defineLocale("az",{months:"yanvar_fevral_mart_aprel_may_iyun_iyul_avqust_sentyabr_oktyabr_noyabr_dekabr".split("_"),monthsShort:"yan_fev_mar_apr_may_iyn_iyl_avq_sen_okt_noy_dek".split("_"),weekdays:"Bazar_Bazar ert\u0259si_\xc7\u0259r\u015f\u0259nb\u0259 ax\u015fam\u0131_\xc7\u0259r\u015f\u0259nb\u0259_C\xfcm\u0259 ax\u015fam\u0131_C\xfcm\u0259_\u015e\u0259nb\u0259".split("_"),weekdaysShort:"Baz_BzE_\xc7Ax_\xc7\u0259r_CAx_C\xfcm_\u015e\u0259n".split("_"),weekdaysMin:"Bz_BE_\xc7A_\xc7\u0259_CA_C\xfc_\u015e\u0259".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[bug\xfcn saat] LT",nextDay:"[sabah saat] LT",nextWeek:"[g\u0259l\u0259n h\u0259ft\u0259] dddd [saat] LT",lastDay:"[d\xfcn\u0259n] LT",lastWeek:"[ke\xe7\u0259n h\u0259ft\u0259] dddd [saat] LT",sameElse:"L"},relativeTime:{future:"%s sonra",past:"%s \u0259vv\u0259l",s:"bir ne\xe7\u0259 saniy\u0259",ss:"%d saniy\u0259",m:"bir d\u0259qiq\u0259",mm:"%d d\u0259qiq\u0259",h:"bir saat",hh:"%d saat",d:"bir g\xfcn",dd:"%d g\xfcn",M:"bir ay",MM:"%d ay",y:"bir il",yy:"%d il"},meridiemParse:/gec\u0259|s\u0259h\u0259r|g\xfcnd\xfcz|ax\u015fam/,isPM:function(e){return/^(g\xfcnd\xfcz|ax\u015fam)$/.test(e)},meridiem:function(e,t,n){return e<4?"gec\u0259":e<12?"s\u0259h\u0259r":e<17?"g\xfcnd\xfcz":"ax\u015fam"},dayOfMonthOrdinalParse:/\d{1,2}-(\u0131nc\u0131|inci|nci|\xfcnc\xfc|nc\u0131|uncu)/,ordinal:function(e){if(0===e)return e+"-\u0131nc\u0131";var n=e%10;return e+(t[n]||t[e%100-n]||t[e>=100?100:null])},week:{dow:1,doy:7}})}(n("wd/R"))},STAE:function(e,t,n){var i=n("LQDL"),r=n("0Dky");e.exports=!!Object.getOwnPropertySymbols&&!r((function(){return!String(Symbol())||!Symbol.sham&&i&&i<41}))},SVse:function(e,t,n){"use strict";n.d(t,"a",(function(){return v})),n.d(t,"b",(function(){return Ae})),n.d(t,"c",(function(){return qe})),n.d(t,"d",(function(){return c})),n.d(t,"e",(function(){return Fe})),n.d(t,"f",(function(){return Ve})),n.d(t,"g",(function(){return C})),n.d(t,"h",(function(){return S})),n.d(t,"i",(function(){return 
ze})),n.d(t,"j",(function(){return $e})),n.d(t,"k",(function(){return He})),n.d(t,"l",(function(){return d})),n.d(t,"m",(function(){return M})),n.d(t,"n",(function(){return _})),n.d(t,"o",(function(){return Ie})),n.d(t,"p",(function(){return me})),n.d(t,"q",(function(){return ge})),n.d(t,"r",(function(){return ye})),n.d(t,"s",(function(){return De})),n.d(t,"t",(function(){return Me})),n.d(t,"u",(function(){return xe})),n.d(t,"v",(function(){return ke})),n.d(t,"w",(function(){return Te})),n.d(t,"x",(function(){return w})),n.d(t,"y",(function(){return Be})),n.d(t,"z",(function(){return l})),n.d(t,"A",(function(){return je})),n.d(t,"B",(function(){return O})),n.d(t,"C",(function(){return Ne})),n.d(t,"D",(function(){return Ke})),n.d(t,"E",(function(){return Q})),n.d(t,"F",(function(){return A})),n.d(t,"G",(function(){return E})),n.d(t,"H",(function(){return I})),n.d(t,"I",(function(){return Qe})),n.d(t,"J",(function(){return a})),n.d(t,"K",(function(){return Je})),n.d(t,"L",(function(){return s})),n.d(t,"M",(function(){return pe})),n.d(t,"N",(function(){return o}));var i=n("8Y7J");let r=null;function s(){return r}function o(e){r||(r=e)}class a{}const c=new i.r("DocumentToken");let l=(()=>{class e{}return e.\u0275fac=function(t){return new(t||e)},e.\u0275prov=Object(i.Ib)({factory:u,token:e,providedIn:"platform"}),e})();function u(){return Object(i.dc)(h)}const d=new i.r("Location Initialized");let h=(()=>{class e extends l{constructor(e){super(),this._doc=e,this._init()}_init(){this.location=s().getLocation(),this._history=s().getHistory()}getBaseHrefFromDOM(){return s().getBaseHref(this._doc)}onPopState(e){s().getGlobalEventTarget(this._doc,"window").addEventListener("popstate",e,!1)}onHashChange(e){s().getGlobalEventTarget(this._doc,"window").addEventListener("hashchange",e,!1)}get href(){return this.location.href}get protocol(){return this.location.protocol}get hostname(){return this.location.hostname}get port(){return this.location.port}get pathname(){return this.location.pathname}get search(){return this.location.search}get hash(){return this.location.hash}set pathname(e){this.location.pathname=e}pushState(e,t,n){f()?this._history.pushState(e,t,n):this.location.hash=n}replaceState(e,t,n){f()?this._history.replaceState(e,t,n):this.location.hash=n}forward(){this._history.forward()}back(){this._history.back()}getState(){return this._history.state}}return e.\u0275fac=function(t){return new(t||e)(i.dc(c))},e.\u0275prov=Object(i.Ib)({factory:p,token:e,providedIn:"platform"}),e})();function f(){return!!window.history.pushState}function p(){return new h(Object(i.dc)(c))}function m(e,t){if(0==e.length)return t;if(0==t.length)return e;let n=0;return e.endsWith("/")&&n++,t.startsWith("/")&&n++,2==n?e+t.substring(1):1==n?e+t:e+"/"+t}function b(e){const t=e.match(/#|\?|$/),n=t&&t.index||e.length;return e.slice(0,n-("/"===e[n-1]?1:0))+e.slice(n)}function g(e){return e&&"?"!==e[0]?"?"+e:e}let _=(()=>{class e{}return e.\u0275fac=function(t){return new(t||e)},e.\u0275prov=Object(i.Ib)({factory:y,token:e,providedIn:"root"}),e})();function y(e){const t=Object(i.dc)(c).location;return new w(Object(i.dc)(l),t&&t.origin||"")}const v=new i.r("appBaseHref");let w=(()=>{class e extends _{constructor(e,t){if(super(),this._platformLocation=e,null==t&&(t=this._platformLocation.getBaseHrefFromDOM()),null==t)throw new Error("No base href set. 
Please provide a value for the APP_BASE_HREF token or add a base element to the document.");this._baseHref=t}onPopState(e){this._platformLocation.onPopState(e),this._platformLocation.onHashChange(e)}getBaseHref(){return this._baseHref}prepareExternalUrl(e){return m(this._baseHref,e)}path(e=!1){const t=this._platformLocation.pathname+g(this._platformLocation.search),n=this._platformLocation.hash;return n&&e?`${t}${n}`:t}pushState(e,t,n,i){const r=this.prepareExternalUrl(n+g(i));this._platformLocation.pushState(e,t,r)}replaceState(e,t,n,i){const r=this.prepareExternalUrl(n+g(i));this._platformLocation.replaceState(e,t,r)}forward(){this._platformLocation.forward()}back(){this._platformLocation.back()}}return e.\u0275fac=function(t){return new(t||e)(i.dc(l),i.dc(v,8))},e.\u0275prov=i.Ib({token:e,factory:e.\u0275fac}),e})(),S=(()=>{class e extends _{constructor(e,t){super(),this._platformLocation=e,this._baseHref="",null!=t&&(this._baseHref=t)}onPopState(e){this._platformLocation.onPopState(e),this._platformLocation.onHashChange(e)}getBaseHref(){return this._baseHref}path(e=!1){let t=this._platformLocation.hash;return null==t&&(t="#"),t.length>0?t.substring(1):t}prepareExternalUrl(e){const t=m(this._baseHref,e);return t.length>0?"#"+t:t}pushState(e,t,n,i){let r=this.prepareExternalUrl(n+g(i));0==r.length&&(r=this._platformLocation.pathname),this._platformLocation.pushState(e,t,r)}replaceState(e,t,n,i){let r=this.prepareExternalUrl(n+g(i));0==r.length&&(r=this._platformLocation.pathname),this._platformLocation.replaceState(e,t,r)}forward(){this._platformLocation.forward()}back(){this._platformLocation.back()}}return e.\u0275fac=function(t){return new(t||e)(i.dc(l),i.dc(v,8))},e.\u0275prov=i.Ib({token:e,factory:e.\u0275fac}),e})(),M=(()=>{class e{constructor(e,t){this._subject=new i.o,this._urlChangeListeners=[],this._platformStrategy=e;const n=this._platformStrategy.getBaseHref();this._platformLocation=t,this._baseHref=b(k(n)),this._platformStrategy.onPopState(e=>{this._subject.emit({url:this.path(!0),pop:!0,state:e.state,type:e.type})})}path(e=!1){return this.normalize(this._platformStrategy.path(e))}getState(){return this._platformLocation.getState()}isCurrentPathEqualTo(e,t=""){return this.path()==this.normalize(e+g(t))}normalize(t){return e.stripTrailingSlash(function(e,t){return e&&t.startsWith(e)?t.substring(e.length):t}(this._baseHref,k(t)))}prepareExternalUrl(e){return e&&"/"!==e[0]&&(e="/"+e),this._platformStrategy.prepareExternalUrl(e)}go(e,t="",n=null){this._platformStrategy.pushState(n,"",e,t),this._notifyUrlChangeListeners(this.prepareExternalUrl(e+g(t)),n)}replaceState(e,t="",n=null){this._platformStrategy.replaceState(n,"",e,t),this._notifyUrlChangeListeners(this.prepareExternalUrl(e+g(t)),n)}forward(){this._platformStrategy.forward()}back(){this._platformStrategy.back()}onUrlChange(e){this._urlChangeListeners.push(e),this._urlChangeSubscription||(this._urlChangeSubscription=this.subscribe(e=>{this._notifyUrlChangeListeners(e.url,e.state)}))}_notifyUrlChangeListeners(e="",t){this._urlChangeListeners.forEach(n=>n(e,t))}subscribe(e,t,n){return this._subject.subscribe({next:e,error:t,complete:n})}}return e.\u0275fac=function(t){return new(t||e)(i.dc(_),i.dc(l))},e.normalizeQueryParams=g,e.joinWithSlash=m,e.stripTrailingSlash=b,e.\u0275prov=Object(i.Ib)({factory:x,token:e,providedIn:"root"}),e})();function x(){return new M(Object(i.dc)(_),Object(i.dc)(l))}function k(e){return e.replace(/\/index.html$/,"")}var D=function(e){return 
e[e.Decimal=0]="Decimal",e[e.Percent=1]="Percent",e[e.Currency=2]="Currency",e[e.Scientific=3]="Scientific",e}({}),T=function(e){return e[e.Zero=0]="Zero",e[e.One=1]="One",e[e.Two=2]="Two",e[e.Few=3]="Few",e[e.Many=4]="Many",e[e.Other=5]="Other",e}({}),C=function(e){return e[e.Format=0]="Format",e[e.Standalone=1]="Standalone",e}({}),O=function(e){return e[e.Narrow=0]="Narrow",e[e.Abbreviated=1]="Abbreviated",e[e.Wide=2]="Wide",e[e.Short=3]="Short",e}({}),R=function(e){return e[e.Short=0]="Short",e[e.Medium=1]="Medium",e[e.Long=2]="Long",e[e.Full=3]="Full",e}({}),L=function(e){return e[e.Decimal=0]="Decimal",e[e.Group=1]="Group",e[e.List=2]="List",e[e.PercentSign=3]="PercentSign",e[e.PlusSign=4]="PlusSign",e[e.MinusSign=5]="MinusSign",e[e.Exponential=6]="Exponential",e[e.SuperscriptingExponent=7]="SuperscriptingExponent",e[e.PerMille=8]="PerMille",e[e[1/0]=9]="Infinity",e[e.NaN=10]="NaN",e[e.TimeSeparator=11]="TimeSeparator",e[e.CurrencyDecimal=12]="CurrencyDecimal",e[e.CurrencyGroup=13]="CurrencyGroup",e}({});function E(e,t,n){const r=Object(i.ib)(e),s=H([r[i.Z.DayPeriodsFormat],r[i.Z.DayPeriodsStandalone]],t);return H(s,n)}function A(e,t,n){const r=Object(i.ib)(e),s=H([r[i.Z.DaysFormat],r[i.Z.DaysStandalone]],t);return H(s,n)}function I(e,t,n){const r=Object(i.ib)(e),s=H([r[i.Z.MonthsFormat],r[i.Z.MonthsStandalone]],t);return H(s,n)}function P(e,t){return H(Object(i.ib)(e)[i.Z.DateFormat],t)}function j(e,t){return H(Object(i.ib)(e)[i.Z.TimeFormat],t)}function N(e,t){return H(Object(i.ib)(e)[i.Z.DateTimeFormat],t)}function F(e,t){const n=Object(i.ib)(e),r=n[i.Z.NumberSymbols][t];if(void 0===r){if(t===L.CurrencyDecimal)return n[i.Z.NumberSymbols][L.Decimal];if(t===L.CurrencyGroup)return n[i.Z.NumberSymbols][L.Group]}return r}function Y(e,t){return Object(i.ib)(e)[i.Z.NumberFormats][t]}const z=i.lb;function $(e){if(!e[i.Z.ExtraData])throw new Error(`Missing extra locale data for the locale "${e[i.Z.LocaleId]}". Use "registerLocaleData" to load new data. 
See the "I18n guide" on angular.io to know more.`)}function H(e,t){for(let n=t;n>-1;n--)if(void 0!==e[n])return e[n];throw new Error("Locale data API: locale data undefined")}function W(e){const[t,n]=e.split(":");return{hours:+t,minutes:+n}}const V=/^(\d{4})-?(\d\d)-?(\d\d)(?:T(\d\d)(?::?(\d\d)(?::?(\d\d)(?:\.(\d+))?)?)?(Z|([+-])(\d\d):?(\d\d))?)?$/,B={},U=/((?:[^GyMLwWdEabBhHmsSzZO']+)|(?:'(?:[^']|'')*')|(?:G{1,5}|y{1,4}|M{1,5}|L{1,5}|w{1,2}|W{1}|d{1,2}|E{1,6}|a{1,5}|b{1,5}|B{1,5}|h{1,2}|H{1,2}|m{1,2}|s{1,2}|S{1,3}|z{1,4}|Z{1,5}|O{1,4}))([\s\S]*)/;var G=function(e){return e[e.Short=0]="Short",e[e.ShortGMT=1]="ShortGMT",e[e.Long=2]="Long",e[e.Extended=3]="Extended",e}({}),q=function(e){return e[e.FullYear=0]="FullYear",e[e.Month=1]="Month",e[e.Date=2]="Date",e[e.Hours=3]="Hours",e[e.Minutes=4]="Minutes",e[e.Seconds=5]="Seconds",e[e.FractionalSeconds=6]="FractionalSeconds",e[e.Day=7]="Day",e}({}),J=function(e){return e[e.DayPeriods=0]="DayPeriods",e[e.Days=1]="Days",e[e.Months=2]="Months",e[e.Eras=3]="Eras",e}({});function Q(e,t,n,r){let s=function(e){if(se(e))return e;if("number"==typeof e&&!isNaN(e))return new Date(e);if("string"==typeof e){e=e.trim();const t=parseFloat(e);if(!isNaN(e-t))return new Date(t);if(/^(\d{4}-\d{1,2}-\d{1,2})$/.test(e)){const[t,n,i]=e.split("-").map(e=>+e);return new Date(t,n-1,i)}let n;if(n=e.match(V))return function(e){const t=new Date(0);let n=0,i=0;const r=e[8]?t.setUTCFullYear:t.setFullYear,s=e[8]?t.setUTCHours:t.setHours;e[9]&&(n=Number(e[9]+e[10]),i=Number(e[9]+e[11])),r.call(t,Number(e[1]),Number(e[2])-1,Number(e[3]));const o=Number(e[4]||0)-n,a=Number(e[5]||0)-i,c=Number(e[6]||0),l=Math.round(1e3*parseFloat("0."+(e[7]||0)));return s.call(t,o,a,c,l),t}(n)}const t=new Date(e);if(!se(t))throw new Error(`Unable to convert "${e}" into a date`);return t}(e);t=function e(t,n){const r=function(e){return Object(i.ib)(e)[i.Z.LocaleId]}(t);if(B[r]=B[r]||{},B[r][n])return B[r][n];let s="";switch(n){case"shortDate":s=P(t,R.Short);break;case"mediumDate":s=P(t,R.Medium);break;case"longDate":s=P(t,R.Long);break;case"fullDate":s=P(t,R.Full);break;case"shortTime":s=j(t,R.Short);break;case"mediumTime":s=j(t,R.Medium);break;case"longTime":s=j(t,R.Long);break;case"fullTime":s=j(t,R.Full);break;case"short":const n=e(t,"shortTime"),i=e(t,"shortDate");s=K(N(t,R.Short),[n,i]);break;case"medium":const r=e(t,"mediumTime"),o=e(t,"mediumDate");s=K(N(t,R.Medium),[r,o]);break;case"long":const a=e(t,"longTime"),c=e(t,"longDate");s=K(N(t,R.Long),[a,c]);break;case"full":const l=e(t,"fullTime"),u=e(t,"fullDate");s=K(N(t,R.Full),[l,u])}return s&&(B[r][n]=s),s}(n,t)||t;let o,a=[];for(;t;){if(o=U.exec(t),!o){a.push(t);break}{a=a.concat(o.slice(1));const e=a.pop();if(!e)break;t=e}}let c=s.getTimezoneOffset();r&&(c=re(r,c),s=function(e,t,n){const i=e.getTimezoneOffset();return function(e,t){return(e=new Date(e.getTime())).setMinutes(e.getMinutes()+t),e}(e,-1*(re(t,i)-i))}(s,r));let l="";return a.forEach(e=>{const t=function(e){if(ie[e])return ie[e];let 
t;switch(e){case"G":case"GG":case"GGG":t=ee(J.Eras,O.Abbreviated);break;case"GGGG":t=ee(J.Eras,O.Wide);break;case"GGGGG":t=ee(J.Eras,O.Narrow);break;case"y":t=X(q.FullYear,1,0,!1,!0);break;case"yy":t=X(q.FullYear,2,0,!0,!0);break;case"yyy":t=X(q.FullYear,3,0,!1,!0);break;case"yyyy":t=X(q.FullYear,4,0,!1,!0);break;case"M":case"L":t=X(q.Month,1,1);break;case"MM":case"LL":t=X(q.Month,2,1);break;case"MMM":t=ee(J.Months,O.Abbreviated);break;case"MMMM":t=ee(J.Months,O.Wide);break;case"MMMMM":t=ee(J.Months,O.Narrow);break;case"LLL":t=ee(J.Months,O.Abbreviated,C.Standalone);break;case"LLLL":t=ee(J.Months,O.Wide,C.Standalone);break;case"LLLLL":t=ee(J.Months,O.Narrow,C.Standalone);break;case"w":t=ne(1);break;case"ww":t=ne(2);break;case"W":t=ne(1,!0);break;case"d":t=X(q.Date,1);break;case"dd":t=X(q.Date,2);break;case"E":case"EE":case"EEE":t=ee(J.Days,O.Abbreviated);break;case"EEEE":t=ee(J.Days,O.Wide);break;case"EEEEE":t=ee(J.Days,O.Narrow);break;case"EEEEEE":t=ee(J.Days,O.Short);break;case"a":case"aa":case"aaa":t=ee(J.DayPeriods,O.Abbreviated);break;case"aaaa":t=ee(J.DayPeriods,O.Wide);break;case"aaaaa":t=ee(J.DayPeriods,O.Narrow);break;case"b":case"bb":case"bbb":t=ee(J.DayPeriods,O.Abbreviated,C.Standalone,!0);break;case"bbbb":t=ee(J.DayPeriods,O.Wide,C.Standalone,!0);break;case"bbbbb":t=ee(J.DayPeriods,O.Narrow,C.Standalone,!0);break;case"B":case"BB":case"BBB":t=ee(J.DayPeriods,O.Abbreviated,C.Format,!0);break;case"BBBB":t=ee(J.DayPeriods,O.Wide,C.Format,!0);break;case"BBBBB":t=ee(J.DayPeriods,O.Narrow,C.Format,!0);break;case"h":t=X(q.Hours,1,-12);break;case"hh":t=X(q.Hours,2,-12);break;case"H":t=X(q.Hours,1);break;case"HH":t=X(q.Hours,2);break;case"m":t=X(q.Minutes,1);break;case"mm":t=X(q.Minutes,2);break;case"s":t=X(q.Seconds,1);break;case"ss":t=X(q.Seconds,2);break;case"S":t=X(q.FractionalSeconds,1);break;case"SS":t=X(q.FractionalSeconds,2);break;case"SSS":t=X(q.FractionalSeconds,3);break;case"Z":case"ZZ":case"ZZZ":t=te(G.Short);break;case"ZZZZZ":t=te(G.Extended);break;case"O":case"OO":case"OOO":case"z":case"zz":case"zzz":t=te(G.ShortGMT);break;case"OOOO":case"ZZZZ":case"zzzz":t=te(G.Long);break;default:return null}return ie[e]=t,t}(e);l+=t?t(s,n,c):"''"===e?"'":e.replace(/(^'|'$)/g,"").replace(/''/g,"'")}),l}function K(e,t){return t&&(e=e.replace(/\{([^}]+)}/g,(function(e,n){return null!=t&&n in t?t[n]:e}))),e}function Z(e,t,n="-",i,r){let s="";(e<0||r&&e<=0)&&(r?e=1-e:(e=-e,s=n));let o=String(e);for(;o.length0||a>-n)&&(a+=n),e===q.Hours)0===a&&-12===n&&(a=12);else if(e===q.FractionalSeconds)return c=t,Z(a,3).substr(0,c);var c;const l=F(o,L.MinusSign);return Z(a,t,l,i,r)}}function ee(e,t,n=C.Format,r=!1){return function(s,o){return function(e,t,n,r,s,o){switch(n){case J.Months:return I(t,s,r)[e.getMonth()];case J.Days:return A(t,s,r)[e.getDay()];case J.DayPeriods:const a=e.getHours(),c=e.getMinutes();if(o){const e=function(e){const t=Object(i.ib)(e);return $(t),(t[i.Z.ExtraData][2]||[]).map(e=>"string"==typeof e?W(e):[W(e[0]),W(e[1])])}(t),n=function(e,t,n){const r=Object(i.ib)(e);$(r);const s=H([r[i.Z.ExtraData][0],r[i.Z.ExtraData][1]],t)||[];return H(s,n)||[]}(t,s,r),o=e.findIndex(e=>{if(Array.isArray(e)){const[t,n]=e,i=a>=t.hours&&c>=t.minutes,r=a0?Math.floor(r/60):Math.ceil(r/60);switch(e){case G.Short:return(r>=0?"+":"")+Z(o,2,s)+Z(Math.abs(r%60),2,s);case G.ShortGMT:return"GMT"+(r>=0?"+":"")+Z(o,1,s);case G.Long:return"GMT"+(r>=0?"+":"")+Z(o,2,s)+":"+Z(Math.abs(r%60),2,s);case G.Extended:return 0===i?"Z":(r>=0?"+":"")+Z(o,2,s)+":"+Z(Math.abs(r%60),2,s);default:throw new Error(`Unknown 
zone width "${e}"`)}}}function ne(e,t=!1){return function(n,i){let r;if(t){const e=new Date(n.getFullYear(),n.getMonth(),1).getDay()-1,t=n.getDate();r=1+Math.floor((t+e)/7)}else{const e=(s=n,new Date(s.getFullYear(),s.getMonth(),s.getDate()+(4-s.getDay()))),t=function(e){const t=new Date(e,0,1).getDay();return new Date(e,0,1+(t<=4?4:11)-t)}(e.getFullYear()),i=e.getTime()-t.getTime();r=1+Math.round(i/6048e5)}var s;return Z(r,e,F(i,L.MinusSign))}}const ie={};function re(e,t){e=e.replace(/:/g,"");const n=Date.parse("Jan 01, 1970 00:00:00 "+e)/6e4;return isNaN(n)?t:n}function se(e){return e instanceof Date&&!isNaN(e.valueOf())}const oe=/^(\d+)?\.((\d+)(-(\d+))?)?$/,ae=".",ce="0";function le(e,t,n,i,r,s,o=!1){let a="",c=!1;if(isFinite(e)){let l=function(e){let t,n,i,r,s,o=Math.abs(e)+"",a=0;for((n=o.indexOf(ae))>-1&&(o=o.replace(ae,"")),(i=o.search(/e/i))>0?(n<0&&(n=i),n+=+o.slice(i+1),o=o.substring(0,i)):n<0&&(n=o.length),i=0;o.charAt(i)===ce;i++);if(i===(s=o.length))t=[0],n=1;else{for(s--;o.charAt(s)===ce;)s--;for(n-=i,t=[],r=0;i<=s;i++,r++)t[r]=Number(o.charAt(i))}return n>22&&(t=t.splice(0,21),a=n-1,n=1),{digits:t,exponent:a,integerLen:n}}(e);o&&(l=function(e){if(0===e.digits[0])return e;const t=e.digits.length-e.integerLen;return e.exponent?e.exponent+=2:(0===t?e.digits.push(0,0):1===t&&e.digits.push(0),e.integerLen+=2),e}(l));let u=t.minInt,d=t.minFrac,h=t.maxFrac;if(s){const e=s.match(oe);if(null===e)throw new Error(s+" is not a valid digit info");const t=e[1],n=e[3],i=e[5];null!=t&&(u=de(t)),null!=n&&(d=de(n)),null!=i?h=de(i):null!=n&&d>h&&(h=d)}!function(e,t,n){if(t>n)throw new Error(`The minimum number of digits after fraction (${t}) is higher than the maximum (${n}).`);let i=e.digits,r=i.length-e.integerLen;const s=Math.min(Math.max(t,r),n);let o=s+e.integerLen,a=i[o];if(o>0){i.splice(Math.max(e.integerLen,o));for(let e=o;e=5)if(o-1<0){for(let t=0;t>o;t--)i.unshift(0),e.integerLen++;i.unshift(1),e.integerLen++}else i[o-1]++;for(;r=l?i.pop():c=!1),t>=10?1:0}),0);u&&(i.unshift(u),e.integerLen++)}(l,d,h);let f=l.digits,p=l.integerLen;const m=l.exponent;let b=[];for(c=f.every(e=>!e);p0?b=f.splice(p,f.length):(b=f,f=[0]);const g=[];for(f.length>=t.lgSize&&g.unshift(f.splice(-t.lgSize,f.length).join(""));f.length>t.gSize;)g.unshift(f.splice(-t.gSize,f.length).join(""));f.length&&g.unshift(f.join("")),a=g.join(F(n,i)),b.length&&(a+=F(n,r)+b.join("")),m&&(a+=F(n,L.Exponential)+"+"+m)}else a=F(n,L.Infinity);return a=e<0&&!c?t.negPre+a+t.negSuf:t.posPre+a+t.posSuf,a}function ue(e,t="-"){const n={minInt:1,minFrac:0,maxFrac:0,posPre:"",posSuf:"",negPre:"",negSuf:"",gSize:0,lgSize:0},i=e.split(";"),r=i[0],s=i[1],o=-1!==r.indexOf(ae)?r.split(ae):[r.substring(0,r.lastIndexOf(ce)+1),r.substring(r.lastIndexOf(ce)+1)],a=o[0],c=o[1]||"";n.posPre=a.substr(0,a.indexOf("#"));for(let u=0;u{class e extends he{constructor(e){super(),this.locale=e}getPluralCategory(e,t){switch(z(t||this.locale)(e)){case T.Zero:return"zero";case T.One:return"one";case T.Two:return"two";case T.Few:return"few";case T.Many:return"many";default:return"other"}}}return e.\u0275fac=function(t){return new(t||e)(i.dc(i.v))},e.\u0275prov=i.Ib({token:e,factory:e.\u0275fac}),e})();function pe(e,t){t=encodeURIComponent(t);for(const n of e.split(";")){const e=n.indexOf("="),[i,r]=-1==e?[n,""]:[n.slice(0,e),n.slice(e+1)];if(i.trim()===t)return decodeURIComponent(r)}return null}let me=(()=>{class 
e{constructor(e,t,n,i){this._iterableDiffers=e,this._keyValueDiffers=t,this._ngEl=n,this._renderer=i,this._iterableDiffer=null,this._keyValueDiffer=null,this._initialClasses=[],this._rawClass=null}set klass(e){this._removeClasses(this._initialClasses),this._initialClasses="string"==typeof e?e.split(/\s+/):[],this._applyClasses(this._initialClasses),this._applyClasses(this._rawClass)}set ngClass(e){this._removeClasses(this._rawClass),this._applyClasses(this._initialClasses),this._iterableDiffer=null,this._keyValueDiffer=null,this._rawClass="string"==typeof e?e.split(/\s+/):e,this._rawClass&&(Object(i.ob)(this._rawClass)?this._iterableDiffer=this._iterableDiffers.find(this._rawClass).create():this._keyValueDiffer=this._keyValueDiffers.find(this._rawClass).create())}ngDoCheck(){if(this._iterableDiffer){const e=this._iterableDiffer.diff(this._rawClass);e&&this._applyIterableChanges(e)}else if(this._keyValueDiffer){const e=this._keyValueDiffer.diff(this._rawClass);e&&this._applyKeyValueChanges(e)}}_applyKeyValueChanges(e){e.forEachAddedItem(e=>this._toggleClass(e.key,e.currentValue)),e.forEachChangedItem(e=>this._toggleClass(e.key,e.currentValue)),e.forEachRemovedItem(e=>{e.previousValue&&this._toggleClass(e.key,!1)})}_applyIterableChanges(e){e.forEachAddedItem(e=>{if("string"!=typeof e.item)throw new Error("NgClass can only toggle CSS classes expressed as strings, got "+Object(i.tb)(e.item));this._toggleClass(e.item,!0)}),e.forEachRemovedItem(e=>this._toggleClass(e.item,!1))}_applyClasses(e){e&&(Array.isArray(e)||e instanceof Set?e.forEach(e=>this._toggleClass(e,!0)):Object.keys(e).forEach(t=>this._toggleClass(t,!!e[t])))}_removeClasses(e){e&&(Array.isArray(e)||e instanceof Set?e.forEach(e=>this._toggleClass(e,!1)):Object.keys(e).forEach(e=>this._toggleClass(e,!1)))}_toggleClass(e,t){(e=e.trim())&&e.split(/\s+/g).forEach(e=>{t?this._renderer.addClass(this._ngEl.nativeElement,e):this._renderer.removeClass(this._ngEl.nativeElement,e)})}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(i.t),i.Mb(i.u),i.Mb(i.m),i.Mb(i.E))},e.\u0275dir=i.Hb({type:e,selectors:[["","ngClass",""]],inputs:{klass:["class","klass"],ngClass:"ngClass"}}),e})();class be{constructor(e,t,n,i){this.$implicit=e,this.ngForOf=t,this.index=n,this.count=i}get first(){return 0===this.index}get last(){return this.index===this.count-1}get even(){return this.index%2==0}get odd(){return!this.even}}let ge=(()=>{class e{constructor(e,t,n){this._viewContainer=e,this._template=t,this._differs=n,this._ngForOf=null,this._ngForOfDirty=!0,this._differ=null}set ngForOf(e){this._ngForOf=e,this._ngForOfDirty=!0}set ngForTrackBy(e){Object(i.U)()&&null!=e&&"function"!=typeof e&&console&&console.warn&&console.warn(`trackBy must be a function, but received ${JSON.stringify(e)}. See https://angular.io/api/common/NgForOf#change-propagation for more information.`),this._trackByFn=e}get ngForTrackBy(){return this._trackByFn}set ngForTemplate(e){e&&(this._template=e)}ngDoCheck(){if(this._ngForOfDirty){this._ngForOfDirty=!1;const n=this._ngForOf;if(!this._differ&&n)try{this._differ=this._differs.find(n).create(this.ngForTrackBy)}catch(t){throw new Error(`Cannot find a differ supporting object '${n}' of type '${e=n,e.name||typeof e}'. 
NgFor only supports binding to Iterables such as Arrays.`)}}var e;if(this._differ){const e=this._differ.diff(this._ngForOf);e&&this._applyChanges(e)}}_applyChanges(e){const t=[];e.forEachOperation((e,n,i)=>{if(null==e.previousIndex){const n=this._viewContainer.createEmbeddedView(this._template,new be(null,this._ngForOf,-1,-1),null===i?void 0:i),r=new _e(e,n);t.push(r)}else if(null==i)this._viewContainer.remove(null===n?void 0:n);else if(null!==n){const r=this._viewContainer.get(n);this._viewContainer.move(r,i);const s=new _e(e,r);t.push(s)}});for(let n=0;n{this._viewContainer.get(e.currentIndex).context.$implicit=e.item})}_perViewChange(e,t){e.context.$implicit=t.item}static ngTemplateContextGuard(e,t){return!0}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(i.P),i.Mb(i.L),i.Mb(i.t))},e.\u0275dir=i.Hb({type:e,selectors:[["","ngFor","","ngForOf",""]],inputs:{ngForOf:"ngForOf",ngForTrackBy:"ngForTrackBy",ngForTemplate:"ngForTemplate"}}),e})();class _e{constructor(e,t){this.record=e,this.view=t}}let ye=(()=>{class e{constructor(e,t){this._viewContainer=e,this._context=new ve,this._thenTemplateRef=null,this._elseTemplateRef=null,this._thenViewRef=null,this._elseViewRef=null,this._thenTemplateRef=t}set ngIf(e){this._context.$implicit=this._context.ngIf=e,this._updateView()}set ngIfThen(e){we("ngIfThen",e),this._thenTemplateRef=e,this._thenViewRef=null,this._updateView()}set ngIfElse(e){we("ngIfElse",e),this._elseTemplateRef=e,this._elseViewRef=null,this._updateView()}_updateView(){this._context.$implicit?this._thenViewRef||(this._viewContainer.clear(),this._elseViewRef=null,this._thenTemplateRef&&(this._thenViewRef=this._viewContainer.createEmbeddedView(this._thenTemplateRef,this._context))):this._elseViewRef||(this._viewContainer.clear(),this._thenViewRef=null,this._elseTemplateRef&&(this._elseViewRef=this._viewContainer.createEmbeddedView(this._elseTemplateRef,this._context)))}static ngTemplateContextGuard(e,t){return!0}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(i.P),i.Mb(i.L))},e.\u0275dir=i.Hb({type:e,selectors:[["","ngIf",""]],inputs:{ngIf:"ngIf",ngIfThen:"ngIfThen",ngIfElse:"ngIfElse"}}),e})();class ve{constructor(){this.$implicit=null,this.ngIf=null}}function we(e,t){if(t&&!t.createEmbeddedView)throw new Error(`${e} must be a TemplateRef, but received '${Object(i.tb)(t)}'.`)}class Se{constructor(e,t){this._viewContainerRef=e,this._templateRef=t,this._created=!1}create(){this._created=!0,this._viewContainerRef.createEmbeddedView(this._templateRef)}destroy(){this._created=!1,this._viewContainerRef.clear()}enforceState(e){e&&!this._created?this.create():!e&&this._created&&this.destroy()}}let Me=(()=>{class e{constructor(){this._defaultUsed=!1,this._caseCount=0,this._lastCaseCheckIndex=0,this._lastCasesMatched=!1}set ngSwitch(e){this._ngSwitch=e,0===this._caseCount&&this._updateDefaultCases(!0)}_addCase(){return this._caseCount++}_addDefault(e){this._defaultViews||(this._defaultViews=[]),this._defaultViews.push(e)}_matchCase(e){const t=e==this._ngSwitch;return this._lastCasesMatched=this._lastCasesMatched||t,this._lastCaseCheckIndex++,this._lastCaseCheckIndex===this._caseCount&&(this._updateDefaultCases(!this._lastCasesMatched),this._lastCaseCheckIndex=0,this._lastCasesMatched=!1),t}_updateDefaultCases(e){if(this._defaultViews&&e!==this._defaultUsed){this._defaultUsed=e;for(let t=0;t{class e{constructor(e,t,n){this.ngSwitch=n,n._addCase(),this._view=new Se(e,t)}ngDoCheck(){this._view.enforceState(this.ngSwitch._matchCase(this.ngSwitchCase))}}return 
e.\u0275fac=function(t){return new(t||e)(i.Mb(i.P),i.Mb(i.L),i.Mb(Me,1))},e.\u0275dir=i.Hb({type:e,selectors:[["","ngSwitchCase",""]],inputs:{ngSwitchCase:"ngSwitchCase"}}),e})(),ke=(()=>{class e{constructor(e,t,n){n._addDefault(new Se(e,t))}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(i.P),i.Mb(i.L),i.Mb(Me,1))},e.\u0275dir=i.Hb({type:e,selectors:[["","ngSwitchDefault",""]]}),e})(),De=(()=>{class e{constructor(e,t,n){this._ngEl=e,this._differs=t,this._renderer=n,this._ngStyle=null,this._differ=null}set ngStyle(e){this._ngStyle=e,!this._differ&&e&&(this._differ=this._differs.find(e).create())}ngDoCheck(){if(this._differ){const e=this._differ.diff(this._ngStyle);e&&this._applyChanges(e)}}_setStyle(e,t){const[n,i]=e.split(".");null!=(t=null!=t&&i?`${t}${i}`:t)?this._renderer.setStyle(this._ngEl.nativeElement,n,t):this._renderer.removeStyle(this._ngEl.nativeElement,n)}_applyChanges(e){e.forEachRemovedItem(e=>this._setStyle(e.key,null)),e.forEachAddedItem(e=>this._setStyle(e.key,e.currentValue)),e.forEachChangedItem(e=>this._setStyle(e.key,e.currentValue))}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(i.m),i.Mb(i.u),i.Mb(i.E))},e.\u0275dir=i.Hb({type:e,selectors:[["","ngStyle",""]],inputs:{ngStyle:"ngStyle"}}),e})(),Te=(()=>{class e{constructor(e){this._viewContainerRef=e,this._viewRef=null,this.ngTemplateOutletContext=null,this.ngTemplateOutlet=null}ngOnChanges(e){if(this._shouldRecreateView(e)){const e=this._viewContainerRef;this._viewRef&&e.remove(e.indexOf(this._viewRef)),this._viewRef=this.ngTemplateOutlet?e.createEmbeddedView(this.ngTemplateOutlet,this.ngTemplateOutletContext):null}else this._viewRef&&this.ngTemplateOutletContext&&this._updateExistingContext(this.ngTemplateOutletContext)}_shouldRecreateView(e){const t=e.ngTemplateOutletContext;return!!e.ngTemplateOutlet||t&&this._hasContextShapeChanged(t)}_hasContextShapeChanged(e){const t=Object.keys(e.previousValue||{}),n=Object.keys(e.currentValue||{});if(t.length===n.length){for(let e of n)if(-1===t.indexOf(e))return!0;return!1}return!0}_updateExistingContext(e){for(let t of Object.keys(e))this._viewRef.context[t]=this.ngTemplateOutletContext[t]}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(i.P))},e.\u0275dir=i.Hb({type:e,selectors:[["","ngTemplateOutlet",""]],inputs:{ngTemplateOutletContext:"ngTemplateOutletContext",ngTemplateOutlet:"ngTemplateOutlet"},features:[i.wb]}),e})();function Ce(e,t){return Error(`InvalidPipeArgument: '${t}' for pipe '${Object(i.tb)(e)}'`)}class Oe{createSubscription(e,t){return e.subscribe({next:t,error:e=>{throw e}})}dispose(e){e.unsubscribe()}onDestroy(e){e.unsubscribe()}}class Re{createSubscription(e,t){return e.then(t,e=>{throw e})}dispose(e){}onDestroy(e){}}const Le=new Re,Ee=new Oe;let Ae=(()=>{class e{constructor(e){this._ref=e,this._latestValue=null,this._subscription=null,this._obj=null,this._strategy=null}ngOnDestroy(){this._subscription&&this._dispose()}transform(e){return this._obj?e!==this._obj?(this._dispose(),this.transform(e)):this._latestValue:(e&&this._subscribe(e),this._latestValue)}_subscribe(e){this._obj=e,this._strategy=this._selectStrategy(e),this._subscription=this._strategy.createSubscription(e,t=>this._updateLatestValue(e,t))}_selectStrategy(t){if(Object(i.qb)(t))return Le;if(Object(i.pb)(t))return Ee;throw Ce(e,t)}_dispose(){this._strategy.dispose(this._subscription),this._latestValue=null,this._subscription=null,this._obj=null}_updateLatestValue(e,t){e===this._obj&&(this._latestValue=t,this._ref.markForCheck())}}return e.\u0275fac=function(t){return 
new(t||e)(i.fc())},e.\u0275pipe=i.Lb({name:"async",type:e,pure:!1}),e})(),Ie=(()=>{class e{transform(t){if(!t)return t;if("string"!=typeof t)throw Ce(e,t);return t.toLowerCase()}}return e.\u0275fac=function(t){return new(t||e)},e.\u0275pipe=i.Lb({name:"lowercase",type:e,pure:!0}),e})();const Pe=/(?:[A-Za-z\xAA\xB5\xBA\xC0-\xD6\xD8-\xF6\xF8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377\u037A-\u037D\u037F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u052F\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u0860-\u086A\u08A0-\u08B4\u08B6-\u08BD\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u09FC\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0AF9\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C39\u0C3D\u0C58-\u0C5A\u0C60\u0C61\u0C80\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D3A\u0D3D\u0D4E\u0D54-\u0D56\u0D5F-\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F5\u13F8-\u13FD\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u16F1-\u16F8\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1877\u1880-\u1884\u1887-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191E\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19B0-\u19C9\u1A00-\u1A16\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1C80-\u1C88\u1CE9-\u1CEC\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C\u3041-\u3096\u309D-\u309F\u
30A1-\u30FA\u30FC-\u30FF\u3105-\u312E\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FEA\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B\uA640-\uA66E\uA67F-\uA69D\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788\uA78B-\uA7AE\uA7B0-\uA7B7\uA7F7-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB\uA8FD\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uA9E0-\uA9E4\uA9E6-\uA9EF\uA9FA-\uA9FE\uAA00-\uAA28\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA7E-\uAAAF\uAAB1\uAAB5\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E\uAB30-\uAB5A\uAB5C-\uAB65\uAB70-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]|\uD800[\uDC00-\uDC0B\uDC0D-\uDC26\uDC28-\uDC3A\uDC3C\uDC3D\uDC3F-\uDC4D\uDC50-\uDC5D\uDC80-\uDCFA\uDE80-\uDE9C\uDEA0-\uDED0\uDF00-\uDF1F\uDF2D-\uDF40\uDF42-\uDF49\uDF50-\uDF75\uDF80-\uDF9D\uDFA0-\uDFC3\uDFC8-\uDFCF]|\uD801[\uDC00-\uDC9D\uDCB0-\uDCD3\uDCD8-\uDCFB\uDD00-\uDD27\uDD30-\uDD63\uDE00-\uDF36\uDF40-\uDF55\uDF60-\uDF67]|\uD802[\uDC00-\uDC05\uDC08\uDC0A-\uDC35\uDC37\uDC38\uDC3C\uDC3F-\uDC55\uDC60-\uDC76\uDC80-\uDC9E\uDCE0-\uDCF2\uDCF4\uDCF5\uDD00-\uDD15\uDD20-\uDD39\uDD80-\uDDB7\uDDBE\uDDBF\uDE00\uDE10-\uDE13\uDE15-\uDE17\uDE19-\uDE33\uDE60-\uDE7C\uDE80-\uDE9C\uDEC0-\uDEC7\uDEC9-\uDEE4\uDF00-\uDF35\uDF40-\uDF55\uDF60-\uDF72\uDF80-\uDF91]|\uD803[\uDC00-\uDC48\uDC80-\uDCB2\uDCC0-\uDCF2]|\uD804[\uDC03-\uDC37\uDC83-\uDCAF\uDCD0-\uDCE8\uDD03-\uDD26\uDD50-\uDD72\uDD76\uDD83-\uDDB2\uDDC1-\uDDC4\uDDDA\uDDDC\uDE00-\uDE11\uDE13-\uDE2B\uDE80-\uDE86\uDE88\uDE8A-\uDE8D\uDE8F-\uDE9D\uDE9F-\uDEA8\uDEB0-\uDEDE\uDF05-\uDF0C\uDF0F\uDF10\uDF13-\uDF28\uDF2A-\uDF30\uDF32\uDF33\uDF35-\uDF39\uDF3D\uDF50\uDF5D-\uDF61]|\uD805[\uDC00-\uDC34\uDC47-\uDC4A\uDC80-\uDCAF\uDCC4\uDCC5\uDCC7\uDD80-\uDDAE\uDDD8-\uDDDB\uDE00-\uDE2F\uDE44\uDE80-\uDEAA\uDF00-\uDF19]|\uD806[\uDCA0-\uDCDF\uDCFF\uDE00\uDE0B-\uDE32\uDE3A\uDE50\uDE5C-\uDE83\uDE86-\uDE89\uDEC0-\uDEF8]|\uD807[\uDC00-\uDC08\uDC0A-\uDC2E\uDC40\uDC72-\uDC8F\uDD00-\uDD06\uDD08\uDD09\uDD0B-\uDD30\uDD46]|\uD808[\uDC00-\uDF99]|\uD809[\uDC80-\uDD43]|[\uD80C\uD81C-\uD820\uD840-\uD868\uD86A-\uD86C\uD86F-\uD872\uD874-\uD879][\uDC00-\uDFFF]|\uD80D[\uDC00-\uDC2E]|\uD811[\uDC00-\uDE46]|\uD81A[\uDC00-\uDE38\uDE40-\uDE5E\uDED0-\uDEED\uDF00-\uDF2F\uDF40-\uDF43\uDF63-\uDF77\uDF7D-\uDF8F]|\uD81B[\uDF00-\uDF44\uDF50\uDF93-\uDF9F\uDFE0\uDFE1]|\uD821[\uDC00-\uDFEC]|\uD822[\uDC00-\uDEF2]|\uD82C[\uDC00-\uDD1E\uDD70-\uDEFB]|\uD82F[\uDC00-\uDC6A\uDC70-\uDC7C\uDC80-\uDC88\uDC90-\uDC99]|\uD835[\uDC00-\uDC54\uDC56-\uDC9C\uDC9E\uDC9F\uDCA2\uDCA5\uDCA6\uDCA9-\uDCAC\uDCAE-\uDCB9\uDCBB\uDCBD-\uDCC3\uDCC5-\uDD05\uDD07-\uDD0A\uDD0D-\uDD14\uDD16-\uDD1C\uDD1E-\uDD39\uDD3B-\uDD3E\uDD40-\uDD44\uDD46\uDD4A-\uDD50\uDD52-\uDEA5\uDEA8-\uDEC0\uDEC2-\uDEDA\uDEDC-\uDEFA\uDEFC-\uDF14\uDF16-\uDF34\uDF36-\uDF4E\uDF50-\uDF6E\uDF70-\uDF88\uDF8A-\uDFA8\uDFAA-\uDFC2\uDFC4-\uDFCB]|\uD83A[\uDC00-\uDCC4\uDD00-\uDD43]|\uD83B[\uDE00-\uDE03\uDE05-\uDE1F\uDE21\uDE22\uDE24\uDE27\uDE29-\uDE32\uDE34-\uDE37\uDE39\uDE3B\uDE42\uDE47\uDE49\uDE4B\uDE4D-\uDE4F\uDE51\uDE52\uDE54\uDE57\uDE59\uDE5B\uDE5D\uDE5F\uDE61\uDE62\uDE64\uDE67-\uDE6A\uDE6C-\uDE72\uDE74-\uDE77\uDE79-\uDE7C\uDE7E\uDE8
0-\uDE89\uDE8B-\uDE9B\uDEA1-\uDEA3\uDEA5-\uDEA9\uDEAB-\uDEBB]|\uD869[\uDC00-\uDED6\uDF00-\uDFFF]|\uD86D[\uDC00-\uDF34\uDF40-\uDFFF]|\uD86E[\uDC00-\uDC1D\uDC20-\uDFFF]|\uD873[\uDC00-\uDEA1\uDEB0-\uDFFF]|\uD87A[\uDC00-\uDFE0]|\uD87E[\uDC00-\uDE1D])\S*/g;let je=(()=>{class e{transform(t){if(!t)return t;if("string"!=typeof t)throw Ce(e,t);return t.replace(Pe,e=>e[0].toUpperCase()+e.substr(1).toLowerCase())}}return e.\u0275fac=function(t){return new(t||e)},e.\u0275pipe=i.Lb({name:"titlecase",type:e,pure:!0}),e})(),Ne=(()=>{class e{transform(t){if(!t)return t;if("string"!=typeof t)throw Ce(e,t);return t.toUpperCase()}}return e.\u0275fac=function(t){return new(t||e)},e.\u0275pipe=i.Lb({name:"uppercase",type:e,pure:!0}),e})(),Fe=(()=>{class e{constructor(e){this.locale=e}transform(t,n="mediumDate",i,r){if(null==t||""===t||t!=t)return null;try{return Q(t,n,r||this.locale,i)}catch(s){throw Ce(e,s.message)}}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(i.v))},e.\u0275pipe=i.Lb({name:"date",type:e,pure:!0}),e})();const Ye=/#/g;let ze=(()=>{class e{constructor(e){this._localization=e}transform(t,n,i){if(null==t)return"";if("object"!=typeof n||null===n)throw Ce(e,n);return n[function(e,t,n,i){let r="="+e;if(t.indexOf(r)>-1)return r;if(r=n.getPluralCategory(e,i),t.indexOf(r)>-1)return r;if(t.indexOf("other")>-1)return"other";throw new Error(`No plural message found for value "${e}"`)}(t,Object.keys(n),this._localization,i)].replace(Ye,t.toString())}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(he))},e.\u0275pipe=i.Lb({name:"i18nPlural",type:e,pure:!0}),e})(),$e=(()=>{class e{transform(e){return JSON.stringify(e,null,2)}}return e.\u0275fac=function(t){return new(t||e)},e.\u0275pipe=i.Lb({name:"json",type:e,pure:!1}),e})(),He=(()=>{class e{constructor(e){this.differs=e,this.keyValues=[]}transform(e,t=We){if(!e||!(e instanceof Map)&&"object"!=typeof e)return null;this.differ||(this.differ=this.differs.find(e).create());const n=this.differ.diff(e);return n&&(this.keyValues=[],n.forEachItem(e=>{this.keyValues.push({key:e.key,value:e.currentValue})}),this.keyValues.sort(t)),this.keyValues}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(i.u))},e.\u0275pipe=i.Lb({name:"keyvalue",type:e,pure:!1}),e})();function We(e,t){const n=e.key,i=t.key;if(n===i)return 0;if(void 0===n)return 1;if(void 0===i)return-1;if(null===n)return 1;if(null===i)return-1;if("string"==typeof n&&"string"==typeof i)return n{class e{constructor(e){this._locale=e}transform(t,n,i){if(Ue(t))return null;i=i||this._locale;try{return function(e,t,n){return le(e,ue(Y(t,D.Decimal),F(t,L.MinusSign)),t,L.Group,L.Decimal,n)}(Ge(t),i,n)}catch(r){throw Ce(e,r.message)}}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(i.v))},e.\u0275pipe=i.Lb({name:"number",type:e,pure:!0}),e})(),Be=(()=>{class e{constructor(e){this._locale=e}transform(t,n,i){if(Ue(t))return null;i=i||this._locale;try{return function(e,t,n){return le(e,ue(Y(t,D.Percent),F(t,L.MinusSign)),t,L.Group,L.Decimal,n,!0).replace(new RegExp("%","g"),F(t,L.PercentSign))}(Ge(t),i,n)}catch(r){throw Ce(e,r.message)}}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(i.v))},e.\u0275pipe=i.Lb({name:"percent",type:e,pure:!0}),e})();function Ue(e){return null==e||""===e||e!=e}function Ge(e){if("string"==typeof e&&!isNaN(Number(e)-parseFloat(e)))return Number(e);if("number"!=typeof e)throw new Error(e+" is not a number");return e}let qe=(()=>{class e{}return e.\u0275mod=i.Kb({type:e}),e.\u0275inj=i.Jb({factory:function(t){return 
new(t||e)},providers:[{provide:he,useClass:fe}]}),e})();const Je="browser";function Qe(e){return e===Je}let Ke=(()=>{class e{}return e.\u0275prov=Object(i.Ib)({token:e,providedIn:"root",factory:()=>new Ze(Object(i.dc)(c),window,Object(i.dc)(i.n))}),e})();class Ze{constructor(e,t,n){this.document=e,this.window=t,this.errorHandler=n,this.offset=()=>[0,0]}setOffset(e){this.offset=Array.isArray(e)?()=>e:e}getScrollPosition(){return this.supportsScrolling()?[this.window.scrollX,this.window.scrollY]:[0,0]}scrollToPosition(e){this.supportsScrolling()&&this.window.scrollTo(e[0],e[1])}scrollToAnchor(e){if(this.supportsScrolling()){const t=this.document.getElementById(e)||this.document.getElementsByName(e)[0];t&&this.scrollToElement(t)}}setHistoryScrollRestoration(e){if(this.supportScrollRestoration()){const t=this.window.history;t&&t.scrollRestoration&&(t.scrollRestoration=e)}}scrollToElement(e){const t=e.getBoundingClientRect(),n=t.left+this.window.pageXOffset,i=t.top+this.window.pageYOffset,r=this.offset();this.window.scrollTo(n-r[0],i-r[1])}supportScrollRestoration(){try{if(!this.window||!this.window.scrollTo)return!1;const e=Xe(this.window.history)||Xe(Object.getPrototypeOf(this.window.history));return!(!e||!e.writable&&!e.set)}catch(e){return!1}}supportsScrolling(){try{return!!this.window.scrollTo}catch(e){return!1}}}function Xe(e){return Object.getOwnPropertyDescriptor(e,"scrollRestoration")}},SatO:function(e,t,n){!function(e){"use strict";e.defineLocale("zh-hk",{months:"\u4e00\u6708_\u4e8c\u6708_\u4e09\u6708_\u56db\u6708_\u4e94\u6708_\u516d\u6708_\u4e03\u6708_\u516b\u6708_\u4e5d\u6708_\u5341\u6708_\u5341\u4e00\u6708_\u5341\u4e8c\u6708".split("_"),monthsShort:"1\u6708_2\u6708_3\u6708_4\u6708_5\u6708_6\u6708_7\u6708_8\u6708_9\u6708_10\u6708_11\u6708_12\u6708".split("_"),weekdays:"\u661f\u671f\u65e5_\u661f\u671f\u4e00_\u661f\u671f\u4e8c_\u661f\u671f\u4e09_\u661f\u671f\u56db_\u661f\u671f\u4e94_\u661f\u671f\u516d".split("_"),weekdaysShort:"\u9031\u65e5_\u9031\u4e00_\u9031\u4e8c_\u9031\u4e09_\u9031\u56db_\u9031\u4e94_\u9031\u516d".split("_"),weekdaysMin:"\u65e5_\u4e00_\u4e8c_\u4e09_\u56db_\u4e94_\u516d".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY/MM/DD",LL:"YYYY\u5e74M\u6708D\u65e5",LLL:"YYYY\u5e74M\u6708D\u65e5 HH:mm",LLLL:"YYYY\u5e74M\u6708D\u65e5dddd HH:mm",l:"YYYY/M/D",ll:"YYYY\u5e74M\u6708D\u65e5",lll:"YYYY\u5e74M\u6708D\u65e5 HH:mm",llll:"YYYY\u5e74M\u6708D\u65e5dddd HH:mm"},meridiemParse:/\u51cc\u6668|\u65e9\u4e0a|\u4e0a\u5348|\u4e2d\u5348|\u4e0b\u5348|\u665a\u4e0a/,meridiemHour:function(e,t){return 12===e&&(e=0),"\u51cc\u6668"===t||"\u65e9\u4e0a"===t||"\u4e0a\u5348"===t?e:"\u4e2d\u5348"===t?e>=11?e:e+12:"\u4e0b\u5348"===t||"\u665a\u4e0a"===t?e+12:void 0},meridiem:function(e,t,n){var i=100*e+t;return i<600?"\u51cc\u6668":i<900?"\u65e9\u4e0a":i<1200?"\u4e0a\u5348":1200===i?"\u4e2d\u5348":i<1800?"\u4e0b\u5348":"\u665a\u4e0a"},calendar:{sameDay:"[\u4eca\u5929]LT",nextDay:"[\u660e\u5929]LT",nextWeek:"[\u4e0b]ddddLT",lastDay:"[\u6628\u5929]LT",lastWeek:"[\u4e0a]ddddLT",sameElse:"L"},dayOfMonthOrdinalParse:/\d{1,2}(\u65e5|\u6708|\u9031)/,ordinal:function(e,t){switch(t){case"d":case"D":case"DDD":return e+"\u65e5";case"M":return e+"\u6708";case"w":case"W":return e+"\u9031";default:return e}},relativeTime:{future:"%s\u5f8c",past:"%s\u524d",s:"\u5e7e\u79d2",ss:"%d \u79d2",m:"1 \u5206\u9418",mm:"%d \u5206\u9418",h:"1 \u5c0f\u6642",hh:"%d \u5c0f\u6642",d:"1 \u5929",dd:"%d \u5929",M:"1 \u500b\u6708",MM:"%d \u500b\u6708",y:"1 \u5e74",yy:"%d 
\u5e74"}})}(n("wd/R"))},SeVD:function(e,t,n){"use strict";n.d(t,"a",(function(){return u}));var i=n("ngJS"),r=n("NJ4a"),s=n("Lhse"),o=n("kJWO"),a=n("I55L"),c=n("c2HN"),l=n("XoHu");const u=e=>{if(e&&"function"==typeof e[o.a])return u=e,e=>{const t=u[o.a]();if("function"!=typeof t.subscribe)throw new TypeError("Provided object does not correctly implement Symbol.observable");return t.subscribe(e)};if(Object(a.a)(e))return Object(i.a)(e);if(Object(c.a)(e))return n=e,e=>(n.then(t=>{e.closed||(e.next(t),e.complete())},t=>e.error(t)).then(null,r.a),e);if(e&&"function"==typeof e[s.a])return t=e,e=>{const n=t[s.a]();for(;;){let t;try{t=n.next()}catch(i){return e.error(i),e}if(t.done){e.complete();break}if(e.next(t.value),e.closed)break}return"function"==typeof n.return&&e.add(()=>{n.return&&n.return()}),e};{const t=Object(l.a)(e)?"an invalid object":`'${e}'`;throw new TypeError(`You provided ${t} where a stream was expected. You can provide an Observable, Promise, Array, or Iterable.`)}var t,n,u}},SpAZ:function(e,t,n){"use strict";function i(e){return e}n.d(t,"a",(function(){return i}))},SxV6:function(e,t,n){"use strict";n.d(t,"a",(function(){return l}));var i=n("sVev"),r=n("pLZG"),s=n("IzEk"),o=n("xbPD"),a=n("XDbj"),c=n("SpAZ");function l(e,t){const n=arguments.length>=2;return l=>l.pipe(e?Object(r.a)((t,n)=>e(t,n,l)):c.a,Object(s.a)(1),n?Object(o.a)(t):Object(a.a)(()=>new i.a))}},TFwu:function(e,t,n){"use strict";var i=n("25cm"),r=n("jN84"),s=n("mkut");t.a=function(e){return Object(i.a)(e,s.a,r.a)}},TJUb:function(e,t,n){"use strict";n.d(t,"a",(function(){return o}));var i=n("LvDl"),r=n.n(i),s=n("8Y7J");let o=(()=>{class e{transform(e,t){return r.a.isPlainObject(t)?r.a.get(t,e,e):e}}return e.\u0275fac=function(t){return new(t||e)},e.\u0275pipe=s.Lb({name:"map",type:e,pure:!0}),e})()},TKcr:function(e,t,n){"use strict";n.d(t,"a",(function(){return i}));class i{propertyValue(e){return getComputedStyle(document.body).getPropertyValue("--"+e)}}},TWQb:function(e,t,n){var i=n("/GqU"),r=n("UMSQ"),s=n("I8vh"),o=function(e){return function(t,n,o){var a,c=i(t),l=r(c.length),u=s(o,l);if(e&&n!=n){for(;l>u;)if((a=c[u++])!=a)return!0}else for(;l>u;u++)if((e||u in c)&&c[u]===n)return e||u||0;return!e&&-1}};e.exports={includes:o(!0),indexOf:o(!1)}},TYzs:function(e,t,n){"use strict";n.d(t,"a",(function(){return r}));var i=n("8Y7J");let r=(()=>{class e{transform(e){const t=parseInt(e,10);return isNaN(t)?e:e+(1===Math.floor(t/10)?"th":t%10==1?"st":t%10==2?"nd":t%10==3?"rd":"th")}}return e.\u0275fac=function(t){return new(t||e)},e.\u0275pipe=i.Lb({name:"ordinal",type:e,pure:!0}),e})()},TeQF:function(e,t,n){"use strict";var i=n("I+eb"),r=n("tycR").filter;i({target:"Array",proto:!0,forced:!n("Hd5f")("filter")},{filter:function(e){return r(this,e,arguments.length>1?arguments[1]:void 0)}})},TnHx:function(e,t,n){"use strict";var i=n("25cm"),r=n("n561"),s=n("4/q3");t.a=function(e){return Object(i.a)(e,s.a,r.a)}},U6JX:function(e,t,n){"use strict";t.a=function(e,t){return function(n){return e(t(n))}}},UDhR:function(e,t,n){!function(e){"use strict";e.defineLocale("id",{months:"Januari_Februari_Maret_April_Mei_Juni_Juli_Agustus_September_Oktober_November_Desember".split("_"),monthsShort:"Jan_Feb_Mar_Apr_Mei_Jun_Jul_Agt_Sep_Okt_Nov_Des".split("_"),weekdays:"Minggu_Senin_Selasa_Rabu_Kamis_Jumat_Sabtu".split("_"),weekdaysShort:"Min_Sen_Sel_Rab_Kam_Jum_Sab".split("_"),weekdaysMin:"Mg_Sn_Sl_Rb_Km_Jm_Sb".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY [pukul] HH.mm",LLLL:"dddd, D 
MMMM YYYY [pukul] HH.mm"},meridiemParse:/pagi|siang|sore|malam/,meridiemHour:function(e,t){return 12===e&&(e=0),"pagi"===t?e:"siang"===t?e>=11?e:e+12:"sore"===t||"malam"===t?e+12:void 0},meridiem:function(e,t,n){return e<11?"pagi":e<15?"siang":e<19?"sore":"malam"},calendar:{sameDay:"[Hari ini pukul] LT",nextDay:"[Besok pukul] LT",nextWeek:"dddd [pukul] LT",lastDay:"[Kemarin pukul] LT",lastWeek:"dddd [lalu pukul] LT",sameElse:"L"},relativeTime:{future:"dalam %s",past:"%s yang lalu",s:"beberapa detik",ss:"%d detik",m:"semenit",mm:"%d menit",h:"sejam",hh:"%d jam",d:"sehari",dd:"%d hari",M:"sebulan",MM:"%d bulan",y:"setahun",yy:"%d tahun"},week:{dow:0,doy:6}})}(n("wd/R"))},UMSQ:function(e,t,n){var i=n("ppGB"),r=Math.min;e.exports=function(e){return e>0?r(i(e),9007199254740991):0}},USCx:function(e,t,n){!function(e){"use strict";e.defineLocale("ga",{months:["Ean\xe1ir","Feabhra","M\xe1rta","Aibre\xe1n","Bealtaine","Meitheamh","I\xfail","L\xfanasa","Me\xe1n F\xf3mhair","Deireadh F\xf3mhair","Samhain","Nollaig"],monthsShort:["Ean","Feabh","M\xe1rt","Aib","Beal","Meith","I\xfail","L\xfan","M.F.","D.F.","Samh","Noll"],monthsParseExact:!0,weekdays:["D\xe9 Domhnaigh","D\xe9 Luain","D\xe9 M\xe1irt","D\xe9 C\xe9adaoin","D\xe9ardaoin","D\xe9 hAoine","D\xe9 Sathairn"],weekdaysShort:["Domh","Luan","M\xe1irt","C\xe9ad","D\xe9ar","Aoine","Sath"],weekdaysMin:["Do","Lu","M\xe1","C\xe9","D\xe9","A","Sa"],longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Inniu ag] LT",nextDay:"[Am\xe1rach ag] LT",nextWeek:"dddd [ag] LT",lastDay:"[Inn\xe9 ag] LT",lastWeek:"dddd [seo caite] [ag] LT",sameElse:"L"},relativeTime:{future:"i %s",past:"%s \xf3 shin",s:"c\xfapla soicind",ss:"%d soicind",m:"n\xf3im\xe9ad",mm:"%d n\xf3im\xe9ad",h:"uair an chloig",hh:"%d uair an chloig",d:"l\xe1",dd:"%d l\xe1",M:"m\xed",MM:"%d m\xedonna",y:"bliain",yy:"%d bliain"},dayOfMonthOrdinalParse:/\d{1,2}(d|na|mh)/,ordinal:function(e){return e+(1===e?"d":e%10==2?"na":"mh")},week:{dow:1,doy:4}})}(n("wd/R"))},UTVS:function(e,t,n){var i=n("ewvW"),r={}.hasOwnProperty;e.exports=function(e,t){return r.call(i(e),t)}},UpQW:function(e,t,n){!function(e){"use strict";var t=["\u062c\u0646\u0648\u0631\u06cc","\u0641\u0631\u0648\u0631\u06cc","\u0645\u0627\u0631\u0686","\u0627\u067e\u0631\u06cc\u0644","\u0645\u0626\u06cc","\u062c\u0648\u0646","\u062c\u0648\u0644\u0627\u0626\u06cc","\u0627\u06af\u0633\u062a","\u0633\u062a\u0645\u0628\u0631","\u0627\u06a9\u062a\u0648\u0628\u0631","\u0646\u0648\u0645\u0628\u0631","\u062f\u0633\u0645\u0628\u0631"],n=["\u0627\u062a\u0648\u0627\u0631","\u067e\u06cc\u0631","\u0645\u0646\u06af\u0644","\u0628\u062f\u06be","\u062c\u0645\u0639\u0631\u0627\u062a","\u062c\u0645\u0639\u06c1","\u06c1\u0641\u062a\u06c1"];e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd\u060c D MMMM YYYY HH:mm"},meridiemParse:/\u0635\u0628\u062d|\u0634\u0627\u0645/,isPM:function(e){return"\u0634\u0627\u0645"===e},meridiem:function(e,t,n){return e<12?"\u0635\u0628\u062d":"\u0634\u0627\u0645"},calendar:{sameDay:"[\u0622\u062c \u0628\u0648\u0642\u062a] LT",nextDay:"[\u06a9\u0644 \u0628\u0648\u0642\u062a] LT",nextWeek:"dddd [\u0628\u0648\u0642\u062a] LT",lastDay:"[\u06af\u0630\u0634\u062a\u06c1 \u0631\u0648\u0632 \u0628\u0648\u0642\u062a] LT",lastWeek:"[\u06af\u0630\u0634\u062a\u06c1] dddd [\u0628\u0648\u0642\u062a] 
LT",sameElse:"L"},relativeTime:{future:"%s \u0628\u0639\u062f",past:"%s \u0642\u0628\u0644",s:"\u0686\u0646\u062f \u0633\u06cc\u06a9\u0646\u0688",ss:"%d \u0633\u06cc\u06a9\u0646\u0688",m:"\u0627\u06cc\u06a9 \u0645\u0646\u0679",mm:"%d \u0645\u0646\u0679",h:"\u0627\u06cc\u06a9 \u06af\u06be\u0646\u0679\u06c1",hh:"%d \u06af\u06be\u0646\u0679\u06d2",d:"\u0627\u06cc\u06a9 \u062f\u0646",dd:"%d \u062f\u0646",M:"\u0627\u06cc\u06a9 \u0645\u0627\u06c1",MM:"%d \u0645\u0627\u06c1",y:"\u0627\u06cc\u06a9 \u0633\u0627\u0644",yy:"%d \u0633\u0627\u0644"},preparse:function(e){return e.replace(/\u060c/g,",")},postformat:function(e){return e.replace(/,/g,"\u060c")},week:{dow:1,doy:4}})}(n("wd/R"))},Ur1D:function(e,t,n){!function(e){"use strict";e.defineLocale("ss",{months:"Bhimbidvwane_Indlovana_Indlov'lenkhulu_Mabasa_Inkhwekhweti_Inhlaba_Kholwane_Ingci_Inyoni_Imphala_Lweti_Ingongoni".split("_"),monthsShort:"Bhi_Ina_Inu_Mab_Ink_Inh_Kho_Igc_Iny_Imp_Lwe_Igo".split("_"),weekdays:"Lisontfo_Umsombuluko_Lesibili_Lesitsatfu_Lesine_Lesihlanu_Umgcibelo".split("_"),weekdaysShort:"Lis_Umb_Lsb_Les_Lsi_Lsh_Umg".split("_"),weekdaysMin:"Li_Us_Lb_Lt_Ls_Lh_Ug".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY h:mm A",LLLL:"dddd, D MMMM YYYY h:mm A"},calendar:{sameDay:"[Namuhla nga] LT",nextDay:"[Kusasa nga] LT",nextWeek:"dddd [nga] LT",lastDay:"[Itolo nga] LT",lastWeek:"dddd [leliphelile] [nga] LT",sameElse:"L"},relativeTime:{future:"nga %s",past:"wenteka nga %s",s:"emizuzwana lomcane",ss:"%d mzuzwana",m:"umzuzu",mm:"%d emizuzu",h:"lihora",hh:"%d emahora",d:"lilanga",dd:"%d emalanga",M:"inyanga",MM:"%d tinyanga",y:"umnyaka",yy:"%d iminyaka"},meridiemParse:/ekuseni|emini|entsambama|ebusuku/,meridiem:function(e,t,n){return e<11?"ekuseni":e<15?"emini":e<19?"entsambama":"ebusuku"},meridiemHour:function(e,t){return 12===e&&(e=0),"ekuseni"===t?e:"emini"===t?e>=11?e:e+12:"entsambama"===t||"ebusuku"===t?0===e?0:e+12:void 0},dayOfMonthOrdinalParse:/\d{1,2}/,ordinal:"%d",week:{dow:1,doy:4}})}(n("wd/R"))},UudT:function(e,t,n){"use strict";var i=n("U6JX"),r=Object(i.a)(Object.getPrototypeOf,Object);t.a=r},UxlC:function(e,t,n){"use strict";var i=n("14Sl"),r=n("glrk"),s=n("UMSQ"),o=n("ppGB"),a=n("HYAF"),c=n("iqWW"),l=n("DLK6"),u=n("FMNM"),d=Math.max,h=Math.min;i("replace",2,(function(e,t,n,i){var f=i.REGEXP_REPLACE_SUBSTITUTES_UNDEFINED_CAPTURE,p=i.REPLACE_KEEPS_$0,m=f?"$":"$0";return[function(n,i){var r=a(this),s=null==n?void 0:n[e];return void 0!==s?s.call(n,r,i):t.call(String(r),n,i)},function(e,i){if(!f&&p||"string"==typeof i&&-1===i.indexOf(m)){var a=n(t,e,this,i);if(a.done)return a.value}var b=r(e),g=String(this),_="function"==typeof i;_||(i=String(i));var y=b.global;if(y){var v=b.unicode;b.lastIndex=0}for(var w=[];;){var S=u(b,g);if(null===S)break;if(w.push(S),!y)break;""===String(S[0])&&(b.lastIndex=c(g,s(b.lastIndex),v))}for(var M,x="",k=0,D=0;D=k&&(x+=g.slice(k,C)+A,k=C+T.length)}return x+g.slice(k)}]}))},"V/fk":function(e,t,n){"use strict";n.d(t,"a",(function(){return f}));var i=n("SVse"),r=n("s7LF"),s=n("iInd"),o=n("G0yt"),a=n("Hicy"),c=n("hrfs"),l=n("WF9J"),u=n("yGOH"),d=n("9Xeq"),h=n("8Y7J");let f=(()=>{class e{}return e.\u0275mod=h.Kb({type:e}),e.\u0275inj=h.Jb({factory:function(t){return new(t||e)},providers:[],imports:[[i.c,r.m,r.x,o.c,o.y,o.A,o.F,c.b,r.x,d.a,u.a,o.l,a.b,l.b,s.i,o.h,o.C]]}),e})()},V2x9:function(e,t,n){!function(e){"use 
strict";e.defineLocale("tet",{months:"Janeiru_Fevereiru_Marsu_Abril_Maiu_Ju\xf1u_Jullu_Agustu_Setembru_Outubru_Novembru_Dezembru".split("_"),monthsShort:"Jan_Fev_Mar_Abr_Mai_Jun_Jul_Ago_Set_Out_Nov_Dez".split("_"),weekdays:"Domingu_Segunda_Tersa_Kuarta_Kinta_Sesta_Sabadu".split("_"),weekdaysShort:"Dom_Seg_Ters_Kua_Kint_Sest_Sab".split("_"),weekdaysMin:"Do_Seg_Te_Ku_Ki_Ses_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Ohin iha] LT",nextDay:"[Aban iha] LT",nextWeek:"dddd [iha] LT",lastDay:"[Horiseik iha] LT",lastWeek:"dddd [semana kotuk] [iha] LT",sameElse:"L"},relativeTime:{future:"iha %s",past:"%s liuba",s:"segundu balun",ss:"segundu %d",m:"minutu ida",mm:"minutu %d",h:"oras ida",hh:"oras %d",d:"loron ida",dd:"loron %d",M:"fulan ida",MM:"fulan %d",y:"tinan ida",yy:"tinan %d"},dayOfMonthOrdinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(e){var t=e%10;return e+(1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th")},week:{dow:1,doy:4}})}(n("wd/R"))},VRyK:function(e,t,n){"use strict";n.d(t,"a",(function(){return a}));var i=n("HDdC"),r=n("z+Ro"),s=n("bHdf"),o=n("yCtX");function a(...e){let t=Number.POSITIVE_INFINITY,n=null,a=e[e.length-1];return Object(r.a)(a)?(n=e.pop(),e.length>1&&"number"==typeof e[e.length-1]&&(t=e.pop())):"number"==typeof a&&(t=e.pop()),null===n&&1===e.length&&e[0]instanceof i.a?e[0]:Object(s.a)(t)(Object(o.a)(e,n))}},VTlA:function(e,t,n){"use strict";n.d(t,"a",(function(){return o}));var i=n("mSOc"),r=n("ufoC"),s=n("8Y7J");let o=(()=>{class e{constructor(e,t){this.taskMessageService=e,this.summaryService=t}init(e,t,n,i,r,s,o){this.getUpdate=e,this.preProcessing=t,this.setList=n,this.onFetchError=i,this.taskFilter=r,this.itemFilter=s,this.builders=o||{},this.summaryDataSubscription=this.summaryService.subscribe(e=>{this.summary=e,this.fetch()},this.onFetchError)}fetch(){this.getUpdate().subscribe(e=>{this.updateData(e,this.summary.executing_tasks.filter(this.taskFilter))},this.onFetchError)}updateData(e,t){const n=this.preProcessing?this.preProcessing(e):e;this.addMissing(n,t),n.forEach(e=>{const n=t.filter(t=>this.itemFilter(e,t));e.cdExecuting=this.getTaskAction(n)}),this.setList(n)}addMissing(e,t){const n=this.builders.default;t.forEach(t=>{const i=e.find(e=>this.itemFilter(e,t)),r=this.builders[t.name];i||!r&&!n||e.push(r?r(t.metadata):n(t.metadata))})}getTaskAction(e){if(0!==e.length)return e.map(e=>{const t=e.progress?` ${e.progress}%`:"";return this.taskMessageService.getRunningText(e)+"..."+t}).join(", ")}ngOnDestroy(){this.summaryDataSubscription&&this.summaryDataSubscription.unsubscribe()}}return e.\u0275fac=function(t){return new(t||e)(s.dc(r.a),s.dc(i.a))},e.\u0275prov=s.Ib({token:e,factory:e.\u0275fac}),e})()},VXsX:function(e,t,n){"use strict";n.d(t,"a",(function(){return a}));var i=n("LvDl"),r=n.n(i),s=n("8Y7J");class o{constructor(e,t,n){this.name=e,this.metadata=t,this.onTaskFinished=n}}let a=(()=>{class e{constructor(){this.subscriptions=[]}init(e){return e.subscribe(e=>{const t=e.executing_tasks,n=e.finished_tasks,i=[];for(const r of this.subscriptions){const e=this._getTask(r,n),s=this._getTask(r,t);null!==e&&null===s&&r.onTaskFinished(e),null!==s&&i.push(r),this.subscriptions=i}})}subscribe(e,t,n){this.subscriptions.push(new o(e,t,n))}_getTask(e,t){for(const n of t)if(n.name===e.name&&r.a.isEqual(n.metadata,e.metadata))return n;return null}}return e.\u0275fac=function(t){return 
new(t||e)},e.\u0275prov=s.Ib({token:e,factory:e.\u0275fac,providedIn:"root"}),e})()},Vclq:function(e,t,n){!function(e){"use strict";var t="ene._feb._mar._abr._may._jun._jul._ago._sep._oct._nov._dic.".split("_"),n="ene_feb_mar_abr_may_jun_jul_ago_sep_oct_nov_dic".split("_"),i=[/^ene/i,/^feb/i,/^mar/i,/^abr/i,/^may/i,/^jun/i,/^jul/i,/^ago/i,/^sep/i,/^oct/i,/^nov/i,/^dic/i],r=/^(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre|ene\.?|feb\.?|mar\.?|abr\.?|may\.?|jun\.?|jul\.?|ago\.?|sep\.?|oct\.?|nov\.?|dic\.?)/i;e.defineLocale("es-us",{months:"enero_febrero_marzo_abril_mayo_junio_julio_agosto_septiembre_octubre_noviembre_diciembre".split("_"),monthsShort:function(e,i){return e?/-MMM-/.test(i)?n[e.month()]:t[e.month()]:t},monthsRegex:r,monthsShortRegex:r,monthsStrictRegex:/^(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre)/i,monthsShortStrictRegex:/^(ene\.?|feb\.?|mar\.?|abr\.?|may\.?|jun\.?|jul\.?|ago\.?|sep\.?|oct\.?|nov\.?|dic\.?)/i,monthsParse:i,longMonthsParse:i,shortMonthsParse:i,weekdays:"domingo_lunes_martes_mi\xe9rcoles_jueves_viernes_s\xe1bado".split("_"),weekdaysShort:"dom._lun._mar._mi\xe9._jue._vie._s\xe1b.".split("_"),weekdaysMin:"do_lu_ma_mi_ju_vi_s\xe1".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"MM/DD/YYYY",LL:"D [de] MMMM [de] YYYY",LLL:"D [de] MMMM [de] YYYY h:mm A",LLLL:"dddd, D [de] MMMM [de] YYYY h:mm A"},calendar:{sameDay:function(){return"[hoy a la"+(1!==this.hours()?"s":"")+"] LT"},nextDay:function(){return"[ma\xf1ana a la"+(1!==this.hours()?"s":"")+"] LT"},nextWeek:function(){return"dddd [a la"+(1!==this.hours()?"s":"")+"] LT"},lastDay:function(){return"[ayer a la"+(1!==this.hours()?"s":"")+"] LT"},lastWeek:function(){return"[el] dddd [pasado a la"+(1!==this.hours()?"s":"")+"] LT"},sameElse:"L"},relativeTime:{future:"en %s",past:"hace %s",s:"unos segundos",ss:"%d segundos",m:"un minuto",mm:"%d minutos",h:"una hora",hh:"%d horas",d:"un d\xeda",dd:"%d d\xedas",w:"una semana",ww:"%d semanas",M:"un mes",MM:"%d meses",y:"un a\xf1o",yy:"%d a\xf1os"},dayOfMonthOrdinalParse:/\d{1,2}\xba/,ordinal:"%d\xba",week:{dow:0,doy:6}})}(n("wd/R"))},Vhfg:function(e,t,n){"use strict";n.d(t,"a",(function(){return a}));var i=n("lJxs"),r=n("WE5d"),s=n("ej+x"),o=n("8Y7J");let a=(()=>{class e{constructor(e){this.featureToggles=e}canActivate(e){return this.featureToggles.get().pipe(Object(i.a)(t=>{if(!1===t[e.routeConfig.path])throw new r.b;return!0}))}canActivateChild(e){return this.canActivate(e.parent)}}return e.\u0275fac=function(t){return new(t||e)(o.dc(s.a))},e.\u0275prov=o.Ib({token:e,factory:e.\u0275fac,providedIn:"root"}),e})()},VpIT:function(e,t,n){var i=n("xDBR"),r=n("xs3f");(e.exports=function(e,t){return r[e]||(r[e]=void 0!==t?t:{})})("versions",[]).push({version:"3.12.1",mode:i?"pure":"global",copyright:"\xa9 2021 Denis Pushkarev (zloirock.ru)"})},Vu81:function(e,t,n){var i=n("0GbY"),r=n("JBy8"),s=n("dBg+"),o=n("glrk");e.exports=i("Reflect","ownKeys")||function(e){var t=r.f(o(e)),n=s.f;return n?t.concat(n(e)):t}},VxPD:function(e,t,n){"use strict";n.d(t,"a",(function(){return s}));var i=n("yJti"),r=n("e2NH");class s extends r.a{constructor(e=i.a.ValueOk,t=""){switch(super(),e){case i.a.ValueOk:this.type="light",this.msg="";break;case i.a.ValueNone:this.type="info",this.msg=(t?"Retrieving data for " + t + ".":"Retrieving data.")+" "+"Please wait...";break;case i.a.ValueStale:this.type="warning",this.msg=t?"Displaying previously cached data for " + t + ".":"Displaying 
previously cached data.";break;case i.a.ValueException:this.type="danger",this.msg=(t?"Could not load data for " + t + ".":"Could not load data.")+" "+"Please check the cluster health."}}}},WE5d:function(e,t,n){"use strict";n.d(t,"a",(function(){return r})),n.d(t,"b",(function(){return s})),n.d(t,"c",(function(){return o}));var i=n("oxzT");class r extends Error{}class s extends r{constructor(){super(...arguments),this.header="Page Not Found",this.message="Sorry, we couldn\u2019t find what you were looking for.\n The page you requested may have been changed or moved.",this.icon=i.a.warning}}class o extends r{constructor(){super(...arguments),this.header="User Denied",this.message="Sorry, the user does not exist in Ceph.\n You'll be logged out from the Identity Provider when you retry logging in.",this.icon=i.a.warning}}},WF9J:function(e,t,n){"use strict";n.d(t,"a",(function(){return z})),n.d(t,"b",(function(){return $})),n("TeQF"),n("QWBl"),n("4mDm"),n("zKZe"),n("07d7"),n("4l63"),n("PKPk"),n("ENF9"),n("3bBZ");var i=n("hKI/"),r=n.n(i),s=n("9/5/"),o=n.n(s),a=n("uyHG"),c=n.n(a),l=function(){if("undefined"!=typeof Map)return Map;function e(e,t){var n=-1;return e.some((function(e,i){return e[0]===t&&(n=i,!0)})),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(t){var n=e(this.__entries__,t),i=this.__entries__[n];return i&&i[1]},t.prototype.set=function(t,n){var i=e(this.__entries__,t);~i?this.__entries__[i][1]=n:this.__entries__.push([t,n])},t.prototype.delete=function(t){var n=this.__entries__,i=e(n,t);~i&&n.splice(i,1)},t.prototype.has=function(t){return!!~e(this.__entries__,t)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(e,t){void 0===t&&(t=null);for(var n=0,i=this.__entries__;n0},e.prototype.connect_=function(){u&&!this.connected_&&(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),p?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){u&&this.connected_&&(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(e){var t=e.propertyName,n=void 0===t?"":t;f.some((function(e){return!!~n.indexOf(e)}))&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),b=function(e,t){for(var n=0,i=Object.keys(t);n0},e}(),D="undefined"!=typeof WeakMap?new WeakMap:new l,T=function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var n=m.getInstance(),i=new k(t,n,this);D.set(this,i)};["observe","unobserve","disconnect"].forEach((function(e){T.prototype[e]=function(){var t;return(t=D.get(this))[e].apply(t,arguments)}}));var C=void 
0!==d.ResizeObserver?d.ResizeObserver:T,O=n("AxL3"),R=n.n(O);function L(e){return e&&e.ownerDocument&&e.ownerDocument.defaultView?e.ownerDocument.defaultView:window}function E(e){return e&&e.ownerDocument?e.ownerDocument:document}n("E9XD"),n("sMBO"),n("rB9j"),n("Rm1S"),n("UxlC");var A=null,I=null;function P(e){if(null===A){var t=E(e);if(void 0===t)return A=0;var n=t.body,i=t.createElement("div");i.classList.add("simplebar-hide-scrollbar"),n.appendChild(i);var r=i.getBoundingClientRect().right;n.removeChild(i),A=r}return A}R.a&&window.addEventListener("resize",(function(){I!==window.devicePixelRatio&&(I=window.devicePixelRatio,A=null)}));var j=function(){function e(t,n){var i=this;this.onScroll=function(){var e=L(i.el);i.scrollXTicking||(e.requestAnimationFrame(i.scrollX),i.scrollXTicking=!0),i.scrollYTicking||(e.requestAnimationFrame(i.scrollY),i.scrollYTicking=!0)},this.scrollX=function(){i.axis.x.isOverflowing&&(i.showScrollbar("x"),i.positionScrollbar("x")),i.scrollXTicking=!1},this.scrollY=function(){i.axis.y.isOverflowing&&(i.showScrollbar("y"),i.positionScrollbar("y")),i.scrollYTicking=!1},this.onMouseEnter=function(){i.showScrollbar("x"),i.showScrollbar("y")},this.onMouseMove=function(e){i.mouseX=e.clientX,i.mouseY=e.clientY,(i.axis.x.isOverflowing||i.axis.x.forceVisible)&&i.onMouseMoveForAxis("x"),(i.axis.y.isOverflowing||i.axis.y.forceVisible)&&i.onMouseMoveForAxis("y")},this.onMouseLeave=function(){i.onMouseMove.cancel(),(i.axis.x.isOverflowing||i.axis.x.forceVisible)&&i.onMouseLeaveForAxis("x"),(i.axis.y.isOverflowing||i.axis.y.forceVisible)&&i.onMouseLeaveForAxis("y"),i.mouseX=-1,i.mouseY=-1},this.onWindowResize=function(){i.scrollbarWidth=i.getScrollbarWidth(),i.hideNativeScrollbar()},this.hideScrollbars=function(){i.axis.x.track.rect=i.axis.x.track.el.getBoundingClientRect(),i.axis.y.track.rect=i.axis.y.track.el.getBoundingClientRect(),i.isWithinBounds(i.axis.y.track.rect)||(i.axis.y.scrollbar.el.classList.remove(i.classNames.visible),i.axis.y.isVisible=!1),i.isWithinBounds(i.axis.x.track.rect)||(i.axis.x.scrollbar.el.classList.remove(i.classNames.visible),i.axis.x.isVisible=!1)},this.onPointerEvent=function(e){var t,n;i.axis.x.track.rect=i.axis.x.track.el.getBoundingClientRect(),i.axis.y.track.rect=i.axis.y.track.el.getBoundingClientRect(),(i.axis.x.isOverflowing||i.axis.x.forceVisible)&&(t=i.isWithinBounds(i.axis.x.track.rect)),(i.axis.y.isOverflowing||i.axis.y.forceVisible)&&(n=i.isWithinBounds(i.axis.y.track.rect)),(t||n)&&(e.preventDefault(),e.stopPropagation(),"mousedown"===e.type&&(t&&(i.axis.x.scrollbar.rect=i.axis.x.scrollbar.el.getBoundingClientRect(),i.isWithinBounds(i.axis.x.scrollbar.rect)?i.onDragStart(e,"x"):i.onTrackClick(e,"x")),n&&(i.axis.y.scrollbar.rect=i.axis.y.scrollbar.el.getBoundingClientRect(),i.isWithinBounds(i.axis.y.scrollbar.rect)?i.onDragStart(e,"y"):i.onTrackClick(e,"y"))))},this.drag=function(t){var n=i.axis[i.draggedAxis].track,r=n.rect[i.axis[i.draggedAxis].sizeAttr],s=i.axis[i.draggedAxis].scrollbar,o=i.contentWrapperEl[i.axis[i.draggedAxis].scrollSizeAttr],a=parseInt(i.elStyles[i.axis[i.draggedAxis].sizeAttr],10);t.preventDefault(),t.stopPropagation();var c=(("y"===i.draggedAxis?t.pageY:t.pageX)-n.rect[i.axis[i.draggedAxis].offsetAttr]-i.axis[i.draggedAxis].dragOffset)/(r-s.size)*(o-a);"x"===i.draggedAxis&&(c=i.isRtl&&e.getRtlHelpers().isRtlScrollbarInverted?c-(r+s.size):c,c=i.isRtl&&e.getRtlHelpers().isRtlScrollingInverted?-c:c),i.contentWrapperEl[i.axis[i.draggedAxis].scrollOffsetAttr]=c},this.onEndDrag=function(e){var 
t=E(i.el),n=L(i.el);e.preventDefault(),e.stopPropagation(),i.el.classList.remove(i.classNames.dragging),t.removeEventListener("mousemove",i.drag,!0),t.removeEventListener("mouseup",i.onEndDrag,!0),i.removePreventClickId=n.setTimeout((function(){t.removeEventListener("click",i.preventClick,!0),t.removeEventListener("dblclick",i.preventClick,!0),i.removePreventClickId=null}))},this.preventClick=function(e){e.preventDefault(),e.stopPropagation()},this.el=t,this.minScrollbarWidth=20,this.options=Object.assign({},e.defaultOptions,{},n),this.classNames=Object.assign({},e.defaultOptions.classNames,{},this.options.classNames),this.axis={x:{scrollOffsetAttr:"scrollLeft",sizeAttr:"width",scrollSizeAttr:"scrollWidth",offsetSizeAttr:"offsetWidth",offsetAttr:"left",overflowAttr:"overflowX",dragOffset:0,isOverflowing:!0,isVisible:!1,forceVisible:!1,track:{},scrollbar:{}},y:{scrollOffsetAttr:"scrollTop",sizeAttr:"height",scrollSizeAttr:"scrollHeight",offsetSizeAttr:"offsetHeight",offsetAttr:"top",overflowAttr:"overflowY",dragOffset:0,isOverflowing:!0,isVisible:!1,forceVisible:!1,track:{},scrollbar:{}}},this.removePreventClickId=null,e.instances.has(this.el)||(this.recalculate=r()(this.recalculate.bind(this),64),this.onMouseMove=r()(this.onMouseMove.bind(this),64),this.hideScrollbars=o()(this.hideScrollbars.bind(this),this.options.timeout),this.onWindowResize=o()(this.onWindowResize.bind(this),64,{leading:!0}),e.getRtlHelpers=c()(e.getRtlHelpers),this.init())}e.getRtlHelpers=function(){var t=document.createElement("div");t.innerHTML='
<div class="hs-dummy-scrollbar-size"><div style="height: 200%; width: 200%; margin: 10px 0;"></div></div>
';var n=t.firstElementChild;document.body.appendChild(n);var i=n.firstElementChild;n.scrollLeft=0;var r=e.getOffset(n),s=e.getOffset(i);n.scrollLeft=999;var o=e.getOffset(i);return{isRtlScrollingInverted:r.left!==s.left&&s.left-o.left!=0,isRtlScrollbarInverted:r.left!==s.left}},e.getOffset=function(e){var t=e.getBoundingClientRect(),n=E(e),i=L(e);return{top:t.top+(i.pageYOffset||n.documentElement.scrollTop),left:t.left+(i.pageXOffset||n.documentElement.scrollLeft)}};var t=e.prototype;return t.init=function(){e.instances.set(this.el,this),R.a&&(this.initDOM(),this.scrollbarWidth=this.getScrollbarWidth(),this.recalculate(),this.initListeners())},t.initDOM=function(){var e=this;if(Array.prototype.filter.call(this.el.children,(function(t){return t.classList.contains(e.classNames.wrapper)})).length)this.wrapperEl=this.el.querySelector("."+this.classNames.wrapper),this.contentWrapperEl=this.options.scrollableNode||this.el.querySelector("."+this.classNames.contentWrapper),this.contentEl=this.options.contentNode||this.el.querySelector("."+this.classNames.contentEl),this.offsetEl=this.el.querySelector("."+this.classNames.offset),this.maskEl=this.el.querySelector("."+this.classNames.mask),this.placeholderEl=this.findChild(this.wrapperEl,"."+this.classNames.placeholder),this.heightAutoObserverWrapperEl=this.el.querySelector("."+this.classNames.heightAutoObserverWrapperEl),this.heightAutoObserverEl=this.el.querySelector("."+this.classNames.heightAutoObserverEl),this.axis.x.track.el=this.findChild(this.el,"."+this.classNames.track+"."+this.classNames.horizontal),this.axis.y.track.el=this.findChild(this.el,"."+this.classNames.track+"."+this.classNames.vertical);else{for(this.wrapperEl=document.createElement("div"),this.contentWrapperEl=document.createElement("div"),this.offsetEl=document.createElement("div"),this.maskEl=document.createElement("div"),this.contentEl=document.createElement("div"),this.placeholderEl=document.createElement("div"),this.heightAutoObserverWrapperEl=document.createElement("div"),this.heightAutoObserverEl=document.createElement("div"),this.wrapperEl.classList.add(this.classNames.wrapper),this.contentWrapperEl.classList.add(this.classNames.contentWrapper),this.offsetEl.classList.add(this.classNames.offset),this.maskEl.classList.add(this.classNames.mask),this.contentEl.classList.add(this.classNames.contentEl),this.placeholderEl.classList.add(this.classNames.placeholder),this.heightAutoObserverWrapperEl.classList.add(this.classNames.heightAutoObserverWrapperEl),this.heightAutoObserverEl.classList.add(this.classNames.heightAutoObserverEl);this.el.firstChild;)this.contentEl.appendChild(this.el.firstChild);this.contentWrapperEl.appendChild(this.contentEl),this.offsetEl.appendChild(this.contentWrapperEl),this.maskEl.appendChild(this.offsetEl),this.heightAutoObserverWrapperEl.appendChild(this.heightAutoObserverEl),this.wrapperEl.appendChild(this.heightAutoObserverWrapperEl),this.wrapperEl.appendChild(this.maskEl),this.wrapperEl.appendChild(this.placeholderEl),this.el.appendChild(this.wrapperEl)}if(!this.axis.x.track.el||!this.axis.y.track.el){var 
t=document.createElement("div"),n=document.createElement("div");t.classList.add(this.classNames.track),n.classList.add(this.classNames.scrollbar),t.appendChild(n),this.axis.x.track.el=t.cloneNode(!0),this.axis.x.track.el.classList.add(this.classNames.horizontal),this.axis.y.track.el=t.cloneNode(!0),this.axis.y.track.el.classList.add(this.classNames.vertical),this.el.appendChild(this.axis.x.track.el),this.el.appendChild(this.axis.y.track.el)}this.axis.x.scrollbar.el=this.axis.x.track.el.querySelector("."+this.classNames.scrollbar),this.axis.y.scrollbar.el=this.axis.y.track.el.querySelector("."+this.classNames.scrollbar),this.options.autoHide||(this.axis.x.scrollbar.el.classList.add(this.classNames.visible),this.axis.y.scrollbar.el.classList.add(this.classNames.visible)),this.el.setAttribute("data-simplebar","init")},t.initListeners=function(){var e=this,t=L(this.el);this.options.autoHide&&this.el.addEventListener("mouseenter",this.onMouseEnter),["mousedown","click","dblclick"].forEach((function(t){e.el.addEventListener(t,e.onPointerEvent,!0)})),["touchstart","touchend","touchmove"].forEach((function(t){e.el.addEventListener(t,e.onPointerEvent,{capture:!0,passive:!0})})),this.el.addEventListener("mousemove",this.onMouseMove),this.el.addEventListener("mouseleave",this.onMouseLeave),this.contentWrapperEl.addEventListener("scroll",this.onScroll),t.addEventListener("resize",this.onWindowResize);var n=!1;this.resizeObserver=new(t.ResizeObserver||C)((function(){n&&e.recalculate()})),this.resizeObserver.observe(this.el),this.resizeObserver.observe(this.contentEl),t.requestAnimationFrame((function(){n=!0})),this.mutationObserver=new t.MutationObserver(this.recalculate),this.mutationObserver.observe(this.contentEl,{childList:!0,subtree:!0,characterData:!0})},t.recalculate=function(){var e=L(this.el);this.elStyles=e.getComputedStyle(this.el),this.isRtl="rtl"===this.elStyles.direction;var t=this.heightAutoObserverEl.offsetHeight<=1,n=this.heightAutoObserverEl.offsetWidth<=1,i=this.contentEl.offsetWidth,r=this.contentWrapperEl.offsetWidth,s=this.elStyles.overflowX,o=this.elStyles.overflowY;this.contentEl.style.padding=this.elStyles.paddingTop+" "+this.elStyles.paddingRight+" "+this.elStyles.paddingBottom+" "+this.elStyles.paddingLeft,this.wrapperEl.style.margin="-"+this.elStyles.paddingTop+" -"+this.elStyles.paddingRight+" -"+this.elStyles.paddingBottom+" -"+this.elStyles.paddingLeft;var a=this.contentEl.scrollHeight,c=this.contentEl.scrollWidth;this.contentWrapperEl.style.height=t?"auto":"100%",this.placeholderEl.style.width=n?i+"px":"auto",this.placeholderEl.style.height=a+"px";var l=this.contentWrapperEl.offsetHeight;this.axis.x.isOverflowing=c>i,this.axis.y.isOverflowing=a>l,this.axis.x.isOverflowing="hidden"!==s&&this.axis.x.isOverflowing,this.axis.y.isOverflowing="hidden"!==o&&this.axis.y.isOverflowing,this.axis.x.forceVisible="x"===this.options.forceVisible||!0===this.options.forceVisible,this.axis.y.forceVisible="y"===this.options.forceVisible||!0===this.options.forceVisible,this.hideNativeScrollbar();var 
u=this.axis.x.isOverflowing?this.scrollbarWidth:0;this.axis.x.isOverflowing=this.axis.x.isOverflowing&&c>r-(this.axis.y.isOverflowing?this.scrollbarWidth:0),this.axis.y.isOverflowing=this.axis.y.isOverflowing&&a>l-u,this.axis.x.scrollbar.size=this.getScrollbarSize("x"),this.axis.y.scrollbar.size=this.getScrollbarSize("y"),this.axis.x.scrollbar.el.style.width=this.axis.x.scrollbar.size+"px",this.axis.y.scrollbar.el.style.height=this.axis.y.scrollbar.size+"px",this.positionScrollbar("x"),this.positionScrollbar("y"),this.toggleTrackVisibility("x"),this.toggleTrackVisibility("y")},t.getScrollbarSize=function(e){if(void 0===e&&(e="y"),!this.axis[e].isOverflowing)return 0;var t,n=this.axis[e].track.el[this.axis[e].offsetSizeAttr];return t=Math.max(~~(n/this.contentEl[this.axis[e].scrollSizeAttr]*n),this.options.scrollbarMinSize),this.options.scrollbarMaxSize&&(t=Math.min(t,this.options.scrollbarMaxSize)),t},t.positionScrollbar=function(t){if(void 0===t&&(t="y"),this.axis[t].isOverflowing){var n=this.contentWrapperEl[this.axis[t].scrollSizeAttr],i=this.axis[t].track.el[this.axis[t].offsetSizeAttr],r=parseInt(this.elStyles[this.axis[t].sizeAttr],10),s=this.axis[t].scrollbar,o=this.contentWrapperEl[this.axis[t].scrollOffsetAttr],a=~~((o="x"===t&&this.isRtl&&e.getRtlHelpers().isRtlScrollingInverted?-o:o)/(n-r)*(i-s.size));a="x"===t&&this.isRtl&&e.getRtlHelpers().isRtlScrollbarInverted?a+(i-s.size):a,s.el.style.transform="x"===t?"translate3d("+a+"px, 0, 0)":"translate3d(0, "+a+"px, 0)"}},t.toggleTrackVisibility=function(e){void 0===e&&(e="y");var t=this.axis[e].track.el,n=this.axis[e].scrollbar.el;this.axis[e].isOverflowing||this.axis[e].forceVisible?(t.style.visibility="visible",this.contentWrapperEl.style[this.axis[e].overflowAttr]="scroll"):(t.style.visibility="hidden",this.contentWrapperEl.style[this.axis[e].overflowAttr]="hidden"),n.style.display=this.axis[e].isOverflowing?"block":"none"},t.hideNativeScrollbar=function(){this.offsetEl.style[this.isRtl?"left":"right"]=this.axis.y.isOverflowing||this.axis.y.forceVisible?"-"+this.scrollbarWidth+"px":0,this.offsetEl.style.bottom=this.axis.x.isOverflowing||this.axis.x.forceVisible?"-"+this.scrollbarWidth+"px":0},t.onMouseMoveForAxis=function(e){void 0===e&&(e="y"),this.axis[e].track.rect=this.axis[e].track.el.getBoundingClientRect(),this.axis[e].scrollbar.rect=this.axis[e].scrollbar.el.getBoundingClientRect(),this.isWithinBounds(this.axis[e].scrollbar.rect)?this.axis[e].scrollbar.el.classList.add(this.classNames.hover):this.axis[e].scrollbar.el.classList.remove(this.classNames.hover),this.isWithinBounds(this.axis[e].track.rect)?(this.showScrollbar(e),this.axis[e].track.el.classList.add(this.classNames.hover)):this.axis[e].track.el.classList.remove(this.classNames.hover)},t.onMouseLeaveForAxis=function(e){void 0===e&&(e="y"),this.axis[e].track.el.classList.remove(this.classNames.hover),this.axis[e].scrollbar.el.classList.remove(this.classNames.hover)},t.showScrollbar=function(e){void 0===e&&(e="y"),this.axis[e].isVisible||(this.axis[e].scrollbar.el.classList.add(this.classNames.visible),this.axis[e].isVisible=!0),this.options.autoHide&&this.hideScrollbars()},t.onDragStart=function(e,t){void 0===t&&(t="y");var 
n=E(this.el),i=L(this.el);this.axis[t].dragOffset=("y"===t?e.pageY:e.pageX)-this.axis[t].scrollbar.rect[this.axis[t].offsetAttr],this.draggedAxis=t,this.el.classList.add(this.classNames.dragging),n.addEventListener("mousemove",this.drag,!0),n.addEventListener("mouseup",this.onEndDrag,!0),null===this.removePreventClickId?(n.addEventListener("click",this.preventClick,!0),n.addEventListener("dblclick",this.preventClick,!0)):(i.clearTimeout(this.removePreventClickId),this.removePreventClickId=null)},t.onTrackClick=function(e,t){var n=this;if(void 0===t&&(t="y"),this.options.clickOnTrack){var i=L(this.el);this.axis[t].scrollbar.rect=this.axis[t].scrollbar.el.getBoundingClientRect();var r=this.axis[t].scrollbar.rect[this.axis[t].offsetAttr],s=parseInt(this.elStyles[this.axis[t].sizeAttr],10),o=this.contentWrapperEl[this.axis[t].scrollOffsetAttr],a=("y"===t?this.mouseY-r:this.mouseX-r)<0?-1:1,c=-1===a?o-s:o+s;!function e(){var r,s;-1===a?o>c&&(n.contentWrapperEl.scrollTo(((r={})[n.axis[t].offsetAttr]=o-=n.options.clickOnTrackSpeed,r)),i.requestAnimationFrame(e)):o=e.left&&this.mouseX<=e.left+e.width&&this.mouseY>=e.top&&this.mouseY<=e.top+e.height},t.findChild=function(e,t){var n=e.matches||e.webkitMatchesSelector||e.mozMatchesSelector||e.msMatchesSelector;return Array.prototype.filter.call(e.children,(function(e){return n.call(e,t)}))[0]},e}();j.defaultOptions={autoHide:!0,forceVisible:!1,clickOnTrack:!0,clickOnTrackSpeed:40,classNames:{contentEl:"simplebar-content",contentWrapper:"simplebar-content-wrapper",offset:"simplebar-offset",mask:"simplebar-mask",wrapper:"simplebar-wrapper",placeholder:"simplebar-placeholder",scrollbar:"simplebar-scrollbar",track:"simplebar-track",heightAutoObserverWrapperEl:"simplebar-height-auto-observer-wrapper",heightAutoObserverEl:"simplebar-height-auto-observer",visible:"simplebar-visible",horizontal:"simplebar-horizontal",vertical:"simplebar-vertical",hover:"simplebar-hover",dragging:"simplebar-dragging"},scrollbarMinSize:25,scrollbarMaxSize:0,timeout:1e3},j.instances=new WeakMap;var N=j,F=n("8Y7J");const Y=["*"];let z=(()=>{class e{constructor(e){this.elRef=e}ngOnInit(){}ngAfterViewInit(){this.SimpleBar=new N(this.elRef.nativeElement,this.options||{})}ngOnDestroy(){this.SimpleBar.unMount(),this.SimpleBar=null}}return e.\u0275fac=function(t){return 
new(t||e)(F.Mb(F.m))},e.\u0275cmp=F.Gb({type:e,selectors:[["ngx-simplebar"]],hostAttrs:["data-simplebar","init"],inputs:{options:"options"},ngContentSelectors:Y,decls:13,vars:0,consts:[[1,"simplebar-wrapper"],[1,"simplebar-height-auto-observer-wrapper"],[1,"simplebar-height-auto-observer"],[1,"simplebar-mask"],[1,"simplebar-offset"],[1,"simplebar-content-wrapper"],[1,"simplebar-content"],[1,"simplebar-placeholder"],[1,"simplebar-track","simplebar-horizontal"],[1,"simplebar-scrollbar"],[1,"simplebar-track","simplebar-vertical"]],template:function(e,t){1&e&&(F.oc(),F.Sb(0,"div",0),F.Sb(1,"div",1),F.Nb(2,"div",2),F.Rb(),F.Sb(3,"div",3),F.Sb(4,"div",4),F.Sb(5,"div",5),F.Sb(6,"div",6),F.nc(7),F.Rb(),F.Rb(),F.Rb(),F.Rb(),F.Nb(8,"div",7),F.Rb(),F.Sb(9,"div",8),F.Nb(10,"div",9),F.Rb(),F.Sb(11,"div",10),F.Nb(12,"div",9),F.Rb())},styles:["[data-simplebar]{position:relative;-webkit-box-orient:vertical;-webkit-box-direction:normal;flex-direction:column;flex-wrap:wrap;-webkit-box-pack:start;justify-content:flex-start;align-content:flex-start;-webkit-box-align:start;align-items:flex-start}.simplebar-wrapper{overflow:hidden;width:inherit;height:inherit;max-width:inherit;max-height:inherit}.simplebar-mask{direction:inherit;position:absolute;overflow:hidden;padding:0;margin:0;left:0;top:0;bottom:0;right:0;width:auto!important;height:auto!important;z-index:0}.simplebar-offset{direction:inherit!important;box-sizing:inherit!important;resize:none!important;position:absolute;top:0;left:0;bottom:0;right:0;padding:0;margin:0;-webkit-overflow-scrolling:touch}.simplebar-content-wrapper{direction:inherit;box-sizing:border-box!important;position:relative;display:block;height:100%;width:auto;max-width:100%;max-height:100%;scrollbar-width:none;-ms-overflow-style:none}.simplebar-content-wrapper::-webkit-scrollbar,.simplebar-hide-scrollbar::-webkit-scrollbar{width:0;height:0}.simplebar-content:after,.simplebar-content:before{content:' ';display:table}.simplebar-placeholder{max-height:100%;max-width:100%;width:100%;pointer-events:none}.simplebar-height-auto-observer-wrapper{box-sizing:inherit!important;height:100%;width:100%;max-width:1px;position:relative;float:left;max-height:1px;overflow:hidden;z-index:-1;padding:0;margin:0;pointer-events:none;-webkit-box-flex:inherit;flex-grow:inherit;flex-shrink:0;flex-basis:0}.simplebar-height-auto-observer{box-sizing:inherit;display:block;opacity:0;position:absolute;top:0;left:0;height:1000%;width:1000%;min-height:1px;min-width:1px;overflow:hidden;pointer-events:none;z-index:-1}.simplebar-track{z-index:1;position:absolute;right:0;bottom:0;pointer-events:none;overflow:hidden}[data-simplebar].simplebar-dragging .simplebar-content{pointer-events:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-user-select:none}[data-simplebar].simplebar-dragging .simplebar-track{pointer-events:all}.simplebar-scrollbar{position:absolute;left:0;right:0;min-height:10px}.simplebar-scrollbar:before{position:absolute;content:'';background:#000;border-radius:7px;left:2px;right:2px;opacity:0;-webkit-transition:opacity .2s linear;transition:opacity .2s linear}.simplebar-scrollbar.simplebar-visible:before{opacity:.5;-webkit-transition:opacity linear;transition:opacity linear}.simplebar-track.simplebar-vertical{top:0;width:11px}.simplebar-track.simplebar-vertical .simplebar-scrollbar:before{top:2px;bottom:2px}.simplebar-track.simplebar-horizontal{left:0;height:11px}.simplebar-track.simplebar-horizontal 
[Minified JavaScript bundle omitted. This span of the patch inlines the Ceph Dashboard's compiled Angular frontend as a single machine-generated webpack bundle; the diff markers and file boundaries were lost in extraction and the individual statements are not meaningfully reviewable. Modules recognizable in the omitted text include:
 - moment.js locale definitions: dv, tk, hu, sv, zh-cn, te, ug-cn, sd, ms-my, eo, br, mi, mk, en-nz, nb, kk, ar-tn, it, it-ch, fy
 - vendor internals from core-js, lodash, and RxJS (Notification, Subject, inner-subscription plumbing), and the simplebar scrollbar styles
 - dashboard components, pipes, and providers: cd-loading-panel, cd-back-button, cd-modal, cd-copy-2-clipboard-button, cd-table-performance-counter, the sanitizeHtml and cdDate pipes, a number/unit formatter service (format_number, toBytes, toMilliseconds, toIops), a localStorage-backed auth storage service, and Angular platform-browser renderer/event-manager internals]
eventCallback(t,n,i){return r=>{e.getEventFullKey(r)===t&&i.runGuarded(()=>n(r))}}static _normalizeKey(e){switch(e){case"esc":return"escape";default:return e}}}return e.\u0275fac=function(t){return new(t||e)(r.dc(i.d))},e.\u0275prov=r.Ib({token:e,factory:e.\u0275fac}),e})(),L=(()=>{class e{}return e.\u0275fac=function(t){return new(t||e)},e.\u0275prov=Object(r.Ib)({factory:function(){return Object(r.dc)(A)},token:e,providedIn:"root"}),e})();function E(e){return new A(e.get(i.d))}let A=(()=>{class e extends L{constructor(e){super(),this._doc=e}sanitize(e,t){if(null==t)return null;switch(e){case r.I.NONE:return t;case r.I.HTML:return Object(r.cb)(t,"HTML")?Object(r.ub)(t):Object(r.ab)(this._doc,String(t));case r.I.STYLE:return Object(r.cb)(t,"Style")?Object(r.ub)(t):t;case r.I.SCRIPT:if(Object(r.cb)(t,"Script"))return Object(r.ub)(t);throw new Error("unsafe value used in a script context");case r.I.URL:return Object(r.mb)(t),Object(r.cb)(t,"URL")?Object(r.ub)(t):Object(r.bb)(String(t));case r.I.RESOURCE_URL:if(Object(r.cb)(t,"ResourceURL"))return Object(r.ub)(t);throw new Error("unsafe value used in a resource URL context (see http://g.co/ng/security#xss)");default:throw new Error(`Unexpected SecurityContext ${e} (see http://g.co/ng/security#xss)`)}}bypassSecurityTrustHtml(e){return Object(r.db)(e)}bypassSecurityTrustStyle(e){return Object(r.gb)(e)}bypassSecurityTrustScript(e){return Object(r.fb)(e)}bypassSecurityTrustUrl(e){return Object(r.hb)(e)}bypassSecurityTrustResourceUrl(e){return Object(r.eb)(e)}}return e.\u0275fac=function(t){return new(t||e)(r.dc(i.d))},e.\u0275prov=Object(r.Ib)({factory:function(){return E(Object(r.dc)(r.p))},token:e,providedIn:"root"}),e})();const I=[{provide:r.C,useValue:i.K},{provide:r.D,useValue:function(){o.makeCurrent(),d.init()},multi:!0},{provide:i.d,useFactory:function(){return Object(r.sb)(document),document},deps:[]}],P=Object(r.R)(r.V,"browser",I),j=[[],{provide:r.Y,useValue:"root"},{provide:r.n,useFactory:function(){return new r.n},deps:[]},{provide:h,useClass:k,multi:!0,deps:[i.d,r.A,r.C]},{provide:h,useClass:R,multi:!0,deps:[i.d]},[],{provide:w,useClass:w,deps:[f,b,r.c]},{provide:r.F,useExisting:w},{provide:m,useExisting:b},{provide:b,useClass:b,deps:[i.d]},{provide:r.M,useClass:r.M,deps:[r.A]},{provide:f,useClass:f,deps:[h,r.A]},[]];let N=(()=>{class e{constructor(e){if(e)throw new Error("BrowserModule has already been loaded. 
If you need access to common directives such as NgIf and NgFor from a lazy loaded module, import CommonModule instead.")}static withServerTransition(t){return{ngModule:e,providers:[{provide:r.c,useValue:t.appId},{provide:l,useExisting:r.c},u]}}}return e.\u0275mod=r.Kb({type:e}),e.\u0275inj=r.Jb({factory:function(t){return new(t||e)(r.dc(e,12))},providers:j,imports:[i.c,r.f]}),e})();const F="undefined"!=typeof window&&window||{};class Y{constructor(e,t){this.msPerTick=e,this.numTicks=t}}class z{constructor(e){this.appRef=e.injector.get(r.g)}timeChangeDetection(e){const t=e&&e.record,n="Change Detection",r=null!=F.console.profile;t&&r&&F.console.profile(n);const s=Object(i.L)().performanceNow();let o=0;for(;o<5||Object(i.L)().performanceNow()-s<500;)this.appRef.tick(),o++;const a=Object(i.L)().performanceNow();t&&r&&F.console.profileEnd(n);const c=(a-s)/o;return F.console.log(`ran ${o} change detection cycles`),F.console.log(c.toFixed(2)+" ms per check"),new Y(c,o)}}function $(e){return"profiler",t=new z(e),"undefined"!=typeof COMPILED&&COMPILED||((r.nb.ng=r.nb.ng||{}).profiler=t),e;var t}},cVYH:function(e,t,n){var i=n("hh1v"),r=n("0rvr");e.exports=function(e,t,n){var s,o;return r&&"function"==typeof(s=t.constructor)&&s!==n&&i(o=s.prototype)&&o!==n.prototype&&r(e,o),e}},cp0P:function(e,t,n){"use strict";n.d(t,"a",(function(){return c}));var i=n("HDdC"),r=n("DH7j"),s=n("lJxs"),o=n("XoHu"),a=n("Cfvw");function c(...e){if(1===e.length){const t=e[0];if(Object(r.a)(t))return l(t,null);if(Object(o.a)(t)&&Object.getPrototypeOf(t)===Object.prototype){const e=Object.keys(t);return l(e.map(e=>t[e]),e)}}if("function"==typeof e[e.length-1]){const t=e.pop();return l(e=1===e.length&&Object(r.a)(e[0])?e[0]:e,null).pipe(Object(s.a)(e=>t(...e)))}return l(e,null)}function l(e,t){return new i.a(n=>{const i=e.length;if(0===i)return void n.complete();const r=new Array(i);let s=0,o=0;for(let c=0;c{u||(u=!0,o++),r[c]=e},error:e=>n.error(e),complete:()=>{s++,s!==i&&u||(o===i&&n.next(t?t.reduce((e,t,n)=>(e[t]=r[n],e),{}):r),n.complete())}}))}})}},czMo:function(e,t,n){!function(e){"use strict";e.defineLocale("en-il",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},dayOfMonthOrdinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(e){var t=e%10;return e+(1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th")}})}(n("wd/R"))},"d+Og":function(e,t,n){"use strict";n.d(t,"a",(function(){return c}));var i=n("LvDl"),r=n.n(i),s=n("kJI8");let o=(()=>{class e{constructor(e){if(this.type=e,!this.isValidType())throw new Error("Wrong placement group category type");this.setTypeStates()}isValidType(){return e.VALID_CATEGORIES.includes(this.type)}setTypeStates(){switch(this.type){case 
e.CATEGORY_CLEAN:this.states=["active","clean"];break;case e.CATEGORY_WORKING:this.states=["activating","backfill_wait","backfilling","creating","deep","degraded","forced_backfill","forced_recovery","peering","peered","recovering","recovery_wait","repair","scrubbing","snaptrim","snaptrim_wait"];break;case e.CATEGORY_WARNING:this.states=["backfill_toofull","backfill_unfound","down","incomplete","inconsistent","recovery_toofull","recovery_unfound","remapped","snaptrim_error","stale","undersized"];break;default:this.states=[]}}}return e.CATEGORY_CLEAN="clean",e.CATEGORY_WORKING="working",e.CATEGORY_WARNING="warning",e.CATEGORY_UNKNOWN="unknown",e.VALID_CATEGORIES=[e.CATEGORY_CLEAN,e.CATEGORY_WORKING,e.CATEGORY_WARNING,e.CATEGORY_UNKNOWN],e})();var a=n("8Y7J");let c=(()=>{class e{constructor(){this.categories=this.createCategories()}getAllTypes(){return o.VALID_CATEGORIES}getTypeByStates(e){const t=this.getPgStatesFromText(e);if(0===t.length)return o.CATEGORY_UNKNOWN;const n=r.a.zipObject(o.VALID_CATEGORIES,o.VALID_CATEGORIES.map(e=>r.a.intersection(this.categories[e].states,t).length));if(n[o.CATEGORY_WARNING]>0)return o.CATEGORY_WARNING;const i=n[o.CATEGORY_WORKING];return t.length>n[o.CATEGORY_CLEAN]+i?o.CATEGORY_UNKNOWN:i?o.CATEGORY_WORKING:o.CATEGORY_CLEAN}createCategories(){return r.a.zipObject(o.VALID_CATEGORIES,o.VALID_CATEGORIES.map(e=>new o(e)))}getPgStatesFromText(e){const t=e.replace(/[^a-z]+/g," ").trim().split(" ");return r.a.uniq(t)}}return e.\u0275fac=function(t){return new(t||e)},e.\u0275prov=a.Ib({token:e,factory:e.\u0275fac,providedIn:s.a}),e})()},"dBg+":function(e,t){t.f=Object.getOwnPropertySymbols},dEH0:function(e,t,n){"use strict";n.d(t,"a",(function(){return r}));var i=n("8Y7J");let r=(()=>{class e{transform(e){return e+" ms"}}return e.\u0275fac=function(t){return new(t||e)},e.\u0275pipe=i.Lb({name:"milliseconds",type:e,pure:!0}),e})()},dLWn:function(e,t,n){"use strict";var i=Function.prototype.toString;t.a=function(e){if(null!=e){try{return i.call(e)}catch(t){}try{return e+""}catch(t){}}return""}},dNwA:function(e,t,n){!function(e){"use strict";e.defineLocale("sw",{months:"Januari_Februari_Machi_Aprili_Mei_Juni_Julai_Agosti_Septemba_Oktoba_Novemba_Desemba".split("_"),monthsShort:"Jan_Feb_Mac_Apr_Mei_Jun_Jul_Ago_Sep_Okt_Nov_Des".split("_"),weekdays:"Jumapili_Jumatatu_Jumanne_Jumatano_Alhamisi_Ijumaa_Jumamosi".split("_"),weekdaysShort:"Jpl_Jtat_Jnne_Jtan_Alh_Ijm_Jmos".split("_"),weekdaysMin:"J2_J3_J4_J5_Al_Ij_J1".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"hh:mm A",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[leo saa] LT",nextDay:"[kesho saa] LT",nextWeek:"[wiki ijayo] dddd [saat] LT",lastDay:"[jana] LT",lastWeek:"[wiki iliyopita] dddd [saat] LT",sameElse:"L"},relativeTime:{future:"%s baadaye",past:"tokea %s",s:"hivi punde",ss:"sekunde %d",m:"dakika moja",mm:"dakika %d",h:"saa limoja",hh:"masaa %d",d:"siku moja",dd:"siku %d",M:"mwezi mmoja",MM:"miezi %d",y:"mwaka mmoja",yy:"miaka %d"},week:{dow:1,doy:7}})}(n("wd/R"))},dgut:function(e,t,n){"use strict";n.d(t,"a",(function(){return u}));var i=n("NEZu"),r=n("Fgil"),s=n("8Y7J"),o=n("SVse"),a=n("hrfs");const c=["sparkCanvas"],l=["sparkTooltip"];let u=(()=>{class 
e{constructor(e){this.dimlessBinaryPipe=e,this.style={height:"30px",width:"100px"},this.colors=[{backgroundColor:"rgba(40,140,234,0.2)",borderColor:"rgba(40,140,234,1)",pointBackgroundColor:"rgba(40,140,234,1)",pointBorderColor:"#fff",pointHoverBackgroundColor:"#fff",pointHoverBorderColor:"rgba(40,140,234,0.8)"}],this.options={animation:{duration:0},responsive:!0,maintainAspectRatio:!1,legend:{display:!1},elements:{line:{borderWidth:1}},tooltips:{enabled:!1,mode:"index",intersect:!1,custom:void 0,callbacks:{label:e=>this.isBinary?this.dimlessBinaryPipe.transform(e.yLabel):e.yLabel,title:()=>""}},scales:{yAxes:[{display:!1}],xAxes:[{display:!1}]}},this.datasets=[{data:[]}],this.labels=[]}ngOnInit(){const e=new i.a(this.chartCanvasRef,this.chartTooltipRef,(e,t)=>t+e.caretX+"px",e=>e.caretY-e.height-e.yPadding-5+"px");e.customColors={backgroundColor:this.colors[0].pointBackgroundColor,borderColor:this.colors[0].pointBorderColor},this.options.tooltips.custom=t=>{e.customTooltips(t)}}ngOnChanges(e){this.datasets[0].data=e.data.currentValue,this.labels=[...Array(e.data.currentValue.length)]}}return e.\u0275fac=function(t){return new(t||e)(s.Mb(r.a))},e.\u0275cmp=s.Gb({type:e,selectors:[["cd-sparkline"]],viewQuery:function(e,t){var n;1&e&&(s.Jc(c,!0),s.Jc(l,!0)),2&e&&(s.zc(n=s.hc())&&(t.chartCanvasRef=n.first),s.zc(n=s.hc())&&(t.chartTooltipRef=n.first))},inputs:{data:"data",style:"style",isBinary:"isBinary"},features:[s.wb],decls:6,vars:6,consts:[[1,"chart-container",3,"ngStyle"],["baseChart","",3,"labels","datasets","options","colors","chartType"],["sparkCanvas",""],[1,"chartjs-tooltip"],["sparkTooltip",""]],template:function(e,t){1&e&&(s.Sb(0,"div",0),s.Nb(1,"canvas",1,2),s.Sb(3,"div",3,4),s.Nb(5,"table"),s.Rb(),s.Rb()),2&e&&(s.pc("ngStyle",t.style),s.yb(1),s.pc("labels",t.labels)("datasets",t.datasets)("options",t.options)("colors",t.colors)("chartType","line"))},directives:[o.s,a.a],styles:['.chart-container[_ngcontent-%COMP%]{cursor:pointer;margin:auto;overflow:visible;position:absolute}canvas[_ngcontent-%COMP%]{user-select:none}.chartjs-tooltip[_ngcontent-%COMP%]{background:rgba(0,0,0,.7);border-radius:3px;color:#fff;font-family:Helvetica Neue,Helvetica,Arial,sans-serif!important;opacity:0;pointer-events:none;position:absolute;transform:translate(-50%);transition:all .1s ease}.chartjs-tooltip.transform-left[_ngcontent-%COMP%]{transform:translate(-10%)}.chartjs-tooltip.transform-left[_ngcontent-%COMP%]:after{left:10%}.chartjs-tooltip.transform-right[_ngcontent-%COMP%]{transform:translate(-90%)}.chartjs-tooltip.transform-right[_ngcontent-%COMP%]:after{left:90%}.chartjs-tooltip[_ngcontent-%COMP%]:after{border:5px solid transparent;border-top-color:#000;content:" ";left:50%;margin-left:-5px;position:absolute;top:100%} .chartjs-tooltip-key{display:inline-block;height:10px;margin-right:10px;width:10px}.chart-container[_ngcontent-%COMP%]{position:static!important}']}),e})()},"e+ae":function(e,t,n){!function(e){"use strict";var t="janu\xe1r_febru\xe1r_marec_apr\xedl_m\xe1j_j\xfan_j\xfal_august_september_okt\xf3ber_november_december".split("_"),n="jan_feb_mar_apr_m\xe1j_j\xfan_j\xfal_aug_sep_okt_nov_dec".split("_");function i(e){return e>1&&e<5}function r(e,t,n,r){var s=e+" ";switch(n){case"s":return t||r?"p\xe1r sek\xfand":"p\xe1r sekundami";case"ss":return t||r?s+(i(e)?"sekundy":"sek\xfand"):s+"sekundami";case"m":return t?"min\xfata":r?"min\xfatu":"min\xfatou";case"mm":return t||r?s+(i(e)?"min\xfaty":"min\xfat"):s+"min\xfatami";case"h":return t?"hodina":r?"hodinu":"hodinou";case"hh":return 
t||r?s+(i(e)?"hodiny":"hod\xedn"):s+"hodinami";case"d":return t||r?"de\u0148":"d\u0148om";case"dd":return t||r?s+(i(e)?"dni":"dn\xed"):s+"d\u0148ami";case"M":return t||r?"mesiac":"mesiacom";case"MM":return t||r?s+(i(e)?"mesiace":"mesiacov"):s+"mesiacmi";case"y":return t||r?"rok":"rokom";case"yy":return t||r?s+(i(e)?"roky":"rokov"):s+"rokmi"}}e.defineLocale("sk",{months:t,monthsShort:n,weekdays:"nede\u013ea_pondelok_utorok_streda_\u0161tvrtok_piatok_sobota".split("_"),weekdaysShort:"ne_po_ut_st_\u0161t_pi_so".split("_"),weekdaysMin:"ne_po_ut_st_\u0161t_pi_so".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd D. MMMM YYYY H:mm"},calendar:{sameDay:"[dnes o] LT",nextDay:"[zajtra o] LT",nextWeek:function(){switch(this.day()){case 0:return"[v nede\u013eu o] LT";case 1:case 2:return"[v] dddd [o] LT";case 3:return"[v stredu o] LT";case 4:return"[vo \u0161tvrtok o] LT";case 5:return"[v piatok o] LT";case 6:return"[v sobotu o] LT"}},lastDay:"[v\u010dera o] LT",lastWeek:function(){switch(this.day()){case 0:return"[minul\xfa nede\u013eu o] LT";case 1:case 2:return"[minul\xfd] dddd [o] LT";case 3:return"[minul\xfa stredu o] LT";case 4:case 5:return"[minul\xfd] dddd [o] LT";case 6:return"[minul\xfa sobotu o] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"pred %s",s:r,ss:r,m:r,mm:r,h:r,hh:r,d:r,dd:r,M:r,MM:r,y:r,yy:r},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})}(n("wd/R"))},e0ae:function(e,t,n){"use strict";n.d(t,"a",(function(){return d}));var i=function(e,t,n){this.name=e,this.version=t,this.os=n,this.type="browser"},r=function(e){this.version=e,this.type="node",this.name="node",this.os=process.platform},s=function(e,t,n,i){this.name=e,this.version=t,this.os=n,this.bot=i,this.type="bot-device"},o=function(){this.type="bot",this.bot=!0,this.name="bot",this.version=null,this.os=null},a=function(){this.type="react-native",this.name="react-native",this.version=null,this.os=null},c=/(nuhk|Googlebot|Yammybot|Openbot|Slurp|MSNBot|Ask\ Jeeves\/Teoma|ia_archiver)/,l=[["aol",/AOLShield\/([0-9\._]+)/],["edge",/Edge\/([0-9\._]+)/],["edge-ios",/EdgiOS\/([0-9\._]+)/],["yandexbrowser",/YaBrowser\/([0-9\._]+)/],["kakaotalk",/KAKAOTALK\s([0-9\.]+)/],["samsung",/SamsungBrowser\/([0-9\.]+)/],["silk",/\bSilk\/([0-9._-]+)\b/],["miui",/MiuiBrowser\/([0-9\.]+)$/],["beaker",/BeakerBrowser\/([0-9\.]+)/],["edge-chromium",/EdgA?\/([0-9\.]+)/],["chromium-webview",/(?!Chrom.*OPR)wv\).*Chrom(?:e|ium)\/([0-9\.]+)(:?\s|$)/],["chrome",/(?!Chrom.*OPR)Chrom(?:e|ium)\/([0-9\.]+)(:?\s|$)/],["phantomjs",/PhantomJS\/([0-9\.]+)(:?\s|$)/],["crios",/CriOS\/([0-9\.]+)(:?\s|$)/],["firefox",/Firefox\/([0-9\.]+)(?:\s|$)/],["fxios",/FxiOS\/([0-9\.]+)/],["opera-mini",/Opera Mini.*Version\/([0-9\.]+)/],["opera",/Opera\/([0-9\.]+)(?:\s|$)/],["opera",/OPR\/([0-9\.]+)(:?\s|$)/],["ie",/Trident\/7\.0.*rv\:([0-9\.]+).*\).*Gecko$/],["ie",/MSIE\s([0-9\.]+);.*Trident\/[4-7].0/],["ie",/MSIE\s(7\.0)/],["bb10",/BB10;\sTouch.*Version\/([0-9\.]+)/],["android",/Android\s([0-9\.]+)/],["ios",/Version\/([0-9\._]+).*Mobile.*Safari.*/],["safari",/Version\/([0-9\._]+).*Safari/],["facebook",/FBAV\/([0-9\.]+)/],["instagram",/Instagram\s([0-9\.]+)/],["ios-webview",/AppleWebKit\/([0-9\.]+).*Mobile/],["ios-webview",/AppleWebKit\/([0-9\.]+).*Gecko\)$/],["searchbot",/alexa|bot|crawl(er|ing)|facebookexternalhit|feedburner|google web preview|nagios|postrank|pingdom|slurp|spider|yahoo!|yandex/]],u=[["iOS",/iP(hone|od|ad)/],["Android OS",/Android/],["BlackBerry 
OS",/BlackBerry|BB10/],["Windows Mobile",/IEMobile/],["Amazon OS",/Kindle/],["Windows 3.11",/Win16/],["Windows 95",/(Windows 95)|(Win95)|(Windows_95)/],["Windows 98",/(Windows 98)|(Win98)/],["Windows 2000",/(Windows NT 5.0)|(Windows 2000)/],["Windows XP",/(Windows NT 5.1)|(Windows XP)/],["Windows Server 2003",/(Windows NT 5.2)/],["Windows Vista",/(Windows NT 6.0)/],["Windows 7",/(Windows NT 6.1)/],["Windows 8",/(Windows NT 6.2)/],["Windows 8.1",/(Windows NT 6.3)/],["Windows 10",/(Windows NT 10.0)/],["Windows ME",/Windows ME/],["Open BSD",/OpenBSD/],["Sun OS",/SunOS/],["Chrome OS",/CrOS/],["Linux",/(Linux)|(X11)/],["Mac OS",/(Mac_PowerPC)|(Macintosh)/],["QNX",/QNX/],["BeOS",/BeOS/],["OS/2",/OS\/2/]];function d(e){return e?h(e):"undefined"==typeof document&&"undefined"!=typeof navigator&&"ReactNative"===navigator.product?new a:"undefined"!=typeof navigator?h(navigator.userAgent):"undefined"!=typeof process&&process.version?new r(process.version.slice(1)):null}function h(e){var t=function(e){return""!==e&&l.reduce((function(t,n){var i=n[0];if(t)return t;var r=n[1].exec(e);return!!r&&[i,r]}),!1)}(e);if(!t)return null;var n=t[0],r=t[1];if("searchbot"===n)return new o;var a=r[1]&&r[1].split(/[._]/).slice(0,3);a?a.length<3&&(a=function(){for(var e=0,t=0,n=arguments.length;tn.pipe(o((n,s)=>Object(r.a)(e(n,s)).pipe(Object(i.a)((e,i)=>t(n,e,s,i))))):t=>t.lift(new a(e))}class a{constructor(e){this.project=e}call(e,t){return t.subscribe(new c(e,this.project))}}class c extends s.b{constructor(e,t){super(e),this.project=t,this.index=0}_next(e){let t;const n=this.index++;try{t=this.project(e,n)}catch(i){return void this.destination.error(i)}this._innerSub(t)}_innerSub(e){const t=this.innerSubscription;t&&t.unsubscribe();const n=new s.a(this),i=this.destination;i.add(n),this.innerSubscription=Object(s.c)(e,n),this.innerSubscription!==n&&i.add(this.innerSubscription)}_complete(){const{innerSubscription:e}=this;e&&!e.closed||super._complete(),this.unsubscribe()}_unsubscribe(){this.innerSubscription=void 0}notifyComplete(){this.innerSubscription=void 0,this.isStopped&&super._complete()}notifyNext(e){this.destination.next(e)}}},efK2:function(e,t,n){"use strict";n.d(t,"a",(function(){return o}));var i=n("LvDl"),r=n.n(i),s=n("8Y7J");let o=(()=>{class e{transform(e,t,n){return r.a.isString(e)?(n=r.a.defaultTo(n,""),r.a.truncate(e,{length:t,omission:n})):e}}return e.\u0275fac=function(t){return new(t||e)},e.\u0275pipe=s.Lb({name:"truncate",type:e,pure:!0}),e})()},"ej+x":function(e,t,n){"use strict";n.d(t,"a",(function(){return o}));var i=n("lOp/"),r=n("8Y7J"),s=n("IheW");let o=(()=>{class e{constructor(e,t){this.http=e,this.timerService=t,this.API_URL="api/feature_toggles",this.REFRESH_INTERVAL=3e4,this.featureToggleMap$=this.timerService.get(()=>this.http.get(this.API_URL),this.REFRESH_INTERVAL)}get(){return this.featureToggleMap$}}return e.\u0275fac=function(t){return new(t||e)(r.dc(s.b),r.dc(i.a))},e.\u0275prov=r.Ib({token:e,factory:e.\u0275fac,providedIn:"root"}),e})()},ewvW:function(e,t,n){var i=n("HYAF");e.exports=function(e){return Object(i(e))}},"f/UV":function(e,t,n){"use strict";n.d(t,"a",(function(){return r}));var i=n("8Y7J");let r=(()=>{class e{}return e.\u0275fac=function(t){return new(t||e)},e.\u0275dir=i.Hb({type:e,selectors:[["","cdFormScope",""]],inputs:{cdFormScope:"cdFormScope"}}),e})()},f5p1:function(e,t,n){var i=n("2oRo"),r=n("iSVu"),s=i.WeakMap;e.exports="function"==typeof s&&/native code/.test(r(s))},f69J:function(e,t,n){"use strict";n.d(t,"a",(function(){return s}));var 
i=n("8Y7J"),r=n("s7LF");let s=(()=>{class e{constructor(e){this.parent=e}get validClass(){return!!this.control&&this.control.valid&&(this.control.touched||this.control.dirty)}get invalidClass(){return!!this.control&&this.control.invalid&&this.control.touched&&this.control.dirty}get path(){return[...this.parent.path,this.formControlName]}get control(){return this.formDirective&&this.formDirective.getControl(this)}get formDirective(){return this.parent?this.parent.formDirective:null}}return e.\u0275fac=function(t){return new(t||e)(i.Mb(r.c,13))},e.\u0275dir=i.Hb({type:e,selectors:[["",8,"form-control"],["",8,"form-check-input"],["",8,"custom-control-input"]],hostVars:4,hostBindings:function(e,t){2&e&&i.Eb("is-valid",t.validClass)("is-invalid",t.invalidClass)},inputs:{formControlName:"formControlName",formControl:"formControl"}}),e})()},fHMY:function(e,t,n){var i,r=n("glrk"),s=n("N+g0"),o=n("eDl+"),a=n("0BK2"),c=n("G+Rx"),l=n("zBJ4"),u=n("93I0")("IE_PROTO"),d=function(){},h=function(e){return"

Debian/Ubuntu
-------------

 - gcc
-- python-dev
-- python-pip
-- python-virtualenv
-- python-sphinx
+- python3-dev
+- python3-pip
+- python3-sphinx
+- python3-venv
 - libxml2-dev
 - libxslt1-dev
 - doxygen
@@ -289,7 +289,6 @@ the following packages are required:
 - gcc
 - python-devel
 - python-pip
-- python-virtualenv
 - python-docutils
 - python-jinja2
 - python-pygments
@@ -308,7 +307,6 @@ the following packages are required:
 - gcc
 - python-devel
 - python-pip
-- python-virtualenv
 - python-docutils
 - python-jinja2
 - python-pygments
@@ -329,14 +327,14 @@ distributions, execute the following:
 
 .. prompt:: bash $
 
-   sudo apt-get install gcc python-dev python-pip python-virtualenv libxml2-dev libxslt-dev doxygen graphviz ant ditaa
+   sudo apt-get install gcc python-dev python-pip libxml2-dev libxslt-dev doxygen graphviz ant ditaa
    sudo apt-get install python-sphinx
 
 For Fedora distributions, execute the following:
 
 .. prompt:: bash $
 
-   sudo yum install gcc python-devel python-pip python-virtualenv libxml2-devel libxslt-devel doxygen graphviz ant
+   sudo yum install gcc python-devel python-pip libxml2-devel libxslt-devel doxygen graphviz ant
    sudo pip install html2text
    sudo yum install python-jinja2 python-pygments python-docutils python-sphinx
    sudo yum install jericho-html ditaa
@@ -354,7 +352,7 @@ For CentOS/RHEL distributions, execute the following:
 
 .. prompt:: bash $
 
-   sudo yum install gcc python-devel python-pip python-virtualenv libxml2-devel libxslt-devel doxygen graphviz ant
+   sudo yum install gcc python-devel python-pip libxml2-devel libxslt-devel doxygen graphviz ant
    sudo pip install html2text
 
 For CentOS/RHEL distributions, the remaining python packages are not available
diff --git a/ceph/doc_deps.deb.txt b/ceph/doc_deps.deb.txt
index 4bfff0f20..e2eb80a2f 100644
--- a/ceph/doc_deps.deb.txt
+++ b/ceph/doc_deps.deb.txt
@@ -2,8 +2,7 @@ git
 gcc
 python3-dev
 python3-pip
-python3-virtualenv
-virtualenv
+python3-venv
 doxygen
 ditaa
 libxml2-dev
diff --git a/ceph/examples/boto3/README.md b/ceph/examples/boto3/README.md
index be6799da9..52f43746d 100644
--- a/ceph/examples/boto3/README.md
+++ b/ceph/examples/boto3/README.md
@@ -14,11 +14,6 @@ The standard [AWS CLI](https://docs.aws.amazon.com/cli/latest/) may also be used
 ```
 aws --endpoint-url http://localhost:8000 s3api list-objects --bucket=mybucket --allow-unordered
 ```
-- Use the following command to set SNS signature to s3v2:
-```
-aws configure set default.sns.signature_version s3
-```
-
 - Topic creation with endpoint:
 ```
 aws --endpoint-url http://localhost:8000 sns create-topic --name=mytopic --attributes='{"push-endpoint": "amqp://localhost:5672", "amqp-exchange": "ex1", "amqp-ack-level": "broker"}'
diff --git a/ceph/install-deps.sh b/ceph/install-deps.sh
index 73242df8f..3ba5e47ff 100755
--- a/ceph/install-deps.sh
+++ b/ceph/install-deps.sh
@@ -256,7 +256,6 @@ if [ x$(uname)x = xFreeBSDx ]; then
         devel/libtool \
         devel/google-perftools \
         lang/cython \
-        devel/py-virtualenv \
         databases/leveldb \
         net/openldap24-client \
         archivers/snappy \
@@ -406,7 +405,7 @@ function activate_virtualenv() {
     local env_dir=$top_srcdir/install-deps-python3
 
     if ! test -d $env_dir ; then
-        virtualenv --python=python3 ${env_dir}
+        python3 -m venv ${env_dir}
         . $env_dir/bin/activate
         if ! populate_wheelhouse install ; then
             rm -rf $env_dir
diff --git a/ceph/make-dist b/ceph/make-dist
index ff6a4a27e..75528c1b8 100755
--- a/ceph/make-dist
+++ b/ceph/make-dist
@@ -109,6 +109,23 @@ download_liburing() {
     rm -rf src/liburing
 }
 
+download_pmdk() {
+    pmdk_version=$1
+    shift
+    pmdk_sha256=$1
+    shift
+    pmdk_fname=pmdk-${pmdk_version}.tar.gz
+    download_from $pmdk_fname $pmdk_sha256 $*
+    tar xzf $pmdk_fname -C src \
+        --exclude="pmdk-${pmdk_version}/doc" \
+        --exclude="pmdk-${pmdk_version}/src/test" \
+        --exclude="pmdk-${pmdk_version}/src/examples" \
+        --exclude="pmdk-${pmdk_version}/src/benchmarks"
+    mv src/pmdk-${pmdk_version} src/pmdk
+    tar cf ${outfile}.pmdk.tar ${outfile}/src/pmdk
+    rm -rf src/pmdk
+}
+
 build_dashboard_frontend() {
     CURR_DIR=`pwd`
     TEMP_DIR=`mktemp -d`
@@ -169,11 +186,15 @@ download_boost $boost_version 4eb3b8d442b426dc35346235c8733b5ae35ba431690e38c6a8
 download_liburing 0.7 8e2842cfe947f3a443af301bdd6d034455536c38a455c7a700d0c1ad165a7543 \
                   https://github.com/axboe/liburing/archive \
                   https://git.kernel.dk/cgit/liburing/snapshot
+pmdk_version=1.10
+download_pmdk $pmdk_version 08dafcf94db5ac13fac9139c92225d9aa5f3724ea74beee4e6ca19a01a2eb20c \
+              https://github.com/pmem/pmdk/releases/download/$pmdk_version
 build_dashboard_frontend
 generate_rook_ceph_client
 for tarball in $outfile.version \
     $outfile.boost \
     $outfile.liburing \
+    $outfile.pmdk \
     dashboard_frontend \
     rook_ceph_client \
     $outfile; do
diff --git a/ceph/monitoring/grafana/dashboards/ceph-cluster.json b/ceph/monitoring/grafana/dashboards/ceph-cluster.json
index e4e367efd..1b91bfe41 100644
--- a/ceph/monitoring/grafana/dashboards/ceph-cluster.json
+++ b/ceph/monitoring/grafana/dashboards/ceph-cluster.json
@@ -678,7 +678,7 @@
       "type": "vonage-status-panel"
     },
     {
-      "colorMode": "Disabled",
+      "colorMode": "Panel",
       "colors": {
         "crit": "rgba(245, 54, 54, 0.9)",
         "disable": "rgba(128, 128, 128, 0.9)",
@@ -706,21 +706,36 @@
       "targets": [
         {
           "aggregation": "Last",
-          "alias": "Clients",
+          "alias": "Active",
           "decimals": 2,
           "displayAliasType": "Always",
           "displayType": "Regular",
           "displayValueWithAlias": "When Alias Displayed",
-          "expr": "ceph_mds_server_handle_client_session",
+          "expr": "count(ceph_mgr_status == 1) or vector(0)",
           "format": "time_series",
           "intervalFactor": 1,
-          "legendFormat": "Clients",
+          "legendFormat": "Active",
           "refId": "A",
           "units": "none",
           "valueHandler": "Number Threshold"
+        },
+        {
+          "aggregation": "Last",
+          "alias": "Standby",
+          "decimals": 2,
+          "displayAliasType": "Always",
+          "displayType": "Regular",
+          "displayValueWithAlias": "When Alias Displayed",
+          "expr": "count(ceph_mgr_status == 0) or vector(0)",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "Standby",
+          "refId": "B",
+          "units": "none",
+          "valueHandler": "Number Threshold"
         }
       ],
-      "title": "Client connections",
+      "title": "MGRs",
       "type": "vonage-status-panel"
     },
     {
diff --git a/ceph/monitoring/grafana/dashboards/cephfs-overview.json b/ceph/monitoring/grafana/dashboards/cephfs-overview.json
index 57922f551..91a37f080 100644
--- a/ceph/monitoring/grafana/dashboards/cephfs-overview.json
+++ b/ceph/monitoring/grafana/dashboards/cephfs-overview.json
@@ -1,309 +1,319 @@ { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.3.2" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ + "__inputs": [ ], + "__requires": [ { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide":
true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1557392920097, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 10, - "panels": [], - "title": "MDS Performance", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 1 - }, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.2" }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*Reads/", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(ceph_objecter_op_r{ceph_daemon=~\"($mds_servers).*\"})", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Read Ops", - "refId": "A" - }, - { - "expr": "sum(ceph_objecter_op_w{ceph_daemon=~\"($mds_servers).*\"})", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Write Ops", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "MDS Workload - $mds_servers", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": "Reads(-) / Writes (+)", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 1 - }, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "ceph_mds_server_handle_client_request{ceph_daemon=~\"($mds_servers).*\"}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{ceph_daemon}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Client Request Load - $mds_servers", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": 
"dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "MDS Performance", + "titleSize": "h6", + "type": "row" }, - "yaxes": [ - { - "format": "none", - "label": "Client Requests", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "15s", - "schemaVersion": 16, - "style": "dark", - "tags": [], - "templating": { - "list": [ { - "current": { - "text": "default", - "value": "default" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(ceph_objecter_op_r{ceph_daemon=~\"($mds_servers).*\"}[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read Ops", + "refId": "A" + }, + { + "expr": "sum(rate(ceph_objecter_op_w{ceph_daemon=~\"($mds_servers).*\"}[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write Ops", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "MDS Workload - $mds_servers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "none", + "label": "Reads(-) / Writes (+)", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, { - "allValue": null, - "current": {}, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": "MDS Server", - "multi": false, - "name": "mds_servers", - "options": [], - "query": "label_values(ceph_mds_inodes, ceph_daemon)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": 
false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_mds_server_handle_client_request{ceph_daemon=~\"($mds_servers).*\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ceph_daemon}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Client Request Load - $mds_servers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "none", + "label": "Client Requests", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "15s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "MDS Performance", - "uid": "tbO9LAiZz", - "version": 2 + ], + "refresh": "15s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "MDS Server", + "multi": false, + "name": "mds_servers", + "options": [ ], + "query": "label_values(ceph_mds_inodes, ceph_daemon)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "MDS Performance", + "uid": "tbO9LAiZz", + "version": 0 } diff --git a/ceph/monitoring/grafana/dashboards/host-details.json b/ceph/monitoring/grafana/dashboards/host-details.json index 71ac36f37..1d071d4ec 100644 --- a/ceph/monitoring/grafana/dashboards/host-details.json +++ b/ceph/monitoring/grafana/dashboards/host-details.json @@ -1,1215 +1,1193 @@ { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.3.2" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "5.0.0" - }, - { - "type": "panel", - "id": "singlestat", - "name": "Singlestat", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ + "__inputs": [ ], + "__requires": [ { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - 
"editable": false, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1615564911000, - "links": [], - "panels": [ - { - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 16, - "title": "$ceph_hosts System Overview", - "type": "row" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "$datasource", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 3, - "x": 0, - "y": 1 - }, - "height": "160", - "id": 1, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": "", - "minSpan": 4, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "count(sum by (ceph_daemon) (ceph_osd_metadata{hostname='$ceph_hosts'}))", - "format": "time_series", - "intervalFactor": 2, - "refId": "A", - "step": 40, - "textEditor": true - } - ], - "thresholds": "", - "title": "OSDs", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "aliasColors": { - "interrupt": "#447EBC", - "steal": "#6D1F62", - "system": "#890F02", - "user": "#3F6833", - "wait": "#C15C17" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Shows the CPU breakdown. 
When multiple servers are selected, only the first host's cpu data is shown", - "fill": 1, - "gridPos": { - "h": 10, - "w": 6, - "x": 3, - "y": 1 - }, - "id": 9, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "minSpan": 12, - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (mode) (\n irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m])\n) / scalar(\n sum(irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]))\n) * 100", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{mode}}", - "refId": "A", - "step": 10, - "textEditor": true - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "CPU Utilization", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percent", - "label": "% Utilization", - "logBase": 1, - "max": "100", - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": { - "Available": "#508642", - "Free": "#508642", - "Total": "#bf1b00", - "Used": "#bf1b00", - "total": "#bf1b00", - "used": "#0a50a1" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 10, - "w": 6, - "x": 9, - "y": 1 - }, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "total", - "color": "#bf1b00", - "fill": 0, - "linewidth": 2, - "stack": false - } - ], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "(node_memory_MemTotal{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"})- (\n (node_memory_MemFree{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) + \n (node_memory_Cached{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) + \n (node_memory_Buffers{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) +\n (node_memory_Slab{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"})\n )\n \n", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "used", - "refId": "D" - }, - { - "expr": 
"node_memory_MemFree{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} ", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "Free", - "refId": "A" - }, - { - "expr": "(node_memory_Cached{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) + \n(node_memory_Buffers{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) +\n(node_memory_Slab{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}) \n", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "buffers/cache", - "refId": "C" - }, - { - "expr": "node_memory_MemTotal{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"} ", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "total", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "RAM Usage", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.2" }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": "RAM used", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Show the network load (rx,tx) across all interfaces (excluding loopback 'lo')", - "fill": 0, - "gridPos": { - "h": 10, - "w": 6, - "x": 15, - "y": 1 - }, - "id": 10, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideZero": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "minSpan": 12, - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*tx/", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (device) (\n irate(node_network_receive_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or \n irate(node_network_receive_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{device}}.rx", - "refId": "A", - "step": 10, - "textEditor": true - }, - { - "expr": "sum by (device) (\n irate(node_network_transmit_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{device}}.tx", - "refId": "B", - "step": 10 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Network Load", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, 
- "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "decbytes", - "label": "Send (-) / Receive (+)", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "gridPos": { - "h": 5, - "w": 3, - "x": 21, - "y": 1 - }, - "hideTimeOverride": true, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*tx/", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "irate(node_network_receive_drop{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_drop_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "legendFormat": "{{device}}.rx", - "refId": "A" - }, - { - "expr": "irate(node_network_transmit_drop{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_drop_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{device}}.tx", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Network drop rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "pps", - "label": "Send (-) / Receive (+)", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "$datasource", - "decimals": 0, - "description": "Each OSD consists of a Journal/WAL partition and a data partition. 
The RAW Capacity shown is the sum of the data partitions across all OSDs on the selected OSD hosts.", - "format": "bytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 3, - "x": 0, - "y": 6 - }, - "height": "160", - "id": 2, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": "", - "minSpan": 4, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(ceph_osd_stat_bytes and on (ceph_daemon) ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"})", - "format": "time_series", - "intervalFactor": 2, - "refId": "A", - "step": 40, - "textEditor": true - } - ], - "thresholds": "", - "title": "Raw Capacity", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "gridPos": { - "h": 5, - "w": 3, - "x": 21, - "y": 6 - }, - "hideTimeOverride": true, - "id": 19, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*tx/", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "irate(node_network_receive_errs{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_errs_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "legendFormat": "{{device}}.rx", - "refId": "A" - }, - { - "expr": "irate(node_network_transmit_errs{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_errs_total{instance=~\"[[ceph_hosts]]([\\\\.:].*)?\"}[1m])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{device}}.tx", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Network error rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "pps", - "label": "Send (-) / Receive (+)", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 11 - }, - "id": 12, - "panels": [], - "repeat": null, - "title": "OSD Disk Performance Statistics", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - 
"dashes": false, - "datasource": "$datasource", - "description": "For any OSD devices on the host, this chart shows the iops per physical device. Each device is shown by it's name and corresponding OSD id value", - "fill": 1, - "gridPos": { - "h": 9, - "w": 11, - "x": 0, - "y": 12 - }, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "minSpan": 12, - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*reads/", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(\n (\n irate(node_disk_writes_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or\n irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{device}}({{ceph_daemon}}) writes", - "refId": "A", - "step": 10, - "textEditor": true - }, - { - "expr": "label_replace(\n (irate(node_disk_reads_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{device}}({{ceph_daemon}}) reads", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "$ceph_hosts Disk IOPS", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" }, - "yaxes": [ - { - "format": "ops", - "label": "Read (-) / Write (+)", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null + { + "id": "singlestat", + "name": "Singlestat", + "type": "panel", + "version": "5.0.0" } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "For OSD hosts, this chart shows the disk bandwidth (read bytes/sec + write bytes/sec) of the physical OSD device. 
Each device is shown by device name, and corresponding OSD id", - "fill": 1, - "gridPos": { - "h": 9, - "w": 11, - "x": 12, - "y": 12 - }, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "minSpan": 12, - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*read/", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace((irate(node_disk_bytes_written{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{device}}({{ceph_daemon}}) write", - "refId": "B" - }, - { - "expr": "label_replace((irate(node_disk_bytes_read{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{device}}({{ceph_daemon}}) read", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "$ceph_hosts Throughput by Disk", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "$ceph_hosts System Overview", + "titleSize": "h6", + "type": "row" }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 3, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + 
"from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(sum by (ceph_daemon) (ceph_osd_metadata{hostname='$ceph_hosts'}))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "OSDs", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "yaxes": [ - { - "format": "Bps", - "label": "Read (-) / Write (+)", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "For OSD hosts, this chart shows the latency at the physical drive. Each drive is shown by device name, with it's corresponding OSD id", - "fill": 1, - "gridPos": { - "h": 9, - "w": 11, - "x": 0, - "y": 21 + { + "aliasColors": { + "interrupt": "#447EBC", + "steal": "#6D1F62", + "system": "#890F02", + "user": "#3F6833", + "wait": "#C15C17" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Shows the CPU breakdown. When multiple servers are selected, only the first host's cpu data is shown", + "fill": 1, + "gridPos": { + "h": 10, + "w": 6, + "x": 3, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (mode) (\n irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m])\n) / scalar(\n sum(irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]))\n) * 100", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{mode}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "percent", + "label": "% Utilization", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "id": 7, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + { + "aliasColors": { + "Available": "#508642", + "Free": "#508642", + "Total": "#bf1b00", + "Used": "#bf1b00", + "total": 
"#bf1b00", + "used": "#0a50a1" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 10, + "w": 6, + "x": 9, + "y": 1 + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "total", + "color": "#bf1b00", + "fill": 0, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node_memory_MemFree{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"} ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Free", + "refId": "A" + }, + { + "expr": "node_memory_MemTotal{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"} ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "total", + "refId": "B" + }, + { + "expr": "(node_memory_Cached{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) + \n(node_memory_Buffers{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) +\n(node_memory_Slab{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) \n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "buffers/cache", + "refId": "C" + }, + { + "expr": "(node_memory_MemTotal{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"})- (\n (node_memory_MemFree{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) + \n (node_memory_Cached{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) + \n (node_memory_Buffers{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) +\n (node_memory_Slab{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"})\n )\n \n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "used", + "refId": "D" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "RAM Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": "RAM used", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "lines": true, - "linewidth": 1, - "links": [], - "minSpan": 12, - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ 
instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")) * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{device}}({{ceph_daemon}})", - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "$ceph_hosts Disk Latency", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Show the network load (rx,tx) across all interfaces (excluding loopback 'lo')", + "fill": 1, + "gridPos": { + "h": 10, + "w": 6, + "x": 15, + "y": 1 + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*tx/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (device) (\n irate(node_network_receive_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or \n irate(node_network_receive_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.rx", + "refId": "A" + }, + { + "expr": "sum by (device) (\n irate(node_network_transmit_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.tx", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Network Load", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "decbytes", + "label": "Send (-) / Receive (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 5, + "w": 3, + "x": 21, + "y": 1 + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 
null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*tx/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(node_network_receive_drop{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_drop_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.rx", + "refId": "A" + }, + { + "expr": "irate(node_network_transmit_drop{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_drop_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.tx", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Network drop rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "pps", + "label": "Send (-) / Receive (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "yaxes": [ - { - "format": "s", - "label": "", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Show disk utilization % (util) of any OSD devices on the host by the physical device name and associated OSD id.", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 11, - "x": 12, - "y": 21 + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Each OSD consists of a Journal/WAL partition and a data partition. 
The RAW Capacity shown is the sum of the data partitions across all OSDs on the selected OSD hosts.", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 0, + "y": 6 + }, + "id": 8, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_osd_stat_bytes and on (ceph_daemon) ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"})", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Raw Capacity", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "id": 5, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 5, + "w": 3, + "x": 21, + "y": 6 + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*tx/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(node_network_receive_errs{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_errs_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.rx", + "refId": "A" + }, + { + "expr": "irate(node_network_transmit_errs{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_errs_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}.tx", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Network error rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "pps", + "label": "Send (-) / Receive (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "lines": true, - "linewidth": 1, - "links": [], - "maxPerRow": 2, - "nullPointMode": "connected", - "options": { - "dataLinks": [] + { + "collapse": false, + "collapsed": false, 
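
[Aside, not part of the upstream patch] The disk panels below join node_exporter rates against ceph_disk_occupation so each physical device series picks up its ceph_daemon label; both sides are first normalized with label_replace, trimming the instance label to the bare hostname via "([^:.]*).*" and stripping the "/dev/" prefix from the device label via "/dev/(.*)". A minimal pure-Python sketch of that normalization, with hypothetical sample label sets:

    import re

    def normalize_instance(instance: str) -> str:
        # PromQL: label_replace(..., "instance", "$1", "instance", "([^:.]*).*")
        # keeps everything before the first '.' or ':' so that
        # "ceph-node-01.example.com:9100" and "ceph-node-01" compare equal.
        return re.match(r"([^:.]*)", instance).group(1)

    def normalize_device(device: str) -> str:
        # PromQL: label_replace(..., "device", "$1", "device", "/dev/(.*)")
        # drops the /dev/ prefix that ceph_disk_occupation carries.
        return re.sub(r"^/dev/", "", device)

    # Hypothetical label sets: one node_exporter disk series, one Ceph series.
    node_sample = {"instance": "ceph-node-01.example.com:9100", "device": "sdb"}
    ceph_sample = {"instance": "ceph-node-01", "device": "/dev/sdb",
                   "ceph_daemon": "osd.3"}

    assert (normalize_instance(node_sample["instance"])
            == normalize_instance(ceph_sample["instance"]))
    assert node_sample["device"] == normalize_device(ceph_sample["device"])
    # Once instance and device line up, the join attaches ceph_daemon to the
    # disk series, which is what the "{{device}}({{ceph_daemon}})" legends use.
    print(node_sample["device"], "->", ceph_sample["ceph_daemon"])
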
+ "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 10, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "OSD Disk Performance Statistics", + "titleSize": "h6", + "type": "row" }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{device}}({{ceph_daemon}})", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "$ceph_hosts Disk utilization", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "For any OSD devices on the host, this chart shows the iops per physical device. Each device is shown by it's name and corresponding OSD id value", + "fill": 1, + "gridPos": { + "h": 9, + "w": 11, + "x": 0, + "y": 12 + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(\n (\n irate(node_disk_writes_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or\n irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) writes", + "refId": "A" + }, + { + "expr": "label_replace(\n (irate(node_disk_reads_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) reads", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$ceph_hosts 
Disk IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ops", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "For OSD hosts, this chart shows the disk bandwidth (read bytes/sec + write bytes/sec) of the physical OSD device. Each device is shown by device name, and corresponding OSD id", + "fill": 1, + "gridPos": { + "h": 9, + "w": 11, + "x": 12, + "y": 12 + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*read/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace((irate(node_disk_bytes_written{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) write", + "refId": "A" + }, + { + "expr": "label_replace((irate(node_disk_bytes_read{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}}) read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$ceph_hosts Throughput by Disk", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "yaxes": [ - { - "format": "percent", - "label": "%Util", - "logBase": 1, - "max": "100", - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": 
"10s", - "schemaVersion": 16, - "style": "dark", - "tags": [ - "overview" - ], - "templating": { - "list": [ { - "current": { - "text": "default", - "value": "default" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "For OSD hosts, this chart shows the latency at the physical drive. Each drive is shown by device name, with it's corresponding OSD id", + "fill": 1, + "gridPos": { + "h": 9, + "w": 11, + "x": 0, + "y": 21 + }, + "id": 13, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")) * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}})", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$ceph_hosts Disk Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, { - "allValue": null, - "current": {}, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "Hostname", - "multi": false, - "name": "ceph_hosts", - "options": [], - "query": "label_values(node_scrape_collector_success, instance) ", - "refresh": 1, - "regex": "([^.:]*).*", - "skipUrlSync": false, - "sort": 3, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Show disk utilization % (util) of any OSD devices on the host by the physical device name and associated OSD id.", + "fill": 1, + "gridPos": { + "h": 9, + "w": 11, + "x": 12, + "y": 21 + }, + "id": 14, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + 
"values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}}({{ceph_daemon}})", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$ceph_hosts Disk utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "percent", + "label": "%Util", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Host Details", - "uid": "rtOg0AiWz", - "version": 4 + ], + "refresh": "10s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "overview" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Hostname", + "multi": false, + "name": "ceph_hosts", + "options": [ ], + "query": "label_values(node_scrape_collector_success, instance) ", + "refresh": 1, + "regex": "([^.:]*).*", + "sort": 3, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Host Details", + "uid": "rtOg0AiWz", + "version": 0 } diff --git a/ceph/monitoring/grafana/dashboards/hosts-overview.json b/ceph/monitoring/grafana/dashboards/hosts-overview.json index b179d5717..115c18249 100644 --- a/ceph/monitoring/grafana/dashboards/hosts-overview.json +++ b/ceph/monitoring/grafana/dashboards/hosts-overview.json @@ -1,852 +1,844 @@ { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.3.2" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "5.0.0" - }, - { - "type": 
"panel", - "id": "singlestat", - "name": "Singlestat", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ + "__inputs": [ ], + "__requires": [ { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1557393917915, - "links": [], - "panels": [ - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 4, - "x": 0, - "y": 0 - }, - "id": 5, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "count(sum by (hostname) (ceph_osd_metadata))", - "format": "time_series", - "instant": true, - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": "", - "title": "OSD Hosts", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "description": "Average CPU busy across all hosts (OSD, RGW, MON etc) within the cluster", - "decimals": 2, - "format": "percentunit", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 4, - "x": 4, - "y": 0 - }, - "id": 6, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "avg(\n 1 - (\n avg by(instance) \n (irate(node_cpu_seconds_total{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]) or\n irate(node_cpu{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]))\n )\n )", - "format": "time_series", - "instant": true, - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": "", - "title": "AVG CPU Busy", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": 
false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "description": "Average Memory Usage across all hosts in the cluster (excludes buffer/cache usage)", - "decimals": 2, - "format": "percentunit", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 4, - "x": 8, - "y": 0 - }, - "id": 9, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "avg (((node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})- (\n (node_memory_MemFree{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemFree_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Cached{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Cached_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Buffers{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Buffers_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (node_memory_Slab{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Slab_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})\n )) /\n (node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$rgw_hosts|$mon_hosts|$mds_hosts).*\"} ))", - "format": "time_series", - "instant": true, - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": "", - "title": "AVG RAM Utilization", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "description": "IOPS Load at the device as reported by the OS on all OSD hosts", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 4, - "x": 12, - "y": 0 - }, - "id": 2, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - 
"targets": [ - { - "expr": "sum ((irate(node_disk_reads_completed{instance=~\"($osd_hosts).*\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($osd_hosts).*\"}[5m]) ) + \n(irate(node_disk_writes_completed{instance=~\"($osd_hosts).*\"}[5m]) or irate(node_disk_writes_completed_total{instance=~\"($osd_hosts).*\"}[5m])))", - "format": "time_series", - "instant": true, - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": "", - "title": "Physical IOPS", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "description": "Average Disk utilization for all OSD data devices (i.e. excludes journal/WAL)", - "format": "percent", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 4, - "x": 16, - "y": 0 - }, - "id": 20, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr" : "avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) *\n on(instance, device, ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\")\n)", - "format": "time_series", - "instant": true, - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": "", - "title": "AVG Disk Utilization", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "decimals": 0, - "description": "Total send/receive network load across all hosts in the ceph cluster", - "format": "bytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 4, - "x": 20, - "y": 0 + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.2" }, - "id": 18, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": 
"rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum (\n irate(node_network_receive_bytes{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m]) or\n irate(node_network_receive_bytes_total{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m])\n ) +\nsum (\n irate(node_network_transmit_bytes{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m])\n )", - "format": "time_series", - "instant": true, - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": "", - "title": "Network Load", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Show the top 10 busiest hosts by cpu", - "fill": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 5 - }, - "id": 13, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "topk(10,100 * ( 1 - (\n avg by(instance) \n (irate(node_cpu_seconds_total{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]) or\n irate(node_cpu{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]))\n )\n )\n)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "CPU Busy - Top 10 Hosts", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" }, - "yaxes": [ - { - "decimals": 1, - "format": "percent", - "label": null, - "logBase": 1, - "max": "100", - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null + { + "id": "singlestat", + "name": "Singlestat", + "type": "panel", + "version": "5.0.0" } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Top 10 hosts by network load", - "fill": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 5 - }, - "id": 19, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + 
"cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 0, + "y": 0 + }, + "id": 2, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(sum by (hostname) (ceph_osd_metadata))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "OSD Hosts", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "topk(10, (sum by(instance) (\n (\n irate(node_network_receive_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m]) or\n irate(node_network_receive_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n ) +\n (\n irate(node_network_transmit_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n ))\n )\n)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Network Load - Top 10 Hosts", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Average CPU busy across all hosts (OSD, RGW, MON etc) within the cluster", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 4, + "y": 0 + }, + "id": 3, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": 
"", + "targets": [ + { + "expr": "avg(\n 1 - (\n avg by(instance) \n (irate(node_cpu_seconds_total{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]) or\n irate(node_cpu{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]))\n )\n )", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "AVG CPU Busy", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Average Memory Usage across all hosts in the cluster (excludes buffer/cache usage)", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 8, + "y": 0 + }, + "id": 4, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg (((node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})- (\n (node_memory_MemFree{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemFree_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Cached{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Cached_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Buffers{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Buffers_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (node_memory_Slab{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Slab_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})\n )) /\n (node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$rgw_hosts|$mon_hosts|$mds_hosts).*\"} ))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "AVG RAM Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "yaxes": [ - { - "decimals": 1, - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": 
"10s", - "schemaVersion": 16, - "style": "dark", - "tags": [], - "templating": { - "list": [ { - "current": { - "text": "default", - "value": "default" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "IOPS Load at the device as reported by the OS on all OSD hosts", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 12, + "y": 0 + }, + "id": 5, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum ((irate(node_disk_reads_completed{instance=~\"($osd_hosts).*\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($osd_hosts).*\"}[5m]) ) + \n(irate(node_disk_writes_completed{instance=~\"($osd_hosts).*\"}[5m]) or irate(node_disk_writes_completed_total{instance=~\"($osd_hosts).*\"}[5m])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Physical IOPS", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, { - "allValue": "", - "current": {}, - "datasource": "$datasource", - "hide": 2, - "includeAll": true, - "label": null, - "multi": false, - "name": "osd_hosts", - "options": [], - "query": "label_values(ceph_disk_occupation, exported_instance)", - "refresh": 1, - "regex": "([^.]*).*", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "ceph", - "type": "query", - "useTags": false + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Average Disk utilization for all OSD data devices (i.e. 
excludes journal/WAL)", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 16, + "y": 0 + }, + "id": 6, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) *\n on(instance, device, ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\")\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "AVG Disk Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, { - "allValue": null, - "current": {}, - "datasource": "$datasource", - "hide": 2, - "includeAll": true, - "label": null, - "multi": false, - "name": "mon_hosts", - "options": [], - "query": "label_values(ceph_mon_metadata, ceph_daemon)", - "refresh": 1, - "regex": "mon.(.*)", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Total send/receive network load across all hosts in the ceph cluster", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 20, + "y": 0 + }, + "id": 7, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (\n\t(\n\t\tirate(node_network_receive_bytes{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m]) or\n\t\tirate(node_network_receive_bytes_total{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m])\n\t) unless on (device, instance)\n\tlabel_replace((bonding_slaves > 0), \"device\", \"$1\", \"master\", \"(.+)\")\n) +\nsum 
(\n\t(\n\t\tirate(node_network_transmit_bytes{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m]) or\n\t\tirate(node_network_transmit_bytes_total{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m])\n\t) unless on (device, instance)\n\tlabel_replace((bonding_slaves > 0), \"device\", \"$1\", \"master\", \"(.+)\")\n\t)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Network Load", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, { - "allValue": null, - "current": {}, - "datasource": "$datasource", - "hide": 2, - "includeAll": true, - "label": null, - "multi": false, - "name": "mds_hosts", - "options": [], - "query": "label_values(ceph_mds_inodes, ceph_daemon)", - "refresh": 1, - "regex": "mds.(.*)", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Show the top 10 busiest hosts by cpu", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(10,100 * ( 1 - (\n avg by(instance) \n (irate(node_cpu_seconds_total{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]) or\n irate(node_cpu{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]))\n )\n )\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Busy - Top 10 Hosts", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, { - "allValue": null, - "current": {}, - "datasource": "$datasource", - "hide": 2, - "includeAll": true, - "label": null, - "multi": false, - "name": "rgw_hosts", - "options": [], - "query": "label_values(ceph_rgw_qlen, ceph_daemon)", - "refresh": 1, - "regex": "rgw.(.*)", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Top 10 hosts by network load", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + 
"sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(10, (sum by(instance) (\n(\n\tirate(node_network_receive_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m]) or\n\tirate(node_network_receive_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n) +\n(\n\tirate(node_network_transmit_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m]) or\n\tirate(node_network_transmit_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n) unless on (device, instance)\n\tlabel_replace((bonding_slaves > 0), \"device\", \"$1\", \"master\", \"(.+)\"))\n))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Network Load - Top 10 Hosts", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Host Overview", - "uid": "y0KGL0iZz", - "version": 3 + ], + "refresh": "10s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "osd_hosts", + "options": [ ], + "query": "label_values(ceph_disk_occupation, exported_instance)", + "refresh": 1, + "regex": "([^.]*).*", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "mon_hosts", + "options": [ ], + "query": "label_values(ceph_mon_metadata, ceph_daemon)", + "refresh": 1, + "regex": "mon.(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "mds_hosts", + "options": [ ], + "query": "label_values(ceph_mds_inodes, ceph_daemon)", + "refresh": 1, + "regex": "mds.(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + 
"type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "rgw_hosts", + "options": [ ], + "query": "label_values(ceph_rgw_qlen, ceph_daemon)", + "refresh": 1, + "regex": "rgw.(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Host Overview", + "uid": "y0KGL0iZz", + "version": 0 } diff --git a/ceph/monitoring/grafana/dashboards/jsonnet/grafana_dashboards.jsonnet b/ceph/monitoring/grafana/dashboards/jsonnet/grafana_dashboards.jsonnet index 11c89b212..b9f5bffaa 100644 --- a/ceph/monitoring/grafana/dashboards/jsonnet/grafana_dashboards.jsonnet +++ b/ceph/monitoring/grafana/dashboards/jsonnet/grafana_dashboards.jsonnet @@ -1,29 +1,225 @@ local g = import 'grafana.libsonnet'; -local dashboardSchema(title, uid, time_from, refresh, schemaVersion, tags,timezone, timepicker) = - g.dashboard.new(title=title, uid=uid, time_from=time_from, refresh=refresh, schemaVersion=schemaVersion, tags=tags, timezone=timezone, timepicker=timepicker); +local dashboardSchema(title, description, uid, time_from, refresh, schemaVersion, tags, timezone, timepicker) = + g.dashboard.new(title=title, description=description, uid=uid, time_from=time_from, refresh=refresh, schemaVersion=schemaVersion, tags=tags, timezone=timezone, timepicker=timepicker); -local graphPanelSchema(title, nullPointMode, stack, formatY1, formatY2, labelY1, labelY2, min, fill, datasource) = - g.graphPanel.new(title=title, nullPointMode=nullPointMode, stack=stack, formatY1=formatY1, formatY2=formatY2, labelY1=labelY1, labelY2=labelY2, min=min, fill=fill, datasource=datasource); +local graphPanelSchema(aliasColors, title, description, nullPointMode, stack, formatY1, formatY2, labelY1, labelY2, min, fill, datasource) = + g.graphPanel.new(aliasColors=aliasColors, title=title, description=description, nullPointMode=nullPointMode, stack=stack, formatY1=formatY1, formatY2=formatY2, labelY1=labelY1, labelY2=labelY2, min=min, fill=fill, datasource=datasource); local addTargetSchema(expr, intervalFactor, format, legendFormat) = g.prometheus.target(expr=expr, intervalFactor=intervalFactor, format=format, legendFormat=legendFormat); -local addTemplateSchema(name, datasource, query, refresh, hide, includeAll, sort) = - g.template.new(name=name, datasource=datasource, query=query, refresh=refresh, hide=hide, includeAll=includeAll, sort=sort); +local addTemplateSchema(name, datasource, query, refresh, includeAll, sort, label, regex) = + g.template.new(name=name, datasource=datasource, query=query, refresh=refresh, includeAll=includeAll, sort=sort, label=label, regex=regex); local addAnnotationSchema(builtIn, datasource, enable, hide, iconColor, name, type) = g.annotation.datasource(builtIn=builtIn, datasource=datasource, enable=enable, hide=hide, iconColor=iconColor, name=name, type=type); +local addRowSchema(collapse, showTitle, title) = + g.row.new(collapse=collapse, showTitle=showTitle, title=title); + +local addSingelStatSchema(datasource, format, title, description, valueName, colorValue, gaugeMaxValue, gaugeShow, sparklineShow, 
thresholds) = + g.singlestat.new(datasource=datasource, format=format, title=title, description=description, valueName=valueName, colorValue=colorValue, gaugeMaxValue=gaugeMaxValue, gaugeShow=gaugeShow, sparklineShow=sparklineShow, thresholds=thresholds); + +local addPieChartSchema(aliasColors, datasource, description, legendType, pieType, title, valueName) = + g.pieChartPanel.new(aliasColors=aliasColors, datasource=datasource, description=description, legendType=legendType, pieType=pieType, title=title, valueName=valueName); + +local addTableSchema(datasource, description, sort, styles, title, transform) = + g.tablePanel.new(datasource=datasource, description=description, sort=sort, styles=styles, title=title, transform=transform); + +local addStyle(alias, colorMode, colors, dateFormat, decimals, mappingType, pattern, thresholds, type, unit, valueMaps) = + {'alias': alias, 'colorMode': colorMode, 'colors':colors, 'dateFormat':dateFormat, 'decimals':decimals, 'mappingType':mappingType, 'pattern':pattern, 'thresholds':thresholds, 'type':type, 'unit':unit, 'valueMaps':valueMaps}; + +{ + "hosts-overview.json": + local HostsOverviewSingleStatPanel(format, title, description, valueName, expr, targetFormat, x, y, w, h) = + addSingelStatSchema('$datasource', format, title, description, valueName, false, 100, false, false, '') + .addTarget(addTargetSchema(expr, 1, targetFormat, '')) + {gridPos: {x: x, y: y, w: w, h: h}}; + + local HostsOverviewGraphPanel(title, description, formatY1, expr, legendFormat, x, y, w, h) = + graphPanelSchema({}, title, description, 'null', false, formatY1, 'short', null, null, 0, 1, '$datasource') + .addTargets( + [addTargetSchema(expr, 1, 'time_series', legendFormat)]) + {gridPos: {x: x, y: y, w: w, h: h}}; + + dashboardSchema( + 'Host Overview', '', 'y0KGL0iZz', 'now-1h', '10s', 16, [], '', {refresh_intervals:['5s','10s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.3.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addRequired( + type='panel', id='singlestat', name='Singlestat', version='5.0.0' + ) + .addAnnotation( + addAnnotationSchema( + 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard') + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + addTemplateSchema('osd_hosts', '$datasource', 'label_values(ceph_disk_occupation, exported_instance)', 1, true, 1, null, '([^.]*).*') + ) + .addTemplate( + addTemplateSchema('mon_hosts', '$datasource', 'label_values(ceph_mon_metadata, ceph_daemon)', 1, true, 1, null, 'mon.(.*)') + ) + .addTemplate( + addTemplateSchema('mds_hosts', '$datasource', 'label_values(ceph_mds_inodes, ceph_daemon)', 1, true, 1, null, 'mds.(.*)') + ) + .addTemplate( + addTemplateSchema('rgw_hosts', '$datasource', 'label_values(ceph_rgw_qlen, ceph_daemon)', 1, true, 1, null, 'rgw.(.*)') + ) + .addPanels([ + HostsOverviewSingleStatPanel( + 'none', 'OSD Hosts', '', 'current', 'count(sum by (hostname) (ceph_osd_metadata))', 'time_series', 0, 0, 4, 5), + HostsOverviewSingleStatPanel( + 'percentunit', 'AVG CPU Busy', 'Average CPU busy across all hosts (OSD, RGW, MON etc) within the cluster', 'current', 'avg(\n 1 - (\n avg by(instance) \n (irate(node_cpu_seconds_total{mode=\'idle\',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]) or\n 
irate(node_cpu{mode=\'idle\',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]))\n )\n )', 'time_series', 4, 0, 4, 5), + HostsOverviewSingleStatPanel( + 'percentunit', 'AVG RAM Utilization', 'Average Memory Usage across all hosts in the cluster (excludes buffer/cache usage)', 'current', 'avg (((node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})- (\n (node_memory_MemFree{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemFree_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Cached{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Cached_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Buffers{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Buffers_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (node_memory_Slab{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Slab_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})\n )) /\n (node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$rgw_hosts|$mon_hosts|$mds_hosts).*\"} ))', 'time_series', 8, 0, 4, 5), + HostsOverviewSingleStatPanel( + 'none', 'Physical IOPS', 'IOPS Load at the device as reported by the OS on all OSD hosts', 'current', 'sum ((irate(node_disk_reads_completed{instance=~\"($osd_hosts).*\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($osd_hosts).*\"}[5m]) ) + \n(irate(node_disk_writes_completed{instance=~\"($osd_hosts).*\"}[5m]) or irate(node_disk_writes_completed_total{instance=~\"($osd_hosts).*\"}[5m])))', 'time_series', 12, 0, 4, 5), + HostsOverviewSingleStatPanel( + 'percent', 'AVG Disk Utilization', 'Average Disk utilization for all OSD data devices (i.e. 
excludes journal/WAL)', 'current', 'avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) *\n on(instance, device, ceph_daemon) label_replace(label_replace(ceph_disk_occupation{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\")\n)', 'time_series', 16, 0, 4, 5), + HostsOverviewSingleStatPanel( + 'bytes', 'Network Load', 'Total send/receive network load across all hosts in the ceph cluster', 'current', ||| + sum ( + ( + irate(node_network_receive_bytes{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[1m]) or + irate(node_network_receive_bytes_total{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[1m]) + ) unless on (device, instance) + label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)") + ) + + sum ( + ( + irate(node_network_transmit_bytes{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[1m]) or + irate(node_network_transmit_bytes_total{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[1m]) + ) unless on (device, instance) + label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)") + ) + ||| + , 'time_series', 20, 0, 4, 5), + HostsOverviewGraphPanel( + 'CPU Busy - Top 10 Hosts', 'Show the top 10 busiest hosts by cpu', 'percent', 'topk(10,100 * ( 1 - (\n avg by(instance) \n (irate(node_cpu_seconds_total{mode=\'idle\',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]) or\n irate(node_cpu{mode=\'idle\',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]))\n )\n )\n)', '{{instance}}', 0, 5, 12, 9), + HostsOverviewGraphPanel( + 'Network Load - Top 10 Hosts', 'Top 10 hosts by network load', 'Bps', ||| + topk(10, (sum by(instance) ( + ( + irate(node_network_receive_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[1m]) or + irate(node_network_receive_bytes_total{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[1m]) + ) + + ( + irate(node_network_transmit_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[1m]) or + irate(node_network_transmit_bytes_total{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[1m]) + ) unless on (device, instance) + label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)")) + )) + ||| + , '{{instance}}', 12, 5, 12, 9), + ]) +} +{ + "host-details.json": + local HostDetailsSingleStatPanel(format, title, description, valueName, expr, targetFormat, x, y, w, h) = + addSingelStatSchema('$datasource', format, title, description, valueName, false, 100, false, false, '') + .addTarget(addTargetSchema(expr, 1, targetFormat, '')) + {gridPos: {x: x, y: y, w: w, h: h}}; + + local HostDetailsGraphPanel(alias, title, description, nullPointMode, formatY1, labelY1, expr, legendFormat, x, y, w, h) = + graphPanelSchema(alias, title, description, nullPointMode, false, formatY1, 'short', labelY1, null, null, 1, '$datasource') + .addTargets( + [addTargetSchema(expr, 1, 'time_series', legendFormat)]) + {gridPos: {x: x, y: y, w: w, h: h}}; + + dashboardSchema( + 'Host Details', '', 'rtOg0AiWz', 'now-1h', '10s', 16, ['overview'], '', {refresh_intervals:['5s','10s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', 
version='5.3.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addRequired( + type='panel', id='singlestat', name='Singlestat', version='5.0.0' + ) + .addAnnotation( + addAnnotationSchema( + 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard') + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + addTemplateSchema('ceph_hosts', '$datasource', 'label_values(node_scrape_collector_success, instance) ', 1, false, 3, 'Hostname', '([^.:]*).*') + ) + .addPanels([ + addRowSchema(false, true, '$ceph_hosts System Overview') + {gridPos: {x: 0, y: 0, w: 24, h: 1}}, + HostDetailsSingleStatPanel( + 'none', 'OSDs', '', 'current', 'count(sum by (ceph_daemon) (ceph_osd_metadata{hostname=\'$ceph_hosts\'}))', 'time_series', 0, 1, 3, 5 + ), + HostDetailsGraphPanel( + {"interrupt": "#447EBC","steal": "#6D1F62","system": "#890F02","user": "#3F6833","wait": "#C15C17"},'CPU Utilization', 'Shows the CPU breakdown. When multiple servers are selected, only the first host\'s cpu data is shown', 'null', 'percent', '% Utilization', 'sum by (mode) (\n irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m])\n) / scalar(\n sum(irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]))\n) * 100', '{{mode}}', 3, 1, 6, 10 + ), + HostDetailsGraphPanel( + {"Available": "#508642","Free": "#508642","Total": "#bf1b00","Used": "#bf1b00","total": "#bf1b00","used": "#0a50a1"},'RAM Usage', '', 'null', 'bytes', 'RAM used', 'node_memory_MemFree{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"} ', 'Free', 9, 1, 6, 10) + .addTargets( + [addTargetSchema('node_memory_MemTotal{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"} ', 1, 'time_series', 'total'), + addTargetSchema('(node_memory_Cached{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) + \n(node_memory_Buffers{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) +\n(node_memory_Slab{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) \n', 1, 'time_series', 'buffers/cache'), + addTargetSchema('(node_memory_MemTotal{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"})- (\n (node_memory_MemFree{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) + \n (node_memory_Cached{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) + \n (node_memory_Buffers{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) +\n (node_memory_Slab{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"})\n )\n \n', 1, 'time_series', 'used')]) + .addSeriesOverride({"alias": "total","color": "#bf1b00","fill": 0,"linewidth": 2,"stack": false} + ), + HostDetailsGraphPanel( + {},'Network Load', 'Show the network load (rx,tx) across all 
interfaces (excluding loopback \'lo\')', 'null', 'decbytes', 'Send (-) / Receive (+)', 'sum by (device) (\n irate(node_network_receive_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or \n irate(node_network_receive_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)', '{{device}}.rx', 15, 1, 6, 10) + .addTargets( + [addTargetSchema('sum by (device) (\n irate(node_network_transmit_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)', 1, 'time_series', '{{device}}.tx')]) + .addSeriesOverride({"alias": "/.*tx/","transform": "negative-Y"} + ), + HostDetailsGraphPanel( + {},'Network drop rate', '', 'null', 'pps', 'Send (-) / Receive (+)', 'irate(node_network_receive_drop{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_drop_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])', '{{device}}.rx', 21, 1, 3, 5) + .addTargets( + [addTargetSchema('irate(node_network_transmit_drop{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_drop_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])', 1, 'time_series', '{{device}}.tx')]) + .addSeriesOverride({"alias": "/.*tx/","transform": "negative-Y"} + ), + HostDetailsSingleStatPanel( + 'bytes', 'Raw Capacity', 'Each OSD consists of a Journal/WAL partition and a data partition. The RAW Capacity shown is the sum of the data partitions across all OSDs on the selected OSD hosts.', 'current', 'sum(ceph_osd_stat_bytes and on (ceph_daemon) ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"})', 'time_series', 0, 6, 3, 5 + ), + HostDetailsGraphPanel( + {},'Network error rate', '', 'null', 'pps', 'Send (-) / Receive (+)', 'irate(node_network_receive_errs{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_errs_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])', '{{device}}.rx', 21, 6, 3, 5) + .addTargets( + [addTargetSchema('irate(node_network_transmit_errs{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_errs_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])', 1, 'time_series', '{{device}}.tx')]) + .addSeriesOverride({"alias": "/.*tx/","transform": "negative-Y"} + ), + addRowSchema(false, true, 'OSD Disk Performance Statistics') + {gridPos: {x: 0, y: 11, w: 24, h: 1}}, + HostDetailsGraphPanel( + {},'$ceph_hosts Disk IOPS', 'For any OSD devices on the host, this chart shows the iops per physical device. 
Each device is shown by its name and corresponding OSD id value', 'connected', 'ops', 'Read (-) / Write (+)', 'label_replace(\n (\n irate(node_disk_writes_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or\n irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )', '{{device}}({{ceph_daemon}}) writes', 0, 12, 11, 9) + .addTargets( + [addTargetSchema('label_replace(\n (irate(node_disk_reads_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device, ceph_daemon) group_left\n label_replace(\n label_replace(\n ceph_disk_occupation,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )', 1, 'time_series', '{{device}}({{ceph_daemon}}) reads')]) + .addSeriesOverride({"alias": "/.*reads/","transform": "negative-Y"} + ), + HostDetailsGraphPanel( + {},'$ceph_hosts Throughput by Disk', 'For OSD hosts, this chart shows the disk bandwidth (read bytes/sec + write bytes/sec) of the physical OSD device. Each device is shown by device name, and corresponding OSD id', 'connected', 'Bps', 'Read (-) / Write (+)', 'label_replace((irate(node_disk_bytes_written{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")', '{{device}}({{ceph_daemon}}) write', 12, 12, 11, 9) + .addTargets( + [addTargetSchema('label_replace((irate(node_disk_bytes_read{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")', 1, 'time_series', '{{device}}({{ceph_daemon}}) read')]) + .addSeriesOverride({"alias": "/.*read/","transform": "negative-Y"} + ), + HostDetailsGraphPanel( + {},'$ceph_hosts Disk Latency', 'For OSD hosts, this chart shows the latency at the physical drive. 
Each drive is shown by device name, with its corresponding OSD id', 'null as zero', 's', '', 'max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")) * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")', '{{device}}({{ceph_daemon}})', 0, 21, 11, 9 + ), + HostDetailsGraphPanel( + {},'$ceph_hosts Disk utilization', 'Show disk utilization % (util) of any OSD devices on the host by the physical device name and associated OSD id.', 'connected', 'percent', '%Util', 'label_replace(((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device, ceph_daemon) group_left label_replace(label_replace(ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")', '{{device}}({{ceph_daemon}})', 12, 21, 11, 9 + ) + ]) +} { "radosgw-sync-overview.json": local RgwSyncOverviewPanel(title, formatY1, labelY1, rgwMetric, x, y, w, h) = - graphPanelSchema(title, 'null as zero', true, formatY1, 'short', labelY1, null, 0, 1, '$datasource') + graphPanelSchema({}, title, '', 'null as zero', true, formatY1, 'short', labelY1, null, 0, 1, '$datasource') .addTargets( [addTargetSchema('sum by (source_zone) (rate(%s[30s]))' % rgwMetric, 1, 'time_series', '{{source_zone}}')]) + {gridPos: {x: x, y: y, w: w, h: h}}; dashboardSchema( - 'RGW Sync Overview', 'rgw-sync-overview', 'now-1h', '15s', 16, ["overview"], '', {refresh_intervals:['5s','10s','15s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + 'RGW Sync Overview', '', 'rgw-sync-overview', 'now-1h', '15s', 16, ["overview"], '', {refresh_intervals:['5s','10s','15s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} ) .addAnnotation( addAnnotationSchema( @@ -36,7 +232,7 @@ local addAnnotationSchema(builtIn, datasource, enable, hide, iconColor, name, ty type='panel', id='graph', name='Graph', version='5.0.0' ) .addTemplate( - addTemplateSchema('rgw_servers', '$datasource', 'prometehus', 1, 2, true, 1) + addTemplateSchema('rgw_servers', '$datasource', 'prometehus', 1, true, 1, '', '') ) .addTemplate( g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') @@ -52,3 +248,507 @@ local addAnnotationSchema(builtIn, datasource, enable, hide, iconColor, name, ty 'Unsuccessful Object Replications from Source Zone', 'short', 'Count/s', 'ceph_data_sync_from_zone_fetch_errors', 0, 7, 8, 7) ]) } +{ + "radosgw-overview.json": + local RgwOverviewPanel(title, description, formatY1, formatY2, expr1, legendFormat1, x, y, w, h) = + graphPanelSchema({}, title, description, 'null', false, formatY1, formatY2, null, null, 0, 1, '$datasource') + 
.addTargets( + [addTargetSchema(expr1, 1, 'time_series', legendFormat1)]) + {gridPos: {x: x, y: y, w: w, h: h}}; + + dashboardSchema( + 'RGW Overview', '', 'WAkugZpiz', 'now-1h', '15s', 16, ['overview'], '', {refresh_intervals:['5s','10s','15s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + ) + .addAnnotation( + addAnnotationSchema( + 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard') + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.0.0' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + addTemplateSchema('rgw_servers', '$datasource', 'label_values(ceph_rgw_req, ceph_daemon)', 1, true, 1, '', '') + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addPanels([ + addRowSchema(false, true, 'RGW Overview - All Gateways') + {gridPos: {x: 0, y: 0, w: 24, h: 1}}, + RgwOverviewPanel( + 'Average GET/PUT Latencies', '', 's', 'short', 'rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s])', 'GET AVG', 0, 1, 8, 7).addTargets( + [addTargetSchema('rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s])', 1, 'time_series', 'PUT AVG')]), + RgwOverviewPanel( + 'Total Requests/sec by RGW Instance', '', 'none', 'short', 'sum by(rgw_host) (label_replace(rate(ceph_rgw_req[30s]), \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"))', '{{rgw_host}}', 8, 1, 7, 7), + RgwOverviewPanel( + 'GET Latencies by RGW Instance', 'Latencies are shown stacked, without a yaxis to provide a visual indication of GET latency imbalance across RGW hosts', 's', 'short', 'label_replace(rate(ceph_rgw_get_initial_lat_sum[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_get_initial_lat_count[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")', '{{rgw_host}}', 15, 1, 6, 7), + RgwOverviewPanel( + 'Bandwidth Consumed by Type', 'Total bytes transferred in/out of all radosgw instances within the cluster', 'bytes', 'short', 'sum(rate(ceph_rgw_get_b[30s]))', 'GETs', 0, 8, 8, 6).addTargets( + [addTargetSchema('sum(rate(ceph_rgw_put_b[30s]))', 1, 'time_series', 'PUTs')]), + RgwOverviewPanel( + 'Bandwidth by RGW Instance', 'Total bytes transferred in/out through get/put operations, by radosgw instance', 'bytes', 'short', 'sum by(rgw_host) (\n (label_replace(rate(ceph_rgw_get_b[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")) + \n (label_replace(rate(ceph_rgw_put_b[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\"))\n)', '{{rgw_host}}', 8, 8, 7, 6), + RgwOverviewPanel( + 'PUT Latencies by RGW Instance', 'Latencies are shown stacked, without a yaxis to provide a visual indication of PUT latency imbalance across RGW hosts', 's', 'short', 'label_replace(rate(ceph_rgw_put_initial_lat_sum[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_put_initial_lat_count[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")', '{{rgw_host}}', 15, 8, 6, 6) + ]) +} +{ + "radosgw-detail.json": + local RgwDetailsPanel(aliasColors, title, description, formatY1, formatY2, expr1, expr2, legendFormat1, legendFormat2, x, y, w, h) = + graphPanelSchema(aliasColors, title, description, 'null', false, formatY1, formatY2, null, null, 0, 1, '$datasource') + .addTargets( + [addTargetSchema(expr1, 1, 'time_series', legendFormat1),addTargetSchema(expr2, 1, 'time_series', legendFormat2)]) 
+ {gridPos: {x: x, y: y, w: w, h: h}}; + + dashboardSchema( + 'RGW Instance Detail', '', 'x5ARzZtmk', 'now-1h', '15s', 16, ['overview'], '', {refresh_intervals:['5s','10s','15s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + ) + .addAnnotation( + addAnnotationSchema( + 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard') + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.0.0' + ) + .addRequired( + type='panel', id='grafana-piechart-panel', name='Pie Chart', version='1.3.3' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + addTemplateSchema('rgw_servers', '$datasource', 'label_values(ceph_rgw_req, ceph_daemon)', 1, true, 1, '', '') + ) + .addPanels([ + addRowSchema(false, true, 'RGW Host Detail : $rgw_servers') + {gridPos: {x: 0, y: 0, w: 24, h: 1}}, + RgwDetailsPanel( + {}, '$rgw_servers GET/PUT Latencies', '', 's', 'short', 'sum by (ceph_daemon) (rate(ceph_rgw_get_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_get_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[30s]))', 'sum by (ceph_daemon)(rate(ceph_rgw_put_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_put_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[30s]))', 'GET {{ceph_daemon}}', 'PUT {{ceph_daemon}}', 0, 1, 6, 8), + RgwDetailsPanel( + {}, 'Bandwidth by HTTP Operation', '', 'bytes', 'short', 'rate(ceph_rgw_get_b{ceph_daemon=~\"$rgw_servers\"}[30s])', 'rate(ceph_rgw_put_b{ceph_daemon=~\"$rgw_servers\"}[30s])', 'GETs {{ceph_daemon}}', 'PUTs {{ceph_daemon}}', 6, 1, 7, 8), + RgwDetailsPanel( + {"GETs": "#7eb26d","Other": "#447ebc","PUTs": "#eab839","Requests": "#3f2b5b","Requests Failed": "#bf1b00"},'HTTP Request Breakdown', '', 'short', 'short', 'rate(ceph_rgw_failed_req{ceph_daemon=~\"$rgw_servers\"}[30s])', 'rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s])', 'Requests Failed {{ceph_daemon}}', 'GETs {{ceph_daemon}}', 13, 1, 7, 8) + .addTargets( + [addTargetSchema('rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s])', 1, 'time_series', 'PUTs {{ceph_daemon}}'),addTargetSchema('rate(ceph_rgw_req{ceph_daemon=~\"$rgw_servers\"}[30s]) -\n (rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s]) +\n rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s]))', 1, 'time_series', 'Other {{ceph_daemon}}')]), + addPieChartSchema( + {"GETs": "#7eb26d","Other (HEAD,POST,DELETE)": "#447ebc","PUTs": "#eab839","Requests": "#3f2b5b","Failures": "#bf1b00"},'$datasource', '', 'Under graph', 'pie', 'Workload Breakdown', 'current') + .addTarget(addTargetSchema('rate(ceph_rgw_failed_req{ceph_daemon=~\"$rgw_servers\"}[30s])', 1, 'time_series', 'Failures {{ceph_daemon}}')) + .addTarget(addTargetSchema('rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s])', 1, 'time_series', 'GETs {{ceph_daemon}}')) + .addTarget(addTargetSchema('rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s])', 1, 'time_series', 'PUTs {{ceph_daemon}}')) + .addTarget(addTargetSchema('rate(ceph_rgw_req{ceph_daemon=~\"$rgw_servers\"}[30s]) -\n (rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s]) +\n rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s]))', 1, 'time_series', 'Other (DELETE,LIST) {{ceph_daemon}}')) + {gridPos: {x: 20, y: 1, w: 4, h: 8}} + ]) +} +{ + "rbd-details.json": + local RbdDetailsPanel(title, formatY1, expr1, expr2, x, y, 
w, h) = + graphPanelSchema({}, title, '', 'null as zero', false, formatY1, formatY1, null, null, 0, 1, '$Datasource') + .addTargets( + [addTargetSchema(expr1, 1, 'time_series', 'Write'),addTargetSchema(expr2, 1, 'time_series', 'Read')]) + {gridPos: {x: x, y: y, w: w, h: h}}; + + dashboardSchema( + 'RBD Details', 'Detailed Performance of RBD Images (IOPS/Throughput/Latency)', 'YhCYGcuZz', 'now-1h', false, 16, [], '', {refresh_intervals:['5s','10s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + ) + .addAnnotation( + addAnnotationSchema( + 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard') + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.3.3' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + g.template.datasource('Datasource', 'prometheus', 'default', label=null) + ) + .addTemplate( + addTemplateSchema('Pool', '$Datasource', 'label_values(pool)', 1, false, 0, '', '') + ) + .addTemplate( + addTemplateSchema('Image', '$Datasource', 'label_values(image)', 1, false, 0, '', '') + ) + .addPanels([ + RbdDetailsPanel( + 'IOPS', 'iops', 'irate(ceph_rbd_write_ops{pool=\"$Pool\", image=\"$Image\"}[30s])','irate(ceph_rbd_read_ops{pool=\"$Pool\", image=\"$Image\"}[30s])', 0, 0, 8, 9), + RbdDetailsPanel( + 'Throughput', 'Bps', 'irate(ceph_rbd_write_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])', 'irate(ceph_rbd_read_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])', 8, 0, 8, 9), + RbdDetailsPanel( + 'Average Latency', 'ns', 'irate(ceph_rbd_write_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_write_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])', 'irate(ceph_rbd_read_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_read_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])', 16, 0, 8, 9) + ]) +} +{ + "rbd-overview.json": + local RgwOverviewStyle(alias, pattern, type, unit) = + addStyle(alias, null, ["rgba(245, 54, 54, 0.9)","rgba(237, 129, 40, 0.89)","rgba(50, 172, 45, 0.97)"], 'YYYY-MM-DD HH:mm:ss', 2, 1, pattern, [], type, unit, []); + local RbdOverviewPanel(title, formatY1, expr1, expr2, legendFormat1, legendFormat2, x, y, w, h) = + graphPanelSchema({}, title, '', 'null', false, formatY1, 'short', null, null, 0, 1, '$datasource') + .addTargets( + [addTargetSchema(expr1, 1, 'time_series', legendFormat1),addTargetSchema(expr2, 1, 'time_series', legendFormat2)]) + {gridPos: {x: x, y: y, w: w, h: h}}; + + dashboardSchema( + 'RBD Overview', '', '41FrpeUiz', 'now-1h', '30s', 16, ["overview"], '', {refresh_intervals:['5s','10s','15s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + ) + .addAnnotation( + addAnnotationSchema( + 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard') + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.4.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addRequired( + type='datasource', id='prometheus', name='Prometheus', version='5.0.0' + ) + .addRequired( + type='panel', id='table', name='Table', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addPanels([ + RbdOverviewPanel( + 'IOPS', 'short', 'round(sum(irate(ceph_rbd_write_ops[30s])))','round(sum(irate(ceph_rbd_read_ops[30s])))', 'Writes', 'Reads', 0, 0, 8, 7), 
+ RbdOverviewPanel( + 'Throughput', 'Bps', 'round(sum(irate(ceph_rbd_write_bytes[30s])))','round(sum(irate(ceph_rbd_read_bytes[30s])))', 'Write', 'Read', 8, 0, 8, 7), + RbdOverviewPanel( + 'Average Latency', 'ns', 'round(sum(irate(ceph_rbd_write_latency_sum[30s])) / sum(irate(ceph_rbd_write_latency_count[30s])))','round(sum(irate(ceph_rbd_read_latency_sum[30s])) / sum(irate(ceph_rbd_read_latency_count[30s])))', 'Write', 'Read', 16, 0, 8, 7), + addTableSchema( + '$datasource', '', {"col": 3,"desc": true}, [RgwOverviewStyle('Pool', 'pool', 'string', 'short'),RgwOverviewStyle('Image', 'image', 'string', 'short'),RgwOverviewStyle('IOPS', 'Value', 'number', 'iops'), RgwOverviewStyle('', '/.*/', 'hidden', 'short')], 'Highest IOPS', 'table' + ) + .addTarget( + addTargetSchema('topk(10, (sort((irate(ceph_rbd_write_ops[30s]) + on (image, pool, namespace) irate(ceph_rbd_read_ops[30s])))))', 1, 'table', '') + ) + {gridPos: {x: 0, y: 7, w: 8, h: 7}}, + addTableSchema( + '$datasource', '', {"col": 3,"desc": true}, [RgwOverviewStyle('Pool', 'pool', 'string', 'short'),RgwOverviewStyle('Image', 'image', 'string', 'short'),RgwOverviewStyle('Throughput', 'Value', 'number', 'Bps'), RgwOverviewStyle('', '/.*/', 'hidden', 'short')], 'Highest Throughput', 'table' + ) + .addTarget( + addTargetSchema('topk(10, sort(sum(irate(ceph_rbd_read_bytes[30s]) + irate(ceph_rbd_write_bytes[30s])) by (pool, image, namespace)))', 1, 'table', '') + ) + {gridPos: {x: 8, y: 7, w: 8, h: 7}}, + addTableSchema( + '$datasource', '', {"col": 3,"desc": true}, [RgwOverviewStyle('Pool', 'pool', 'string', 'short'),RgwOverviewStyle('Image', 'image', 'string', 'short'),RgwOverviewStyle('Latency', 'Value', 'number', 'ns'), RgwOverviewStyle('', '/.*/', 'hidden', 'short')], 'Highest Latency', 'table' + ) + .addTarget( + addTargetSchema('topk(10,\n sum(\n irate(ceph_rbd_write_latency_sum[30s]) / clamp_min(irate(ceph_rbd_write_latency_count[30s]), 1) +\n irate(ceph_rbd_read_latency_sum[30s]) / clamp_min(irate(ceph_rbd_read_latency_count[30s]), 1)\n ) by (pool, image, namespace)\n)', 1, 'table', '') + ) + {gridPos: {x: 16, y: 7, w: 8, h: 7}} + ]) +} +{ + "pool-overview.json": + local PoolOverviewSingleStatPanel(format, title, description, valueName, expr, targetFormat, x, y, w, h) = + addSingelStatSchema('$datasource', format, title, description, valueName, false, 100, false, false, '') + .addTarget(addTargetSchema(expr, 1, targetFormat, '')) + {gridPos: {x: x, y: y, w: w, h: h}}; + + local PoolOverviewStyle(alias, pattern, type, unit, colorMode, thresholds, valueMaps) = + addStyle(alias, colorMode, ["rgba(245, 54, 54, 0.9)","rgba(237, 129, 40, 0.89)","rgba(50, 172, 45, 0.97)"], 'YYYY-MM-DD HH:mm:ss', 2, 1, pattern, thresholds, type, unit, valueMaps); + + local PoolOverviewGraphPanel(title, description, formatY1, labelY1, expr, targetFormat, legendFormat, x, y, w, h) = + graphPanelSchema({}, title, description, 'null as zero', false, formatY1, 'short', labelY1, null, 0, 1, '$datasource') + .addTargets( + [addTargetSchema(expr, 1, 'time_series', legendFormat)]) + {gridPos: {x: x, y: y, w: w, h: h}}; + + dashboardSchema( + 'Ceph Pools Overview', '', 'z99hzWtmk', 'now-1h', '15s', 22, [], '', {refresh_intervals:['5s','10s','15s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + ) + .addAnnotation( + addAnnotationSchema( + 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard') + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 
'Dashboard1', label='Data Source') + ) + .addTemplate( + g.template.custom(label='TopK', name='topk', current='15', query='15') + ) + .addPanels([ + PoolOverviewSingleStatPanel( + 'none', 'Pools', '', 'avg', 'count(ceph_pool_metadata)', 'table', 0, 0, 3, 3), + PoolOverviewSingleStatPanel( + 'none', 'Pools with Compression', 'Count of the pools that have compression enabled', 'current', 'count(ceph_pool_metadata{compression_mode!=\"none\"})', '', 3, 0, 3, 3), + PoolOverviewSingleStatPanel( + 'bytes', 'Total Raw Capacity', 'Total raw capacity available to the cluster', 'current', 'sum(ceph_osd_stat_bytes)', '', 6, 0, 3, 3), + PoolOverviewSingleStatPanel( + 'bytes', 'Raw Capacity Consumed', 'Total raw capacity consumed by user data and associated overheads (metadata + redundancy)', 'current', 'sum(ceph_pool_bytes_used)', '', 9, 0, 3, 3), + PoolOverviewSingleStatPanel( + 'bytes', 'Logical Stored ', 'Total of client data stored in the cluster', 'current', 'sum(ceph_pool_stored)', '', 12, 0, 3, 3), + PoolOverviewSingleStatPanel( + 'bytes', 'Compression Savings', 'A compression saving is determined as the data eligible to be compressed minus the capacity used to store the data after compression', 'current', 'sum(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used)', '', 15, 0, 3, 3), + PoolOverviewSingleStatPanel( + 'percent', 'Compression Eligibility', 'Indicates how suitable the data is within the pools that are/have been enabled for compression - averaged across all pools holding compressed data\n', 'current', '(sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_stored_raw and ceph_pool_compress_under_bytes > 0)) * 100', 'table', 18, 0, 3, 3), + PoolOverviewSingleStatPanel( + 'none', 'Compression Factor', 'This factor describes the average ratio of data eligible to be compressed divided by the data actually stored. 
It does not account for data written that was ineligible for compression (too small, or compression yield too low)', 'current', 'sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_compress_bytes_used > 0)', '', 21, 0, 3, 3), + addTableSchema( + '$datasource', '', {"col": 5,"desc": true}, [PoolOverviewStyle('', 'Time', 'hidden', 'short', null, [], []),PoolOverviewStyle('', 'instance', 'hidden', 'short', null, [], []),PoolOverviewStyle('', 'job', 'hidden', 'short', null, [], []),PoolOverviewStyle('Pool Name', 'name', 'string', 'short', null, [], []),PoolOverviewStyle('Pool ID', 'pool_id', 'hidden', 'none', null, [], []),PoolOverviewStyle('Compression Factor', 'Value #A', 'number', 'none', null, [], []),PoolOverviewStyle('% Used', 'Value #D', 'number', 'percentunit', 'value', ['70','85'], []),PoolOverviewStyle('Usable Free', 'Value #B', 'number', 'bytes', null, [], []),PoolOverviewStyle('Compression Eligibility', 'Value #C', 'number', 'percent', null, [], []),PoolOverviewStyle('Compression Savings', 'Value #E', 'number', 'bytes', null, [], []),PoolOverviewStyle('Growth (5d)', 'Value #F', 'number', 'bytes', 'value', ['0', '0'], []),PoolOverviewStyle('IOPS', 'Value #G', 'number', 'none', null, [], []),PoolOverviewStyle('Bandwidth', 'Value #H', 'number', 'Bps', null, [], []),PoolOverviewStyle('', '__name__', 'hidden', 'short', null, [], []),PoolOverviewStyle('', 'type', 'hidden', 'short', null, [], []),PoolOverviewStyle('', 'compression_mode', 'hidden', 'short', null, [], []),PoolOverviewStyle('Type', 'description', 'string', 'short', null, [], []),PoolOverviewStyle('Stored', 'Value #J', 'number', 'bytes', null, [], []),PoolOverviewStyle('', 'Value #I', 'hidden', 'short', null, [], []),PoolOverviewStyle('Compression', 'Value #K', 'string', 'short', null, [], [{"text": "ON","value": "1"}])], 'Pool Overview', 'table' + ) + .addTargets( + [addTargetSchema('(ceph_pool_compress_under_bytes / ceph_pool_compress_bytes_used > 0) and on(pool_id) (((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100 > 0.5)', 1, 'table', ''), + addTargetSchema('ceph_pool_max_avail * on(pool_id) group_left(name) ceph_pool_metadata', 1, 'table', ''), + addTargetSchema('((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100', 1, 'table', ''), + addTargetSchema('(ceph_pool_percent_used * on(pool_id) group_left(name) ceph_pool_metadata)', 1, 'table', ''), + addTargetSchema('(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used > 0)', 1, 'table', ''), + addTargetSchema('delta(ceph_pool_stored[5d])', 1, 'table', ''), + addTargetSchema('rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])', 1, 'table', ''), + addTargetSchema('rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])', 1, 'table', ''), + addTargetSchema('ceph_pool_metadata', 1, 'table', ''), + addTargetSchema('ceph_pool_stored * on(pool_id) group_left ceph_pool_metadata', 1, 'table', ''), + addTargetSchema('ceph_pool_metadata{compression_mode!=\"none\"}', 1, 'table', ''), + addTargetSchema('', '', '', '')] + ) + {gridPos: {x: 0, y: 3, w: 24, h: 6}}, + PoolOverviewGraphPanel( + 'Top $topk Client IOPS by Pool', 'This chart shows the sum of read and write IOPS from all clients by pool', 'short', 'IOPS', 'topk($topk,round((rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])),1) * on(pool_id) group_left(instance,name) ceph_pool_metadata) ', 'time_series', '{{name}} ', 0, 9, 12, 8 + ) + .addTarget( + addTargetSchema('topk($topk,rate(ceph_pool_wr[30s]) * on(pool_id) group_left(instance,name) ceph_pool_metadata) ', 1,
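+ // (ceph_pool_metadata is a constant-1 gauge, so the join above is pure
+ // label decoration rather than arithmetic. A minimal sketch of the pattern,
+ // assuming a hypothetical pool with pool_id="1" and name="rbd":
+ //   rate(ceph_pool_wr[30s]) * on(pool_id) group_left(instance,name) ceph_pool_metadata
+ // keeps the write rate's value and attaches the pool's 'name' and
+ // 'instance' labels for the legend.)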
'time_series', '{{name}} - write') + ), + PoolOverviewGraphPanel( + 'Top $topk Client Bandwidth by Pool', 'The chart shows the sum of read and write bytes from all clients, by pool', 'Bps', 'Throughput', 'topk($topk,(rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])) * on(pool_id) group_left(instance,name) ceph_pool_metadata)', 'time_series', '{{name}}', 12, 9, 12, 8 + ), + PoolOverviewGraphPanel( + 'Pool Capacity Usage (RAW)', 'Historical view of capacity usage, to help identify growth and trends in pool consumption', 'bytes', 'Capacity Used', 'ceph_pool_bytes_used * on(pool_id) group_right ceph_pool_metadata', '', '{{name}}', 0, 17, 24, 7 + ) + ]) +} +{ + "pool-detail.json": + local PoolDetailSingleStatPanel(format, title, description, valueName, colorValue, gaugeMaxValue, gaugeShow, sparkLineShow, thresholds, expr, targetFormat, x, y, w, h) = + addSingelStatSchema('$datasource', format, title, description, valueName, colorValue, gaugeMaxValue, gaugeShow, sparkLineShow, thresholds) + .addTarget(addTargetSchema(expr, 1, targetFormat, '')) + {gridPos: {x: x, y: y, w: w, h: h}}; + + local PoolDetailGraphPanel(alias, title, description, formatY1, labelY1, expr, targetFormat, legendFormat, x, y, w, h) = + graphPanelSchema(alias, title, description, 'null as zero', false, formatY1, 'short', labelY1, null, null, 1, '$datasource') + .addTargets( + [addTargetSchema(expr, 1, 'time_series', legendFormat)]) + {gridPos: {x: x, y: y, w: w, h: h}}; + + dashboardSchema( + 'Ceph Pool Details', '', '-xyV8KCiz', 'now-1h', '15s', 22, [], '', {refresh_intervals:['5s','10s','15s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.3.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addRequired( + type='panel', id='singlestat', name='Singlestat', version='5.0.0' + ) + .addAnnotation( + addAnnotationSchema( + 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard') + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'Prometheus admin.virt1.home.fajerski.name:9090', label='Data Source') + ) + .addTemplate( + addTemplateSchema('pool_name', '$datasource', 'label_values(ceph_pool_metadata,name)', 1, false, 1, 'Pool Name', '') + ) + .addPanels([ + PoolDetailSingleStatPanel( + 'percentunit', 'Capacity used', '', 'current', true, 1, true, true, '.7,.8', '(ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail)) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}', 'time_series', 0, 0, 7, 7), + PoolDetailSingleStatPanel( + 's', 'Time till full', 'Time till pool is full assuming the average fill rate of the last 6 hours', 'current', false, 100, false, false, '', '(ceph_pool_max_avail / deriv(ceph_pool_stored[6h])) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"} > 0', 'time_series', 7, 0, 5, 7), + PoolDetailGraphPanel( + {"read_op_per_sec": "#3F6833","write_op_per_sec": "#E5AC0E"},'$pool_name Object Ingress/Egress', '', 'ops', 'Objects out(-) / in(+) ', 'deriv(ceph_pool_objects[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}', 'time_series', 'Objects per second', 12, 0, 12, 7 + ), + PoolDetailGraphPanel( + {"read_op_per_sec": "#3F6833","write_op_per_sec": "#E5AC0E"},'$pool_name Client IOPS', '', 'iops', 'Read (-) / Write (+)', 'irate(ceph_pool_rd[1m]) * on(pool_id)
group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}', 'time_series', 'reads', 0, 7, 12, 7 + ) + .addSeriesOverride({"alias": "reads","transform": "negative-Y"}) + .addTarget( + addTargetSchema('irate(ceph_pool_wr[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}', 1, 'time_series', 'writes') + ), + PoolDetailGraphPanel( + {"read_op_per_sec": "#3F6833","write_op_per_sec": "#E5AC0E"},'$pool_name Client Throughput', '', 'Bps', 'Read (-) / Write (+)', 'irate(ceph_pool_rd_bytes[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}', 'time_series', 'reads', 12, 7, 12, 7 + ) + .addSeriesOverride({"alias": "reads","transform": "negative-Y"}) + .addTarget( + addTargetSchema('irate(ceph_pool_wr_bytes[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}', 1, 'time_series', 'writes') + ), + PoolDetailGraphPanel( + {"read_op_per_sec": "#3F6833","write_op_per_sec": "#E5AC0E"},'$pool_name Objects', '', 'short', 'Objects', 'ceph_pool_objects * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}', 'time_series', 'Number of Objects', 0, 14, 12, 7 + ) + ]) +} +{ + "osds-overview.json": + local OsdOverviewStyle(alias, pattern, type, unit) = + addStyle(alias, null, ["rgba(245, 54, 54, 0.9)","rgba(237, 129, 40, 0.89)","rgba(50, 172, 45, 0.97)"], 'YYYY-MM-DD HH:mm:ss', 2, 1, pattern, [], type, unit, []); + local OsdOverviewGraphPanel(alias, title, description, formatY1, labelY1, min, expr, legendFormat1, x, y, w, h) = + graphPanelSchema(alias, title, description, 'null', false, formatY1, 'short', labelY1, null, min, 1, '$datasource') + .addTargets( + [addTargetSchema(expr, 1, 'time_series', legendFormat1)]) + {gridPos: {x: x, y: y, w: w, h: h}}; + local OsdOverviewPieChartPanel(alias, description, title) = + addPieChartSchema(alias, '$datasource', description, 'Under graph', 'pie', title, 'current'); + + dashboardSchema( + 'OSD Overview', '', 'lo02I1Aiz', 'now-1h', '10s', 16, [], '', {refresh_intervals:['5s','10s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + ) + .addAnnotation( + addAnnotationSchema( + 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard') + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.0.0' + ) + .addRequired( + type='panel', id='grafana-piechart-panel', name='Pie Chart', version='1.3.3' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addRequired( + type='panel', id='table', name='Table', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addPanels([ + OsdOverviewGraphPanel( + {"@95%ile": "#e0752d"},'OSD Read Latencies', '', 'ms', null, '0', 'avg (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)', 'AVG read', 0, 0, 8, 8) + .addTargets( + [addTargetSchema('max (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)', 1, 'time_series', 'MAX read'),addTargetSchema('quantile(0.95,\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n)', 1, 'time_series', '@95%ile')], + ), + addTableSchema( + '$datasource', 'This table shows the OSDs that are delivering the 10 highest read latencies within the cluster', {"col": 2,"desc": true}, [OsdOverviewStyle('OSD ID',
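+ // The latency in these tables is recovered from the _sum/_count counter
+ // pair Ceph exports per OSD: dividing the two irate()s gives mean seconds
+ // per op over the last minute, scaled to ms. A minimal sketch of the
+ // query shape used below:
+ //   topk(10, irate(ceph_osd_op_r_latency_sum[1m])
+ //        / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)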
'ceph_daemon', 'string', 'short'),OsdOverviewStyle('Latency (ms)', 'Value', 'number', 'none'),OsdOverviewStyle('', '/.*/', 'hidden', 'short')], 'Highest READ Latencies', 'table' + ) + .addTarget( + addTargetSchema('topk(10,\n (sort(\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n ))\n)\n\n', 1, 'table', '') + ) + {gridPos: {x: 8, y: 0, w: 4, h: 8}}, + OsdOverviewGraphPanel( + {"@95%ile write": "#e0752d"},'OSD Write Latencies', '', 'ms', null, '0', 'avg (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)', 'AVG write', 12, 0, 8, 8) + .addTargets( + [addTargetSchema('max (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)', 1, 'time_series', 'MAX write'),addTargetSchema('quantile(0.95,\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n)', 1, 'time_series', '@95%ile write')], + ), + addTableSchema( + '$datasource', 'This table shows the OSDs that are delivering the 10 highest write latencies within the cluster', {"col": 2,"desc": true}, [OsdOverviewStyle('OSD ID', 'ceph_daemon', 'string', 'short'),OsdOverviewStyle('Latency (ms)', 'Value', 'number', 'none'),OsdOverviewStyle('', '/.*/', 'hidden', 'short')], 'Highest WRITE Latencies', 'table' + ) + .addTarget( + addTargetSchema('topk(10,\n (sort(\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n ))\n)\n\n', 1, 'table', '') + ) + {gridPos: {x: 20, y: 0, w: 4, h: 8}}, + OsdOverviewPieChartPanel( + {}, '', 'OSD Types Summary' + ) + .addTarget(addTargetSchema('count by (device_class) (ceph_osd_metadata)', 1, 'time_series', '{{device_class}}')) + {gridPos: {x: 0, y: 8, w: 4, h: 8}}, + OsdOverviewPieChartPanel( + {"Non-Encrypted": "#E5AC0E"}, '', 'OSD Objectstore Types' + ) + .addTarget(addTargetSchema('count(ceph_bluefs_wal_total_bytes)', 1, 'time_series', 'bluestore')) + .addTarget(addTargetSchema('count(ceph_osd_metadata) - count(ceph_bluefs_wal_total_bytes)', 1, 'time_series', 'filestore')) + .addTarget(addTargetSchema('absent(ceph_bluefs_wal_total_bytes)*count(ceph_osd_metadata)', 1, 'time_series', 'filestore')) + {gridPos: {x: 4, y: 8, w: 4, h: 8}}, + OsdOverviewPieChartPanel( + {}, 'The pie chart shows the various OSD sizes used within the cluster', 'OSD Size Summary' + ) + .addTarget(addTargetSchema('count(ceph_osd_stat_bytes < 1099511627776)', 1, 'time_series', '<1TB')) + .addTarget(addTargetSchema('count(ceph_osd_stat_bytes >= 1099511627776 < 2199023255552)', 1, 'time_series', '<2TB')) + .addTarget(addTargetSchema('count(ceph_osd_stat_bytes >= 2199023255552 < 3298534883328)', 1, 'time_series', '<3TB')) + .addTarget(addTargetSchema('count(ceph_osd_stat_bytes >= 3298534883328 < 4398046511104)', 1, 'time_series', '<4TB')) + .addTarget(addTargetSchema('count(ceph_osd_stat_bytes >= 4398046511104 < 6597069766656)', 1, 'time_series', '<6TB')) + .addTarget(addTargetSchema('count(ceph_osd_stat_bytes >= 6597069766656 < 8796093022208)', 1, 'time_series', '<8TB')) + .addTarget(addTargetSchema('count(ceph_osd_stat_bytes >= 8796093022208 < 10995116277760)', 1, 'time_series', '<10TB')) + .addTarget(addTargetSchema('count(ceph_osd_stat_bytes >= 10995116277760 < 13194139533312)', 1, 'time_series', '<12TB')) + .addTarget(addTargetSchema('count(ceph_osd_stat_bytes >= 13194139533312)', 1, 'time_series', '12TB+')) + {gridPos: {x: 8, y: 8, w: 4, h: 8}}, + g.graphPanel.new(bars=true,
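+ // Unlike the helper-built panels above, this one uses grafonnet's
+ // graphPanel.new directly: x_axis_mode='histogram' makes Grafana bucket the
+ // instantaneous ceph_osd_numpg samples into 20 bins, so the bars show how
+ // many OSDs fall into each PG-count range rather than a time series.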
datasource='$datasource', title='Distribution of PGs per OSD', x_axis_buckets=20, x_axis_mode='histogram', x_axis_values=['total'], formatY1='short', formatY2='short', labelY1='# of OSDs', min='0', nullPointMode='null') + .addTarget(addTargetSchema('ceph_osd_numpg\n', 1, 'time_series', 'PGs per OSD')) + {gridPos: {x: 12, y: 8, w: 12, h: 8}}, + addRowSchema(false, true, 'R/W Profile') + {gridPos: {x: 0, y: 16, w: 24, h: 1}}, + OsdOverviewGraphPanel( + {},'Read/Write Profile', 'Shows the read/write workload profile over time', 'short', null, null, 'round(sum(irate(ceph_pool_rd[30s])))', 'Reads', 0, 17, 24, 8) + .addTargets([addTargetSchema('round(sum(irate(ceph_pool_wr[30s])))', 1, 'time_series', 'Writes')]) + ]) +} +{ + "osd-device-details.json": + local OsdDeviceDetailsPanel(title, description, formatY1, labelY1, expr1, expr2, legendFormat1, legendFormat2, x, y, w, h) = + graphPanelSchema({}, title, description, 'null', false, formatY1, 'short', labelY1, null, null, 1, '$datasource') + .addTargets( + [addTargetSchema(expr1, 1, 'time_series', legendFormat1),addTargetSchema(expr2, 1, 'time_series', legendFormat2)]) + {gridPos: {x: x, y: y, w: w, h: h}}; + + dashboardSchema( + 'OSD device details', '', 'CrAHE0iZz', 'now-3h', '', 16, [], '', {refresh_intervals:['5s','10s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + ) + .addAnnotation( + addAnnotationSchema( + 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard') + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.3.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + addTemplateSchema('osd', '$datasource', 'label_values(ceph_osd_metadata,ceph_daemon)', 1, false, 1, 'OSD', '(.*)') + ) + .addPanels([ + addRowSchema(false, true, 'OSD Performance') + {gridPos: {x: 0, y: 0, w: 24, h: 1}}, + OsdDeviceDetailsPanel( + '$osd Latency', '', 's', 'Read (-) / Write (+)', 'irate(ceph_osd_op_r_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m])', 'irate(ceph_osd_op_w_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m])', 'read', 'write', 0, 1, 6, 9) + .addSeriesOverride({"alias": "read","transform": "negative-Y"} + ), + OsdDeviceDetailsPanel( + '$osd R/W IOPS', '', 'short', 'Read (-) / Write (+)', 'irate(ceph_osd_op_r{ceph_daemon=~\"$osd\"}[1m])', 'irate(ceph_osd_op_w{ceph_daemon=~\"$osd\"}[1m])', 'Reads', 'Writes', 6, 1, 6, 9) + .addSeriesOverride({"alias": "Reads","transform": "negative-Y"} + ), + OsdDeviceDetailsPanel( + '$osd R/W Bytes', '', 'bytes', 'Read (-) / Write (+)', 'irate(ceph_osd_op_r_out_bytes{ceph_daemon=~\"$osd\"}[1m])', 'irate(ceph_osd_op_w_in_bytes{ceph_daemon=~\"$osd\"}[1m])', 'Read Bytes', 'Write Bytes', 12, 1, 6, 9) + .addSeriesOverride({"alias": "Read Bytes","transform": "negative-Y"}), + addRowSchema(false, true, 'Physical Device Performance') + {gridPos: {x: 0, y: 10, w: 24, h: 1}}, + OsdDeviceDetailsPanel( + 'Physical Device Latency for $osd', '', 's', 'Read (-) / Write (+)', '(label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\",
\"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))', '(label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))', '{{instance}}/{{device}} Reads', '{{instance}}/{{device}} Writes', 0, 11, 6, 9) + .addSeriesOverride({"alias": "/.*Reads/","transform": "negative-Y"} + ), + OsdDeviceDetailsPanel( + 'Physical Device R/W IOPS for $osd', '', 'short', 'Read (-) / Write (+)', 'label_replace(irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")', 'label_replace(irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")', '{{device}} on {{instance}} Writes', '{{device}} on {{instance}} Reads', 6, 11, 6, 9) + .addSeriesOverride({"alias": "/.*Reads/","transform": "negative-Y"} + ), + OsdDeviceDetailsPanel( + 'Physical Device R/W Bytes for $osd', '', 'Bps', 'Read (-) / Write (+)', 'label_replace(irate(node_disk_read_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")', 'label_replace(irate(node_disk_written_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")', '{{instance}} {{device}} Reads', '{{instance}} {{device}} Writes', 12, 11, 6, 9) + .addSeriesOverride({"alias": "/.*Reads/","transform": "negative-Y"} + ), + graphPanelSchema( + {}, 'Physical Device Util% for $osd', '', 'null', false, 'percentunit', 'short', null, null, null, 1, '$datasource' + ) + .addTarget(addTargetSchema('label_replace(irate(node_disk_io_time_seconds_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")', 1, 'time_series', '{{device}} on {{instance}}')) + {gridPos: {x: 18, y: 11, w: 6, h: 9}}, + ]) +} +{ + "cephfs-overview.json": + local CephfsOverviewGraphPanel(title, formatY1, labelY1, expr, legendFormat, x, y, w, h) = + graphPanelSchema({}, title, '', 'null', false, formatY1, 'short', labelY1, null, 0, 1, '$datasource') + .addTargets( + [addTargetSchema(expr, 1, 'time_series', legendFormat)]) + {gridPos: {x: x, y: y, w: w, h: h}}; + + dashboardSchema( + 'MDS Performance', '', 'tbO9LAiZz', 'now-1h', '15s', 16, [], '', {refresh_intervals:['5s','10s','15s','30s','1m','5m','15m','30m','1h','2h','1d'],time_options:['5m','15m','1h','6h','12h','24h','2d','7d','30d']} + ) + .addAnnotation( + addAnnotationSchema( + 1, 
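+ // (The osd-device-details exprs above marry node_exporter disk metrics to
+ // Ceph's view via ceph_disk_occupation: label_replace() trims 'instance'
+ // to the bare hostname on both sides ("([^:.]*).*") and strips the '/dev/'
+ // prefix from 'device', after which 'and on (instance, device)' keeps only
+ // the disk series that actually back the selected $osd.)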
'-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard') + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.3.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + addTemplateSchema('mds_servers', '$datasource', 'label_values(ceph_mds_inodes, ceph_daemon)', 1, true, 1, 'MDS Server', '') + ) + .addPanels([ + addRowSchema(false, true, 'MDS Performance') + {gridPos: {x: 0, y: 0, w: 24, h: 1}}, + CephfsOverviewGraphPanel( + 'MDS Workload - $mds_servers', 'none', 'Reads(-) / Writes (+)', 'sum(rate(ceph_objecter_op_r{ceph_daemon=~\"($mds_servers).*\"}[1m]))', 'Read Ops', 0, 1, 12, 9) + .addTarget(addTargetSchema('sum(rate(ceph_objecter_op_w{ceph_daemon=~\"($mds_servers).*\"}[1m]))', 1, 'time_series', 'Write Ops')) + .addSeriesOverride({"alias": "/.*Read Ops/","transform": "negative-Y"} + ), + CephfsOverviewGraphPanel( + 'Client Request Load - $mds_servers', 'none', 'Client Requests', 'ceph_mds_server_handle_client_request{ceph_daemon=~\"($mds_servers).*\"}', '{{ceph_daemon}}', 12, 1, 12, 9 + ) + ]) +} diff --git a/ceph/monitoring/grafana/dashboards/osd-device-details.json b/ceph/monitoring/grafana/dashboards/osd-device-details.json index eefb59125..6d59f3be4 100644 --- a/ceph/monitoring/grafana/dashboards/osd-device-details.json +++ b/ceph/monitoring/grafana/dashboards/osd-device-details.json @@ -1,800 +1,822 @@ { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.3.2" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1557395861896, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 14, - "panels": [], - "title": "OSD Performance", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "gridPos": { - "h": 9, - "w": 6, - "x": 0, - "y": 1 - }, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "read", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "irate(ceph_osd_op_r_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "read", - "refId": "A" - }, - { - "expr": "irate(ceph_osd_op_w_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "write", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "$osd Latency", - "tooltip": { - "shared":
true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": "Read (-) / Write (+)", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "gridPos": { - "h": 9, - "w": 6, - "x": 6, - "y": 1 - }, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "Reads", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "irate(ceph_osd_op_r{ceph_daemon=~\"$osd\"}[1m])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Reads", - "refId": "A" - }, - { - "expr": "irate(ceph_osd_op_w{ceph_daemon=~\"$osd\"}[1m])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Writes", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "$osd R/W IOPS", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "Read (-) / Write (+)", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "gridPos": { - "h": 9, - "w": 6, - "x": 12, - "y": 1 - }, - "id": 7, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "Read Bytes", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "irate(ceph_osd_op_r_out_bytes{ceph_daemon=~\"$osd\"}[1m])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Read Bytes", - "refId": "A" - }, - { - "expr": "irate(ceph_osd_op_w_in_bytes{ceph_daemon=~\"$osd\"}[1m])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Write Bytes", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "$osd R/W Bytes", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": "Read (-) / Write (+)", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - 
"label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 10 - }, - "id": 12, - "panels": [], - "title": "Physical Device Performance", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "gridPos": { - "h": 9, - "w": 6, - "x": 0, - "y": 11 - }, - "id": 9, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*Reads/", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "(label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{instance}}/{{device}} Reads", - "refId": "A" - }, - { - "expr": "(label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{instance}}/{{device}} Writes", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Physical Device Latency for $osd", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": "Read (-) / Write (+)", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "gridPos": { - "h": 9, - "w": 6, - "x": 6, - "y": 11 + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.2" }, - "id": 5, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*Reads/", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) 
label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{device}} on {{instance}} Writes", - "refId": "A" - }, - { - "expr": "label_replace(irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{device}} on {{instance}} Reads", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Physical Device R/W IOPS for $osd", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "Read (-) / Write (+)", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 6, - "x": 12, - "y": 11 - }, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "OSD Performance", + "titleSize": "h6", + "type": "row" }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*Reads/", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(irate(node_disk_read_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{instance}} {{device}} Reads", - "refId": "A" - }, - { - "expr": "label_replace(irate(node_disk_written_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", 
\"instance\", \"([^:.]*).*\")", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{instance}} {{device}} Writes", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Physical Device R/W Bytes for $osd", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "read", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_osd_op_r_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "read", + "refId": "A" + }, + { + "expr": "irate(ceph_osd_op_w_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "write", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$osd Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 6, + "x": 6, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "Reads", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_osd_op_r{ceph_daemon=~\"$osd\"}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Reads", + "refId": "A" + }, + { + "expr": "irate(ceph_osd_op_w{ceph_daemon=~\"$osd\"}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$osd R/W IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + 
"mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "yaxes": [ - { - "format": "Bps", - "label": "Read (-) / Write (+)", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 6, - "x": 18, - "y": 11 + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 6, + "x": 12, + "y": 1 + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "Read Bytes", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_osd_op_r_out_bytes{ceph_daemon=~\"$osd\"}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read Bytes", + "refId": "A" + }, + { + "expr": "irate(ceph_osd_op_w_in_bytes{ceph_daemon=~\"$osd\"}[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write Bytes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$osd R/W Bytes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 6, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Physical Device Performance", + "titleSize": "h6", + "type": "row" }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(irate(node_disk_io_time_seconds_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", - "format": 
"time_series", - "intervalFactor": 1, - "legendFormat": "{{device}} on {{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Physical Device Util% for $osd", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 6, + "x": 0, + "y": 11 + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}/{{device}} Reads", + "refId": "A" + }, + { + "expr": "(label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}/{{device}} Writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device Latency for $osd", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 6, + "x": 6, + "y": 11 + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"label_replace(irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}} on {{instance}} Writes", + "refId": "A" + }, + { + "expr": "label_replace(irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}} on {{instance}} Reads", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device R/W IOPS for $osd", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "schemaVersion": 16, - "style": "dark", - "tags": [], - "templating": { - "list": [ { - "current": { - "text": "default", - "value": "default" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 6, + "x": 12, + "y": 11 + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/.*Reads/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(irate(node_disk_read_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}} {{device}} Reads", + "refId": "A" + }, + { + "expr": "label_replace(irate(node_disk_written_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", 
\"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}} {{device}} Writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device R/W Bytes for $osd", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, { - "allValue": null, - "current": {}, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "OSD", - "multi": false, - "name": "osd", - "options": [], - "query": "label_values(ceph_osd_metadata,ceph_daemon)", - "refresh": 1, - "regex": "(.*)", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 6, + "x": 18, + "y": 11 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(irate(node_disk_io_time_seconds_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device}} on {{instance}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Physical Device Util% for $osd", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] } - ] - }, - "time": { - "from": "now-3h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "OSD device details", - "uid": "CrAHE0iZz", - "version": 3 + ], + "refresh": "", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": 
"prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "OSD", + "multi": false, + "name": "osd", + "options": [ ], + "query": "label_values(ceph_osd_metadata,ceph_daemon)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "OSD device details", + "uid": "CrAHE0iZz", + "version": 0 } diff --git a/ceph/monitoring/grafana/dashboards/osds-overview.json b/ceph/monitoring/grafana/dashboards/osds-overview.json index 4b91df9eb..8a873090a 100644 --- a/ceph/monitoring/grafana/dashboards/osds-overview.json +++ b/ceph/monitoring/grafana/dashboards/osds-overview.json @@ -1,876 +1,837 @@ { - - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.0.0" - }, - { - "type": "panel", - "id": "grafana-piechart-panel", - "name": "Pie Chart", - "version": "1.3.3" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "5.0.0" - }, - { - "type": "panel", - "id": "table", - "name": "Table", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ + "__inputs": [ ], + "__requires": [ { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1538083987689, - "links": [], - "panels": [ - { - "aliasColors": { - "@95%ile": "#e0752d" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 0 - }, - "id": 12, - "legend": { - "avg": false, - "current": true, - "max": false, - "min": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "avg (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "AVG read", - "refId": "A" - }, - { - "expr": "max (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "MAX read", - "refId": "B" - }, - { - "expr": "quantile(0.95,\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "@95%ile", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "OSD Read Latencies", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - 
{ - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "columns": [], - "datasource": "$datasource", - "description": "This table shows the osd's that are delivering the 10 highest read latencies within the cluster", - "fontSize": "100%", - "gridPos": { - "h": 8, - "w": 4, - "x": 8, - "y": 0 - }, - "id": 15, - "links": [], - "pageSize": null, - "scroll": true, - "showHeader": true, - "sort": { - "col": 2, - "desc": true - }, - "styles": [ - { - "alias": "OSD ID", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "ceph_daemon", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "Latency (ms)", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "pattern": "Value", - "thresholds": [], - "type": "number", - "unit": "none" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [], - "type": "hidden", - "unit": "short" - } - ], - "targets": [ - { - "expr": "topk(10,\n (sort(\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n ))\n)\n\n", - "format": "table", - "instant": true, - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "title": "Highest READ Latencies", - "transform": "table", - "type": "table" - }, - { - "aliasColors": { - "@95%ile write": "#e0752d" + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.0.0" }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 8, - "w": 8, - "x": 12, - "y": 0 - }, - "id": 13, - "legend": { - "avg": false, - "current": true, - "max": false, - "min": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "avg (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "AVG write", - "refId": "A" - }, - { - "expr": "max (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "MAX write", - "refId": "B" - }, - { - "expr": "quantile(0.95,\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "@95%ile write", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "OSD Write Latencies", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" + { + "id": "grafana-piechart-panel", + "name": "Pie Chart", + "type": "panel", + "version": "1.3.3" }, - "type": 
"graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } + { + "id": "table", + "name": "Table", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } ] - }, - { - "columns": [], - "datasource": "$datasource", - "description": "This table shows the osd's that are delivering the 10 highest write latencies within the cluster", - "fontSize": "100%", - "gridPos": { - "h": 8, - "w": 4, - "x": 20, - "y": 0 - }, - "id": 16, - "links": [], - "pageSize": null, - "scroll": true, - "showHeader": true, - "sort": { - "col": 2, - "desc": true - }, - "styles": [ - { - "alias": "OSD ID", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "ceph_daemon", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "Latency (ms)", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "pattern": "Value", - "thresholds": [], - "type": "number", - "unit": "none" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [], - "type": "hidden", - "unit": "short" - } - ], - "targets": [ - { - "expr": "topk(10,\n (sort(\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n ))\n)\n\n", - "format": "table", - "instant": true, - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "title": "Highest WRITE Latencies", - "transform": "table", - "type": "table" - }, - { - "aliasColors": {}, - "breakPoint": "50%", - "cacheTimeout": null, - "combine": { - "label": "Others", - "threshold": 0 - }, - "datasource": "$datasource", - "fontSize": "80%", - "format": "none", - "gridPos": { - "h": 8, - "w": 4, - "x": 0, - "y": 8 - }, - "id": 2, - "interval": null, - "legend": { - "show": true, - "values": true - }, - "legendType": "Under graph", - "links": [], - "maxDataPoints": 3, - "nullPointMode": "connected", - "pieType": "pie", - "strokeWidth": 1, - "targets": [ - { - "expr": "count by (device_class) (ceph_osd_metadata)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{device_class}}", - "refId": "A" - } - ], - "title": "OSD Types Summary", - "type": "grafana-piechart-panel", - "valueName": "current" - }, - { - "aliasColors": { - "Non-Encrypted": "#E5AC0E" - }, - "breakPoint": "50%", - "cacheTimeout": null, - "combine": { - "label": "Others", - "threshold": 0 - }, - "datasource": "$datasource", - "fontSize": "80%", - "format": "none", - "gridPos": { - "h": 8, - "w": 4, - "x": 4, - "y": 8 + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + 
"hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "aliasColors": { + "@95%ile": "#e0752d" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "AVG read", + "refId": "A" + }, + { + "expr": "max (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "MAX read", + "refId": "B" + }, + { + "expr": "quantile(0.95,\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "@95%ile", + "refId": "C" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "OSD Read Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ] }, - "height": "200px", - "hideTimeOverride": true, - "id": 4, - "interval": null, - "legend": { - "percentage": false, - "show": true, - "values": true + { + "columns": [ ], + "datasource": "$datasource", + "description": "This table shows the osd's that are delivering the 10 highest read latencies within the cluster", + "gridPos": { + "h": 8, + "w": 4, + "x": 8, + "y": 0 + }, + "id": 3, + "sort": { + "col": 2, + "desc": true + }, + "styles": [ + { + "alias": "OSD ID", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "ceph_daemon", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Latency (ms)", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "none", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10,\n (sort(\n 
(irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n ))\n)\n\n", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Highest READ Latencies", + "transform": "table", + "type": "table" }, - "legendType": "Under graph", - "links": [], - "maxDataPoints": "1", - "minSpan": 4, - "nullPointMode": "connected", - "pieType": "pie", - "strokeWidth": 1, - "targets": [ - { - "expr": "count(ceph_bluefs_wal_total_bytes)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "bluestore", - "refId": "A", - "step": 240 - }, - { - "expr": "count(ceph_osd_metadata) - count(ceph_bluefs_wal_total_bytes)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "filestore", - "refId": "B", - "step": 240 - }, - { - "expr": "absent(ceph_bluefs_wal_total_bytes)*count(ceph_osd_metadata)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "filestore", - "refId": "C", - "step": 240 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "OSD Objectstore Types", - "type": "grafana-piechart-panel", - "valueName": "current" - }, - { - "aliasColors": {}, - "breakPoint": "50%", - "cacheTimeout": null, - "combine": { - "label": "Others", - "threshold": "0.05" + { + "aliasColors": { + "@95%ile write": "#e0752d" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 8, + "w": 8, + "x": 12, + "y": 0 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "AVG write", + "refId": "A" + }, + { + "expr": "max (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "MAX write", + "refId": "B" + }, + { + "expr": "quantile(0.95,\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "@95%ile write", + "refId": "C" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "OSD Write Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ] }, - "datasource": "$datasource", - "description": "The pie chart shows the various OSD sizes used within the cluster", - "fontSize": "80%", - "format": "none", - "gridPos": { - "h": 8, - "w": 4, - "x": 8, - "y": 8 + { + "columns": [ ], + 
"datasource": "$datasource", + "description": "This table shows the osd's that are delivering the 10 highest write latencies within the cluster", + "gridPos": { + "h": 8, + "w": 4, + "x": 20, + "y": 0 + }, + "id": 5, + "sort": { + "col": 2, + "desc": true + }, + "styles": [ + { + "alias": "OSD ID", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "ceph_daemon", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Latency (ms)", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "none", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10,\n (sort(\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n ))\n)\n\n", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Highest WRITE Latencies", + "transform": "table", + "type": "table" }, - "height": "220", - "hideTimeOverride": true, - "id": 8, - "interval": null, - "legend": { - "header": "", - "percentage": false, - "show": true, - "sideWidth": null, - "sortDesc": true, - "values": true + { + "aliasColors": { }, + "datasource": "$datasource", + "description": "", + "gridPos": { + "h": 8, + "w": 4, + "x": 0, + "y": 8 + }, + "id": 6, + "legend": { + "percentage": true, + "show": true, + "values": true + }, + "legendType": "Under graph", + "pieType": "pie", + "targets": [ + { + "expr": "count by (device_class) (ceph_osd_metadata)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{device_class}}", + "refId": "A" + } + ], + "title": "OSD Types Summary", + "type": "grafana-piechart-panel", + "valueName": "current" }, - "legendType": "Under graph", - "links": [], - "maxDataPoints": "", - "minSpan": 6, - "nullPointMode": "connected", - "pieType": "pie", - "strokeWidth": "1", - "targets": [ - { - "expr": "count(ceph_osd_stat_bytes < 1099511627776)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "<1 TB", - "refId": "A", - "step": 2 - }, - { - "expr": "count(ceph_osd_stat_bytes >= 1099511627776 < 2199023255552)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "<2 TB", - "refId": "B", - "step": 2 - }, - { - "expr": "count(ceph_osd_stat_bytes >= 2199023255552 < 3298534883328)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "<3TB", - "refId": "C", - "step": 2 - }, - { - "expr": "count(ceph_osd_stat_bytes >= 3298534883328 < 4398046511104)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "<4TB", - "refId": "D", - "step": 2 - }, - { - "expr": "count(ceph_osd_stat_bytes >= 4398046511104 < 6597069766656)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "<6TB", - "refId": "E", - "step": 2 - }, - { - "expr": 
"count(ceph_osd_stat_bytes >= 6597069766656 < 8796093022208)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "<8TB", - "refId": "F", - "step": 2 - }, - { - "expr": "count(ceph_osd_stat_bytes >= 8796093022208 < 10995116277760)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "<10TB", - "refId": "G", - "step": 2 - }, - { - "expr": "count(ceph_osd_stat_bytes >= 10995116277760 < 13194139533312)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "<12TB", - "refId": "H", - "step": 2 - }, - { - "expr": "count(ceph_osd_stat_bytes >= 13194139533312)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "12TB+", - "refId": "I", - "step": 2 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "OSD Size Summary", - "type": "grafana-piechart-panel", - "valueName": "current" - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Each bar indicates the number of OSD's that have a PG count in a specific range as shown on the x axis.", - "fill": 1, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 8 + { + "aliasColors": { + "Non-Encrypted": "#E5AC0E" + }, + "datasource": "$datasource", + "description": "", + "gridPos": { + "h": 8, + "w": 4, + "x": 4, + "y": 8 + }, + "id": 7, + "legend": { + "percentage": true, + "show": true, + "values": true + }, + "legendType": "Under graph", + "pieType": "pie", + "targets": [ + { + "expr": "count(ceph_bluefs_wal_total_bytes)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "bluestore", + "refId": "A" + }, + { + "expr": "count(ceph_osd_metadata) - count(ceph_bluefs_wal_total_bytes)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "filestore", + "refId": "B" + }, + { + "expr": "absent(ceph_bluefs_wal_total_bytes)*count(ceph_osd_metadata)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "filestore", + "refId": "C" + } + ], + "title": "OSD Objectstore Types", + "type": "grafana-piechart-panel", + "valueName": "current" }, - "id": 6, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false + { + "aliasColors": { }, + "datasource": "$datasource", + "description": "The pie chart shows the various OSD sizes used within the cluster", + "gridPos": { + "h": 8, + "w": 4, + "x": 8, + "y": 8 + }, + "id": 8, + "legend": { + "percentage": true, + "show": true, + "values": true + }, + "legendType": "Under graph", + "pieType": "pie", + "targets": [ + { + "expr": "count(ceph_osd_stat_bytes < 1099511627776)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "<1TB", + "refId": "A" + }, + { + "expr": "count(ceph_osd_stat_bytes >= 1099511627776 < 2199023255552)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "<2TB", + "refId": "B" + }, + { + "expr": "count(ceph_osd_stat_bytes >= 2199023255552 < 3298534883328)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "<3TB", + "refId": "C" + }, + { + "expr": "count(ceph_osd_stat_bytes >= 3298534883328 < 4398046511104)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "<4TB", + "refId": "D" + }, + { + "expr": "count(ceph_osd_stat_bytes >= 4398046511104 < 6597069766656)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "<6TB", + "refId": "E" + }, + { + "expr": 
"count(ceph_osd_stat_bytes >= 6597069766656 < 8796093022208)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "<8TB", + "refId": "F" + }, + { + "expr": "count(ceph_osd_stat_bytes >= 8796093022208 < 10995116277760)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "<10TB", + "refId": "G" + }, + { + "expr": "count(ceph_osd_stat_bytes >= 10995116277760 < 13194139533312)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "<12TB", + "refId": "H" + }, + { + "expr": "count(ceph_osd_stat_bytes >= 13194139533312)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "<12TB+", + "refId": "I" + } + ], + "title": "OSD Size Summary", + "type": "grafana-piechart-panel", + "valueName": "current" }, - "lines": false, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "ceph_osd_numpg\n", - "format": "time_series", - "instant": true, - "intervalFactor": 1, - "legendFormat": "PGs per OSD", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Distribution of PGs per OSD", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" + { + "aliasColors": { }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_osd_numpg\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PGs per OSD", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Distribution of PGs per OSD", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": 20, + "mode": "histogram", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "# of OSDs", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ] }, - "type": "graph", - "xaxis": { - "buckets": 20, - "mode": "histogram", - "name": null, - "show": true, - "values": [ - "total" - ] + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 10, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "R/W Profile", + "titleSize": "h6", + "type": "row" }, - "yaxes": [ - { - "decimals": 0, - "format": "short", - "label": "# of OSDs", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + 
"description": "Show the read/write workload profile overtime", + "fill": 1, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(irate(ceph_pool_rd[30s])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Reads", + "refId": "A" + }, + { + "expr": "round(sum(irate(ceph_pool_wr[30s])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Read/Write Profile", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "refresh": "10s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + } ] - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 16 - }, - "id": 20, - "panels": [], - "title": "R/W Profile", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Show the read/write workload profile overtime", - "fill": 1, - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 17 - }, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "round(sum(irate(ceph_pool_rd[30s])))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Reads", - "refId": "A" - }, - { - "expr": "round(sum(irate(ceph_pool_wr[30s])))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Writes", - "refId": "B" - } + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Read/Write Profile", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 
null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" ] - } - ], - "refresh": "10s", - "schemaVersion": 16, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "current": { - "tags": [], - "text": "default", - "value": "default" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "OSD Overview", - "uid": "lo02I1Aiz", - "version": 3 + }, + "timezone": "", + "title": "OSD Overview", + "uid": "lo02I1Aiz", + "version": 0 } diff --git a/ceph/monitoring/grafana/dashboards/pool-detail.json b/ceph/monitoring/grafana/dashboards/pool-detail.json index dd6bc3927..e64cc3d82 100644 --- a/ceph/monitoring/grafana/dashboards/pool-detail.json +++ b/ceph/monitoring/grafana/dashboards/pool-detail.json @@ -1,665 +1,663 @@ { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.3.2" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "5.0.0" - }, - { - "type": "panel", - "id": "singlestat", - "name": "Singlestat", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ + "__inputs": [ ], + "__requires": [ { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1551858875941, - "links": [], - "panels": [ - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "decimals": 2, - "format": "percentunit", - "gauge": { - "maxValue": 1, - "minValue": 0, - "show": true, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 7, - "w": 7, - "x": 0, - "y": 0 - }, - "id": 12, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "(ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail)) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", - "format": "time_series", - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": ".7,.8", - "title": "Capacity used", - "type": "singlestat", - "valueFontSize": "50%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": 
false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "description": "Time till pool is full assuming the average fill rate of the last 6 hours", - "format": "s", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 7, - "w": 5, - "x": 7, - "y": 0 - }, - "id": 14, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "(ceph_pool_max_avail / deriv(ceph_pool_stored[6h])) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"} > 0", - "format": "time_series", - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": "", - "title": "Time till full", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "inf", - "value": "null" - }, - { - "op": "=", - "text": "inf", - "value": "N/A" - } - ], - "valueName": "current" - }, - { - "aliasColors": { - "read_op_per_sec": "#3F6833", - "write_op_per_sec": "#E5AC0E" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 0 - }, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "minSpan": 12, - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "deriv(ceph_pool_objects[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Objects per second", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "$pool_name Object Ingress/Egress", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ops", - "label": "Objects out(-) / in(+) ", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": { - "read_op_per_sec": "#3F6833", - "write_op_per_sec": "#E5AC0E" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 7 + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.2" }, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": 
false - }, - "lines": true, - "linewidth": 1, - "links": [], - "minSpan": 12, - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "reads", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "irate(ceph_pool_rd[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "reads", - "refId": "B" - }, - { - "expr": "irate(ceph_pool_wr[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "writes", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "$pool_name Client IOPS", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "iops", - "label": "Read (-) / Write (+)", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": { - "read_op_per_sec": "#3F6833", - "write_op_per_sec": "#E5AC0E" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 7 - }, - "id": 7, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "minSpan": 12, - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "reads", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "irate(ceph_pool_rd_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "reads", - "refId": "A" - }, - { - "expr": "irate(ceph_pool_wr_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "writes", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "$pool_name Client Throughput", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" }, - "yaxes": [ - { - "format": "Bps", - "label": "Read (-) / Write (+)", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null + { + "id": "singlestat", + "name": "Singlestat", + "type": "panel", + "version": "5.0.0" } - }, - { - "aliasColors": { - "read_op_per_sec": "#3F6833", - "write_op_per_sec": "#E5AC0E" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - 
"datasource": "$datasource", - "fill": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 14 + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 7, + "w": 7, + "x": 0, + "y": 0 + }, + "id": 2, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "(ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail)) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": ".7,.8", + "title": "Capacity used", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": 100, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Time till pool is full assuming the average fill rate of the last 6 hours", + "format": "s", + "gauge": { + "maxValue": false, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 7, + "y": 0 + }, + "id": 3, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": "" + }, + "tableColumn": "", + "targets": [ + { + "expr": "(ceph_pool_max_avail / deriv(ceph_pool_stored[6h])) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"} > 0", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "current", + "title": "Time till full", + "type": "singlestat", + "valueFontSize": "80%", + 
"valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": false }, - "lines": true, - "linewidth": 1, - "links": [], - "minSpan": 12, - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "ceph_pool_objects * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Number of Objects", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "$pool_name Objects", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" + { + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "deriv(ceph_pool_objects[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Objects per second", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Object Ingress/Egress", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ops", + "label": "Objects out(-) / in(+) ", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 7 + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "reads", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_pool_rd[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "reads", + "refId": "A" + }, + 
{ + "expr": "irate(ceph_pool_wr[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Client IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "iops", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, - "yaxes": [ - { - "format": "short", - "label": "Objects", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "15s", - "schemaVersion": 16, - "style": "dark", - "tags": [], - "templating": { - "list": [ { - "current": { - "text": "Prometheus admin.virt1.home.fajerski.name:9090", - "value": "Prometheus admin.virt1.home.fajerski.name:9090" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 7 + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "reads", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_pool_rd_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "reads", + "refId": "A" + }, + { + "expr": "irate(ceph_pool_wr_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "writes", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Client Throughput", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Read (-) / Write (+)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] }, { - "allValue": null, - "current": {}, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "Pool Name", - "multi": false, - "name": "pool_name", - "options": [], - 
"query": "label_values(ceph_pool_metadata,name)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false + "aliasColors": { + "read_op_per_sec": "#3F6833", + "write_op_per_sec": "#E5AC0E" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 14 + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_pool_objects * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Number of Objects", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$pool_name Objects", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "Objects", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "15s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Ceph Pool Details", - "uid": "-xyV8KCiz", - "version": 1 + ], + "refresh": "15s", + "rows": [ ], + "schemaVersion": 22, + "style": "dark", + "tags": [ ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus admin.virt1.home.fajerski.name:9090", + "value": "Prometheus admin.virt1.home.fajerski.name:9090" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Pool Name", + "multi": false, + "name": "pool_name", + "options": [ ], + "query": "label_values(ceph_pool_metadata,name)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Ceph Pool Details", + "uid": "-xyV8KCiz", + "version": 0 } diff --git a/ceph/monitoring/grafana/dashboards/pool-overview.json b/ceph/monitoring/grafana/dashboards/pool-overview.json index c405f6075..d6c62e6e5 100644 --- 
a/ceph/monitoring/grafana/dashboards/pool-overview.json +++ b/ceph/monitoring/grafana/dashboards/pool-overview.json @@ -1,1564 +1,1483 @@ { - "annotations": { - "list": [ + "__inputs": [ ], + "__requires": [ ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1617656284287, - "links": [], - "panels": [ - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 0, - "y": 0 - }, - "id": 21, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", - "targets": [ - { - "expr": "count(ceph_pool_metadata)", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "Pools", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "description": "Count of the pools that have compression enabled", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 3, - "y": 0 - }, - "id": 7, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "pluginVersion": "6.7.4", - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", - "targets": [ - { - "expr": "count(ceph_pool_metadata{compression_mode!=\"none\"})", - "interval": 
"", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "Pools with Compression", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "decimals": 1, - "description": "Total raw capacity available to the cluster", - "format": "bytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 6, - "y": 0 - }, - "id": 27, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(ceph_osd_stat_bytes)", - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "Total Raw Capacity", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "description": "Total raw capacity consumed by user data and associated overheads (metadata + redundancy)", - "format": "bytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 9, - "y": 0 - }, - "id": 25, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(ceph_pool_bytes_used)", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "Raw Capacity Consumed", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current", - "decimals": 2 - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "decimals": 1, - "description": "Total of client data stored in 
the cluster", - "format": "bytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 12, - "y": 0 - }, - "id": 23, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(ceph_pool_stored)", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "Logical Stored ", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "decimals": 1, - "description": "A compression saving is determined as the data eligible to be compressed minus the capacity used to store the data after compression", - "format": "bytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 15, - "y": 0 - }, - "id": 9, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used)", - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "Compression Savings", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "description": "Indicates how suitable the data is within the pools that are/have been enabled for compression - averaged across all pools holding compressed data\n", - "format": "percent", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 18, - "y": 0 - }, - "id": 17, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - 
"value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", - "targets": [ - { - "expr": "(sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_stored_raw and ceph_pool_compress_under_bytes > 0)) * 100", - "format": "table", - "hide": false, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "Compression Eligibility", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "description": "This factor describes the average ratio of data eligible to be compressed divided by the data actually stored. It does not account for data written that was ineligible for compression (too small, or compression yield too low)", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 21, - "y": 0 - }, - "id": 15, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "80%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_compress_bytes_used > 0)", - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "Compression Factor", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "columns": [], - "datasource": "$datasource", - "fontSize": "100%", - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 3 - }, - "id": 5, - "links": [], - "maxPerRow": 3, - "pageSize": null, - "scroll": true, - "showHeader": true, - "sort": { - "col": 5, - "desc": true - }, - "styles": [ - { - "alias": "", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "Time", - "thresholds": [], - "type": "hidden", - "unit": "short" - }, - { - "alias": "", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "instance", - "thresholds": [], - "type": "hidden", - "unit": "short" - }, - { 
- "alias": "", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "job", - "thresholds": [], - "type": "hidden", - "unit": "short" - }, - { - "alias": "Pool Name", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "name", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "Pool ID", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "pattern": "pool_id", - "thresholds": [], - "type": "hidden", - "unit": "none" - }, - { - "alias": "Compression Factor", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 1, - "mappingType": 1, - "pattern": "Value #A", - "thresholds": [], - "type": "number", - "unit": "none" - }, - { - "alias": "% Used", - "align": "auto", - "colorMode": "value", - "colors": [ - "rgb(0, 0, 0)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "Value #D", - "thresholds": [ - "70", - "85" - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Usable Free", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "Value #B", - "thresholds": [], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Compression Eligibility", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "mappingType": 1, - "pattern": "Value #C", - "thresholds": [], - "type": "number", - "unit": "percent" - }, - { - "alias": "Compression Savings", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 1, - "mappingType": 1, - "pattern": "Value #E", - "thresholds": [], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Growth (5d)", - "align": "auto", - "colorMode": "value", - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "Value #F", - "thresholds": [ - "0", - "0" - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "IOPS", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "mappingType": 1, - "pattern": "Value #G", - "thresholds": [], - "type": "number", - "unit": "none" - }, - { - "alias": "Bandwidth", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 
0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "mappingType": 1, - "pattern": "Value #H", - "thresholds": [], - "type": "number", - "unit": "Bps" - }, - { - "alias": "", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "__name__", - "thresholds": [], - "type": "hidden", - "unit": "short" - }, - { - "alias": "", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "type", - "thresholds": [], - "type": "hidden", - "unit": "short" - }, - { - "alias": "", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "compression_mode", - "thresholds": [], - "type": "hidden", - "unit": "short" - }, - { - "alias": "Type", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "description", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "Stored", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 1, - "mappingType": 1, - "pattern": "Value #J", - "thresholds": [], - "type": "number", - "unit": "bytes" - }, - { - "alias": "", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "Value #I", - "thresholds": [], - "type": "hidden", - "unit": "short" - }, - { - "alias": "Compression", - "align": "auto", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "Value #K", - "thresholds": [], - "type": "string", - "unit": "short", - "valueMaps": [ - { - "text": "ON", - "value": "1" + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 0 + }, + "id": 2, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 } - ] - } - ], - "targets": [ - { - "expr": "(ceph_pool_percent_used * on(pool_id) group_left(name) ceph_pool_metadata)", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "D" - }, - { - "expr": "ceph_pool_stored * on(pool_id) group_left ceph_pool_metadata", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "J" - }, - { - "expr": "ceph_pool_max_avail * on(pool_id) group_left(name) 
ceph_pool_metadata", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "B" - }, - { - "expr": "delta(ceph_pool_stored[5d])", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "F" - }, - { - "expr": "ceph_pool_metadata", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "I" - }, - { - "expr": "ceph_pool_metadata{compression_mode!=\"none\"}", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "K" - }, - { - "expr": "(ceph_pool_compress_under_bytes / ceph_pool_compress_bytes_used > 0) and on(pool_id) (((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100 > 0.5)", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - }, - { - "expr": "((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "C" - }, - { - "expr": "(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used > 0)", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "E" - }, - { - "expr": "rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "G" - }, - { - "expr": "rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "H" - }, - { - "expr": "", - "interval": "", - "legendFormat": "", - "refId": "L" - } - ], - "title": "Pool Overview", - "transform": "table", - "type": "table" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "This chart shows the sum of read and write IOPS from all clients by pool", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 9 - }, - "hiddenSeries": false, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "maxPerRow": 2, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "topk($topk,round((rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])),1) * on(pool_id) group_left(instance,name) ceph_pool_metadata) ", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{name}} ", - "refId": "F" - }, - { - "expr": "topk($topk,rate(ceph_pool_wr[30s]) + on(pool_id) group_left(instance,name) ceph_pool_metadata) ", - "format": "time_series", - "hide": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{name}} - write", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Top $topk Client IOPS by Pool", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 0, - "format": "short", - "label": "IOPS", - "logBase": 1, - "max": null, - "min": "0", - 
"show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "The chart shows the sum of read and write bytes from all clients, by pool", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 9 + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(ceph_pool_metadata)", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Pools", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" }, - "hiddenSeries": false, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Count of the pools that have compression enabled", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 3, + "y": 0 + }, + "id": 3, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(ceph_pool_metadata{compression_mode!=\"none\"})", + "format": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Pools with Compression", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "lines": true, - "linewidth": 2, - "links": [], - "maxPerRow": 2, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Total raw capacity available to the cluster", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 6, + "y": 0 + }, + "id": 4, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + 
}, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_osd_stat_bytes)", + "format": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Total Raw Capacity", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "topk($topk,(rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])) * on(pool_id) group_left(instance,name) ceph_pool_metadata)", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{name}}", - "refId": "A", - "textEditor": true - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Top $topk Client Bandwidth by Pool", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Total raw capacity consumed by user data and associated overheads (metadata + redundancy)", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 0 + }, + "id": 5, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_pool_bytes_used)", + "format": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Raw Capacity Consumed", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Total of client data stored in the cluster", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 12, + "y": 0 + }, + "id": 6, + "interval": null, + "links": [ ], 
+ "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_pool_stored)", + "format": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Logical Stored ", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "yaxes": [ - { - "format": "Bps", - "label": "Throughput", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Historical view of capacity usage, to help identify growth and trends in pool consumption", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 17 + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "A compression saving is determined as the data eligible to be compressed minus the capacity used to store the data after compression", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 15, + "y": 0 + }, + "id": 7, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used)", + "format": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Compression Savings", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "hiddenSeries": false, - "id": 19, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "Indicates how suitable the data is within the pools that are/have been enabled for compression - averaged across all pools holding compressed data\n", + 
"format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 18, + "y": 0 + }, + "id": 8, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_stored_raw and ceph_pool_compress_under_bytes > 0)) * 100", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Compression Eligibility", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "description": "This factor describes the average ratio of data eligible to be compressed divided by the data actually stored. It does not account for data written that was ineligible for compression (too small, or compression yield too low)", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 21, + "y": 0 + }, + "id": 9, + "interval": null, + "links": [ ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_compress_bytes_used > 0)", + "format": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Compression Factor", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "ceph_pool_bytes_used * on(pool_id) group_right ceph_pool_metadata", - "interval": "", - "legendFormat": "{{name}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": "14d", - "timeRegions": [ - { - "colorMode": "background6", - "fill": true, - "fillColor": "rgba(234, 112, 112, 0.12)", - "line": false, - "lineColor": "rgba(237, 46, 24, 0.60)", - "op": "time" - } - ], - 
"timeShift": null, - "title": "Pool Capacity Usage (RAW)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" + { + "columns": [ ], + "datasource": "$datasource", + "description": "", + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 10, + "sort": { + "col": 5, + "desc": true + }, + "styles": [ + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Time", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "instance", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "job", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Pool Name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "name", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Pool ID", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pool_id", + "thresholds": [ ], + "type": "hidden", + "unit": "none", + "valueMaps": [ ] + }, + { + "alias": "Compression Factor", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [ ], + "type": "number", + "unit": "none", + "valueMaps": [ ] + }, + { + "alias": "% Used", + "colorMode": "value", + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [ + "70", + "85" + ], + "type": "number", + "unit": "percentunit", + "valueMaps": [ ] + }, + { + "alias": "Usable Free", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [ ], + "type": "number", + "unit": "bytes", + "valueMaps": [ ] + }, + { + "alias": "Compression Eligibility", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [ ], + "type": "number", + "unit": "percent", + "valueMaps": [ ] + }, + { + "alias": "Compression Savings", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + 
], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [ ], + "type": "number", + "unit": "bytes", + "valueMaps": [ ] + }, + { + "alias": "Growth (5d)", + "colorMode": "value", + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #F", + "thresholds": [ + "0", + "0" + ], + "type": "number", + "unit": "bytes", + "valueMaps": [ ] + }, + { + "alias": "IOPS", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #G", + "thresholds": [ ], + "type": "number", + "unit": "none", + "valueMaps": [ ] + }, + { + "alias": "Bandwidth", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #H", + "thresholds": [ ], + "type": "number", + "unit": "Bps", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "__name__", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "type", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "compression_mode", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Type", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "description", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Stored", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #J", + "thresholds": [ ], + "type": "number", + "unit": "bytes", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #I", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Compression", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #K", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ + { + 
"text": "ON", + "value": "1" + } + ] + } + ], + "targets": [ + { + "expr": "(ceph_pool_compress_under_bytes / ceph_pool_compress_bytes_used > 0) and on(pool_id) (((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100 > 0.5)", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + }, + { + "expr": "ceph_pool_max_avail * on(pool_id) group_left(name) ceph_pool_metadata", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B" + }, + { + "expr": "((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "C" + }, + { + "expr": "(ceph_pool_percent_used * on(pool_id) group_left(name) ceph_pool_metadata)", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "D" + }, + { + "expr": "(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used > 0)", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "E" + }, + { + "expr": "delta(ceph_pool_stored[5d])", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "F" + }, + { + "expr": "rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "G" + }, + { + "expr": "rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "H" + }, + { + "expr": "ceph_pool_metadata", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "I" + }, + { + "expr": "ceph_pool_stored * on(pool_id) group_left ceph_pool_metadata", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "J" + }, + { + "expr": "ceph_pool_metadata{compression_mode!=\"none\"}", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "K" + }, + { + "expr": "", + "format": "", + "intervalFactor": "", + "legendFormat": "", + "refId": "L" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Pool Overview", + "transform": "table", + "type": "table" }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "This chart shows the sum of read and write IOPS from all clients by pool", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk($topk,round((rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])),1) * on(pool_id) group_left(instance,name) ceph_pool_metadata) ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{name}} ", + "refId": "A" + }, + { + "expr": "topk($topk,rate(ceph_pool_wr[30s]) + on(pool_id) group_left(instance,name) ceph_pool_metadata) ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{name}} - write", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + 
"timeShift": null, + "title": "Top $topk Client IOPS by Pool", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": "IOPS", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, - "yaxes": [ - { - "decimals": 1, - "format": "bytes", - "label": "Capacity Used", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "15s", - "schemaVersion": 22, - "style": "dark", - "tags": [], - "templating": { - "list": [ { - "current": { - "selected": false, - "text": "Dashboard1", - "value": "Dashboard1" - }, - "hide": 0, - "includeAll": false, - "label": "Data Source", - "multi": false, - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "The chart shows the sum of read and write bytes from all clients, by pool", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk($topk,(rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])) * on(pool_id) group_left(instance,name) ceph_pool_metadata)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Top $topk Client Bandwidth by Pool", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": "Throughput", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, { - "current": { - "text": "15", - "value": "15" - }, - "hide": 0, - "label": "Top K", - "name": "topk", - "options": [ - { - "text": "15", - "value": "15" - } - ], - "query": "15", - "skipUrlSync": false, - "type": "textbox" + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Historical view of capacity usage, to help identify growth and trends in pool consumption", + "fill": 1, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 13, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + 
"values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ceph_pool_bytes_used * on(pool_id) group_right ceph_pool_metadata", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Pool Capacity Usage (RAW)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": "Capacity Used", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "15s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Ceph Pools Overview", - "uid": "z99hzWtmk", - "variables": { - "list": [] - }, - "version": 10 + ], + "refresh": "15s", + "rows": [ ], + "schemaVersion": 22, + "style": "dark", + "tags": [ ], + "templating": { + "list": [ + { + "current": { + "text": "Dashboard1", + "value": "Dashboard1" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "15", + "value": "15" + }, + "hide": 0, + "includeAll": false, + "label": "TopK", + "multi": false, + "name": "topk", + "options": [ + { + "text": "15", + "value": "15" + } + ], + "query": "15", + "refresh": 0, + "type": "custom" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Ceph Pools Overview", + "uid": "z99hzWtmk", + "version": 0 } diff --git a/ceph/monitoring/grafana/dashboards/radosgw-detail.json b/ceph/monitoring/grafana/dashboards/radosgw-detail.json index 648abab89..432eecc83 100644 --- a/ceph/monitoring/grafana/dashboards/radosgw-detail.json +++ b/ceph/monitoring/grafana/dashboards/radosgw-detail.json @@ -1,491 +1,499 @@ { - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.0.0" - }, - { - "type": "panel", - "id": "grafana-piechart-panel", - "name": "Pie Chart", - "version": "1.3.3" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ + "__inputs": [ ], + "__requires": [ { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1534386250869, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": 
{ - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 12, - "panels": [], - "repeat": null, - "title": "RGW Host Detail : $rgw_servers", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 8, - "w": 6, - "x": 0, - "y": 1 - }, - "id": 34, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (ceph_daemon) (rate(ceph_rgw_get_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_get_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[30s]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "GET {{ceph_daemon}}", - "refId": "A" - }, - { - "expr": "sum by (ceph_daemon)(rate(ceph_rgw_put_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_put_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[30s]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "PUT {{ceph_daemon}}", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "$rgw_servers GET/PUT Latencies", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 7, - "x": 6, - "y": 1 - }, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "rate(ceph_rgw_get_b{ceph_daemon=~\"[[rgw_servers]]\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "GETs {{ceph_daemon}}", - "refId": "B" - }, - { - "expr": "rate(ceph_rgw_put_b{ceph_daemon=~\"[[rgw_servers]]\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "PUTs {{ceph_daemon}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Bandwidth by HTTP Operation", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 0, - "format": "bytes", - "label": "", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - "GETs": "#7eb26d", - 
"Other": "#447ebc", - "PUTs": "#eab839", - "Requests": "#3f2b5b", - "Requests Failed": "#bf1b00" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 7, - "x": 13, - "y": 1 + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.0.0" }, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "rate(ceph_rgw_failed_req{ceph_daemon=~\"[[rgw_servers]]\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Requests Failed {{ceph_daemon}}", - "refId": "B" - }, - { - "expr": "rate(ceph_rgw_get{ceph_daemon=~\"[[rgw_servers]]\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "GETs {{ceph_daemon}}", - "refId": "C" - }, - { - "expr": "rate(ceph_rgw_put{ceph_daemon=~\"[[rgw_servers]]\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "PUTs {{ceph_daemon}}", - "refId": "D" - }, - { - "expr": "rate(ceph_rgw_req{ceph_daemon=~\"[[rgw_servers]]\"}[30s]) -\n (rate(ceph_rgw_get{ceph_daemon=~\"[[rgw_servers]]\"}[30s]) +\n rate(ceph_rgw_put{ceph_daemon=~\"[[rgw_servers]]\"}[30s]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Other {{ceph_daemon}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "HTTP Request Breakdown", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "id": "grafana-piechart-panel", + "name": "Pie Chart", + "type": "panel", + "version": "1.3.3" }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } ] - }, - { - "aliasColors": { - "Failures": "#bf1b00", - "GETs": "#7eb26d", - "Other (HEAD,POST,DELETE)": "#447ebc", - "PUTs": "#eab839" - }, - "breakPoint": "50%", - "cacheTimeout": null, - "combine": { - "label": "Others", - "threshold": 0 + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "RGW Host Detail : $rgw_servers", + "titleSize": "h6", + "type": "row" }, - "datasource": "$datasource", - "fontSize": "80%", - "format": "none", - "gridPos": { - "h": 8, - "w": 4, - "x": 20, - "y": 1 + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + 
"dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (ceph_daemon) (rate(ceph_rgw_get_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_get_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GET {{ceph_daemon}}", + "refId": "A" + }, + { + "expr": "sum by (ceph_daemon)(rate(ceph_rgw_put_initial_lat_sum{ceph_daemon=~\"($rgw_servers)\"}[30s]) / rate(ceph_rgw_put_initial_lat_count{ceph_daemon=~\"($rgw_servers)\"}[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUT {{ceph_daemon}}", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "$rgw_servers GET/PUT Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, - "id": 23, - "interval": null, - "legend": { - "show": true, - "values": true + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 8, + "w": 7, + "x": 6, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_rgw_get_b{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs {{ceph_daemon}}", + "refId": "A" + }, + { + "expr": "rate(ceph_rgw_put_b{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs {{ceph_daemon}}", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Bandwidth by HTTP Operation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, - "legendType": "Under graph", - "links": [], - "maxDataPoints": 3, - 
"nullPointMode": "connected", - "pieType": "pie", - "strokeWidth": 1, - "targets": [ - { - "expr": "rate(ceph_rgw_failed_req{ceph_daemon=~\"[[rgw_servers]]\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Failures {{ceph_daemon}}", - "refId": "A" - }, - { - "expr": "rate(ceph_rgw_get{ceph_daemon=~\"[[rgw_servers]]\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "GETs {{ceph_daemon}}", - "refId": "B" - }, - { - "expr": "rate(ceph_rgw_put{ceph_daemon=~\"[[rgw_servers]]\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "PUTs {{ceph_daemon}}", - "refId": "C" - }, - { - "expr": "rate(ceph_rgw_req{ceph_daemon=~\"[[rgw_servers]]\"}[30s]) -\n (rate(ceph_rgw_get{ceph_daemon=~\"[[rgw_servers]]\"}[30s]) +\n rate(ceph_rgw_put{ceph_daemon=~\"[[rgw_servers]]\"}[30s]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Other (DELETE,LIST) {{ceph_daemon}}", - "refId": "D" - } - ], - "title": "Workload Breakdown", - "type": "grafana-piechart-panel", - "valueName": "current" - } - ], - "refresh": "15s", - "schemaVersion": 16, - "style": "dark", - "tags": [ - "overview" - ], - "templating": { - "list": [ { - "current": { - "tags": [], - "text": "default", - "value": "default" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" + "aliasColors": { + "GETs": "#7eb26d", + "Other": "#447ebc", + "PUTs": "#eab839", + "Requests": "#3f2b5b", + "Requests Failed": "#bf1b00" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 8, + "w": 7, + "x": 13, + "y": 1 + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_rgw_failed_req{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Requests Failed {{ceph_daemon}}", + "refId": "A" + }, + { + "expr": "rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs {{ceph_daemon}}", + "refId": "B" + }, + { + "expr": "rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs {{ceph_daemon}}", + "refId": "C" + }, + { + "expr": "rate(ceph_rgw_req{ceph_daemon=~\"$rgw_servers\"}[30s]) -\n (rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s]) +\n rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Other {{ceph_daemon}}", + "refId": "D" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "HTTP Request Breakdown", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 
0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, { - "allValue": null, - "current": {}, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "rgw_servers", - "options": [], - "query": "label_values(ceph_rgw_req, ceph_daemon)", - "refresh": 1, - "regex": "", - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false + "aliasColors": { + "Failures": "#bf1b00", + "GETs": "#7eb26d", + "Other (HEAD,POST,DELETE)": "#447ebc", + "PUTs": "#eab839", + "Requests": "#3f2b5b" + }, + "datasource": "$datasource", + "description": "", + "gridPos": { + "h": 8, + "w": 4, + "x": 20, + "y": 1 + }, + "id": 6, + "legend": { + "percentage": true, + "show": true, + "values": true + }, + "legendType": "Under graph", + "pieType": "pie", + "targets": [ + { + "expr": "rate(ceph_rgw_failed_req{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Failures {{ceph_daemon}}", + "refId": "A" + }, + { + "expr": "rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs {{ceph_daemon}}", + "refId": "B" + }, + { + "expr": "rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs {{ceph_daemon}}", + "refId": "C" + }, + { + "expr": "rate(ceph_rgw_req{ceph_daemon=~\"$rgw_servers\"}[30s]) -\n (rate(ceph_rgw_get{ceph_daemon=~\"$rgw_servers\"}[30s]) +\n rate(ceph_rgw_put{ceph_daemon=~\"$rgw_servers\"}[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Other (DELETE,LIST) {{ceph_daemon}}", + "refId": "D" + } + ], + "title": "Workload Breakdown", + "type": "grafana-piechart-panel", + "valueName": "current" } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "15s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "RGW Instance Detail", - "uid": "x5ARzZtmk", - "version": 2 + ], + "refresh": "15s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "overview" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "", + "multi": false, + "name": "rgw_servers", + "options": [ ], + "query": "label_values(ceph_rgw_req, ceph_daemon)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RGW Instance Detail", + "uid": "x5ARzZtmk", + "version": 0 } diff --git a/ceph/monitoring/grafana/dashboards/radosgw-overview.json 
b/ceph/monitoring/grafana/dashboards/radosgw-overview.json index 487d736b3..f996fed95 100644 --- a/ceph/monitoring/grafana/dashboards/radosgw-overview.json +++ b/ceph/monitoring/grafana/dashboards/radosgw-overview.json @@ -1,630 +1,663 @@ { - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.0.0" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ + "__inputs": [ ], + "__requires": [ { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1534386107523, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 2, - "panels": [], - "title": "RGW Overview - All Gateways", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 1 - }, - "id": 29, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "GET AVG", - "refId": "A" - }, - { - "expr": "rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "PUT AVG", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Average GET/PUT Latencies", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 7, - "x": 8, - "y": 1 - }, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by(rgw_host) (label_replace(rate(ceph_rgw_req[30s]), \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{rgw_host}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Total Requests/sec by RGW Instance", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": 
"individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 0, - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Latencies are shown stacked, without a yaxis to provide a visual indication of GET latency imbalance across RGW hosts", - "fill": 1, - "gridPos": { - "h": 7, - "w": 6, - "x": 15, - "y": 1 - }, - "id": 31, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(rate(ceph_rgw_get_initial_lat_sum[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_get_initial_lat_count[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{rgw_host}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "GET Latencies by RGW Instance", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": false - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Total bytes transferred in/out of all radosgw instances within the cluster", - "fill": 1, - "gridPos": { - "h": 6, - "w": 8, - "x": 0, - "y": 8 - }, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.0.0" }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(ceph_rgw_get_b[30s]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "GETs", - "refId": "A" - }, - { - "expr": "sum(rate(ceph_rgw_put_b[30s]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "PUTs", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Bandwidth Consumed by Type", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": 
null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Total bytes transferred in/out through get/put operations, by radosgw instance", - "fill": 1, - "gridPos": { - "h": 6, - "w": 7, - "x": 8, - "y": 8 - }, - "id": 9, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by(rgw_host) (\n (label_replace(rate(ceph_rgw_get_b[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")) + \n (label_replace(rate(ceph_rgw_put_b[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\"))\n)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{rgw_host}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Bandwidth by RGW Instance", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "RGW Overview - All Gateways", + "titleSize": "h6", + "type": "row" }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Latencies are shown stacked, without a yaxis to provide a visual indication of PUT latency imbalance across RGW hosts", - "fill": 1, - "gridPos": { - "h": 6, - "w": 6, - "x": 15, - "y": 8 + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ceph_rgw_get_initial_lat_sum[30s]) / 
rate(ceph_rgw_get_initial_lat_count[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GET AVG", + "refId": "A" + }, + { + "expr": "rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUT AVG", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Average GET/PUT Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, - "id": 32, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 7, + "x": 8, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by(rgw_host) (label_replace(rate(ceph_rgw_req[30s]), \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Total Requests/sec by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(rate(ceph_rgw_put_initial_lat_sum[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_put_initial_lat_count[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{rgw_host}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "PUT Latencies by RGW Instance", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Latencies are shown stacked, without a yaxis to provide a visual indication of GET latency imbalance across RGW hosts", + "fill": 1, + 
"gridPos": { + "h": 7, + "w": 6, + "x": 15, + "y": 1 + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(rate(ceph_rgw_get_initial_lat_sum[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_get_initial_lat_count[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "GET Latencies by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Total bytes transferred in/out of all radosgw instances within the cluster", + "fill": 1, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 8 + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(ceph_rgw_get_b[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "GETs", + "refId": "A" + }, + { + "expr": "sum(rate(ceph_rgw_put_b[30s]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "PUTs", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Bandwidth Consumed by Type", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, - "yaxes": [ - { - "decimals": null, - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": false - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "refresh": "15s", - "schemaVersion": 16, - "style": "dark", - "tags": [ - "overview" - ], - "templating": { - "list": [ { - "allValue": 
null, - "current": {}, - "datasource": "$datasource", - "hide": 2, - "includeAll": true, - "label": null, - "multi": false, - "name": "rgw_servers", - "options": [], - "query": "label_values(ceph_rgw_req, ceph_daemon)", - "refresh": 1, - "regex": "", - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Total bytes transferred in/out through get/put operations, by radosgw instance", + "fill": 1, + "gridPos": { + "h": 6, + "w": 7, + "x": 8, + "y": 8 + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by(rgw_host) (\n (label_replace(rate(ceph_rgw_get_b[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")) + \n (label_replace(rate(ceph_rgw_put_b[30s]), \"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\"))\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Bandwidth by RGW Instance", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, { - "current": { - "tags": [], - "text": "default", - "value": "default" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Latencies are shown stacked, without a yaxis to provide a visual indication of PUT latency imbalance across RGW hosts", + "fill": 1, + "gridPos": { + "h": 6, + "w": 6, + "x": 15, + "y": 8 + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(rate(ceph_rgw_put_initial_lat_sum[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\") / \nlabel_replace(rate(ceph_rgw_put_initial_lat_count[30s]),\"rgw_host\",\"$1\",\"ceph_daemon\",\"rgw.(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{rgw_host}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "PUT Latencies by RGW Instance", + "tooltip": { + "shared": true, + 
"sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "15s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "RGW Overview", - "uid": "WAkugZpiz", - "version": 2 + ], + "refresh": "15s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "overview" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "", + "multi": false, + "name": "rgw_servers", + "options": [ ], + "query": "label_values(ceph_rgw_req, ceph_daemon)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RGW Overview", + "uid": "WAkugZpiz", + "version": 0 } diff --git a/ceph/monitoring/grafana/dashboards/radosgw-sync-overview.json b/ceph/monitoring/grafana/dashboards/radosgw-sync-overview.json index d6cd5e98a..442da5759 100644 --- a/ceph/monitoring/grafana/dashboards/radosgw-sync-overview.json +++ b/ceph/monitoring/grafana/dashboards/radosgw-sync-overview.json @@ -29,6 +29,7 @@ } ] }, + "description": "", "editable": false, "gnetId": null, "graphTooltip": 0, @@ -42,6 +43,7 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "description": "", "fill": 1, "gridPos": { "h": 7, @@ -126,6 +128,7 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "description": "", "fill": 1, "gridPos": { "h": 7, @@ -210,6 +213,7 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "description": "", "fill": 1, "gridPos": { "h": 7, @@ -294,6 +298,7 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "description": "", "fill": 1, "gridPos": { "h": 7, @@ -386,9 +391,9 @@ "allValue": null, "current": { }, "datasource": "$datasource", - "hide": 2, + "hide": 0, "includeAll": true, - "label": null, + "label": "", "multi": false, "name": "rgw_servers", "options": [ ], diff --git a/ceph/monitoring/grafana/dashboards/rbd-details.json b/ceph/monitoring/grafana/dashboards/rbd-details.json index 59932a5ee..d943b16a6 100644 --- a/ceph/monitoring/grafana/dashboards/rbd-details.json +++ b/ceph/monitoring/grafana/dashboards/rbd-details.json @@ -1,409 +1,413 @@ { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.3.3" - }, - { - "type": "panel", - "id": "graph", - 
"name": "Graph", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ + "__inputs": [ ], + "__requires": [ { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "description": "Detailed Performance of RBD Images (IOPS/Throughput/Latency)", - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1584428820779, - "links": [], - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$Datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 8, - "x": 0, - "y": 0 - }, - "id": 6, - "legend": { - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "irate(ceph_rbd_write_ops{pool=\"$Pool\", image=\"$Image\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Write", - "refId": "A" - }, - { - "expr": "irate(ceph_rbd_read_ops{pool=\"$Pool\", image=\"$Image\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Read", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "IOPS", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "iops", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "iops", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": true, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$Datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 8, - "x": 8, - "y": 0 - }, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "irate(ceph_rbd_write_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Write", - "refId": "A" - }, - { - "expr": "irate(ceph_rbd_read_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Read", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Throughput", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.3.3" }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - 
}, - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": true, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$Datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 8, - "x": 16, - "y": 0 - }, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "irate(ceph_rbd_write_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_write_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Write", - "refId": "A" - }, - { - "expr": "irate(ceph_rbd_read_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_read_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Read", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Average Latency", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ns", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "ns", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": true, - "alignLevel": null + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" } - } - ], - "refresh": false, - "schemaVersion": 16, - "style": "dark", - "tags": [], - "templating": { - "list": [ + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "Detailed Performance of RBD Images (IOPS/Throughput/Latency)", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ { - "current": {}, - "hide": 0, - "label": null, - "name": "Datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$Datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_rbd_write_ops{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": 
"time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "irate(ceph_rbd_read_ops{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "iops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "iops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, { - "allValue": null, - "current": {}, - "datasource": "$Datasource", - "hide": 0, - "includeAll": false, - "label": null, - "multi": false, - "name": "Pool", - "options": [], - "query": "label_values(pool)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$Datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 0 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_rbd_write_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "irate(ceph_rbd_read_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Throughput", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, { - "allValue": null, - "current": {}, - "datasource": "$Datasource", - "hide": 0, - "includeAll": false, - "label": null, - "multi": false, - "name": "Image", - "options": [], - "query": "label_values(image)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$Datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 0 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + 
"linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(ceph_rbd_write_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_write_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "irate(ceph_rbd_read_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_read_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Average Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "RBD Details", - "uid": "YhCYGcuZz", - "version": 7 + ], + "refresh": false, + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "Datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { }, + "datasource": "$Datasource", + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "name": "Pool", + "options": [ ], + "query": "label_values(pool)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { }, + "datasource": "$Datasource", + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "name": "Image", + "options": [ ], + "query": "label_values(image)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RBD Details", + "uid": "YhCYGcuZz", + "version": 0 } diff --git a/ceph/monitoring/grafana/dashboards/rbd-overview.json b/ceph/monitoring/grafana/dashboards/rbd-overview.json index eb15fbcb8..5f0ade741 100644 --- a/ceph/monitoring/grafana/dashboards/rbd-overview.json +++ b/ceph/monitoring/grafana/dashboards/rbd-overview.json @@ -1,685 +1,688 @@ { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.4.2" - 
}, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "5.0.0" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "5.0.0" - }, - { - "type": "panel", - "id": "table", - "name": "Table", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ + "__inputs": [ ], + "__requires": [ { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1547242766440, - "links": [], - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 0 - }, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "round(sum(irate(ceph_rbd_write_ops[30s])))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Writes", - "refId": "A" - }, - { - "expr": "round(sum(irate(ceph_rbd_read_ops[30s])))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "Reads", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "IOPS", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 0 - }, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.4.2" }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "round(sum(irate(ceph_rbd_write_bytes[30s])))", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "legendFormat": "Write", - "refId": "A" - }, - { - "expr": "round(sum(irate(ceph_rbd_read_bytes[30s])))", - "format": "time_series", - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Read", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Throughput", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - 
"values": [] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 0 - }, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "round(sum(irate(ceph_rbd_write_latency_sum[30s])) / sum(irate(ceph_rbd_write_latency_count[30s])))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Write", - "refId": "A" - }, - { - "expr": "round(sum(irate(ceph_rbd_read_latency_sum[30s])) / sum(irate(ceph_rbd_read_latency_count[30s])))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "Read", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average Latency", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "5.0.0" }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + { + "id": "prometheus", + "name": "Prometheus", + "type": "datasource", + "version": "5.0.0" }, - "yaxes": [ - { - "format": "ns", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null + { + "id": "table", + "name": "Table", + "type": "panel", + "version": "5.0.0" } - }, - { - "columns": [], - "datasource": "$datasource", - "fontSize": "100%", - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 7 - }, - "hideTimeOverride": false, - "id": 12, - "links": [], - "pageSize": null, - "scroll": true, - "showHeader": true, - "sort": { - "col": 3, - "desc": true + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "showIn": 0, + "tags": [ ], + "type": "dashboard" + } + ] + }, + "description": "", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ ], + "panels": [ + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 
10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(irate(ceph_rbd_write_ops[30s])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Writes", + "refId": "A" + }, + { + "expr": "round(sum(irate(ceph_rbd_read_ops[30s])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Reads", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "IOPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, - "styles": [ - { - "alias": "Pool", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "pool", - "thresholds": [], - "type": "string", - "unit": "short", - "valueMaps": [] - }, - { - "alias": "Image", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "image", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "IOPS", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "Value", - "thresholds": [], - "type": "number", - "unit": "iops" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "/.*/", - "thresholds": [], - "type": "hidden", - "unit": "short" - } - ], - "targets": [ - { - "expr": "topk(10, (sort((irate(ceph_rbd_write_ops[30s]) + on (image, pool, namespace) irate(ceph_rbd_read_ops[30s])))))", - "format": "table", - "instant": true, - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "title": "Highest IOPS", - "transform": "table", - "type": "table" - }, - { - "columns": [], - "datasource": "$datasource", - "fontSize": "100%", - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 7 + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(irate(ceph_rbd_write_bytes[30s])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "round(sum(irate(ceph_rbd_read_bytes[30s])))", + 
"format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Throughput", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, - "id": 10, - "links": [], - "pageSize": null, - "scroll": true, - "showHeader": true, - "sort": { - "col": 3, - "desc": true + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(irate(ceph_rbd_write_latency_sum[30s])) / sum(irate(ceph_rbd_write_latency_count[30s])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "round(sum(irate(ceph_rbd_read_latency_sum[30s])) / sum(irate(ceph_rbd_read_latency_count[30s])))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Average Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, - "styles": [ - { - "alias": "Pool", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "pool", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "Image", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "image", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "Throughput", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "Value", - "thresholds": [], - "type": "number", - "unit": "Bps" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "decimals": 2, - "pattern": "/.*/", - "thresholds": [], - 
"type": "hidden", - "unit": "short" - } - ], - "targets": [ - { - "expr": "topk(10, sort(sum(irate(ceph_rbd_read_bytes[30s]) + irate(ceph_rbd_write_bytes[30s])) by (pool, image, namespace)))", - "format": "table", - "instant": true, - "intervalFactor": 1, - "refId": "A" - } - ], - "title": "Highest Throughput", - "transform": "table", - "type": "table" - }, - { - "columns": [], - "datasource": "$datasource", - "fontSize": "100%", - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 7 + { + "columns": [ ], + "datasource": "$datasource", + "description": "", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "id": 5, + "sort": { + "col": 3, + "desc": true + }, + "styles": [ + { + "alias": "Pool", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pool", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Image", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "image", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "IOPS", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "iops", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10, (sort((irate(ceph_rbd_write_ops[30s]) + on (image, pool, namespace) irate(ceph_rbd_read_ops[30s])))))", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Highest IOPS", + "transform": "table", + "type": "table" }, - "id": 14, - "links": [], - "pageSize": null, - "scroll": true, - "showHeader": true, - "sort": { - "col": 3, - "desc": true + { + "columns": [ ], + "datasource": "$datasource", + "description": "", + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "id": 6, + "sort": { + "col": 3, + "desc": true + }, + "styles": [ + { + "alias": "Pool", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pool", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Image", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "image", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Throughput", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + 
"decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "Bps", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10, sort(sum(irate(ceph_rbd_read_bytes[30s]) + irate(ceph_rbd_write_bytes[30s])) by (pool, image, namespace)))", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Highest Throughput", + "transform": "table", + "type": "table" }, - "styles": [ - { - "alias": "Pool", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "pool", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "Image", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "image", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "Latency", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "Value", - "thresholds": [], - "type": "number", - "unit": "ns" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "/.*/", - "thresholds": [], - "type": "hidden", - "unit": "short" - } - ], - "targets": [ - { - "expr": "topk(10,\n sum(\n irate(ceph_rbd_write_latency_sum[30s]) / clamp_min(irate(ceph_rbd_write_latency_count[30s]), 1) +\n irate(ceph_rbd_read_latency_sum[30s]) / clamp_min(irate(ceph_rbd_read_latency_count[30s]), 1)\n ) by (pool, image, namespace)\n)", - "format": "table", - "instant": true, - "intervalFactor": 1, - "refId": "A" - } - ], - "title": "Highest Latency", - "transform": "table", - "type": "table" - } - ], - "refresh": "30s", - "schemaVersion": 16, - "style": "dark", - "tags": [ - "overview" - ], - "templating": { - "list": [ { - "current": { - "text": "default", - "value": "default" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" + "columns": [ ], + "datasource": "$datasource", + "description": "", + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 7 + }, + "id": 7, + "sort": { + "col": 3, + "desc": true + }, + "styles": [ + { + "alias": "Pool", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pool", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Image", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 
172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "image", + "thresholds": [ ], + "type": "string", + "unit": "short", + "valueMaps": [ ] + }, + { + "alias": "Latency", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ ], + "type": "number", + "unit": "ns", + "valueMaps": [ ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [ ], + "type": "hidden", + "unit": "short", + "valueMaps": [ ] + } + ], + "targets": [ + { + "expr": "topk(10,\n sum(\n irate(ceph_rbd_write_latency_sum[30s]) / clamp_min(irate(ceph_rbd_write_latency_count[30s]), 1) +\n irate(ceph_rbd_read_latency_sum[30s]) / clamp_min(irate(ceph_rbd_read_latency_count[30s]), 1)\n ) by (pool, image, namespace)\n)", + "format": "table", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Highest Latency", + "transform": "table", + "type": "table" } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "15s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "RBD Overview", - "uid": "41FrpeUiz", - "version": 8 + ], + "refresh": "30s", + "rows": [ ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "overview" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "15s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "RBD Overview", + "uid": "41FrpeUiz", + "version": 0 } diff --git a/ceph/monitoring/prometheus/alerts/ceph_default_alerts.yml b/ceph/monitoring/prometheus/alerts/ceph_default_alerts.yml index d4a0b8209..71fc864cd 100644 --- a/ceph/monitoring/prometheus/alerts/ceph_default_alerts.yml +++ b/ceph/monitoring/prometheus/alerts/ceph_default_alerts.yml @@ -233,7 +233,7 @@ groups: rate of the past 48 hours. 
- alert: MTU Mismatch - expr: node_network_mtu_bytes{device!="lo"} != on() group_left() (quantile(0.5, node_network_mtu_bytes{device!="lo"})) + expr: node_network_mtu_bytes{device!="lo"} * (node_network_up{device!="lo"} > 0) != on() group_left() (quantile(0.5, node_network_mtu_bytes{device!="lo"})) labels: severity: warning type: ceph_default diff --git a/ceph/monitoring/prometheus/alerts/test_alerts.yml b/ceph/monitoring/prometheus/alerts/test_alerts.yml index 8bc35aa26..913c20733 100644 --- a/ceph/monitoring/prometheus/alerts/test_alerts.yml +++ b/ceph/monitoring/prometheus/alerts/test_alerts.yml @@ -680,13 +680,27 @@ tests: - series: 'node_network_mtu_bytes{device="eth4",instance="node-exporter", job="node-exporter"}' values: '9000 9000 9000 9000 9000' + - series: 'node_network_up{device="eth0",instance="node-exporter", + job="node-exporter"}' + values: '0 0 0 0 0' + - series: 'node_network_up{device="eth1",instance="node-exporter", + job="node-exporter"}' + values: '0 0 0 0 0' + - series: 'node_network_up{device="eth2",instance="node-exporter", + job="node-exporter"}' + values: '1 1 1 1 1' + - series: 'node_network_up{device="eth3",instance="node-exporter", + job="node-exporter"}' + values: '0 0 0 0 0' + - series: 'node_network_up{device="eth4",instance="node-exporter", + job="node-exporter"}' + values: '1 1 1 1 1' promql_expr_test: - - expr: node_network_mtu_bytes{device!="lo"} != on() group_left() + - expr: node_network_mtu_bytes{device!="lo"} * (node_network_up{device!="lo"} > 0) != on() group_left() (quantile(0.5, node_network_mtu_bytes{device!="lo"})) eval_time: 1m exp_samples: - - labels: '{__name__="node_network_mtu_bytes", device="eth4", - instance="node-exporter", job="node-exporter"}' + - labels: '{device="eth4", instance="node-exporter", job="node-exporter"}' value: 9000 alert_rule_test: - eval_time: 1m diff --git a/ceph/qa/cephfs/overrides/frag_enable.yaml b/ceph/qa/cephfs/overrides/frag.yaml similarity index 86% rename from ceph/qa/cephfs/overrides/frag_enable.yaml rename to ceph/qa/cephfs/overrides/frag.yaml index f1ccc1c9d..f05b3f48f 100644 --- a/ceph/qa/cephfs/overrides/frag_enable.yaml +++ b/ceph/qa/cephfs/overrides/frag.yaml @@ -2,8 +2,8 @@ overrides: ceph: conf: mds: - mds bal frag: true mds bal fragment size max: 10000 - mds bal split size: 100 mds bal merge size: 5 mds bal split bits: 3 + mds bal split size: 100 + diff --git a/ceph/qa/distros/podman/centos_8.stream_container_tools.yaml b/ceph/qa/distros/podman/centos_8.stream_container_tools.yaml new file mode 100644 index 000000000..4a9578e60 --- /dev/null +++ b/ceph/qa/distros/podman/centos_8.stream_container_tools.yaml @@ -0,0 +1,14 @@ +os_type: centos +os_version: "8.stream" +overrides: + selinux: + whitelist: + - scontext=system_u:system_r:logrotate_t:s0 + +tasks: +- pexec: + all: + - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup + - sudo dnf -y module reset container-tools + - sudo dnf -y module install container-tools + - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/distros/podman/rhel_8.3_kubic_stable.yaml b/ceph/qa/distros/podman/rhel_8.3_kubic_stable.yaml deleted file mode 100644 index 4e50abc45..000000000 --- a/ceph/qa/distros/podman/rhel_8.3_kubic_stable.yaml +++ /dev/null @@ -1,18 +0,0 @@ -os_type: rhel -os_version: "8.3" -overrides: - selinux: - whitelist: - - scontext=system_u:system_r:logrotate_t:s0 - -tasks: -- pexec: - all: - - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup - - 
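The new container-tools fragments all follow one pattern: save registries.conf, swap the dnf module stream, restore the file so the module install cannot clobber local registry settings. Condensed to plain shell (the `:3.0` and `:rhel8` variants below just pin a specific stream):

    sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup
    sudo dnf -y module reset container-tools
    sudo dnf -y module install container-tools   # or container-tools:3.0 / container-tools:rhel8
    sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf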
sudo dnf -y module disable container-tools - - sudo dnf -y install 'dnf-command(copr)' - - sudo dnf -y copr enable rhcontainerbot/container-selinux - - sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8/devel:kubic:libcontainers:stable.repo - - sudo dnf remove -y podman - - sudo dnf -y install podman - - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/distros/podman/rhel_8.4_container_tools_3.0.yaml b/ceph/qa/distros/podman/rhel_8.4_container_tools_3.0.yaml new file mode 100644 index 000000000..a01eec952 --- /dev/null +++ b/ceph/qa/distros/podman/rhel_8.4_container_tools_3.0.yaml @@ -0,0 +1,13 @@ +os_type: rhel +os_version: "8.4" +overrides: + selinux: + whitelist: + - scontext=system_u:system_r:logrotate_t:s0 +tasks: +- pexec: + all: + - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup + - sudo dnf -y module reset container-tools + - sudo dnf -y module install container-tools:3.0 + - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/distros/podman/rhel_8.4_container_tools_rhel8.yaml b/ceph/qa/distros/podman/rhel_8.4_container_tools_rhel8.yaml new file mode 100644 index 000000000..fdce9e837 --- /dev/null +++ b/ceph/qa/distros/podman/rhel_8.4_container_tools_rhel8.yaml @@ -0,0 +1,13 @@ +os_type: rhel +os_version: "8.4" +overrides: + selinux: + whitelist: + - scontext=system_u:system_r:logrotate_t:s0 +tasks: +- pexec: + all: + - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup + - sudo dnf -y module reset container-tools + - sudo dnf -y module install container-tools:rhel8 + - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/distros/podman/ubuntu_18.04_kubic_stable.yaml b/ceph/qa/distros/podman/ubuntu_18.04_kubic_stable.yaml deleted file mode 100644 index 1055bcc9e..000000000 --- a/ceph/qa/distros/podman/ubuntu_18.04_kubic_stable.yaml +++ /dev/null @@ -1,13 +0,0 @@ -os_type: ubuntu -os_version: "18.04" - -# feel free to remove this test, if Kubic project is no longer maintained. -tasks: -- pexec: - all: - - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup - - curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_18.04/Release.key | sudo apt-key add - - - echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_18.04/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list - - sudo apt update - - sudo apt -y install podman - - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/distros/podman/ubuntu_20.04_kubic_stable.yaml b/ceph/qa/distros/podman/ubuntu_20.04_kubic_stable.yaml deleted file mode 100644 index 3a04f50a8..000000000 --- a/ceph/qa/distros/podman/ubuntu_20.04_kubic_stable.yaml +++ /dev/null @@ -1,13 +0,0 @@ -os_type: ubuntu -os_version: "20.04" - -# feel free to remove this test, if Kubic project is no longer maintained. 
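With the Kubic repositories dropped from these jobs, podman now comes straight from the distro. If a similar distro fragment is added later, it is worth confirming what the distro actually packages first, e.g.:

    apt-cache policy podman 2>/dev/null || dnf info podman   # whichever package manager applies
    podman --version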
-tasks: -- pexec: - all: - - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup - - curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/Release.key | sudo apt-key add - - - echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list - - sudo apt update - - sudo apt -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install podman containernetworking-plugins - - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/distros/podman/ubuntu_20.04_kubic_testing.yaml b/ceph/qa/distros/podman/ubuntu_20.04_kubic_testing.yaml deleted file mode 100644 index 56b4d1e0f..000000000 --- a/ceph/qa/distros/podman/ubuntu_20.04_kubic_testing.yaml +++ /dev/null @@ -1,13 +0,0 @@ -os_type: ubuntu -os_version: "20.04" - -# feel free to remove this test, if Kubic project is no longer maintained. -tasks: -- pexec: - all: - - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup - - curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/testing/xUbuntu_20.04/Release.key | sudo apt-key add - - - echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/testing/xUbuntu_20.04/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:testing.list - - sudo apt update - - sudo apt -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install podman containernetworking-plugins - - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf diff --git a/ceph/qa/standalone/scrub/osd-scrub-repair.sh b/ceph/qa/standalone/scrub/osd-scrub-repair.sh index e000134a8..63400a1d0 100755 --- a/ceph/qa/standalone/scrub/osd-scrub-repair.sh +++ b/ceph/qa/standalone/scrub/osd-scrub-repair.sh @@ -4673,8 +4673,6 @@ EOF "primary": false }, { - "data_digest": "0x00000000", - "omap_digest": "0xffffffff", "object_info": { "oid": { "oid": "EOBJ5", @@ -4711,6 +4709,7 @@ EOF }, "size": 4096, "errors": [ + "read_error", "size_mismatch_info", "obj_size_info_mismatch" ], @@ -4763,6 +4762,7 @@ EOF "watchers": {} }, "union_shard_errors": [ + "read_error", "size_mismatch_info", "obj_size_info_mismatch" ], @@ -5441,8 +5441,8 @@ EOF "size": 4096, "shard": 0, "errors": [ + "read_error", "size_mismatch_info", - "ec_size_error", "obj_size_info_mismatch" ], "osd": 1, @@ -5493,8 +5493,8 @@ EOF "watchers": {} }, "union_shard_errors": [ + "read_error", "size_mismatch_info", - "ec_size_error", "obj_size_info_mismatch" ], "errors": [ diff --git a/ceph/qa/suites/fs/32bits/overrides/frag_enable.yaml b/ceph/qa/suites/fs/32bits/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/32bits/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/bugs/client_trim_caps/overrides/frag_enable.yaml b/ceph/qa/suites/fs/bugs/client_trim_caps/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/bugs/client_trim_caps/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/full/overrides/frag_enable.yaml b/ceph/qa/suites/fs/full/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 
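The osd-scrub-repair.sh expectation changes above track what scrub now reports when a shard read fails outright: a plain read_error appears (replacing the EC-specific ec_size_error in the EC cases), and the unreadable shard no longer carries data/omap digests. The fields the test matches come from the inconsistency listing, e.g. (PG id 2.0 is a placeholder):

    rados list-inconsistent-obj 2.0 --format=json-pretty \
      | jq '.inconsistents[] | {errors, union_shard_errors}'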
--- a/ceph/qa/suites/fs/full/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/functional/overrides/frag_enable.yaml b/ceph/qa/suites/fs/functional/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/functional/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/libcephfs/overrides/frag_enable.yaml b/ceph/qa/suites/fs/libcephfs/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/libcephfs/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/libcephfs/tasks/libcephfs/+ b/ceph/qa/suites/fs/libcephfs/tasks/libcephfs/+ new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/fs/libcephfs/tasks/libcephfs/.qa b/ceph/qa/suites/fs/libcephfs/tasks/libcephfs/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/ceph/qa/suites/fs/libcephfs/tasks/libcephfs/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/ceph/qa/suites/fs/libcephfs/tasks/libcephfs/frag.yaml b/ceph/qa/suites/fs/libcephfs/tasks/libcephfs/frag.yaml new file mode 120000 index 000000000..5e5cdaed8 --- /dev/null +++ b/ceph/qa/suites/fs/libcephfs/tasks/libcephfs/frag.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/libcephfs/tasks/libcephfs.yaml b/ceph/qa/suites/fs/libcephfs/tasks/libcephfs/test.yaml similarity index 100% rename from ceph/qa/suites/fs/libcephfs/tasks/libcephfs.yaml rename to ceph/qa/suites/fs/libcephfs/tasks/libcephfs/test.yaml diff --git a/ceph/qa/suites/fs/mirror-ha/% b/ceph/qa/suites/fs/mirror-ha/% new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/fs/mirror-ha/.qa b/ceph/qa/suites/fs/mirror-ha/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/ceph/qa/suites/fs/mirror-ha/begin.yaml b/ceph/qa/suites/fs/mirror-ha/begin.yaml new file mode 120000 index 000000000..311d404f7 --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/mirror-ha/cephfs-mirror/three-per-cluster.yaml b/ceph/qa/suites/fs/mirror-ha/cephfs-mirror/three-per-cluster.yaml new file mode 100644 index 000000000..095f0893a --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/cephfs-mirror/three-per-cluster.yaml @@ -0,0 +1,12 @@ +meta: +- desc: run one cephfs-mirror daemon on primary cluster +tasks: +- cephfs-mirror: + client: client.mirror1 + run_in_foreground: True +- cephfs-mirror: + client: client.mirror2 + run_in_foreground: True +- cephfs-mirror: + client: client.mirror3 + run_in_foreground: True diff --git a/ceph/qa/suites/fs/mirror-ha/clients/+ b/ceph/qa/suites/fs/mirror-ha/clients/+ new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/fs/mirror-ha/clients/.qa b/ceph/qa/suites/fs/mirror-ha/clients/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/clients/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/ceph/qa/suites/fs/mirror-ha/clients/mirror.yaml b/ceph/qa/suites/fs/mirror-ha/clients/mirror.yaml new file mode 100644 
index 000000000..620c821e1 --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/clients/mirror.yaml @@ -0,0 +1,32 @@ +meta: +- desc: configure the permissions for client.mirror +overrides: + ceph: + conf: + client: + debug cephfs_mirror: 20 + log to stderr: false + # make these predictable + client.mirror1: + admin socket: /var/run/ceph/cephfs-mirror1.asok + pid file: /var/run/ceph/cephfs-mirror1.pid + client.mirror2: + admin socket: /var/run/ceph/cephfs-mirror2.asok + pid file: /var/run/ceph/cephfs-mirror2.pid + client.mirror3: + admin socket: /var/run/ceph/cephfs-mirror3.asok + pid file: /var/run/ceph/cephfs-mirror3.pid +tasks: +- exec: + client.mirror1: + - "sudo ceph auth caps client.mirror1 mon 'profile cephfs-mirror' mds 'allow r' osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' mgr 'allow r'" + client.mirror2: + - "sudo ceph auth caps client.mirror2 mon 'profile cephfs-mirror' mds 'allow r' osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' mgr 'allow r'" + client.mirror3: + - "sudo ceph auth caps client.mirror3 mon 'profile cephfs-mirror' mds 'allow r' osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' mgr 'allow r'" + client.mirror_remote: + - "sudo ceph auth caps client.mirror_remote mon 'allow r' mds 'allow rwps' osd 'allow rw tag cephfs *=*' mgr 'allow r'" + client.1: + - "sudo ceph auth caps client.0 mon 'allow r' mds 'allow rwps' osd 'allow rw tag cephfs *=*' mgr 'allow r'" + client.2: + - "sudo ceph auth caps client.1 mon 'allow r' mds 'allow rwps' osd 'allow rw tag cephfs *=*' mgr 'allow r'" diff --git a/ceph/qa/suites/fs/mirror-ha/cluster/+ b/ceph/qa/suites/fs/mirror-ha/cluster/+ new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/fs/mirror-ha/cluster/1-node.yaml b/ceph/qa/suites/fs/mirror-ha/cluster/1-node.yaml new file mode 100644 index 000000000..cc70c106d --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/cluster/1-node.yaml @@ -0,0 +1,20 @@ +meta: +- desc: 1 ceph cluster with 1 mon, 1 mgr, 3 osds, 5 mdss +roles: +- - mon.a + - mgr.x + - mds.a + - mds.b + - mds.c + - mds.d + - mds.e + - osd.0 + - osd.1 + - osd.2 + - client.0 + - client.1 + - client.2 + - client.mirror1 + - client.mirror2 + - client.mirror3 + - client.mirror_remote \ No newline at end of file diff --git a/ceph/qa/suites/fs/mirror-ha/objectstore/.qa b/ceph/qa/suites/fs/mirror-ha/objectstore/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/objectstore/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/ceph/qa/suites/fs/mirror-ha/objectstore/bluestore-bitmap.yaml b/ceph/qa/suites/fs/mirror-ha/objectstore/bluestore-bitmap.yaml new file mode 120000 index 000000000..a59cf5175 --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/objectstore/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/mirror-ha/overrides/+ b/ceph/qa/suites/fs/mirror-ha/overrides/+ new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/fs/mirror-ha/overrides/.qa b/ceph/qa/suites/fs/mirror-ha/overrides/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/overrides/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/ceph/qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml b/ceph/qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml new file mode 100644 index 000000000..d40fa4cb8 --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml 
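All three mirror clients above end up with the same deliberately narrow capability set: `profile cephfs-mirror` on the mon, read-only MDS and mgr access, and rw limited to the cephfs metadata tag. Once the exec task has run, the result can be inspected per client:

    ceph auth get client.mirror1   # prints the caps applied above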
@@ -0,0 +1,14 @@ +overrides: + ceph: + log-ignorelist: + - overall HEALTH_ + - \(FS_DEGRADED\) + - \(MDS_FAILED\) + - \(MDS_DEGRADED\) + - \(FS_WITH_FAILED_MDS\) + - \(MDS_DAMAGE\) + - \(MDS_ALL_DOWN\) + - \(MDS_UP_LESS_THAN_MAX\) + - \(FS_INLINE_DATA_DEPRECATED\) + - Reduced data availability + - Degraded data redundancy diff --git a/ceph/qa/suites/fs/mirror-ha/supported-random-distro$ b/ceph/qa/suites/fs/mirror-ha/supported-random-distro$ new file mode 120000 index 000000000..0862b4457 --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/ceph/qa/suites/fs/mirror-ha/workloads/.qa b/ceph/qa/suites/fs/mirror-ha/workloads/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/workloads/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/ceph/qa/suites/fs/mirror-ha/workloads/cephfs-mirror-ha-workunit.yaml b/ceph/qa/suites/fs/mirror-ha/workloads/cephfs-mirror-ha-workunit.yaml new file mode 100644 index 000000000..18eb46d78 --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/workloads/cephfs-mirror-ha-workunit.yaml @@ -0,0 +1,37 @@ +meta: +- desc: run the cephfs_mirror_ha.sh workunit to test cephfs-mirror daemon in HA active/active mode + +overrides: + ceph: + conf: + mgr: + debug client: 10 + +tasks: + - exec: + client.1: + - "ceph fs volume create dc" + - "ceph fs volume create dc-backup" + - ceph-fuse: + client.1: + cephfs_name: dc + client.2: + cephfs_name: dc-backup + - cephfs_mirror_thrash: + randomize: False + max_thrash_delay: 10 + - workunit: + subdir: mirror + cleanup: False + clients: + client.1: [fs/cephfs_mirror_ha_gen.sh] + timeout: 1h + - exec: + client.2: + - "echo verifying synchronized snapshots..." 
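+  # client.1 runs the generator workunit against "dc" while cephfs_mirror_thrash
+  # restarts mirror daemons (randomize: False, at most 10s between thrashes);
+  # the verify workunit below replays the snapshot checks on client.2's
+  # "dc-backup" mount, hence the larger 3h budget.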
+ - workunit: + subdir: mirror + cleanup: False + clients: + client.2: [fs/cephfs_mirror_ha_verify.sh] + timeout: 3h \ No newline at end of file diff --git a/ceph/qa/suites/fs/mirror/tasks/mirror.yaml b/ceph/qa/suites/fs/mirror/tasks/mirror.yaml index af60495e5..07c1e24ef 100644 --- a/ceph/qa/suites/fs/mirror/tasks/mirror.yaml +++ b/ceph/qa/suites/fs/mirror/tasks/mirror.yaml @@ -7,4 +7,4 @@ overrides: tasks: - cephfs_test_runner: modules: - - tasks.cephfs.test_mirroring + - tasks.cephfs.test_mirroring.TestMirroring diff --git a/ceph/qa/suites/fs/mixed-clients/overrides/frag_enable.yaml b/ceph/qa/suites/fs/mixed-clients/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/mixed-clients/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/multiclient/overrides/frag_enable.yaml b/ceph/qa/suites/fs/multiclient/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/multiclient/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/multifs/overrides/frag_enable.yaml b/ceph/qa/suites/fs/multifs/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/multifs/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/multifs/tasks/failover.yaml b/ceph/qa/suites/fs/multifs/tasks/failover.yaml index 4a95f01da..9c403c76d 100644 --- a/ceph/qa/suites/fs/multifs/tasks/failover.yaml +++ b/ceph/qa/suites/fs/multifs/tasks/failover.yaml @@ -5,6 +5,8 @@ overrides: - \(MDS_INSUFFICIENT_STANDBY\) - \(MDS_ALL_DOWN\) - \(MDS_UP_LESS_THAN_MAX\) + - \(MDS_DAMAGE\) + - \(FS_DEGRADED\) ceph-fuse: disabled: true tasks: diff --git a/ceph/qa/suites/fs/permission/overrides/frag_enable.yaml b/ceph/qa/suites/fs/permission/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/permission/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/shell/overrides/frag_enable.yaml b/ceph/qa/suites/fs/shell/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/shell/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/snaps/overrides/frag_enable.yaml b/ceph/qa/suites/fs/snaps/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/snaps/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/thrash/multifs/overrides/frag.yaml b/ceph/qa/suites/fs/thrash/multifs/overrides/frag.yaml new file mode 120000 index 000000000..5e5cdaed8 --- /dev/null +++ b/ceph/qa/suites/fs/thrash/multifs/overrides/frag.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/thrash/multifs/overrides/frag_enable.yaml b/ceph/qa/suites/fs/thrash/multifs/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/thrash/multifs/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ 
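Narrowing the cephfs_test_runner module to `tasks.cephfs.test_mirroring.TestMirroring` pins discovery to the test class itself, so helpers later added to that file cannot be picked up as tests. It is the same class-level selection Python's own runner offers (illustrative only; teuthology drives its runner itself):

    python3 -m unittest tasks.cephfs.test_mirroring.TestMirroring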
-.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/thrash/workloads/overrides/frag.yaml b/ceph/qa/suites/fs/thrash/workloads/overrides/frag.yaml new file mode 120000 index 000000000..5e5cdaed8 --- /dev/null +++ b/ceph/qa/suites/fs/thrash/workloads/overrides/frag.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/thrash/workloads/overrides/frag_enable.yaml b/ceph/qa/suites/fs/thrash/workloads/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/thrash/workloads/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/traceless/overrides/frag.yaml b/ceph/qa/suites/fs/traceless/overrides/frag.yaml new file mode 120000 index 000000000..5e5cdaed8 --- /dev/null +++ b/ceph/qa/suites/fs/traceless/overrides/frag.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/traceless/overrides/frag_enable.yaml b/ceph/qa/suites/fs/traceless/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/traceless/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/featureful_client/old_client/overrides/frag_enable.yaml b/ceph/qa/suites/fs/upgrade/featureful_client/old_client/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/upgrade/featureful_client/old_client/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/frag_enable.yaml b/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/% b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/% new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/.qa b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/bluestore-bitmap.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/bluestore-bitmap.yaml new file mode 120000 index 000000000..fb603bc9a --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec/bluestore-bitmap.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/centos_8.stream_container_tools.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/centos_8.stream_container_tools.yaml new file mode 120000 index 000000000..7a86f967f --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/centos_8.stream_container_tools.yaml @@ -0,0 +1 @@ +.qa/distros/podman/centos_8.stream_container_tools.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/conf 
b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/conf new file mode 120000 index 000000000..6d4712984 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/conf @@ -0,0 +1 @@ +.qa/cephfs/conf/ \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/% b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/% new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/.qa b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/pg-warn.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/pg-warn.yaml new file mode 100644 index 000000000..4ae54a40d --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/pg-warn.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + mon pg warn min per osd: 0 diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/whitelist_health.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/whitelist_health.yaml new file mode 120000 index 000000000..74f39a49b --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/whitelist_wrongly_marked_down.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 000000000..b4528c0f8 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/roles.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/roles.yaml new file mode 100644 index 000000000..bce4ecd34 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/roles.yaml @@ -0,0 +1,11 @@ +roles: +- - host.a + - client.0 + - osd.0 + - osd.1 + - osd.2 +- - host.b + - client.1 + - osd.3 + - osd.4 + - osd.5 diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/% b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/% new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/.qa b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/.qa b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/v16.2.4.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/v16.2.4.yaml new file mode 100644 index 000000000..47166a9af --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/v16.2.4.yaml @@ -0,0 +1,30 @@ +meta: +- desc: | + setup ceph/pacific v16.2.4 + +tasks: +# Disable 
metrics sending by kclient as it may crash (assert) a v16.2.4 MDS +- pexec: + clients: + - sudo modprobe -r ceph + - sudo modprobe ceph disable_send_metrics=on +- install: + tag: v16.2.4 + exclude_packages: + - ceph-volume +- print: "**** done install task..." +- cephadm: + roleless: true + image: docker.io/ceph/ceph:v16.2.4 + cephadm_branch: v16.2.4 + cephadm_git_url: https://github.com/ceph/ceph + # needed for v16.2.4 due to --skip-admin-label + avoid_pacific_features: true +- print: "**** done starting v16.2.4" +- cephadm.shell: + host.a: + - ceph orch status + - ceph orch ps + - ceph orch ls + - ceph orch host ls + - ceph orch device ls diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/% b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/% new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/.qa b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/0-create.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/0-create.yaml new file mode 100644 index 000000000..5ee0022c6 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/0-create.yaml @@ -0,0 +1,5 @@ +tasks: +- cephadm.shell: + host.a: + - ceph fs volume create cephfs --placement=4 + - ceph fs dump diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/.qa b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/1.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/1.yaml new file mode 100644 index 000000000..8c1cd2fe0 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/1.yaml @@ -0,0 +1,4 @@ +tasks: +- cephadm.shell: + host.a: + - ceph fs set cephfs max_mds 1 diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/2.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/2.yaml new file mode 100644 index 000000000..fcd3b1ea4 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/2.yaml @@ -0,0 +1,4 @@ +tasks: +- cephadm.shell: + host.a: + - ceph fs set cephfs max_mds 2 diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/.qa b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/no.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/no.yaml new file mode 100644 index 000000000..3dbc81089 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/no.yaml @@ -0,0 +1,4 @@ +tasks: +- 
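The one-line fragments around this point form the matrix for the MDS upgrade sequence: one or two active ranks crossed with standby-replay on or off, i.e. four MDS layouts per source version. Against a live cluster the same toggles read:

    ceph fs set cephfs max_mds 2
    ceph fs set cephfs allow_standby_replay true
    ceph fs dump | grep -Ei 'max_mds|standby'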
cephadm.shell: + host.a: + - ceph fs set cephfs allow_standby_replay false diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/yes.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/yes.yaml new file mode 100644 index 000000000..fb894425e --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/yes.yaml @@ -0,0 +1,4 @@ +tasks: +- cephadm.shell: + host.a: + - ceph fs set cephfs allow_standby_replay true diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/3-verify.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/3-verify.yaml new file mode 100644 index 000000000..ec2a2a54f --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/3-verify.yaml @@ -0,0 +1,7 @@ +tasks: +- cephadm.shell: + host.a: + - ceph fs dump + - ceph --format=json fs dump | jq -e ".filesystems | length == 1" + - ceph --format=json mds versions | jq -e ". | add == 4" +- fs.pre_upgrade_save: diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client.yaml new file mode 100644 index 000000000..92b9dda84 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client.yaml @@ -0,0 +1,3 @@ +tasks: +- kclient: +- print: "**** done client" diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/3-upgrade-with-workload.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/3-upgrade-with-workload.yaml new file mode 100644 index 000000000..200c4dcb5 --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/3-upgrade-with-workload.yaml @@ -0,0 +1,33 @@ +tasks: +- parallel: + - upgrade-tasks + - workload-tasks + +upgrade-tasks: + sequential: + - cephadm.shell: + env: [sha1] + host.a: + - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force + - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force + - ceph config set global log_to_journald false --force + - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 + - cephadm.shell: + env: [sha1] + host.a: + - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; ceph fs dump; sleep 30 ; done + - ceph orch ps + - ceph versions + - echo "wait for servicemap items w/ changing names to refresh" + - sleep 60 + - ceph orch ps + - ceph versions + - ceph versions | jq -e '.overall | length == 1' + - ceph versions | jq -e '.overall | keys' | grep $sha1 + +workload-tasks: + sequential: + - workunit: + clients: + all: + - suites/fsstress.sh diff --git a/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/4-verify.yaml b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/4-verify.yaml new file mode 100644 index 000000000..c2b657e5a --- /dev/null +++ b/ceph/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/4-verify.yaml @@ -0,0 +1,5 @@ +tasks: +- cephadm.shell: + host.a: + - ceph fs dump +- fs.post_upgrade_checks: diff --git a/ceph/qa/suites/fs/upgrade/volumes/import-legacy/overrides/frag_enable.yaml b/ceph/qa/suites/fs/upgrade/volumes/import-legacy/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/upgrade/volumes/import-legacy/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/valgrind/mirror/tasks/mirror.yaml 
b/ceph/qa/suites/fs/valgrind/mirror/tasks/mirror.yaml index af60495e5..07c1e24ef 100644 --- a/ceph/qa/suites/fs/valgrind/mirror/tasks/mirror.yaml +++ b/ceph/qa/suites/fs/valgrind/mirror/tasks/mirror.yaml @@ -7,4 +7,4 @@ overrides: tasks: - cephfs_test_runner: modules: - - tasks.cephfs.test_mirroring + - tasks.cephfs.test_mirroring.TestMirroring diff --git a/ceph/qa/suites/fs/verify/overrides/frag_enable.yaml b/ceph/qa/suites/fs/verify/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/verify/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/volumes/overrides/frag_enable.yaml b/ceph/qa/suites/fs/volumes/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/volumes/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/workload/overrides/frag.yaml b/ceph/qa/suites/fs/workload/overrides/frag.yaml new file mode 120000 index 000000000..5e5cdaed8 --- /dev/null +++ b/ceph/qa/suites/fs/workload/overrides/frag.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag.yaml \ No newline at end of file diff --git a/ceph/qa/suites/fs/workload/overrides/frag_enable.yaml b/ceph/qa/suites/fs/workload/overrides/frag_enable.yaml deleted file mode 120000 index 34a39a368..000000000 --- a/ceph/qa/suites/fs/workload/overrides/frag_enable.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/ceph/qa/suites/orch/cephadm/dashboard/0-distro/centos_8.3_container_tools_3.0.yaml b/ceph/qa/suites/orch/cephadm/dashboard/0-distro/centos_8.3_container_tools_3.0.yaml new file mode 120000 index 000000000..479a5c26e --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/dashboard/0-distro/centos_8.3_container_tools_3.0.yaml @@ -0,0 +1 @@ +.qa/distros/podman/centos_8.3_container_tools_3.0.yaml \ No newline at end of file diff --git a/ceph/qa/suites/orch/cephadm/mds_upgrade_sequence b/ceph/qa/suites/orch/cephadm/mds_upgrade_sequence new file mode 120000 index 000000000..24aa41c10 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mds_upgrade_sequence @@ -0,0 +1 @@ +.qa/suites/fs/upgrade/mds_upgrade_sequence/ \ No newline at end of file diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/% b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/% new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/.qa b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/.qa b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/centos_8.3_container_tools_3.0.yaml b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/centos_8.3_container_tools_3.0.yaml new file mode 120000 index 000000000..479a5c26e --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/centos_8.3_container_tools_3.0.yaml @@ -0,0 +1 @@ +.qa/distros/podman/centos_8.3_container_tools_3.0.yaml \ No newline at end of 
file diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/centos_8.stream_container_tools.yaml b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/centos_8.stream_container_tools.yaml new file mode 120000 index 000000000..7a86f967f --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-distro/centos_8.stream_container_tools.yaml @@ -0,0 +1 @@ +.qa/distros/podman/centos_8.stream_container_tools.yaml \ No newline at end of file diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/16.2.4.yaml b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/16.2.4.yaml new file mode 100644 index 000000000..991562b94 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/16.2.4.yaml @@ -0,0 +1,8 @@ +tasks: +- cephadm: + roleless: true + image: docker.io/ceph/ceph:v16.2.4 + cephadm_branch: v16.2.4 + cephadm_git_url: https://github.com/ceph/ceph + # needed for v16.2.4 due to --skip-admin-label + avoid_pacific_features: true diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/16.2.5.yaml b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/16.2.5.yaml new file mode 100644 index 000000000..2568f23fa --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/16.2.5.yaml @@ -0,0 +1,6 @@ +tasks: +- cephadm: + roleless: true + image: docker.io/ceph/ceph:v16.2.5 + cephadm_branch: v16.2.5 + cephadm_git_url: https://github.com/ceph/ceph diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/octopus.yaml b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/octopus.yaml new file mode 100644 index 000000000..16670e7ec --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/octopus.yaml @@ -0,0 +1,8 @@ +tasks: +- cephadm: + roleless: true + image: docker.io/ceph/ceph:v15 + cephadm_branch: octopus + cephadm_git_url: https://github.com/ceph/ceph + avoid_pacific_features: true + add_mons_via_daemon_add: true diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-start.yaml b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-start.yaml new file mode 100644 index 000000000..2d9f09a4e --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-start.yaml @@ -0,0 +1,29 @@ +tasks: +- cephadm.shell: + host.a: + - ceph orch status + - ceph orch ps + - ceph orch ls + - ceph orch host ls + - ceph orch device ls +roles: +- - host.a + - osd.0 + - osd.1 + - osd.2 + - osd.3 + - client.0 +- - host.b + - osd.4 + - osd.5 + - osd.6 + - osd.7 +openstack: +- volumes: # attached to each instance + count: 4 + size: 10 # GB +overrides: + ceph: + conf: + osd: + osd shutdown pgref assert: true diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/2-nfs.yaml b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/2-nfs.yaml new file mode 100644 index 000000000..34680fc8a --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/2-nfs.yaml @@ -0,0 +1,29 @@ +tasks: + +# stop kernel nfs server, if running +- vip.exec: + all-hosts: + - systemctl stop nfs-server + +- cephadm.shell: + host.a: + - ceph fs volume create foofs + +- cephadm.wait_for_service: + service: mds.foofs + +- cephadm.shell: + host.a: + - ceph nfs cluster create foo --placement=2 || ceph nfs cluster create cephfs foo --placement=2 + - ceph nfs export create cephfs --fsname foofs --clusterid foo --binding /fake || ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake + + # we can't do wait_for_service here because with octopus it's nfs.ganesha-foo not nfs.foo + - while ! 
ceph orch ls | grep nfs | grep 2/2 ; do sleep 1 ; done + +- vip.exec: + host.a: + - mkdir /mnt/foo + - while ! mount -t nfs $(hostname):/fake /mnt/foo -o sync ; do sleep 5 ; done + - echo test > /mnt/foo/testfile + - sync + diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/3-upgrade-with-workload.yaml b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/3-upgrade-with-workload.yaml new file mode 100644 index 000000000..6d10b3576 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/3-upgrade-with-workload.yaml @@ -0,0 +1,41 @@ +tasks: +- parallel: + - upgrade-tasks + - workload-tasks + +upgrade-tasks: + sequential: + - cephadm.shell: + env: [sha1] + host.a: + - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force + - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force + - ceph config set global log_to_journald false --force + - ceph mgr module enable nfs --force + - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 + - cephadm.shell: + env: [sha1] + host.a: + - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done + - ceph orch ps + - ceph versions + - echo "wait for servicemap items w/ changing names to refresh" + - sleep 60 + - ceph orch ps + - ceph versions + - ceph versions | jq -e '.overall | length == 1' + - ceph versions | jq -e '.overall | keys' | grep $sha1 + + # this should be a no-op, but confirms nfs.ganesha-foo was remapped to nfs.foo + - cephadm.wait_for_service: + service: nfs.foo + +workload-tasks: + sequential: + - exec: + host.a: + - cd /mnt/foo && dbench 5 -t 600 || true # might fail with ESTALE + # make sure mount works + - umount /mnt/foo + - while ! mount -t nfs $(hostname):/fake /mnt/foo ; do sleep 5 ; done + - cd /mnt/foo && dbench 5 -t 5 diff --git a/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/4-final.yaml b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/4-final.yaml new file mode 100644 index 000000000..11e8bb3b8 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/mgr-nfs-upgrade/4-final.yaml @@ -0,0 +1,10 @@ +tasks: +- vip.exec: + host.a: + - umount /mnt/foo +- cephadm.shell: + host.a: + - ceph nfs cluster ls | grep foo + - ceph nfs export ls foo --detailed + - rados -p .nfs --all ls - + - ceph config get mgr mgr/cephadm/migration_current | grep 3 diff --git a/ceph/qa/suites/orch/cephadm/osds/2-ops/rm-zap-flag.yaml b/ceph/qa/suites/orch/cephadm/osds/2-ops/rm-zap-flag.yaml new file mode 100644 index 000000000..8f07f6d53 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/osds/2-ops/rm-zap-flag.yaml @@ -0,0 +1,15 @@ +tasks: +- cephadm.shell: + host.a: + - | + set -e + set -x + ceph orch ps + ceph orch device ls + DEVID=$(ceph device ls | grep osd.1 | awk '{print $1}') + HOST=$(ceph orch device ls | grep "$DEVID" | awk '{print $1}') + DEV=$(ceph orch device ls | grep "$DEVID" | awk '{print $2}') + echo "host $HOST, dev $DEV, devid $DEVID" + ceph orch osd rm --zap --replace 1 + while ceph orch osd rm status | grep ^1 ; do sleep 5 ; done + while ! 
ceph osd dump | grep osd.1 | grep "up\s*in" ; do sleep 5 ; done diff --git a/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml new file mode 100644 index 000000000..3f4964978 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml @@ -0,0 +1,89 @@ +tasks: +- vip: + +# make sure cephadm notices the new IP +- cephadm.shell: + host.a: + - ceph orch device ls --refresh + +# stop kernel nfs server, if running +- vip.exec: + all-hosts: + - systemctl stop nfs-server + +- cephadm.shell: + host.a: + - ceph orch apply rgw foorgw --port 8800 + - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} + +- vip.exec: + host.a: + - dnf install -y python3-boto3 || apt install -y python3-boto3 + - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json + +- python: + host.a: | + import boto3 + import json + + with open('/tmp/user.json', 'rt') as f: + info = json.loads(f.read()) + s3 = boto3.resource( + 's3', + aws_access_key_id=info['keys'][0]['access_key'], + aws_secret_access_key=info['keys'][0]['secret_key'], + endpoint_url='http://localhost:8800', + ) + bucket = s3.Bucket('foobucket') + bucket.create() + bucket.put_object(Key='myobject', Body='thebody') + +- cephadm.shell: + host.a: + - ceph nfs export create rgw --bucket foobucket --cluster-id foo --pseudo-path /foobucket + +- cephadm.wait_for_service: + service: nfs.foo +- cephadm.wait_for_service: + service: ingress.nfs.foo + +## export and mount + +- vip.exec: + host.a: + - mkdir /mnt/foo + - sleep 5 + - mount -t nfs {{VIP0}}:/foobucket /mnt/foo + - find /mnt/foo -ls + - grep thebody /mnt/foo/myobject + - echo test > /mnt/foo/newobject + - sync + +- python: + host.a: | + import boto3 + import json + from io import BytesIO + + with open('/tmp/user.json', 'rt') as f: + info = json.loads(f.read()) + s3 = boto3.resource( + 's3', + aws_access_key_id=info['keys'][0]['access_key'], + aws_secret_access_key=info['keys'][0]['secret_key'], + endpoint_url='http://localhost:8800', + ) + bucket = s3.Bucket('foobucket') + data = BytesIO() + bucket.download_fileobj(Fileobj=data, Key='newobject') + print(data.getvalue()) + assert data.getvalue().decode() == 'test\n' + +- vip.exec: + host.a: + - umount /mnt/foo + +- cephadm.shell: + host.a: + - ceph nfs export rm foo /foobucket + - ceph nfs cluster rm foo diff --git a/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml new file mode 100644 index 000000000..721aecfc3 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml @@ -0,0 +1,90 @@ +tasks: +- vip: + +# make sure cephadm notices the new IP +- cephadm.shell: + host.a: + - ceph orch device ls --refresh + +# stop kernel nfs server, if running +- vip.exec: + all-hosts: + - systemctl stop nfs-server + +- cephadm.shell: + host.a: + - ceph orch apply rgw foorgw --port 8800 + - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} + +- vip.exec: + host.a: + - dnf install -y python3-boto3 || apt install -y python3-boto3 + - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json + +- python: + host.a: | + import boto3 + import json + + with open('/tmp/user.json', 'rt') as f: + info = json.loads(f.read()) + 
s3 = boto3.resource( + 's3', + aws_access_key_id=info['keys'][0]['access_key'], + aws_secret_access_key=info['keys'][0]['secret_key'], + endpoint_url='http://localhost:8800', + ) + bucket = s3.Bucket('foobucket') + bucket.create() + bucket.put_object(Key='myobject', Body='thebody') + +- cephadm.shell: + host.a: + - ceph nfs export create rgw --cluster-id foo --pseudo-path /foouser --user-id foouser + +- cephadm.wait_for_service: + service: nfs.foo +- cephadm.wait_for_service: + service: ingress.nfs.foo + +## export and mount + +- vip.exec: + host.a: + - mkdir /mnt/foo + - sleep 5 + - mount -t nfs {{VIP0}}:/foouser /mnt/foo + - test -d /mnt/foo/foobucket + - find /mnt/foo -ls + - grep thebody /mnt/foo/foobucket/myobject + - echo test > /mnt/foo/foobucket/newobject + - sync + +- python: + host.a: | + import boto3 + import json + from io import BytesIO + + with open('/tmp/user.json', 'rt') as f: + info = json.loads(f.read()) + s3 = boto3.resource( + 's3', + aws_access_key_id=info['keys'][0]['access_key'], + aws_secret_access_key=info['keys'][0]['secret_key'], + endpoint_url='http://localhost:8800', + ) + bucket = s3.Bucket('foobucket') + data = BytesIO() + bucket.download_fileobj(Fileobj=data, Key='newobject') + print(data.getvalue()) + assert data.getvalue().decode() == 'test\n' + +- vip.exec: + host.a: + - umount /mnt/foo + +- cephadm.shell: + host.a: + - ceph nfs export rm foo /foouser + - ceph nfs cluster rm foo diff --git a/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml index 3e5ad1a2e..b4e843df2 100644 --- a/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml +++ b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml @@ -40,7 +40,7 @@ tasks: - cephadm.shell: host.a: - - ceph nfs export create cephfs foofs foo --binding /fake + - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake - vip.exec: host.a: diff --git a/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml index 09fb3c768..a47dd9d76 100644 --- a/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml +++ b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml @@ -14,8 +14,8 @@ tasks: - cephadm.shell: host.a: - ceph fs volume create foofs - - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} - - ceph nfs export create cephfs foofs foo --binding /fake + - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} --port 2999 + - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake - cephadm.wait_for_service: service: nfs.foo @@ -28,7 +28,7 @@ tasks: host.a: - mkdir /mnt/foo - sleep 5 - - mount -t nfs {{VIP0}}:/fake /mnt/foo + - mount -t nfs {{VIP0}}:/fake /mnt/foo -o port=2999 - echo test > /mnt/foo/testfile - sync @@ -48,3 +48,23 @@ tasks: ceph orch daemon start $haproxy while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done done + +# take each ganesha down in turn. +# simulate "failure" by deleting the container +- vip.exec: + all-hosts: + - | + echo "Check with $(hostname) ganesha(s) down..." + for c in `systemctl | grep ceph- | grep @nfs | awk '{print $1}'`; do + cid=`echo $c | sed 's/@/-/'` + id=`echo $c | cut -d @ -f 2 | sed 's/.service$//'` + fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-` + echo "Removing daemon $id fsid $fsid..." 
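+ # rm-daemon deletes the daemon's systemd unit and container outright, simulating a hard ganesha failure rather than a clean stop (per the "simulate failure" comment above)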
+ sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id + + echo "Waking up cephadm..." + sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh + + while ! timeout 1 cat /mnt/foo/testfile ; do true ; done + echo "Mount is back!" + done diff --git a/ceph/qa/suites/orch/cephadm/dashboard/0-distro/centos_8.2_container_tools_3.0.yaml b/ceph/qa/suites/orch/cephadm/smoke/distro/centos_8.2_container_tools_3.0.yaml similarity index 100% rename from ceph/qa/suites/orch/cephadm/dashboard/0-distro/centos_8.2_container_tools_3.0.yaml rename to ceph/qa/suites/orch/cephadm/smoke/distro/centos_8.2_container_tools_3.0.yaml diff --git a/ceph/qa/suites/orch/cephadm/smoke/distro/centos_8.stream_container_tools.yaml b/ceph/qa/suites/orch/cephadm/smoke/distro/centos_8.stream_container_tools.yaml new file mode 120000 index 000000000..7a86f967f --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/smoke/distro/centos_8.stream_container_tools.yaml @@ -0,0 +1 @@ +.qa/distros/podman/centos_8.stream_container_tools.yaml \ No newline at end of file diff --git a/ceph/qa/suites/orch/cephadm/smoke/distro/rhel_8.4_container_tools_3.0.yaml b/ceph/qa/suites/orch/cephadm/smoke/distro/rhel_8.4_container_tools_3.0.yaml new file mode 120000 index 000000000..9e1ab9a0e --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/smoke/distro/rhel_8.4_container_tools_3.0.yaml @@ -0,0 +1 @@ +.qa/distros/podman/rhel_8.4_container_tools_3.0.yaml \ No newline at end of file diff --git a/ceph/qa/suites/orch/cephadm/smoke/distro/rhel_8.4_container_tools_rhel8.yaml b/ceph/qa/suites/orch/cephadm/smoke/distro/rhel_8.4_container_tools_rhel8.yaml new file mode 120000 index 000000000..b4b0a7892 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/smoke/distro/rhel_8.4_container_tools_rhel8.yaml @@ -0,0 +1 @@ +.qa/distros/podman/rhel_8.4_container_tools_rhel8.yaml \ No newline at end of file diff --git a/ceph/qa/suites/orch/cephadm/upgrade/1-start-distro/1-start-centos_8.3-octopus.yaml b/ceph/qa/suites/orch/cephadm/upgrade/1-start-distro/1-start-centos_8.stream_container-tools.yaml similarity index 52% rename from ceph/qa/suites/orch/cephadm/upgrade/1-start-distro/1-start-centos_8.3-octopus.yaml rename to ceph/qa/suites/orch/cephadm/upgrade/1-start-distro/1-start-centos_8.stream_container-tools.yaml index 2e551dd43..a1a9cb5ef 100644 --- a/ceph/qa/suites/orch/cephadm/upgrade/1-start-distro/1-start-centos_8.3-octopus.yaml +++ b/ceph/qa/suites/orch/cephadm/upgrade/1-start-distro/1-start-centos_8.stream_container-tools.yaml @@ -1,14 +1,16 @@ os_type: centos -os_version: "8.3" -overrides: - selinux: - whitelist: - - scontext=system_u:system_r:logrotate_t:s0 +os_version: "8.stream" tasks: +- pexec: + all: + - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup + - sudo dnf -y module reset container-tools + - sudo dnf -y module install container-tools + - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf - cephadm: - image: quay.ceph.io/ceph-ci/ceph:octopus - cephadm_branch: octopus + image: docker.io/ceph/ceph:v15.2.0 + cephadm_branch: v15.2.0 cephadm_git_url: https://github.com/ceph/ceph # avoid --cap-add=PTRACE + --privileged for older cephadm versions allow_ptrace: false @@ -16,7 +18,6 @@ tasks: add_mons_via_daemon_add: true avoid_pacific_features: true - roles: - - mon.a - mon.c @@ -26,7 +27,7 @@ roles: - osd.2 - osd.3 - client.0 -# - ceph.rgw.realm.zone.a # CLI change in v16 pacific +# - ceph.rgw.realm.zone.a # Disabled, needs 15.2.4 as an upgrade start - node-exporter.a - alertmanager.a - - mon.b @@ -39,4 
+40,4 @@ roles: - prometheus.a - grafana.a - node-exporter.b - - ceph.iscsi.iscsi.a +# - ceph.iscsi.iscsi.a # needs later start point diff --git a/ceph/qa/suites/orch/cephadm/upgrade/5-upgrade-ls.yaml b/ceph/qa/suites/orch/cephadm/upgrade/5-upgrade-ls.yaml new file mode 100644 index 000000000..236ea6c6b --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/upgrade/5-upgrade-ls.yaml @@ -0,0 +1,6 @@ +tasks: +- cephadm.shell: + mon.a: + - ceph orch upgrade ls + - ceph orch upgrade ls --image quay.io/ceph/ceph | grep 16.2.0 + - ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2 diff --git a/ceph/qa/suites/orch/cephadm/workunits/0-distro/centos_8.stream_container_tools.yaml b/ceph/qa/suites/orch/cephadm/workunits/0-distro/centos_8.stream_container_tools.yaml new file mode 120000 index 000000000..7a86f967f --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/workunits/0-distro/centos_8.stream_container_tools.yaml @@ -0,0 +1 @@ +.qa/distros/podman/centos_8.stream_container_tools.yaml \ No newline at end of file diff --git a/ceph/qa/suites/orch/cephadm/workunits/task/test_nfs.yaml b/ceph/qa/suites/orch/cephadm/workunits/task/test_nfs.yaml new file mode 100644 index 000000000..8448c1a2f --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/workunits/task/test_nfs.yaml @@ -0,0 +1,17 @@ +roles: +- - host.a + - osd.0 + - osd.1 + - osd.2 + - mon.a + - mgr.a + - client.0 +tasks: +- install: +- cephadm: +- cephadm.shell: + host.a: + - ceph orch apply mds a +- cephfs_test_runner: + modules: + - tasks.cephfs.test_nfs diff --git a/ceph/qa/suites/orch/cephadm/workunits/task/test_orch_cli.yaml b/ceph/qa/suites/orch/cephadm/workunits/task/test_orch_cli.yaml index b62872123..ec65fb116 100644 --- a/ceph/qa/suites/orch/cephadm/workunits/task/test_orch_cli.yaml +++ b/ceph/qa/suites/orch/cephadm/workunits/task/test_orch_cli.yaml @@ -14,5 +14,4 @@ tasks: - ceph orch apply mds a - cephfs_test_runner: modules: - - tasks.cephfs.test_nfs - tasks.cephadm_cases.test_cli diff --git a/ceph/qa/suites/perf-basic/ubuntu_18.04.yaml b/ceph/qa/suites/perf-basic/ubuntu_18.04.yaml deleted file mode 120000 index cfb85f10e..000000000 --- a/ceph/qa/suites/perf-basic/ubuntu_18.04.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/all/ubuntu_18.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/perf-basic/ubuntu_latest.yaml b/ceph/qa/suites/perf-basic/ubuntu_latest.yaml new file mode 120000 index 000000000..3a09f9abb --- /dev/null +++ b/ceph/qa/suites/perf-basic/ubuntu_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/perf-basic/workloads/cosbench_64K_write.yaml b/ceph/qa/suites/perf-basic/workloads/cosbench_64K_write.yaml deleted file mode 100644 index 1dbeef4a8..000000000 --- a/ceph/qa/suites/perf-basic/workloads/cosbench_64K_write.yaml +++ /dev/null @@ -1,30 +0,0 @@ -meta: -- desc: | - Run cosbench benchmark using cbt. - 64K write workload. 
- -overrides: - rgw: - data_pool_pg_size: 64 - index_pool_pg_size: 64 -tasks: -- cbt: - benchmarks: - cosbench: - obj_size: [64KB] - osd_ra: [4096] - workers: 1 - containers_max: 1000 - objects_max: 100 - mode: [write] - template: [default] - rampup: 30 - runtime: 300 - rampdown: 30 - containers: ["u(1,100)"] - objects: ["u(1,100)"] - ratio: [100] - cluster: - user: 'ubuntu' - osds_per_node: 1 - iterations: 1 diff --git a/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml b/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml index 5ca0c6621..0a6076cab 100644 --- a/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml +++ b/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml @@ -36,6 +36,7 @@ tasks: - tasks.mgr.dashboard.test_auth - tasks.mgr.dashboard.test_cephfs - tasks.mgr.dashboard.test_cluster_configuration + - tasks.mgr.dashboard.test_cluster - tasks.mgr.dashboard.test_crush_rule - tasks.mgr.dashboard.test_erasure_code_profile - tasks.mgr.dashboard.test_ganesha diff --git a/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml b/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml index deab01adb..905e6f783 100644 --- a/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml @@ -23,3 +23,4 @@ tasks: - cephfs_test_runner: modules: - tasks.mgr.test_module_selftest + fail_on_skip: false diff --git a/ceph/qa/suites/rados/mgr/tasks/progress.yaml b/ceph/qa/suites/rados/mgr/tasks/progress.yaml index 4a0e802b2..73bbe3c99 100644 --- a/ceph/qa/suites/rados/mgr/tasks/progress.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/progress.yaml @@ -17,6 +17,7 @@ tasks: - \(FS_WITH_FAILED_MDS\) - \(FS_DEGRADED\) - \(PG_ + - \(OSDMAP_FLAGS\) - replacing it with standby - No standby daemons available - cephfs_test_runner: diff --git a/ceph/qa/suites/rados/perf/ceph.yaml b/ceph/qa/suites/rados/perf/ceph.yaml index 59e8029b1..f7f1c8556 100644 --- a/ceph/qa/suites/rados/perf/ceph.yaml +++ b/ceph/qa/suites/rados/perf/ceph.yaml @@ -10,5 +10,4 @@ tasks: - \(OSD_ - \(OBJECT_ - overall HEALTH -- rgw: [client.0] - ssh_keys: diff --git a/ceph/qa/suites/rados/perf/ubuntu_18.04.yaml b/ceph/qa/suites/rados/perf/ubuntu_18.04.yaml deleted file mode 120000 index cfb85f10e..000000000 --- a/ceph/qa/suites/rados/perf/ubuntu_18.04.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/all/ubuntu_18.04.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/perf/ubuntu_latest.yaml b/ceph/qa/suites/rados/perf/ubuntu_latest.yaml new file mode 120000 index 000000000..3a09f9abb --- /dev/null +++ b/ceph/qa/suites/rados/perf/ubuntu_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml b/ceph/qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml deleted file mode 100644 index ff0662627..000000000 --- a/ceph/qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml +++ /dev/null @@ -1,25 +0,0 @@ -overrides: - rgw: - data_pool_pg_size: 64 - index_pool_pg_size: 64 -tasks: -- cbt: - benchmarks: - cosbench: - obj_size: [64KB] - osd_ra: [4096] - workers: 1 - containers_max: 1000 - objects_max: 100 - mode: [mix] - template: [default] - rampup: 30 - runtime: 300 - rampdown: 30 - containers: ["u(1,100)"] - objects: ["u(1,100)"] - ratio: [60] - cluster: - user: 'ubuntu' - osds_per_node: 1 - iterations: 1 diff --git a/ceph/qa/suites/rados/perf/workloads/cosbench_64K_write.yaml b/ceph/qa/suites/rados/perf/workloads/cosbench_64K_write.yaml deleted file mode 100644 index 
39034a03b..000000000 --- a/ceph/qa/suites/rados/perf/workloads/cosbench_64K_write.yaml +++ /dev/null @@ -1,25 +0,0 @@ -overrides: - rgw: - data_pool_pg_size: 64 - index_pool_pg_size: 64 -tasks: -- cbt: - benchmarks: - cosbench: - obj_size: [64KB] - osd_ra: [4096] - workers: 1 - containers_max: 1000 - objects_max: 100 - mode: [write] - template: [default] - rampup: 30 - runtime: 300 - rampdown: 30 - containers: ["u(1,100)"] - objects: ["u(1,100)"] - ratio: [100] - cluster: - user: 'ubuntu' - osds_per_node: 1 - iterations: 1 diff --git a/ceph/qa/suites/rados/singleton/all/backfill-toofull.yaml b/ceph/qa/suites/rados/singleton/all/backfill-toofull.yaml new file mode 100644 index 000000000..fcc3d0e29 --- /dev/null +++ b/ceph/qa/suites/rados/singleton/all/backfill-toofull.yaml @@ -0,0 +1,37 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB +tasks: +- install: +- ceph: + create_rbd_pool: false + pre-mgr-commands: + - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force + log-ignorelist: + - Error + - overall HEALTH_ + - \(OBJECT_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_BACKFILLFULL\) + - \(POOL_NEARFULL\) + - \(SLOW_OPS\) + - \(TOO_FEW_PGS\) + - Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running + - slow request + conf: + osd: + osd min pg log entries: 5 + osd max pg log entries: 5 +- backfill_toofull: diff --git a/ceph/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml b/ceph/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml new file mode 100644 index 000000000..d71eab149 --- /dev/null +++ b/ceph/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml @@ -0,0 +1,36 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB +tasks: +- install: +- ceph: + create_rbd_pool: false + pre-mgr-commands: + - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force + log-ignorelist: + - \(OBJECT_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(SLOW_OPS\) + - deep-scrub + - missing + - overall HEALTH_ + - repair + - slow request + - unfound + conf: + osd: + osd min pg log entries: 5 + osd max pg log entries: 5 +- ec_inconsistent_hinfo: diff --git a/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml index f678d08ce..3f1c74831 100644 --- a/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml +++ b/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml @@ -24,6 +24,9 @@ tasks: - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ + conf: + mon: + debug auth: 30 - full_sequential: - radosbench: clients: [client.0] diff --git a/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-bootstrap-workunit.yaml b/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-journal-bootstrap-workunit.yaml similarity index 83% rename from ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-bootstrap-workunit.yaml rename to ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-journal-bootstrap-workunit.yaml index 585f58291..b9c5562be 100644 --- a/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-bootstrap-workunit.yaml +++ b/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-journal-bootstrap-workunit.yaml @@ -9,3 +9,5 @@ tasks: CEPH_ARGS: '' RBD_MIRROR_INSTANCES: '1' RBD_MIRROR_USE_EXISTING_CLUSTER: '1' + MIRROR_POOL_MODE: 'pool' + MIRROR_IMAGE_MODE: 'journal' diff --git 
a/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-snapshot-bootstrap-workunit.yaml b/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-snapshot-bootstrap-workunit.yaml new file mode 100644 index 000000000..5ad78474d --- /dev/null +++ b/ceph/qa/suites/rbd/mirror/workloads/rbd-mirror-snapshot-bootstrap-workunit.yaml @@ -0,0 +1,13 @@ +meta: +- desc: run the rbd_mirror_bootstrap.sh workunit to test the rbd-mirror daemon +tasks: +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror_bootstrap.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + RBD_MIRROR_INSTANCES: '1' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' + MIRROR_POOL_MODE: 'image' + MIRROR_IMAGE_MODE: 'snapshot' diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/big-cache.yaml b/ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/big-cache.yaml new file mode 100644 index 000000000..7ac3f31da --- /dev/null +++ b/ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/big-cache.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + conf: + client: + rbd_persistent_cache_path: /home/ubuntu/cephtest/write_back_cache + rbd_persistent_cache_size: 8589934592 + rbd_plugins: pwl_cache + rbd_default_features: 61 +tasks: +- exec: + client.0: + - "mkdir -m 777 /home/ubuntu/cephtest/write_back_cache" +- exec_on_cleanup: + client.0: + - "rm -rf /home/ubuntu/cephtest/write_back_cache" diff --git a/ceph/qa/suites/rgw/verify/tasks/s3tests-java.yaml b/ceph/qa/suites/rgw/verify/tasks/s3tests-java.yaml index 06c134c37..455f86485 100644 --- a/ceph/qa/suites/rgw/verify/tasks/s3tests-java.yaml +++ b/ceph/qa/suites/rgw/verify/tasks/s3tests-java.yaml @@ -1,6 +1,6 @@ tasks: - s3tests-java: client.0: - force-branch: master + force-branch: ceph-pacific force-repo: https://github.com/ceph/java_s3tests.git diff --git a/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/point-to-point-upgrade.yaml b/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/point-to-point-upgrade.yaml index 9a2d9d8c8..061f6ddc5 100644 --- a/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/point-to-point-upgrade.yaml +++ b/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/point-to-point-upgrade.yaml @@ -3,7 +3,7 @@ meta: Run ceph on two nodes, using one of them as a client, with a separate client-only node. Use xfs beneath the osds. 
- install ceph/pacific v16.2.4 and the v16.2.x point versions + install ceph/pacific v16.2.5 and the v16.2.x point versions run workload and upgrade-sequence in parallel (every point release should be tested) run workload and upgrade-sequence in parallel @@ -69,28 +69,28 @@ openstack: count: 3 size: 30 # GB tasks: -- print: "**** done pacific about to install v16.2.4 " +- print: "**** done pacific about to install v16.2.5 " - install: - tag: v16.2.4 + tag: v16.2.5 # line below can be removed its from jewel test #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2'] -- print: "**** done v16.2.4 install" +- print: "**** done v16.2.5 install" - ceph: fs: xfs add_osds_to_crush: true - print: "**** done ceph xfs" - sequential: - workload -- print: "**** done workload v16.2.4" +- print: "**** done workload v16.2.5" -####### upgrade to v16.2.5 +####### upgrade to v16.2.6 - install.upgrade: #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev'] mon.a: - tag: v16.2.5 + tag: v16.2.6 mon.b: - tag: v16.2.5 + tag: v16.2.6 - parallel: - workload_pacific - upgrade-sequence_pacific diff --git a/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1-ceph-install/pacific..yaml b/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1-ceph-install/pacific.yaml similarity index 81% rename from ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1-ceph-install/pacific..yaml rename to ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1-ceph-install/pacific.yaml index 537ed596e..ff3c46897 100644 --- a/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1-ceph-install/pacific..yaml +++ b/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1-ceph-install/pacific.yaml @@ -1,13 +1,13 @@ meta: - desc: | - install ceph/pacific v16.2.4 + install ceph/pacific v16.2.6 Overall upgrade path is - pacific-latest.point => pacific-latest tasks: - install: - tag: v16.2.4 + tag: v16.2.6 exclude_packages: ['librados3'] extra_packages: ['librados2'] -- print: "**** done install pacific v16.2.4" +- print: "**** done install pacific v16.2.6" - ceph: - exec: osd.0: diff --git a/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/rbd-python.yaml b/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/rbd-python.yaml index 6436757c5..edf6d205c 100644 --- a/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/rbd-python.yaml +++ b/ceph/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/rbd-python.yaml @@ -3,7 +3,7 @@ meta: librbd python api tests tasks: - workunit: - tag: v16.2.4 + tag: v16.2.6 clients: client.0: - rbd/test_librbd_python.sh diff --git a/ceph/qa/tasks/backfill_toofull.py b/ceph/qa/tasks/backfill_toofull.py new file mode 100644 index 000000000..1a866595d --- /dev/null +++ b/ceph/qa/tasks/backfill_toofull.py @@ -0,0 +1,193 @@ +""" +Backfill_toofull +""" +import logging +import time +from tasks import ceph_manager +from tasks.util.rados import rados +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def wait_for_pg_state(manager, pgid, state, to_osd): + log.debug("waiting for pg %s state is %s" % (pgid, state)) + for i in range(300): + time.sleep(5) + manager.flush_pg_stats([0, 1, 2, 3]) + pgs = manager.get_pg_stats() + pg = next((pg for pg in pgs if pg['pgid'] == pgid), None) + log.info('pg=%s' % pg); + assert pg + status = pg['state'].split('+') + if 'active' not in status: + log.debug('not 
active') + continue + if state not in status: + log.debug('not %s' % state) + continue + assert to_osd in pg['up'] + return + assert False, '%s not in %s' % (pgid, state) + + +def task(ctx, config): + """ + Test backfill reservation calculates "toofull" condition correctly. + + A pretty rigid cluster is brought up and tested by this task + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'backfill_toofull task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + profile = config.get('erasure_code_profile', { + 'k': '2', + 'm': '1', + 'crush-failure-domain': 'osd' + }) + profile_name = profile.get('name', 'backfill_toofull') + manager.create_erasure_code_profile(profile_name, profile) + pool = manager.create_pool_with_unique_name( + pg_num=1, + erasure_code_profile_name=profile_name, + min_size=2) + manager.raw_cluster_cmd('osd', 'pool', 'set', pool, + 'pg_autoscale_mode', 'off') + + manager.flush_pg_stats([0, 1, 2, 3]) + manager.wait_for_clean() + + pool_id = manager.get_pool_num(pool) + pgid = '%d.0' % pool_id + pgs = manager.get_pg_stats() + acting = next((pg['acting'] for pg in pgs if pg['pgid'] == pgid), None) + log.debug("acting=%s" % acting) + assert acting + primary = acting[0] + target = acting[1] + + log.debug("write some data") + rados(ctx, mon, ['-p', pool, 'bench', '60', 'write', '--no-cleanup']) + df = manager.get_osd_df(target) + log.debug("target osd df: %s" % df) + + total_kb = df['kb'] + used_kb = df['kb_used'] + + log.debug("pause recovery") + manager.raw_cluster_cmd('osd', 'set', 'noout') + manager.raw_cluster_cmd('osd', 'set', 'nobackfill') + manager.raw_cluster_cmd('osd', 'set', 'norecover') + + log.debug("stop target osd %s" % target) + manager.kill_osd(target) + manager.wait_till_active() + + pgs = manager.get_pg_stats() + pg = next((pg for pg in pgs if pg['pgid'] == pgid), None) + log.debug('pg=%s' % pg) + assert pg + + log.debug("re-write data") + rados(ctx, mon, ['-p', pool, 'cleanup']) + time.sleep(10) + rados(ctx, mon, ['-p', pool, 'bench', '60', 'write', '--no-cleanup']) + + df = manager.get_osd_df(primary) + log.debug("primary osd df: %s" % df) + + primary_used_kb = df['kb_used'] + + log.info("test backfill reservation rejected with toofull") + + # We set the backfillfull ratio below the new data size and expect the pg + # to enter the backfill_toofull state. + # + # We also need to update nearfull ratio to prevent "full ratio(s) out of order". + + backfillfull = 0.9 * primary_used_kb / total_kb + nearfull = backfillfull * 0.9 + + log.debug("update nearfull ratio to %s and backfillfull ratio to %s" % + (nearfull, backfillfull)) + manager.raw_cluster_cmd('osd', 'set-nearfull-ratio', + '{:.3f}'.format(nearfull)) + manager.raw_cluster_cmd('osd', 'set-backfillfull-ratio', + '{:.3f}'.format(backfillfull)) + + log.debug("start target osd %s" % target) + + manager.revive_osd(target) + manager.wait_for_active() + manager.wait_till_osd_is_up(target) + + wait_for_pg_state(manager, pgid, 'backfill_toofull', target) + + log.info("test pg does not enter backfill_toofull after restarting backfill") + + # We want to set backfillfull ratio to be big enough for the target to + # successfully backfill new data but smaller than the sum of old and new + # data, so if the osd backfill reservation incorrectly calculates "toofull" + # the test will detect this (fail).
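+ # + # Illustrative numbers (assumed, not measured): with ~1.0 GiB of old data + # and ~1.2 GiB of re-written data on a 10 GiB OSD, the ratio computed below + # lands between max(old, new)/total and (old + new)/total, so a correct + # reservation backfills while a miscalculated one would report toofull.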
+ # + # Note, we need to operate with "uncompressed" bytes because currently + # osd backfill reservation does not take compression into account. + # + # We also need to update nearfull ratio to prevent "full ratio(s) out of order". + + pdf = manager.get_pool_df(pool) + log.debug("pool %s df: %s" % (pool, pdf)) + assert pdf + compress_ratio = 1.0 * pdf['compress_under_bytes'] / pdf['compress_bytes_used'] \ + if pdf['compress_bytes_used'] > 0 else 1.0 + log.debug("compress_ratio: %s" % compress_ratio) + + backfillfull = (used_kb + primary_used_kb) * compress_ratio / total_kb + assert backfillfull < 0.9 + nearfull_min = max(used_kb, primary_used_kb) * compress_ratio / total_kb + assert nearfull_min < backfillfull + delta = backfillfull - nearfull_min + nearfull = nearfull_min + delta * 0.1 + backfillfull = nearfull_min + delta * 0.2 + + log.debug("update nearfull ratio to %s and backfillfull ratio to %s" % + (nearfull, backfillfull)) + manager.raw_cluster_cmd('osd', 'set-nearfull-ratio', + '{:.3f}'.format(nearfull)) + manager.raw_cluster_cmd('osd', 'set-backfillfull-ratio', + '{:.3f}'.format(backfillfull)) + + wait_for_pg_state(manager, pgid, 'backfilling', target) + + pgs = manager.get_pg_stats() + pg = next((pg for pg in pgs if pg['pgid'] == pgid), None) + log.debug('pg=%s' % pg) + assert pg + + log.debug("interrupt %s backfill" % target) + manager.mark_down_osd(target) + # after marking the target osd down it will automatically be + # up soon again + + log.debug("resume recovery") + manager.raw_cluster_cmd('osd', 'unset', 'noout') + manager.raw_cluster_cmd('osd', 'unset', 'nobackfill') + manager.raw_cluster_cmd('osd', 'unset', 'norecover') + + # wait for everything to peer, backfill and recover + manager.wait_for_clean() + + pgs = manager.get_pg_stats() + pg = next((pg for pg in pgs if pg['pgid'] == pgid), None) + log.info('pg=%s' % pg) + assert pg + assert 'clean' in pg['state'].split('+') diff --git a/ceph/qa/tasks/barbican.py b/ceph/qa/tasks/barbican.py index cfa85e1dd..4d1354903 100644 --- a/ceph/qa/tasks/barbican.py +++ b/ceph/qa/tasks/barbican.py @@ -96,8 +96,11 @@ def setup_venv(ctx, config): assert isinstance(config, dict) log.info('Setting up virtualenv for barbican...') for (client, _) in config.items(): - run_in_barbican_dir(ctx, client, ['virtualenv', '.barbicanenv']) - run_in_barbican_venv(ctx, client, ['pip', 'install', 'pytz', '-e', get_barbican_dir(ctx)]) + run_in_barbican_dir(ctx, client, + ['python3', '-m', 'venv', '.barbicanenv']) + run_in_barbican_venv(ctx, client, + ['pip', 'install', 'pytz', + '-e', get_barbican_dir(ctx)]) yield def assign_ports(ctx, config, initial_port): diff --git a/ceph/qa/tasks/ceph.py b/ceph/qa/tasks/ceph.py index 9894addd7..a6eab9be8 100644 --- a/ceph/qa/tasks/ceph.py +++ b/ceph/qa/tasks/ceph.py @@ -46,8 +46,8 @@ def generate_caps(type_): """ defaults = dict( osd=dict( - mon='allow *', - mgr='allow *', + mon='allow profile osd', + mgr='allow profile osd', osd='allow *', ), mgr=dict( @@ -376,6 +376,20 @@ def crush_setup(ctx, config): yield +@contextlib.contextmanager +def setup_manager(ctx, config): + first_mon = teuthology.get_first_mon(ctx, config, config['cluster']) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + if not hasattr(ctx, 'managers'): + ctx.managers = {} + ctx.managers[config['cluster']] = CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager.'
+ config['cluster']), + cluster=config['cluster'], + ) + yield + @contextlib.contextmanager def create_rbd_pool(ctx, config): cluster_name = config['cluster'] @@ -1870,6 +1884,7 @@ def task(ctx, config): lambda: run_daemon(ctx=ctx, config=config, type_='mgr'), lambda: crush_setup(ctx=ctx, config=config), lambda: run_daemon(ctx=ctx, config=config, type_='osd'), + lambda: setup_manager(ctx=ctx, config=config), lambda: create_rbd_pool(ctx=ctx, config=config), lambda: run_daemon(ctx=ctx, config=config, type_='mds'), lambda: cephfs_setup(ctx=ctx, config=config), @@ -1877,17 +1892,6 @@ def task(ctx, config): ] with contextutil.nested(*subtasks): - first_mon = teuthology.get_first_mon(ctx, config, config['cluster']) - (mon,) = ctx.cluster.only(first_mon).remotes.keys() - if not hasattr(ctx, 'managers'): - ctx.managers = {} - ctx.managers[config['cluster']] = CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager.' + config['cluster']), - cluster=config['cluster'], - ) - try: if config.get('wait-for-healthy', True): healthy(ctx=ctx, config=dict(cluster=config['cluster'])) diff --git a/ceph/qa/tasks/ceph_manager.py b/ceph/qa/tasks/ceph_manager.py index 28f28f54f..a274b8f29 100644 --- a/ceph/qa/tasks/ceph_manager.py +++ b/ceph/qa/tasks/ceph_manager.py @@ -1438,7 +1438,7 @@ class ObjectStoreTool: if self.osd == "primary": self.osd = self.manager.get_object_primary(self.pool, self.object_name) - assert self.osd + assert self.osd is not None if self.object_name: self.pgid = self.manager.get_object_pg_with_shard(self.pool, self.object_name, @@ -2324,6 +2324,24 @@ class CephManager: except KeyError: return j['pg_stats'] + def get_osd_df(self, osdid): + """ + Get the osd df stats + """ + out = self.raw_cluster_cmd('osd', 'df', 'name', 'osd.{}'.format(osdid), + '--format=json') + j = json.loads('\n'.join(out.split('\n')[1:])) + return j['nodes'][0] + + def get_pool_df(self, name): + """ + Get the pool df stats + """ + out = self.raw_cluster_cmd('df', 'detail', '--format=json') + j = json.loads('\n'.join(out.split('\n')[1:])) + return next((p['stats'] for p in j['pools'] if p['name'] == name), + None) + def get_pgids_to_force(self, backfill): """ Return the randomized list of PGs that can have their recovery/backfill forced diff --git a/ceph/qa/tasks/ceph_test_case.py b/ceph/qa/tasks/ceph_test_case.py index 0f207c640..7040853df 100644 --- a/ceph/qa/tasks/ceph_test_case.py +++ b/ceph/qa/tasks/ceph_test_case.py @@ -189,16 +189,22 @@ class CephTestCase(unittest.TestCase): log.debug("wait_until_equal: success") @classmethod - def wait_until_true(cls, condition, timeout, period=5): + def wait_until_true(cls, condition, timeout, check_fn=None, period=5): elapsed = 0 + retry_count = 0 while True: if condition(): - log.debug("wait_until_true: success in {0}s".format(elapsed)) + log.debug("wait_until_true: success in {0}s and {1} retries".format(elapsed, retry_count)) return else: if elapsed >= timeout: - raise TestTimeoutError("Timed out after {0}s".format(elapsed)) + if check_fn and check_fn() and retry_count < 5: + elapsed = 0 + retry_count += 1 + log.debug("wait_until_true: making progress, waiting (timeout={0} retry_count={1})...".format(timeout, retry_count)) + else: + raise TestTimeoutError("Timed out after {0}s and {1} retries".format(elapsed, retry_count)) else: - log.debug("wait_until_true: waiting (timeout={0})...".format(timeout)) + log.debug("wait_until_true: waiting (timeout={0} retry_count={1})...".format(timeout, retry_count)) time.sleep(period) elapsed += period diff --git 
a/ceph/qa/tasks/cephfs/filesystem.py b/ceph/qa/tasks/cephfs/filesystem.py index e66185c42..881ce7d80 100644 --- a/ceph/qa/tasks/cephfs/filesystem.py +++ b/ceph/qa/tasks/cephfs/filesystem.py @@ -148,6 +148,13 @@ class FSStatus(object): if info['rank'] >= 0 and info['state'] != 'up:standby-replay': yield info + def get_damaged(self, fscid): + """ + Get the damaged ranks for the given FSCID. + """ + fs = self.get_fsmap(fscid) + return fs['mdsmap']['damaged'] + def get_rank(self, fscid, rank): """ Get the rank for the given FSCID. @@ -1045,6 +1052,11 @@ class Filesystem(MDSCluster): status = self.getinfo() return status.get_ranks(self.id) + def get_damaged(self, status=None): + if status is None: + status = self.getinfo() + return status.get_damaged(self.id) + def get_replays(self, status=None): if status is None: status = self.getinfo() diff --git a/ceph/qa/tasks/cephfs/mount.py b/ceph/qa/tasks/cephfs/mount.py index 883acb4d8..9c12d5272 100644 --- a/ceph/qa/tasks/cephfs/mount.py +++ b/ceph/qa/tasks/cephfs/mount.py @@ -530,6 +530,7 @@ class CephFSMount(object): raise NotImplementedError() def get_keyring_path(self): + # N.B.: default keyring is /etc/ceph/ceph.keyring; see ceph.py and generate_caps return '/etc/ceph/ceph.client.{id}.keyring'.format(id=self.client_id) def get_key_from_keyfile(self): diff --git a/ceph/qa/tasks/cephfs/test_failover.py b/ceph/qa/tasks/cephfs/test_failover.py index 304d27c2c..f2032b237 100644 --- a/ceph/qa/tasks/cephfs/test_failover.py +++ b/ceph/qa/tasks/cephfs/test_failover.py @@ -518,6 +518,22 @@ class TestStandbyReplay(CephFSTestCase): time.sleep(30) self._confirm_single_replay() + def test_standby_replay_damaged(self): + """ + That a standby-replay daemon can cause the rank to go damaged correctly. + """ + + self._confirm_no_replay() + self.config_set("mds", "mds_standby_replay_damaged", True) + self.fs.set_allow_standby_replay(True) + self.wait_until_true( + lambda: len(self.fs.get_damaged()) > 0, + timeout=30 + ) + status = self.fs.status() + self.assertListEqual([], list(self.fs.get_ranks(status=status))) + self.assertListEqual([0], self.fs.get_damaged(status=status)) + def test_standby_replay_disable(self): """ That turning off allow_standby_replay fails all standby-replay daemons. 
diff --git a/ceph/qa/tasks/cephfs/test_mirroring.py b/ceph/qa/tasks/cephfs/test_mirroring.py index c3c746a1d..91f07fdf4 100644 --- a/ceph/qa/tasks/cephfs/test_mirroring.py +++ b/ceph/qa/tasks/cephfs/test_mirroring.py @@ -237,8 +237,8 @@ class TestMirroring(CephFSTestCase): log.debug(f'command returned={res}') return json.loads(res) - def get_mirror_daemon_status(self, fs_name, fs_id): - daemon_status = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "daemon", "status", fs_name)) + def get_mirror_daemon_status(self): + daemon_status = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "daemon", "status")) log.debug(f'daemon_status: {daemon_status}') # running a single mirror daemon is supported status = daemon_status[0] @@ -657,7 +657,7 @@ class TestMirroring(CephFSTestCase): self.peer_add(self.primary_fs_name, self.primary_fs_id, "client.mirror_remote@ceph", self.secondary_fs_name) time.sleep(30) - status = self.get_mirror_daemon_status(self.primary_fs_name, self.primary_fs_id) + status = self.get_mirror_daemon_status() # assumption for this test: mirroring enabled for a single filesystem w/ single # peer @@ -673,7 +673,7 @@ class TestMirroring(CephFSTestCase): self.add_directory(self.primary_fs_name, self.primary_fs_id, '/d0') time.sleep(120) - status = self.get_mirror_daemon_status(self.primary_fs_name, self.primary_fs_id) + status = self.get_mirror_daemon_status() # we added one peer = status['filesystems'][0]['peers'][0] self.assertEquals(status['filesystems'][0]['directory_count'], 1) @@ -685,7 +685,7 @@ class TestMirroring(CephFSTestCase): self.mount_a.run_shell(["mkdir", "d0"]) time.sleep(120) - status = self.get_mirror_daemon_status(self.primary_fs_name, self.primary_fs_id) + status = self.get_mirror_daemon_status() peer = status['filesystems'][0]['peers'][0] self.assertEquals(status['filesystems'][0]['directory_count'], 1) # failure and recovery count should be reflected diff --git a/ceph/qa/tasks/cephfs/test_nfs.py b/ceph/qa/tasks/cephfs/test_nfs.py index c3feb1604..97269a32c 100644 --- a/ceph/qa/tasks/cephfs/test_nfs.py +++ b/ceph/qa/tasks/cephfs/test_nfs.py @@ -6,10 +6,12 @@ import logging from io import BytesIO from tasks.mgr.mgr_test_case import MgrTestCase +from teuthology import contextutil from teuthology.exceptions import CommandFailedError log = logging.getLogger(__name__) +NFS_POOL_NAME = '.nfs' # should match mgr_module.py # TODO Add test for cluster update when ganesha can be deployed on multiple ports. 
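+# note: exports and user configs live as rados objects in NFS_POOL_NAME, namespaced by cluster id (see the rados -p ... -N calls below)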
class TestNFS(MgrTestCase): @@ -23,7 +25,6 @@ class TestNFS(MgrTestCase): return self._cmd("orch", *args) def _sys_cmd(self, cmd): - cmd[0:0] = ['sudo'] ret = self.ctx.cluster.run(args=cmd, check_status=False, stdout=BytesIO(), stderr=BytesIO()) stdout = ret[0].stdout if stdout: @@ -44,7 +45,7 @@ class TestNFS(MgrTestCase): "cluster_id": self.cluster_id, "pseudo": self.pseudo_path, "access_type": "RW", - "squash": "no_root_squash", + "squash": "none", "security_label": True, "protocols": [ 4 @@ -54,21 +55,20 @@ class TestNFS(MgrTestCase): ], "fsal": { "name": "CEPH", - "user_id": "test1", + "user_id": "nfs.test.1", "fs_name": self.fs_name, - "sec_label_xattr": '' }, "clients": [] } def _check_nfs_server_status(self): - res = self._sys_cmd(['systemctl', 'status', 'nfs-server']) + res = self._sys_cmd(['sudo', 'systemctl', 'status', 'nfs-server']) if isinstance(res, bytes) and b'Active: active' in res: self._disable_nfs() def _disable_nfs(self): log.info("Disabling NFS") - self._sys_cmd(['systemctl', 'disable', 'nfs-server', '--now']) + self._sys_cmd(['sudo', 'systemctl', 'disable', 'nfs-server', '--now']) def _fetch_nfs_status(self): return self._orch_cmd('ps', f'--service_name={self.expected_name}') @@ -95,10 +95,11 @@ class TestNFS(MgrTestCase): :param check_in: Check specified export id ''' output = self._cmd('auth', 'ls') + client_id = f'client.nfs.{self.cluster_id}' if check_in: - self.assertIn(f'client.{self.cluster_id}{export_id}', output) + self.assertIn(f'{client_id}.{export_id}', output) else: - self.assertNotIn(f'client-{self.cluster_id}', output) + self.assertNotIn(f'{client_id}.{export_id}', output) def _test_idempotency(self, cmd_func, cmd_args): ''' @@ -153,16 +154,26 @@ class TestNFS(MgrTestCase): ''' if create_fs: self._cmd('fs', 'volume', 'create', self.fs_name) - export_cmd = ['nfs', 'export', 'create', 'cephfs', self.fs_name, self.cluster_id] + with contextutil.safe_while(sleep=5, tries=30) as proceed: + while proceed(): + output = self._cmd( + 'orch', 'ls', '-f', 'json', + '--service-name', f'mds.{self.fs_name}' + ) + j = json.loads(output) + if j[0]['status']['running']: + break + export_cmd = ['nfs', 'export', 'create', 'cephfs', + '--fsname', self.fs_name, '--cluster-id', self.cluster_id] if isinstance(extra_cmd, list): export_cmd.extend(extra_cmd) else: - export_cmd.append(self.pseudo_path) + export_cmd.extend(['--pseudo-path', self.pseudo_path]) # Runs the nfs export create command self._cmd(*export_cmd) # Check if user id for export is created self._check_auth_ls(export_id, check_in=True) - res = self._sys_cmd(['rados', '-p', 'nfs-ganesha', '-N', self.cluster_id, 'get', + res = self._sys_cmd(['rados', '-p', NFS_POOL_NAME, '-N', self.cluster_id, 'get', f'export-{export_id}', '-']) # Check if export object is created if res == b'': @@ -201,26 +212,26 @@ class TestNFS(MgrTestCase): self.sample_export['export_id'] = 2 self.sample_export['pseudo'] = self.pseudo_path + '1' self.sample_export['access_type'] = 'RO' - self.sample_export['fsal']['user_id'] = self.cluster_id + '2' + self.sample_export['fsal']['user_id'] = f'{self.expected_name}.2' self.assertDictEqual(self.sample_export, nfs_output[1]) # Export-3 for subvolume with r only self.sample_export['export_id'] = 3 self.sample_export['path'] = sub_vol_path self.sample_export['pseudo'] = self.pseudo_path + '2' - self.sample_export['fsal']['user_id'] = self.cluster_id + '3' + self.sample_export['fsal']['user_id'] = f'{self.expected_name}.3' self.assertDictEqual(self.sample_export, nfs_output[2]) # Export-4 for 
subvolume self.sample_export['export_id'] = 4 self.sample_export['pseudo'] = self.pseudo_path + '3' self.sample_export['access_type'] = 'RW' - self.sample_export['fsal']['user_id'] = self.cluster_id + '4' + self.sample_export['fsal']['user_id'] = f'{self.expected_name}.4' self.assertDictEqual(self.sample_export, nfs_output[3]) def _get_export(self): ''' Returns export block in json format ''' - return json.loads(self._nfs_cmd('export', 'get', self.cluster_id, self.pseudo_path)) + return json.loads(self._nfs_cmd('export', 'info', self.cluster_id, self.pseudo_path)) def _test_get_export(self): ''' @@ -234,7 +245,7 @@ class TestNFS(MgrTestCase): Test if export or config object are deleted successfully. :param conf_obj: It denotes config object needs to be checked ''' - rados_obj_ls = self._sys_cmd(['rados', '-p', 'nfs-ganesha', '-N', self.cluster_id, 'ls']) + rados_obj_ls = self._sys_cmd(['rados', '-p', NFS_POOL_NAME, '-N', self.cluster_id, 'ls']) if b'export-' in rados_obj_ls or (conf_obj and b'conf-nfs' in rados_obj_ls): self.fail("Delete export failed") @@ -255,14 +266,22 @@ class TestNFS(MgrTestCase): :param ip: IP of deployed nfs cluster :param check: It denotes if i/o testing needs to be done ''' - try: - self.ctx.cluster.run(args=['sudo', 'mount', '-t', 'nfs', '-o', f'port={port}', - f'{ip}:{pseudo_path}', '/mnt']) - except CommandFailedError as e: - # Check if mount failed only when non existing pseudo path is passed - if not check and e.exitstatus == 32: - return - raise + tries = 3 + while True: + try: + self.ctx.cluster.run( + args=['sudo', 'mount', '-t', 'nfs', '-o', f'port={port}', + f'{ip}:{pseudo_path}', '/mnt']) + break + except CommandFailedError as e: + if tries: + tries -= 1 + time.sleep(2) + continue + # Check if mount failed only when non existing pseudo path is passed + if not check and e.exitstatus == 32: + return + raise self.ctx.cluster.run(args=['sudo', 'chmod', '1777', '/mnt']) @@ -332,9 +351,10 @@ class TestNFS(MgrTestCase): ''' Test idempotency of export create and delete commands. 
''' - self._test_idempotency(self._create_default_export, ['nfs', 'export', 'create', 'cephfs', - self.fs_name, self.cluster_id, - self.pseudo_path]) + self._test_idempotency(self._create_default_export, [ + 'nfs', 'export', 'create', 'cephfs', + '--fsname', self.fs_name, '--cluster-id', self.cluster_id, + '--pseudo-path', self.pseudo_path]) self._test_idempotency(self._delete_export, ['nfs', 'export', 'rm', self.cluster_id, self.pseudo_path]) self._test_delete_cluster() @@ -346,13 +366,18 @@ class TestNFS(MgrTestCase): # Export-1 with default values (access type = rw and path = '\') self._create_default_export() # Export-2 with r only - self._create_export(export_id='2', extra_cmd=[self.pseudo_path+'1', '--readonly']) + self._create_export(export_id='2', + extra_cmd=['--pseudo-path', self.pseudo_path+'1', '--readonly']) # Export-3 for subvolume with r only self._cmd('fs', 'subvolume', 'create', self.fs_name, 'sub_vol') fs_path = self._cmd('fs', 'subvolume', 'getpath', self.fs_name, 'sub_vol').strip() - self._create_export(export_id='3', extra_cmd=[self.pseudo_path+'2', '--readonly', fs_path]) + self._create_export(export_id='3', + extra_cmd=['--pseudo-path', self.pseudo_path+'2', '--readonly', + '--path', fs_path]) # Export-4 for subvolume - self._create_export(export_id='4', extra_cmd=[self.pseudo_path+'3', fs_path]) + self._create_export(export_id='4', + extra_cmd=['--pseudo-path', self.pseudo_path+'3', + '--path', fs_path]) # Check if exports gets listed self._test_list_detailed(fs_path) self._test_delete_cluster() @@ -385,7 +410,9 @@ class TestNFS(MgrTestCase): try: fs_name = 'nfs-test' self._test_create_cluster() - self._nfs_cmd('export', 'create', 'cephfs', fs_name, self.cluster_id, self.pseudo_path) + self._nfs_cmd('export', 'create', 'cephfs', + '--fsname', fs_name, '--cluster-id', self.cluster_id, + '--pseudo-path', self.pseudo_path) self.fail(f"Export created with non-existing filesystem {fs_name}") except CommandFailedError as e: # Command should fail for test to pass @@ -400,7 +427,8 @@ class TestNFS(MgrTestCase): ''' try: cluster_id = 'invalidtest' - self._nfs_cmd('export', 'create', 'cephfs', self.fs_name, cluster_id, self.pseudo_path) + self._nfs_cmd('export', 'create', 'cephfs', '--fsname', self.fs_name, + '--cluster-id', cluster_id, '--pseudo-path', self.pseudo_path) self.fail(f"Export created with non-existing cluster id {cluster_id}") except CommandFailedError as e: # Command should fail for test to pass @@ -413,8 +441,9 @@ class TestNFS(MgrTestCase): ''' def check_pseudo_path(pseudo_path): try: - self._nfs_cmd('export', 'create', 'cephfs', self.fs_name, self.cluster_id, - pseudo_path) + self._nfs_cmd('export', 'create', 'cephfs', '--fsname', self.fs_name, + '--cluster-id', self.cluster_id, + '--pseudo-path', pseudo_path) self.fail(f"Export created for {pseudo_path}") except CommandFailedError as e: # Command should fail for test to pass @@ -434,8 +463,10 @@ class TestNFS(MgrTestCase): Test write to readonly export. 
''' self._test_create_cluster() - self._create_export(export_id='1', create_fs=True, extra_cmd=[self.pseudo_path, '--readonly']) + self._create_export(export_id='1', create_fs=True, + extra_cmd=['--pseudo-path', self.pseudo_path, '--readonly']) port, ip = self._get_port_ip_info() + self._check_nfs_cluster_status('running', 'NFS Ganesha cluster restart failed') self._write_to_read_only_export(self.pseudo_path, port, ip) self._test_delete_cluster() @@ -471,7 +502,7 @@ class TestNFS(MgrTestCase): ''' self._test_create_cluster() - pool = 'nfs-ganesha' + pool = NFS_POOL_NAME user_id = 'test' fs_name = 'user_test_fs' pseudo_path = '/ceph' @@ -502,7 +533,7 @@ class TestNFS(MgrTestCase): }} }}""" port, ip = self._get_port_ip_info() - self.ctx.cluster.run(args=['sudo', 'ceph', 'nfs', 'cluster', 'config', + self.ctx.cluster.run(args=['ceph', 'nfs', 'cluster', 'config', 'set', self.cluster_id, '-i', '-'], stdin=config) time.sleep(30) res = self._sys_cmd(['rados', '-p', pool, '-N', self.cluster_id, 'get', @@ -510,7 +541,7 @@ class TestNFS(MgrTestCase): self.assertEqual(config, res.decode('utf-8')) self._test_mnt(pseudo_path, port, ip) self._nfs_cmd('cluster', 'config', 'reset', self.cluster_id) - rados_obj_ls = self._sys_cmd(['rados', '-p', 'nfs-ganesha', '-N', self.cluster_id, 'ls']) + rados_obj_ls = self._sys_cmd(['rados', '-p', NFS_POOL_NAME, '-N', self.cluster_id, 'ls']) if b'conf-nfs' not in rados_obj_ls and b'userconf-nfs' in rados_obj_ls: self.fail("User config not deleted") time.sleep(30) @@ -524,7 +555,7 @@ class TestNFS(MgrTestCase): ''' try: cluster_id = 'invalidtest' - self.ctx.cluster.run(args=['sudo', 'ceph', 'nfs', 'cluster', + self.ctx.cluster.run(args=['ceph', 'nfs', 'cluster', 'config', 'set', self.cluster_id, '-i', '-'], stdin='testing') self.fail(f"User config set for non-existing cluster {cluster_id}") except CommandFailedError as e: @@ -545,6 +576,29 @@ class TestNFS(MgrTestCase): if e.exitstatus != errno.ENOENT: raise + def test_create_export_via_apply(self): + ''' + Test creation of export via apply + ''' + self._test_create_cluster() + self.ctx.cluster.run(args=['ceph', 'nfs', 'export', 'apply', + self.cluster_id, '-i', '-'], + stdin=json.dumps({ + "path": "/", + "pseudo": "/cephfs", + "squash": "none", + "access_type": "rw", + "protocols": [4], + "fsal": { + "name": "CEPH", + "fs_name": self.fs_name + } + })) + port, ip = self._get_port_ip_info() + self._test_mnt(self.pseudo_path, port, ip) + self._check_nfs_cluster_status('running', 'NFS Ganesha cluster restart failed') + self._test_delete_cluster() + def test_update_export(self): ''' Test update of exports @@ -556,8 +610,9 @@ class TestNFS(MgrTestCase): new_pseudo_path = '/testing' export_block['pseudo'] = new_pseudo_path export_block['access_type'] = 'RO' - self.ctx.cluster.run(args=['sudo', 'ceph', 'nfs', 'export', 'update', '-i', '-'], - stdin=json.dumps(export_block)) + self.ctx.cluster.run(args=['ceph', 'nfs', 'export', 'apply', + self.cluster_id, '-i', '-'], + stdin=json.dumps(export_block)) self._check_nfs_cluster_status('running', 'NFS Ganesha cluster restart failed') self._write_to_read_only_export(new_pseudo_path, port, ip) self._test_delete_cluster() @@ -577,7 +632,8 @@ class TestNFS(MgrTestCase): else: export_block_new[key] = value try: - self.ctx.cluster.run(args=['sudo', 'ceph', 'nfs', 'export', 'update', '-i', '-'], + self.ctx.cluster.run(args=['ceph', 'nfs', 'export', 'apply', + self.cluster_id, '-i', '-'], stdin=json.dumps(export_block_new)) except CommandFailedError: pass @@ -613,10 +669,11 @@ class 
TestNFS(MgrTestCase): exec_cmd_invalid('cluster', 'config', 'set') exec_cmd_invalid('cluster', 'config', 'reset') exec_cmd_invalid('export', 'create', 'cephfs') - exec_cmd_invalid('export', 'create', 'cephfs', 'a_fs') - exec_cmd_invalid('export', 'create', 'cephfs', 'a_fs', 'clusterid') + exec_cmd_invalid('export', 'create', 'cephfs', 'clusterid') + exec_cmd_invalid('export', 'create', 'cephfs', 'clusterid', 'a_fs') exec_cmd_invalid('export', 'ls') exec_cmd_invalid('export', 'delete') exec_cmd_invalid('export', 'delete', 'clusterid') - exec_cmd_invalid('export', 'get') - exec_cmd_invalid('export', 'get', 'clusterid') + exec_cmd_invalid('export', 'info') + exec_cmd_invalid('export', 'info', 'clusterid') + exec_cmd_invalid('export', 'apply') diff --git a/ceph/qa/tasks/cephfs/test_snapshots.py b/ceph/qa/tasks/cephfs/test_snapshots.py index fa4d9431f..306c80ce3 100644 --- a/ceph/qa/tasks/cephfs/test_snapshots.py +++ b/ceph/qa/tasks/cephfs/test_snapshots.py @@ -119,7 +119,7 @@ class TestSnapshots(CephFSTestCase): self.fs.rank_freeze(True, rank=1) # prevent failover... self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status) proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s2{0}".format(i)], wait=False) - self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2); + self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*3); self.delete_mds_coredump(rank0['name']); self.fs.rank_signal(signal.SIGKILL, rank=1) diff --git a/ceph/qa/tasks/cephfs/test_volumes.py b/ceph/qa/tasks/cephfs/test_volumes.py index f5dc9fa55..383c7d54e 100644 --- a/ceph/qa/tasks/cephfs/test_volumes.py +++ b/ceph/qa/tasks/cephfs/test_volumes.py @@ -610,17 +610,20 @@ class TestSubvolumeGroups(TestVolumesHelper): expected_mode2 = "777" # create group - self._fs_cmd("subvolumegroup", "create", self.volname, group1) self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}") + self._fs_cmd("subvolumegroup", "create", self.volname, group1) group1_path = self._get_subvolume_group_path(self.volname, group1) group2_path = self._get_subvolume_group_path(self.volname, group2) + volumes_path = os.path.dirname(group1_path) # check group's mode actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip() actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip() + actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip() self.assertEqual(actual_mode1, expected_mode1) self.assertEqual(actual_mode2, expected_mode2) + self.assertEqual(actual_mode3, expected_mode1) self._fs_cmd("subvolumegroup", "rm", self.volname, group1) self._fs_cmd("subvolumegroup", "rm", self.volname, group2) @@ -921,6 +924,36 @@ class TestSubvolumes(TestVolumesHelper): # verify trash dir is clean self._wait_for_trash_empty() + def test_subvolume_create_with_desired_mode(self): + subvol1 = self._generate_random_subvolume_name() + + # default mode + default_mode = "755" + # desired mode + desired_mode = "777" + + self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777") + + subvol1_path = self._get_subvolume_path(self.volname, subvol1) + + # check subvolumegroup's mode + subvol_par_path = os.path.dirname(subvol1_path) + group_path = os.path.dirname(subvol_par_path) + actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip() + self.assertEqual(actual_mode1, 
default_mode) + # check /volumes mode + volumes_path = os.path.dirname(group_path) + actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip() + self.assertEqual(actual_mode2, default_mode) + # check subvolume's mode + actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip() + self.assertEqual(actual_mode3, desired_mode) + + self._fs_cmd("subvolume", "rm", self.volname, subvol1) + + # verify trash dir is clean + self._wait_for_trash_empty() + def test_subvolume_create_with_desired_mode_in_group(self): subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3) diff --git a/ceph/qa/tasks/cephfs_mirror.py b/ceph/qa/tasks/cephfs_mirror.py index 42c01aaea..9602a5a7f 100644 --- a/ceph/qa/tasks/cephfs_mirror.py +++ b/ceph/qa/tasks/cephfs_mirror.py @@ -54,6 +54,8 @@ class CephFSMirror(Task): '--id', self.client_id, ]) + if 'run_in_foreground' in self.config: + args.extend(['--foreground']) self.ctx.daemons.add_daemon( self.remote, 'cephfs-mirror', self.client, diff --git a/ceph/qa/tasks/cephfs_mirror_thrash.py b/ceph/qa/tasks/cephfs_mirror_thrash.py new file mode 100644 index 000000000..91f60ac50 --- /dev/null +++ b/ceph/qa/tasks/cephfs_mirror_thrash.py @@ -0,0 +1,219 @@ +""" +Task for thrashing cephfs-mirror daemons +""" + +import contextlib +import logging +import random +import signal +import socket +import time + +from gevent import sleep +from gevent.greenlet import Greenlet +from gevent.event import Event + +from teuthology.exceptions import CommandFailedError +from teuthology.orchestra import run +from tasks.thrasher import Thrasher + +log = logging.getLogger(__name__) + + +class CephFSMirrorThrasher(Thrasher, Greenlet): + """ + CephFSMirrorThrasher:: + + The CephFSMirrorThrasher thrashes cephfs-mirror daemons during execution of other + tasks (workunits, etc). + + The config is optional. Many of the config parameters are a maximum value + to use when selecting a random value from a range. The config is a dict + containing some or all of: + + cluster: [default: ceph] cluster to thrash + + max_thrash: [default: 1] the maximum number of active cephfs-mirror daemons per + cluster that will be thrashed at any given time. + + min_thrash_delay: [default: 5] minimum number of seconds to delay before + thrashing again. + + max_thrash_delay: [default: 10] maximum number of seconds to delay before + thrashing again. + + max_revive_delay: [default: 15] maximum number of seconds to delay before + bringing back a thrashed cephfs-mirror daemon.
+ + randomize: [default: true] enables randomization and use the max/min values + + seed: [no default] seed the random number generator + + Examples:: + + The following example disables randomization, and uses the max delay + values: + + tasks: + - ceph: + - cephfs_mirror_thrash: + randomize: False + max_thrash_delay: 10 + """ + + def __init__(self, ctx, config, cluster, daemons): + super(CephFSMirrorThrasher, self).__init__() + + self.ctx = ctx + self.config = config + self.cluster = cluster + self.daemons = daemons + + self.logger = log + self.name = 'thrasher.cephfs_mirror.[{cluster}]'.format(cluster = cluster) + self.stopping = Event() + + self.randomize = bool(self.config.get('randomize', True)) + self.max_thrash = int(self.config.get('max_thrash', 1)) + self.min_thrash_delay = float(self.config.get('min_thrash_delay', 5.0)) + self.max_thrash_delay = float(self.config.get('max_thrash_delay', 10)) + self.max_revive_delay = float(self.config.get('max_revive_delay', 15.0)) + + def _run(self): + try: + self.do_thrash() + except Exception as e: + # See _run exception comment for MDSThrasher + self.set_thrasher_exception(e) + self.logger.exception("exception:") + # Allow successful completion so gevent doesn't see an exception. + # The DaemonWatchdog will observe the error and tear down the test. + + def log(self, x): + """Write data to logger assigned to this CephFSMirrorThrasher""" + self.logger.info(x) + + def stop(self): + self.stopping.set() + + def do_thrash(self): + """ + Perform the random thrashing action + """ + + self.log('starting thrash for cluster {cluster}'.format(cluster=self.cluster)) + stats = { + "kill": 0, + } + + while not self.stopping.is_set(): + delay = self.max_thrash_delay + if self.randomize: + delay = random.randrange(self.min_thrash_delay, self.max_thrash_delay) + + if delay > 0.0: + self.log('waiting for {delay} secs before thrashing'.format(delay=delay)) + self.stopping.wait(delay) + if self.stopping.is_set(): + continue + + killed_daemons = [] + + weight = 1.0 / len(self.daemons) + count = 0 + for daemon in self.daemons: + skip = random.uniform(0.0, 1.0) + if weight <= skip: + self.log('skipping daemon {label} with skip ({skip}) > weight ({weight})'.format( + label=daemon.id_, skip=skip, weight=weight)) + continue + + self.log('kill {label}'.format(label=daemon.id_)) + try: + daemon.signal(signal.SIGTERM) + except Exception as e: + self.log(f'exception when stopping mirror daemon: {e}') + else: + killed_daemons.append(daemon) + stats['kill'] += 1 + + # if we've reached max_thrash, we're done + count += 1 + if count >= self.max_thrash: + break + + if killed_daemons: + # wait for a while before restarting + delay = self.max_revive_delay + if self.randomize: + delay = random.randrange(0.0, self.max_revive_delay) + + self.log('waiting for {delay} secs before reviving daemons'.format(delay=delay)) + sleep(delay) + + for daemon in killed_daemons: + self.log('waiting for {label}'.format(label=daemon.id_)) + try: + run.wait([daemon.proc], timeout=600) + except CommandFailedError: + pass + except: + self.log('Failed to stop {label}'.format(label=daemon.id_)) + + try: + # try to capture a core dump + daemon.signal(signal.SIGABRT) + except socket.error: + pass + raise + finally: + daemon.reset() + + for daemon in killed_daemons: + self.log('reviving {label}'.format(label=daemon.id_)) + daemon.start() + + for stat in stats: + self.log("stat['{key}'] = {value}".format(key = stat, value = stats[stat])) + +@contextlib.contextmanager +def task(ctx, config): + """ + Stress 
test the cephfs-mirror by thrashing while another task/workunit + is running. + + Please refer to CephFSMirrorThrasher class for further information on the + available options. + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'cephfs_mirror_thrash task only accepts a dict for configuration' + + cluster = config.get('cluster', 'ceph') + daemons = list(ctx.daemons.iter_daemons_of_role('cephfs-mirror', cluster)) + assert len(daemons) > 0, \ + 'cephfs_mirror_thrash task requires at least 1 cephfs-mirror daemon' + + # choose random seed + if 'seed' in config: + seed = int(config['seed']) + else: + seed = int(time.time()) + log.info('cephfs_mirror_thrash using random seed: {seed}'.format(seed=seed)) + random.seed(seed) + + thrasher = CephFSMirrorThrasher(ctx, config, cluster, daemons) + thrasher.start() + ctx.ceph[cluster].thrashers.append(thrasher) + + try: + log.debug('Yielding') + yield + finally: + log.info('joining cephfs_mirror_thrash') + thrasher.stop() + if thrasher.exception is not None: + raise RuntimeError('error during thrashing') + thrasher.join() + log.info('done joining') diff --git a/ceph/qa/tasks/cram.py b/ceph/qa/tasks/cram.py index fd17f4832..a445a146f 100644 --- a/ceph/qa/tasks/cram.py +++ b/ceph/qa/tasks/cram.py @@ -71,7 +71,7 @@ def task(ctx, config): args=[ 'mkdir', '--', client_dir, run.Raw('&&'), - 'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir), + 'python3', '-m', 'venv', '{tdir}/virtualenv'.format(tdir=testdir), run.Raw('&&'), '{tdir}/virtualenv/bin/pip'.format(tdir=testdir), 'install', 'cram==0.6', diff --git a/ceph/qa/tasks/ec_inconsistent_hinfo.py b/ceph/qa/tasks/ec_inconsistent_hinfo.py new file mode 100644 index 000000000..fa10f2c45 --- /dev/null +++ b/ceph/qa/tasks/ec_inconsistent_hinfo.py @@ -0,0 +1,225 @@ +""" +Inconsistent_hinfo +""" +import logging +import time +from dateutil.parser import parse +from tasks import ceph_manager +from tasks.util.rados import rados +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def wait_for_deep_scrub_complete(manager, pgid, check_time_now, inconsistent): + log.debug("waiting for pg %s deep-scrub complete (check_time_now=%s)" % + (pgid, check_time_now)) + for i in range(300): + time.sleep(5) + manager.flush_pg_stats([0, 1, 2, 3]) + pgs = manager.get_pg_stats() + pg = next((pg for pg in pgs if pg['pgid'] == pgid), None) + log.debug('pg=%s' % pg); + assert pg + + last_deep_scrub_time = parse(pg['last_deep_scrub_stamp']).strftime('%s') + if last_deep_scrub_time < check_time_now: + log.debug('not scrubbed') + continue + + status = pg['state'].split('+') + if inconsistent: + assert 'inconsistent' in status + else: + assert 'inconsistent' not in status + return + + assert False, 'not scrubbed' + + +def wait_for_backfilling_complete(manager, pgid, from_osd, to_osd): + log.debug("waiting for pg %s backfill from osd.%s to osd.%s complete" % + (pgid, from_osd, to_osd)) + for i in range(300): + time.sleep(5) + manager.flush_pg_stats([0, 1, 2, 3]) + pgs = manager.get_pg_stats() + pg = next((pg for pg in pgs if pg['pgid'] == pgid), None) + log.info('pg=%s' % pg); + assert pg + status = pg['state'].split('+') + if 'active' not in status: + log.debug('not active') + continue + if 'backfilling' in status: + assert from_osd in pg['acting'] and to_osd in pg['up'] + log.debug('backfilling') + continue + if to_osd not in pg['up']: + log.debug('backfill not started yet') + continue + log.debug('backfilled!') + break + +def task(ctx, config): + """ + Test handling of objects 
with inconsistent hash info during backfill and deep-scrub. + + A pretty rigid cluster is brought up and tested by this task + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'ec_inconsistent_hinfo task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + profile = config.get('erasure_code_profile', { + 'k': '2', + 'm': '1', + 'crush-failure-domain': 'osd' + }) + profile_name = profile.get('name', 'backfill_unfound') + manager.create_erasure_code_profile(profile_name, profile) + pool = manager.create_pool_with_unique_name( + pg_num=1, + erasure_code_profile_name=profile_name, + min_size=2) + manager.raw_cluster_cmd('osd', 'pool', 'set', pool, + 'pg_autoscale_mode', 'off') + + manager.flush_pg_stats([0, 1, 2, 3]) + manager.wait_for_clean() + + pool_id = manager.get_pool_num(pool) + pgid = '%d.0' % pool_id + pgs = manager.get_pg_stats() + acting = next((pg['acting'] for pg in pgs if pg['pgid'] == pgid), None) + log.info("acting=%s" % acting) + assert acting + primary = acting[0] + + # something that is always there, readable and never empty + dummyfile = '/etc/group' + + # kludge to make sure they get a map + rados(ctx, mon, ['-p', pool, 'put', 'dummy', dummyfile]) + + manager.flush_pg_stats([0, 1]) + manager.wait_for_recovery() + + log.debug("create test object") + obj = 'test' + rados(ctx, mon, ['-p', pool, 'put', obj, dummyfile]) + + victim = acting[1] + + log.info("remove test object hash info from osd.%s shard and test deep-scrub and repair" + % victim) + + manager.objectstore_tool(pool, options='', args='rm-attr hinfo_key', + object_name=obj, osd=victim) + check_time_now = time.strftime('%s') + manager.raw_cluster_cmd('pg', 'deep-scrub', pgid) + wait_for_deep_scrub_complete(manager, pgid, check_time_now, True) + + check_time_now = time.strftime('%s') + manager.raw_cluster_cmd('pg', 'repair', pgid) + wait_for_deep_scrub_complete(manager, pgid, check_time_now, False) + + log.info("remove test object hash info from primary osd.%s shard and test backfill" + % primary) + + log.debug("write some data") + rados(ctx, mon, ['-p', pool, 'bench', '30', 'write', '-b', '4096', + '--no-cleanup']) + + manager.objectstore_tool(pool, options='', args='rm-attr hinfo_key', + object_name=obj, osd=primary) + + # mark the osd out to trigger a rebalance/backfill + source = acting[1] + target = [x for x in [0, 1, 2, 3] if x not in acting][0] + manager.mark_out_osd(source) + + # wait for everything to peer, backfill and recover + wait_for_backfilling_complete(manager, pgid, source, target) + manager.wait_for_clean() + + manager.flush_pg_stats([0, 1, 2, 3]) + pgs = manager.get_pg_stats() + pg = next((pg for pg in pgs if pg['pgid'] == pgid), None) + log.debug('pg=%s' % pg) + assert pg + assert 'clean' in pg['state'].split('+') + assert 'inconsistent' not in pg['state'].split('+') + unfound = manager.get_num_unfound_objects() + log.debug("there are %d unfound objects" % unfound) + assert unfound == 0 + + source, target = target, source + log.info("remove test object hash info from non-primary osd.%s shard and test backfill" + % source) + + manager.objectstore_tool(pool, options='', args='rm-attr hinfo_key', + object_name=obj, osd=source) + + # mark the osd in to trigger a rebalance/backfill + manager.mark_in_osd(target) + + # wait for everything to peer, backfill and recover + 
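The helpers above poll `get_pg_stats()` until the PG reports the expected state; the deep-scrub variant additionally checks that `last_deep_scrub_stamp` is newer than a checkpoint taken just before the scrub was issued. A minimal sketch of that checkpoint-and-compare pattern (illustration only, not part of the patch; comparing the epochs as integers rather than strings sidesteps any lexicographic edge cases):

    import time
    from dateutil.parser import parse

    def scrubbed_since(pg_stats, pgid, checkpoint_epoch):
        """Return True once the PG's last deep scrub is newer than the checkpoint."""
        pg = next((p for p in pg_stats if p['pgid'] == pgid), None)
        assert pg is not None
        last = int(parse(pg['last_deep_scrub_stamp']).strftime('%s'))
        return last >= checkpoint_epoch

    # usage sketch: checkpoint = int(time.time()); issue 'pg deep-scrub';
    # then poll scrubbed_since(manager.get_pg_stats(), pgid, checkpoint)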
wait_for_backfilling_complete(manager, pgid, source, target) + manager.wait_for_clean() + + manager.flush_pg_stats([0, 1, 2, 3]) + pgs = manager.get_pg_stats() + pg = next((pg for pg in pgs if pg['pgid'] == pgid), None) + log.debug('pg=%s' % pg) + assert pg + assert 'clean' in pg['state'].split('+') + assert 'inconsistent' not in pg['state'].split('+') + unfound = manager.get_num_unfound_objects() + log.debug("there are %d unfound objects" % unfound) + assert unfound == 0 + + log.info("remove hash info from two shards and test backfill") + + source = acting[2] + target = [x for x in [0, 1, 2, 3] if x not in acting][0] + manager.objectstore_tool(pool, options='', args='rm-attr hinfo_key', + object_name=obj, osd=primary) + manager.objectstore_tool(pool, options='', args='rm-attr hinfo_key', + object_name=obj, osd=source) + + # mark the osd out to trigger a rebalance/backfill + manager.mark_out_osd(source) + + # wait for everything to peer, backfill and detect unfound object + wait_for_backfilling_complete(manager, pgid, source, target) + + # verify that there is unfound object + manager.flush_pg_stats([0, 1, 2, 3]) + pgs = manager.get_pg_stats() + pg = next((pg for pg in pgs if pg['pgid'] == pgid), None) + log.debug('pg=%s' % pg) + assert pg + assert 'backfill_unfound' in pg['state'].split('+') + unfound = manager.get_num_unfound_objects() + log.debug("there are %d unfound objects" % unfound) + assert unfound == 1 + m = manager.list_pg_unfound(pgid) + log.debug('list_pg_unfound=%s' % m) + assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound'] + + # mark stuff lost + pgs = manager.get_pg_stats() + manager.raw_cluster_cmd('pg', pgid, 'mark_unfound_lost', 'delete') + + # wait for everything to peer and be happy... + manager.flush_pg_stats([0, 1, 2, 3]) + manager.wait_for_recovery() diff --git a/ceph/qa/tasks/fs.py b/ceph/qa/tasks/fs.py index 71d7b22e8..f7a9330e2 100644 --- a/ceph/qa/tasks/fs.py +++ b/ceph/qa/tasks/fs.py @@ -9,6 +9,74 @@ from tasks.cephfs.filesystem import Filesystem, MDSCluster log = logging.getLogger(__name__) +# Everything up to CEPH_MDSMAP_ALLOW_STANDBY_REPLAY +CEPH_MDSMAP_ALLOW_STANDBY_REPLAY = (1<<5) +CEPH_MDSMAP_LAST = CEPH_MDSMAP_ALLOW_STANDBY_REPLAY +UPGRADE_FLAGS_MASK = ((CEPH_MDSMAP_LAST<<1) - 1) +def pre_upgrade_save(ctx, config): + """ + That the upgrade procedure doesn't clobber state: save state. + """ + + mdsc = MDSCluster(ctx) + status = mdsc.status() + + state = {} + ctx['mds-upgrade-state'] = state + + for fs in list(status.get_filesystems()): + fscid = fs['id'] + mdsmap = fs['mdsmap'] + fs_state = {} + fs_state['epoch'] = mdsmap['epoch'] + fs_state['max_mds'] = mdsmap['max_mds'] + fs_state['flags'] = mdsmap['flags'] & UPGRADE_FLAGS_MASK + state[fscid] = fs_state + log.debug(f"fs fscid={fscid},name={mdsmap['fs_name']} state = {fs_state}") + + +def post_upgrade_checks(ctx, config): + """ + That the upgrade procedure doesn't clobber state. 
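The flag mask introduced above is worth unpacking: with CEPH_MDSMAP_ALLOW_STANDBY_REPLAY at bit 5, UPGRADE_FLAGS_MASK keeps every flag bit up to and including it, so only upgrade-relevant flags are saved and compared. A quick check, derived directly from the definitions in this diff:

    CEPH_MDSMAP_ALLOW_STANDBY_REPLAY = 1 << 5   # 32
    CEPH_MDSMAP_LAST = CEPH_MDSMAP_ALLOW_STANDBY_REPLAY
    UPGRADE_FLAGS_MASK = (CEPH_MDSMAP_LAST << 1) - 1

    assert UPGRADE_FLAGS_MASK == 0b111111       # 63: bits 0..5 inclusive
    # masking strips any higher mdsmap flag bits before saving/comparing
    assert (0b1101100101 & UPGRADE_FLAGS_MASK) == 0b100101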
+ """ + + state = ctx['mds-upgrade-state'] + + mdsc = MDSCluster(ctx) + status = mdsc.status() + + for fs in list(status.get_filesystems()): + fscid = fs['id'] + mdsmap = fs['mdsmap'] + fs_state = state[fscid] + log.debug(f"checking fs fscid={fscid},name={mdsmap['fs_name']} state = {fs_state}") + + # check state was restored to previous values + assert fs_state['max_mds'] == mdsmap['max_mds'] + assert fs_state['flags'] == (mdsmap['flags'] & UPGRADE_FLAGS_MASK) + + # now confirm that the upgrade procedure was followed + epoch = mdsmap['epoch'] + pre_upgrade_epoch = fs_state['epoch'] + assert pre_upgrade_epoch < epoch + should_decrease_max_mds = fs_state['max_mds'] > 1 + did_decrease_max_mds = False + should_disable_allow_standby_replay = fs_state['flags'] & CEPH_MDSMAP_ALLOW_STANDBY_REPLAY + did_disable_allow_standby_replay = False + for i in range(pre_upgrade_epoch+1, mdsmap['epoch']): + old_status = mdsc.status(epoch=i) + old_fs = old_status.get_fsmap(fscid) + old_mdsmap = old_fs['mdsmap'] + if should_decrease_max_mds and old_mdsmap['max_mds'] == 1: + log.debug(f"max_mds reduced in epoch {i}") + did_decrease_max_mds = True + if should_disable_allow_standby_replay and not (old_mdsmap['flags'] & CEPH_MDSMAP_ALLOW_STANDBY_REPLAY): + log.debug(f"allow_standby_replay disabled in epoch {i}") + did_disable_allow_standby_replay = True + assert not should_decrease_max_mds or did_decrease_max_mds + assert not should_disable_allow_standby_replay or did_disable_allow_standby_replay + + def ready(ctx, config): """ That the file system is ready for clients. diff --git a/ceph/qa/tasks/kubeadm.py b/ceph/qa/tasks/kubeadm.py index c870bbae8..2b967f556 100644 --- a/ceph/qa/tasks/kubeadm.py +++ b/ceph/qa/tasks/kubeadm.py @@ -54,6 +54,32 @@ def preflight(ctx, config): wait=False, ) ) + + # set docker cgroup driver = systemd + # see https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker + # see https://github.com/kubernetes/kubeadm/issues/2066 + daemon_json = """ +{ + "exec-opts": ["native.cgroupdriver=systemd"], + "log-driver": "json-file", + "log-opts": { + "max-size": "100m" + }, + "storage-driver": "overlay2" +} +""" + for remote in ctx.cluster.remotes.keys(): + remote.write_file('/etc/docker/daemon.json', daemon_json, sudo=True) + run.wait( + ctx.cluster.run( + args=[ + 'sudo', 'systemctl', 'restart', 'docker', + run.Raw('||'), + 'true', + ], + wait=False, + ) + ) yield diff --git a/ceph/qa/tasks/mgr/dashboard/__init__.py b/ceph/qa/tasks/mgr/dashboard/__init__.py index c066be5f4..2b022e024 100644 --- a/ceph/qa/tasks/mgr/dashboard/__init__.py +++ b/ceph/qa/tasks/mgr/dashboard/__init__.py @@ -1 +1 @@ -DEFAULT_VERSION = '1.0' +DEFAULT_API_VERSION = '1.0' diff --git a/ceph/qa/tasks/mgr/dashboard/helper.py b/ceph/qa/tasks/mgr/dashboard/helper.py index 2b6bedc94..2c6efa901 100644 --- a/ceph/qa/tasks/mgr/dashboard/helper.py +++ b/ceph/qa/tasks/mgr/dashboard/helper.py @@ -16,7 +16,7 @@ from tasks.mgr.mgr_test_case import MgrTestCase from teuthology.exceptions import \ CommandFailedError # pylint: disable=import-error -from . import DEFAULT_VERSION +from . 
import DEFAULT_API_VERSION log = logging.getLogger(__name__) @@ -276,7 +276,7 @@ class DashboardTestCase(MgrTestCase): # pylint: disable=inconsistent-return-statements, too-many-arguments, too-many-branches @classmethod - def _request(cls, url, method, data=None, params=None, version=DEFAULT_VERSION, + def _request(cls, url, method, data=None, params=None, version=DEFAULT_API_VERSION, set_cookies=False): url = "{}{}".format(cls._base_uri, url) log.debug("Request %s to %s", method, url) @@ -336,7 +336,7 @@ class DashboardTestCase(MgrTestCase): raise ex @classmethod - def _get(cls, url, params=None, version=DEFAULT_VERSION, set_cookies=False): + def _get(cls, url, params=None, version=DEFAULT_API_VERSION, set_cookies=False): return cls._request(url, 'GET', params=params, version=version, set_cookies=set_cookies) @classmethod @@ -344,7 +344,7 @@ class DashboardTestCase(MgrTestCase): retry = True while retry and retries > 0: retry = False - res = cls._get(url, version=DEFAULT_VERSION) + res = cls._get(url, version=DEFAULT_API_VERSION) if isinstance(res, dict): res = [res] for view in res: @@ -358,15 +358,15 @@ class DashboardTestCase(MgrTestCase): return res @classmethod - def _post(cls, url, data=None, params=None, version=DEFAULT_VERSION, set_cookies=False): + def _post(cls, url, data=None, params=None, version=DEFAULT_API_VERSION, set_cookies=False): cls._request(url, 'POST', data, params, version=version, set_cookies=set_cookies) @classmethod - def _delete(cls, url, data=None, params=None, version=DEFAULT_VERSION, set_cookies=False): + def _delete(cls, url, data=None, params=None, version=DEFAULT_API_VERSION, set_cookies=False): cls._request(url, 'DELETE', data, params, version=version, set_cookies=set_cookies) @classmethod - def _put(cls, url, data=None, params=None, version=DEFAULT_VERSION, set_cookies=False): + def _put(cls, url, data=None, params=None, version=DEFAULT_API_VERSION, set_cookies=False): cls._request(url, 'PUT', data, params, version=version, set_cookies=set_cookies) @classmethod @@ -386,7 +386,8 @@ class DashboardTestCase(MgrTestCase): # pylint: disable=too-many-arguments @classmethod - def _task_request(cls, method, url, data, timeout, version=DEFAULT_VERSION, set_cookies=False): + def _task_request(cls, method, url, data, timeout, version=DEFAULT_API_VERSION, + set_cookies=False): res = cls._request(url, method, data, version=version, set_cookies=set_cookies) cls._assertIn(cls._resp.status_code, [200, 201, 202, 204, 400, 403, 404]) @@ -438,17 +439,17 @@ class DashboardTestCase(MgrTestCase): return res_task['exception'] @classmethod - def _task_post(cls, url, data=None, timeout=60, version=DEFAULT_VERSION, set_cookies=False): + def _task_post(cls, url, data=None, timeout=60, version=DEFAULT_API_VERSION, set_cookies=False): return cls._task_request('POST', url, data, timeout, version=version, set_cookies=set_cookies) @classmethod - def _task_delete(cls, url, timeout=60, version=DEFAULT_VERSION, set_cookies=False): + def _task_delete(cls, url, timeout=60, version=DEFAULT_API_VERSION, set_cookies=False): return cls._task_request('DELETE', url, None, timeout, version=version, set_cookies=set_cookies) @classmethod - def _task_put(cls, url, data=None, timeout=60, version=DEFAULT_VERSION, set_cookies=False): + def _task_put(cls, url, data=None, timeout=60, version=DEFAULT_API_VERSION, set_cookies=False): return cls._task_request('PUT', url, data, timeout, version=version, set_cookies=set_cookies) diff --git a/ceph/qa/tasks/mgr/dashboard/test_api.py 
b/ceph/qa/tasks/mgr/dashboard/test_api.py index 2fe1a78be..22f235698 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_api.py +++ b/ceph/qa/tasks/mgr/dashboard/test_api.py @@ -4,14 +4,14 @@ from __future__ import absolute_import import unittest -from . import DEFAULT_VERSION +from . import DEFAULT_API_VERSION from .helper import DashboardTestCase class VersionReqTest(DashboardTestCase, unittest.TestCase): def test_version(self): for (version, expected_status) in [ - (DEFAULT_VERSION, 200), + (DEFAULT_API_VERSION, 200), (None, 415), ("99.99", 415) ]: diff --git a/ceph/qa/tasks/mgr/dashboard/test_auth.py b/ceph/qa/tasks/mgr/dashboard/test_auth.py index 8fc7cd199..a2266229b 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_auth.py +++ b/ceph/qa/tasks/mgr/dashboard/test_auth.py @@ -154,7 +154,7 @@ class AuthTest(DashboardTestCase): self.assertJsonBody({ "redirect_url": "#/login" }) - self._get("/api/host") + self._get("/api/host", version='1.1') self.assertStatus(401) self.set_jwt_token(None) @@ -169,7 +169,7 @@ class AuthTest(DashboardTestCase): self.assertJsonBody({ "redirect_url": "#/login" }) - self._get("/api/host", set_cookies=True) + self._get("/api/host", set_cookies=True, version='1.1') self.assertStatus(401) self.set_jwt_token(None) @@ -179,10 +179,10 @@ class AuthTest(DashboardTestCase): self._post("/api/auth", {'username': 'admin', 'password': 'admin'}) self.assertStatus(201) self.set_jwt_token(self.jsonBody()['token']) - self._get("/api/host") + self._get("/api/host", version='1.1') self.assertStatus(200) time.sleep(6) - self._get("/api/host") + self._get("/api/host", version='1.1') self.assertStatus(401) self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800']) self.set_jwt_token(None) @@ -192,10 +192,10 @@ class AuthTest(DashboardTestCase): self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True) self.assertStatus(201) self.set_jwt_token(self.jsonBody()['token']) - self._get("/api/host", set_cookies=True) + self._get("/api/host", set_cookies=True, version='1.1') self.assertStatus(200) time.sleep(6) - self._get("/api/host", set_cookies=True) + self._get("/api/host", set_cookies=True, version='1.1') self.assertStatus(401) self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800']) self.set_jwt_token(None) @@ -209,7 +209,7 @@ class AuthTest(DashboardTestCase): # the following call adds the token to the blocklist self._post("/api/auth/logout") self.assertStatus(200) - self._get("/api/host") + self._get("/api/host", version='1.1') self.assertStatus(401) time.sleep(6) self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800']) @@ -229,7 +229,7 @@ class AuthTest(DashboardTestCase): # the following call adds the token to the blocklist self._post("/api/auth/logout", set_cookies=True) self.assertStatus(200) - self._get("/api/host", set_cookies=True) + self._get("/api/host", set_cookies=True, version='1.1') self.assertStatus(401) time.sleep(6) self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800']) @@ -243,61 +243,61 @@ class AuthTest(DashboardTestCase): def test_unauthorized(self): # test with Authorization header - self._get("/api/host") + self._get("/api/host", version='1.1') self.assertStatus(401) # test with Cookies set - self._get("/api/host", set_cookies=True) + self._get("/api/host", set_cookies=True, version='1.1') self.assertStatus(401) def test_invalidate_token_by_admin(self): # test with Authorization header - self._get("/api/host") + self._get("/api/host", version='1.1') self.assertStatus(401) self.create_user('user', 'user', ['read-only']) 
time.sleep(1) self._post("/api/auth", {'username': 'user', 'password': 'user'}) self.assertStatus(201) self.set_jwt_token(self.jsonBody()['token']) - self._get("/api/host") + self._get("/api/host", version='1.1') self.assertStatus(200) time.sleep(1) self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password', '--force-password', 'user'], 'user2') time.sleep(1) - self._get("/api/host") + self._get("/api/host", version='1.1') self.assertStatus(401) self.set_jwt_token(None) self._post("/api/auth", {'username': 'user', 'password': 'user2'}) self.assertStatus(201) self.set_jwt_token(self.jsonBody()['token']) - self._get("/api/host") + self._get("/api/host", version='1.1') self.assertStatus(200) self.delete_user("user") # test with Cookies set - self._get("/api/host", set_cookies=True) + self._get("/api/host", set_cookies=True, version='1.1') self.assertStatus(401) self.create_user('user', 'user', ['read-only']) time.sleep(1) self._post("/api/auth", {'username': 'user', 'password': 'user'}, set_cookies=True) self.assertStatus(201) self.set_jwt_token(self.jsonBody()['token']) - self._get("/api/host", set_cookies=True) + self._get("/api/host", set_cookies=True, version='1.1') self.assertStatus(200) time.sleep(1) self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password', '--force-password', 'user'], 'user2') time.sleep(1) - self._get("/api/host", set_cookies=True) + self._get("/api/host", set_cookies=True, version='1.1') self.assertStatus(401) self.set_jwt_token(None) self._post("/api/auth", {'username': 'user', 'password': 'user2'}, set_cookies=True) self.assertStatus(201) self.set_jwt_token(self.jsonBody()['token']) - self._get("/api/host", set_cookies=True) + self._get("/api/host", set_cookies=True, version='1.1') self.assertStatus(200) self.delete_user("user") @@ -335,7 +335,8 @@ class AuthTest(DashboardTestCase): self.assertStatus(200) data = self.jsonBody() self.assertSchema(data, JObj(sub_elems={ - "login_url": JLeaf(str) + "login_url": JLeaf(str), + "cluster_status": JLeaf(str) }, allow_unknown=False)) self.logout() @@ -345,6 +346,7 @@ class AuthTest(DashboardTestCase): self.assertStatus(200) data = self.jsonBody() self.assertSchema(data, JObj(sub_elems={ - "login_url": JLeaf(str) + "login_url": JLeaf(str), + "cluster_status": JLeaf(str) }, allow_unknown=False)) self.logout(set_cookies=True) diff --git a/ceph/qa/tasks/mgr/dashboard/test_cluster.py b/ceph/qa/tasks/mgr/dashboard/test_cluster.py new file mode 100644 index 000000000..14f854279 --- /dev/null +++ b/ceph/qa/tasks/mgr/dashboard/test_cluster.py @@ -0,0 +1,23 @@ +from .helper import DashboardTestCase, JLeaf, JObj + + +class ClusterTest(DashboardTestCase): + + def setUp(self): + super().setUp() + self.reset_session() + + def test_get_status(self): + data = self._get('/api/cluster', version='0.1') + self.assertStatus(200) + self.assertSchema(data, JObj(sub_elems={ + "status": JLeaf(str) + }, allow_unknown=False)) + + def test_update_status(self): + req = {'status': 'POST_INSTALLED'} + self._put('/api/cluster', req, version='0.1') + self.assertStatus(200) + data = self._get('/api/cluster', version='0.1') + self.assertStatus(200) + self.assertEqual(data, req) diff --git a/ceph/qa/tasks/mgr/dashboard/test_ganesha.py b/ceph/qa/tasks/mgr/dashboard/test_ganesha.py deleted file mode 100644 index 6868e0cb3..000000000 --- a/ceph/qa/tasks/mgr/dashboard/test_ganesha.py +++ /dev/null @@ -1,208 +0,0 @@ -# -*- coding: utf-8 -*- -# pylint: disable=too-many-public-methods - -from __future__ import absolute_import - -from .helper import 
DashboardTestCase, JList, JObj - - -class GaneshaTest(DashboardTestCase): - CEPHFS = True - AUTH_ROLES = ['pool-manager', 'ganesha-manager'] - - @classmethod - def setUpClass(cls): - super(GaneshaTest, cls).setUpClass() - cls.create_pool('ganesha', 2**2, 'replicated') - cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha1', 'create', 'conf-node1']) - cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha1', 'create', 'conf-node2']) - cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha1', 'create', 'conf-node3']) - cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha2', 'create', 'conf-node1']) - cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha2', 'create', 'conf-node2']) - cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha2', 'create', 'conf-node3']) - cls._ceph_cmd(['dashboard', 'set-ganesha-clusters-rados-pool-namespace', - 'cluster1:ganesha/ganesha1,cluster2:ganesha/ganesha2']) - - # RGW setup - cls._radosgw_admin_cmd([ - 'user', 'create', '--uid', 'admin', '--display-name', 'admin', - '--system', '--access-key', 'admin', '--secret', 'admin' - ]) - cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin') - cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin') - - @classmethod - def tearDownClass(cls): - super(GaneshaTest, cls).tearDownClass() - cls._radosgw_admin_cmd(['user', 'rm', '--uid', 'admin', '--purge-data']) - cls._ceph_cmd(['osd', 'pool', 'delete', 'ganesha', 'ganesha', - '--yes-i-really-really-mean-it']) - - @DashboardTestCase.RunAs('test', 'test', [{'rbd-image': ['create', 'update', 'delete']}]) - def test_read_access_permissions(self): - self._get('/api/nfs-ganesha/export') - self.assertStatus(403) - - def test_list_daemons(self): - daemons = self._get("/api/nfs-ganesha/daemon") - self.assertEqual(len(daemons), 6) - daemons = [(d['daemon_id'], d['cluster_id']) for d in daemons] - self.assertIn(('node1', 'cluster1'), daemons) - self.assertIn(('node2', 'cluster1'), daemons) - self.assertIn(('node3', 'cluster1'), daemons) - self.assertIn(('node1', 'cluster2'), daemons) - self.assertIn(('node2', 'cluster2'), daemons) - self.assertIn(('node3', 'cluster2'), daemons) - - @classmethod - def create_export(cls, path, cluster_id, daemons, fsal, sec_label_xattr=None): - if fsal == 'CEPH': - fsal = {"name": "CEPH", "user_id": "admin", "fs_name": None, - "sec_label_xattr": sec_label_xattr} - pseudo = "/cephfs{}".format(path) - else: - fsal = {"name": "RGW", "rgw_user_id": "admin"} - pseudo = "/rgw/{}".format(path if path[0] != '/' else "") - ex_json = { - "path": path, - "fsal": fsal, - "cluster_id": cluster_id, - "daemons": daemons, - "pseudo": pseudo, - "tag": None, - "access_type": "RW", - "squash": "no_root_squash", - "security_label": sec_label_xattr is not None, - "protocols": [4], - "transports": ["TCP"], - "clients": [{ - "addresses": ["10.0.0.0/8"], - "access_type": "RO", - "squash": "root" - }] - } - return cls._task_post('/api/nfs-ganesha/export', ex_json) - - def tearDown(self): - super(GaneshaTest, self).tearDown() - exports = self._get("/api/nfs-ganesha/export") - if self._resp.status_code != 200: - return - self.assertIsInstance(exports, list) - for exp in exports: - self._task_delete("/api/nfs-ganesha/export/{}/{}" - .format(exp['cluster_id'], exp['export_id'])) - - def _test_create_export(self, cephfs_path): - exports = self._get("/api/nfs-ganesha/export") - self.assertEqual(len(exports), 0) - - data = self.create_export(cephfs_path, 'cluster1', ['node1', 'node2'], 'CEPH', - "security.selinux") - - exports = self._get("/api/nfs-ganesha/export") - 
self.assertEqual(len(exports), 1) - self.assertDictEqual(exports[0], data) - return data - - def test_create_export(self): - self._test_create_export('/foo') - - def test_create_export_for_cephfs_root(self): - self._test_create_export('/') - - def test_update_export(self): - export = self._test_create_export('/foo') - export['access_type'] = 'RO' - export['daemons'] = ['node1', 'node3'] - export['security_label'] = True - data = self._task_put('/api/nfs-ganesha/export/{}/{}' - .format(export['cluster_id'], export['export_id']), - export) - exports = self._get("/api/nfs-ganesha/export") - self.assertEqual(len(exports), 1) - self.assertDictEqual(exports[0], data) - self.assertEqual(exports[0]['daemons'], ['node1', 'node3']) - self.assertEqual(exports[0]['security_label'], True) - - def test_delete_export(self): - export = self._test_create_export('/foo') - self._task_delete("/api/nfs-ganesha/export/{}/{}" - .format(export['cluster_id'], export['export_id'])) - self.assertStatus(204) - - def test_get_export(self): - exports = self._get("/api/nfs-ganesha/export") - self.assertEqual(len(exports), 0) - - data1 = self.create_export("/foo", 'cluster2', ['node1', 'node2'], 'CEPH') - data2 = self.create_export("mybucket", 'cluster2', ['node2', 'node3'], 'RGW') - - export1 = self._get("/api/nfs-ganesha/export/cluster2/1") - self.assertDictEqual(export1, data1) - - export2 = self._get("/api/nfs-ganesha/export/cluster2/2") - self.assertDictEqual(export2, data2) - - def test_invalid_status(self): - self._ceph_cmd(['dashboard', 'set-ganesha-clusters-rados-pool-namespace', '']) - - data = self._get('/api/nfs-ganesha/status') - self.assertStatus(200) - self.assertIn('available', data) - self.assertIn('message', data) - self.assertFalse(data['available']) - self.assertIn(("NFS-Ganesha cluster is not detected. 
" - "Please set the GANESHA_RADOS_POOL_NAMESPACE " - "setting or deploy an NFS-Ganesha cluster with the Orchestrator."), - data['message']) - - self._ceph_cmd(['dashboard', 'set-ganesha-clusters-rados-pool-namespace', - 'cluster1:ganesha/ganesha1,cluster2:ganesha/ganesha2']) - - def test_valid_status(self): - data = self._get('/api/nfs-ganesha/status') - self.assertStatus(200) - self.assertIn('available', data) - self.assertIn('message', data) - self.assertTrue(data['available']) - - def test_ganesha_fsals(self): - data = self._get('/ui-api/nfs-ganesha/fsals') - self.assertStatus(200) - self.assertIn('CEPH', data) - - def test_ganesha_filesystems(self): - data = self._get('/ui-api/nfs-ganesha/cephfs/filesystems') - self.assertStatus(200) - self.assertSchema(data, JList(JObj({ - 'id': int, - 'name': str - }))) - - def test_ganesha_lsdir(self): - fss = self._get('/ui-api/nfs-ganesha/cephfs/filesystems') - self.assertStatus(200) - for fs in fss: - data = self._get('/ui-api/nfs-ganesha/lsdir/{}'.format(fs['name'])) - self.assertStatus(200) - self.assertSchema(data, JObj({'paths': JList(str)})) - self.assertEqual(data['paths'][0], '/') - - def test_ganesha_buckets(self): - data = self._get('/ui-api/nfs-ganesha/rgw/buckets') - self.assertStatus(200) - schema = JList(str) - self.assertSchema(data, schema) - - def test_ganesha_clusters(self): - data = self._get('/ui-api/nfs-ganesha/clusters') - self.assertStatus(200) - schema = JList(str) - self.assertSchema(data, schema) - - def test_ganesha_cephx_clients(self): - data = self._get('/ui-api/nfs-ganesha/cephx/clients') - self.assertStatus(200) - schema = JList(str) - self.assertSchema(data, schema) diff --git a/ceph/qa/tasks/mgr/dashboard/test_host.py b/ceph/qa/tasks/mgr/dashboard/test_host.py index 124fff8d1..78d784473 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_host.py +++ b/ceph/qa/tasks/mgr/dashboard/test_host.py @@ -32,11 +32,11 @@ class HostControllerTest(DashboardTestCase): @DashboardTestCase.RunAs('test', 'test', ['block-manager']) def test_access_permissions(self): - self._get(self.URL_HOST) + self._get(self.URL_HOST, version='1.1') self.assertStatus(403) def test_host_list(self): - data = self._get(self.URL_HOST) + data = self._get(self.URL_HOST, version='1.1') self.assertStatus(200) orch_hostnames = {inventory_node['name'] for inventory_node in @@ -65,14 +65,14 @@ class HostControllerTest(DashboardTestCase): self.assertIn(server['hostname'], orch_hostnames) def test_host_list_with_sources(self): - data = self._get('{}?sources=orchestrator'.format(self.URL_HOST)) + data = self._get('{}?sources=orchestrator'.format(self.URL_HOST), version='1.1') self.assertStatus(200) test_hostnames = {inventory_node['name'] for inventory_node in self.ORCHESTRATOR_TEST_DATA['inventory']} resp_hostnames = {host['hostname'] for host in data} self.assertEqual(test_hostnames, resp_hostnames) - data = self._get('{}?sources=ceph'.format(self.URL_HOST)) + data = self._get('{}?sources=ceph'.format(self.URL_HOST), version='1.1') self.assertStatus(200) test_hostnames = {inventory_node['name'] for inventory_node in self.ORCHESTRATOR_TEST_DATA['inventory']} @@ -80,7 +80,7 @@ class HostControllerTest(DashboardTestCase): self.assertEqual(len(test_hostnames.intersection(resp_hostnames)), 0) def test_host_devices(self): - hosts = self._get('{}'.format(self.URL_HOST)) + hosts = self._get('{}'.format(self.URL_HOST), version='1.1') hosts = [host['hostname'] for host in hosts if host['hostname'] != ''] assert hosts[0] data = 
self._get('{}/devices'.format('{}/{}'.format(self.URL_HOST, hosts[0]))) @@ -88,7 +88,7 @@ class HostControllerTest(DashboardTestCase): self.assertSchema(data, devices_schema) def test_host_daemons(self): - hosts = self._get('{}'.format(self.URL_HOST)) + hosts = self._get('{}'.format(self.URL_HOST), version='1.1') hosts = [host['hostname'] for host in hosts if host['hostname'] != ''] assert hosts[0] data = self._get('{}/daemons'.format('{}/{}'.format(self.URL_HOST, hosts[0]))) @@ -100,7 +100,7 @@ class HostControllerTest(DashboardTestCase): }))) def test_host_smart(self): - hosts = self._get('{}'.format(self.URL_HOST)) + hosts = self._get('{}'.format(self.URL_HOST), version='1.1') hosts = [host['hostname'] for host in hosts if host['hostname'] != ''] assert hosts[0] self._get('{}/smart'.format('{}/{}'.format(self.URL_HOST, hosts[0]))) diff --git a/ceph/qa/tasks/mgr/dashboard/test_pool.py b/ceph/qa/tasks/mgr/dashboard/test_pool.py index cbf920b91..055ba2b00 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_pool.py +++ b/ceph/qa/tasks/mgr/dashboard/test_pool.py @@ -92,7 +92,7 @@ class PoolTest(DashboardTestCase): self.assertEqual(pool[prop], int(value), '{}: {} != {}'.format(prop, pool[prop], value)) elif prop == 'pg_num': - self._check_pg_num(value, pool) + self._check_pg_num(pool['pool_name'], int(value)) elif prop == 'application_metadata': self.assertIsInstance(pool[prop], list) self.assertEqual(value, pool[prop]) @@ -117,10 +117,7 @@ class PoolTest(DashboardTestCase): else: self.assertEqual(pool[prop], value, '{}: {} != {}'.format(prop, pool[prop], value)) - self.wait_until_equal(self._get_health_status, 'HEALTH_OK', timeout) - - def _get_health_status(self): - return self._get('/api/health/minimal')['health']['status'] + self.wait_for_health_clear(timeout) def _get_pool(self, pool_name): pool = self._get("/api/pool/" + pool_name) @@ -128,22 +125,24 @@ class PoolTest(DashboardTestCase): self.assertSchemaBody(self.pool_schema) return pool - def _check_pg_num(self, value, pool): + def _check_pg_num(self, pool_name, pg_num): """ If both properties have not the same value, the cluster goes into a warning state, which will only happen during a pg update on an existing pool. The test that does that is currently commented out because our QA systems can't deal with the change. Feel free to test it locally. """ - pgp_prop = 'pg_placement_num' - t = 0 - while (int(value) != pool[pgp_prop] or self._get_health_status() != 'HEALTH_OK') \ - and t < 180: - time.sleep(2) - t += 2 - pool = self._get_pool(pool['pool_name']) - for p in ['pg_num', pgp_prop]: # Should have the same values - self.assertEqual(pool[p], int(value), '{}: {} != {}'.format(p, pool[p], value)) + self.wait_until_equal( + lambda: self._get_pool(pool_name)['pg_placement_num'], + expect_val=pg_num, + timeout=180 + ) + + pool = self._get_pool(pool_name) + + for prop in ['pg_num', 'pg_placement_num']: + self.assertEqual(pool[prop], int(pg_num), + '{}: {} != {}'.format(prop, pool[prop], pg_num)) @DashboardTestCase.RunAs('test', 'test', [{'pool': ['create', 'update', 'delete']}]) def test_read_access_permissions(self): diff --git a/ceph/qa/tasks/mgr/dashboard/test_requests.py b/ceph/qa/tasks/mgr/dashboard/test_requests.py index 93b175bfd..0d7fb75ad 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_requests.py +++ b/ceph/qa/tasks/mgr/dashboard/test_requests.py @@ -2,7 +2,7 @@ from __future__ import absolute_import -from . import DEFAULT_VERSION +from . 
import DEFAULT_API_VERSION from .helper import DashboardTestCase @@ -11,7 +11,7 @@ class RequestsTest(DashboardTestCase): self._get('/api/summary') self.assertHeaders({ 'Content-Encoding': 'gzip', - 'Content-Type': 'application/vnd.ceph.api.v{}+json'.format(DEFAULT_VERSION) + 'Content-Type': 'application/vnd.ceph.api.v{}+json'.format(DEFAULT_API_VERSION) }) def test_force_no_gzip(self): @@ -27,7 +27,7 @@ class RequestsTest(DashboardTestCase): self._get('/api/summary') self.assertHeaders({ 'server': 'Ceph-Dashboard', - 'Content-Type': 'application/vnd.ceph.api.v{}+json'.format(DEFAULT_VERSION), + 'Content-Type': 'application/vnd.ceph.api.v{}+json'.format(DEFAULT_API_VERSION), 'Content-Security-Policy': "frame-ancestors 'self';", 'X-Content-Type-Options': 'nosniff', 'Strict-Transport-Security': 'max-age=63072000; includeSubDomains; preload' diff --git a/ceph/qa/tasks/mgr/dashboard/test_rgw.py b/ceph/qa/tasks/mgr/dashboard/test_rgw.py index 1bfb99506..dc972d3ed 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_rgw.py +++ b/ceph/qa/tasks/mgr/dashboard/test_rgw.py @@ -183,13 +183,13 @@ class RgwBucketTest(RgwTestCase): self.assertEqual(data['tenant'], '') # List all buckets. - data = self._get('/api/rgw/bucket') + data = self._get('/api/rgw/bucket', version='1.1') self.assertStatus(200) self.assertEqual(len(data), 1) self.assertIn('teuth-test-bucket', data) # List all buckets with stats. - data = self._get('/api/rgw/bucket?stats=true') + data = self._get('/api/rgw/bucket?stats=true', version='1.1') self.assertStatus(200) self.assertEqual(len(data), 1) self.assertSchema(data[0], JObj(sub_elems={ @@ -203,7 +203,7 @@ class RgwBucketTest(RgwTestCase): }, allow_unknown=True)) # List all buckets names without stats. - data = self._get('/api/rgw/bucket?stats=false') + data = self._get('/api/rgw/bucket?stats=false', version='1.1') self.assertStatus(200) self.assertEqual(data, ['teuth-test-bucket']) @@ -283,7 +283,7 @@ class RgwBucketTest(RgwTestCase): # Delete the bucket. self._delete('/api/rgw/bucket/teuth-test-bucket') self.assertStatus(204) - data = self._get('/api/rgw/bucket') + data = self._get('/api/rgw/bucket', version='1.1') self.assertStatus(200) self.assertEqual(len(data), 0) @@ -306,7 +306,7 @@ class RgwBucketTest(RgwTestCase): self.assertIsNone(data) # List all buckets. 
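As the Content-Type assertions above show, the dashboard encodes the endpoint's API version in a vendored media type; the version='1.1' arguments threaded through these tests select the matching Accept header. A minimal client-side sketch with `requests` (hypothetical host and endpoint, for illustration only):

    import requests

    version = '1.1'  # e.g. the /api/host endpoint version used in these tests
    headers = {'Accept': 'application/vnd.ceph.api.v{}+json'.format(version)}
    resp = requests.get('https://dashboard.example:8443/api/host',  # hypothetical
                        headers=headers, verify=False)
    print(resp.status_code, resp.headers.get('Content-Type'))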
- data = self._get('/api/rgw/bucket') + data = self._get('/api/rgw/bucket', version='1.1') self.assertStatus(200) self.assertEqual(len(data), 1) self.assertIn('testx/teuth-test-bucket', data) @@ -379,7 +379,7 @@ class RgwBucketTest(RgwTestCase): self._delete('/api/rgw/bucket/{}'.format( parse.quote_plus('testx/teuth-test-bucket'))) self.assertStatus(204) - data = self._get('/api/rgw/bucket') + data = self._get('/api/rgw/bucket', version='1.1') self.assertStatus(200) self.assertEqual(len(data), 0) diff --git a/ceph/qa/tasks/mgr/dashboard/test_user.py b/ceph/qa/tasks/mgr/dashboard/test_user.py index 73ead662d..3a6464f5a 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_user.py +++ b/ceph/qa/tasks/mgr/dashboard/test_user.py @@ -412,6 +412,9 @@ class UserTest(DashboardTestCase): user_1 = self._get('/api/user/user1') self.assertStatus(200) + # Let's wait 1 s to ensure pwd expiration date is not the same + time.sleep(1) + self.login('user1', 'mypassword10#') self._post('/api/user/user1/change_password', { 'old_password': 'mypassword10#', diff --git a/ceph/qa/tasks/mgr/test_module_selftest.py b/ceph/qa/tasks/mgr/test_module_selftest.py index 8abfe51ab..b054642db 100644 --- a/ceph/qa/tasks/mgr/test_module_selftest.py +++ b/ceph/qa/tasks/mgr/test_module_selftest.py @@ -3,7 +3,6 @@ import time import requests import errno import logging -import sys from teuthology.exceptions import CommandFailedError @@ -52,10 +51,13 @@ class TestModuleSelftest(MgrTestCase): self._selftest_plugin("influx") def test_diskprediction_local(self): - if sys.version_info >= (3, 8): + self._load_module("selftest") + python_version = self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "python-version") + if tuple(int(v) for v in python_version.split('.')) >= (3, 8): # https://tracker.ceph.com/issues/45147 - python_version = f'python {sys.version_info.major}.{sys.version_info.minor}' - self.skipTest(f'{python_version} not compatible with diskprediction_local') + self.skipTest(f'python {python_version} not compatible with ' + 'diskprediction_local') self._selftest_plugin("diskprediction_local") def test_telegraf(self): diff --git a/ceph/qa/tasks/mgr/test_orchestrator_cli.py b/ceph/qa/tasks/mgr/test_orchestrator_cli.py index 49be3d8ab..3fccef9a6 100644 --- a/ceph/qa/tasks/mgr/test_orchestrator_cli.py +++ b/ceph/qa/tasks/mgr/test_orchestrator_cli.py @@ -123,8 +123,7 @@ data_devices: self._orch_cmd('daemon', 'add', 'rgw', 'realm', 'zone') def test_nfs_add(self): - self._orch_cmd('daemon', 'add', "nfs", "service_name", "pool", "--namespace", "ns") - self._orch_cmd('daemon', 'add', "nfs", "service_name", "pool") + self._orch_cmd('daemon', 'add', "nfs", "service_name") def test_osd_rm(self): self._orch_cmd('daemon', "rm", "osd.0", '--force') diff --git a/ceph/qa/tasks/mgr/test_progress.py b/ceph/qa/tasks/mgr/test_progress.py index 69e918753..082653f62 100644 --- a/ceph/qa/tasks/mgr/test_progress.py +++ b/ceph/qa/tasks/mgr/test_progress.py @@ -2,9 +2,8 @@ import json import logging import time - from .mgr_test_case import MgrTestCase - +from contextlib import contextmanager log = logging.getLogger(__name__) @@ -14,7 +13,7 @@ class TestProgress(MgrTestCase): # How long we expect to wait at most between taking an OSD out # and seeing the progress event pop up. 
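The diff below wraps the OSD in/out transitions in a new `recovery_backfill_disabled()` context manager so the progress events can be observed before recovery races ahead. One hardening note: as added, the flags are cleared after a bare `yield`, so a failure inside the `with` body would leave `nobackfill`/`norecover` set (the unset calls added to `tearDown` cover that case). A try/finally variant of the same idea, as a sketch rather than the patch's code:

    from contextlib import contextmanager

    @contextmanager
    def recovery_backfill_disabled(mon_manager):
        mon_manager.raw_cluster_cmd('osd', 'set', 'nobackfill')
        mon_manager.raw_cluster_cmd('osd', 'set', 'norecover')
        try:
            yield
        finally:
            # always clear the flags, even if the body raises
            mon_manager.raw_cluster_cmd('osd', 'unset', 'nobackfill')
            mon_manager.raw_cluster_cmd('osd', 'unset', 'norecover')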
- EVENT_CREATION_PERIOD = 15 + EVENT_CREATION_PERIOD = 60 WRITE_PERIOD = 30 @@ -149,6 +148,18 @@ class TestProgress(MgrTestCase): osd_map = self.mgr_cluster.mon_manager.get_osd_dump_json() return len(osd_map['osds']) + @contextmanager + def recovery_backfill_disabled(self): + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'set', 'nobackfill') + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'set', 'norecover') + yield + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'unset', 'nobackfill') + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'unset', 'norecover') + def setUp(self): super(TestProgress, self).setUp() # Ensure we have at least four OSDs @@ -181,15 +192,16 @@ class TestProgress(MgrTestCase): self._setup_pool() self._write_some_data(self.WRITE_PERIOD) + with self.recovery_backfill_disabled(): + for osd_id in osd_ids: + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'out', str(osd_id)) - for osd_id in osd_ids: - self.mgr_cluster.mon_manager.raw_cluster_cmd( - 'osd', 'out', str(osd_id)) + # Wait for a progress event to pop up + self.wait_until_equal(lambda: self._osd_in_out_events_count('out'), 1, + timeout=self.EVENT_CREATION_PERIOD, + period=1) - # Wait for a progress event to pop up - self.wait_until_equal(lambda: self._osd_in_out_events_count('out'), 1, - timeout=self.EVENT_CREATION_PERIOD*2, - period=1) ev = self._get_osd_in_out_events('out')[0] log.info(json.dumps(ev, indent=1)) self.assertIn("Rebalancing after osd.0 marked out", ev['message']) @@ -202,20 +214,23 @@ class TestProgress(MgrTestCase): # First Event should complete promptly self.wait_until_true(lambda: self._is_complete(initial_event['id']), - timeout=self.EVENT_CREATION_PERIOD) - try: - # Wait for progress event marked in to pop up - self.wait_until_equal(lambda: self._osd_in_out_events_count('in'), 1, - timeout=self.EVENT_CREATION_PERIOD*2, - period=1) - except RuntimeError as ex: - if not "Timed out after" in str(ex): - raise ex + timeout=self.RECOVERY_PERIOD) + + with self.recovery_backfill_disabled(): + + try: + # Wait for progress event marked in to pop up + self.wait_until_equal(lambda: self._osd_in_out_events_count('in'), 1, + timeout=self.EVENT_CREATION_PERIOD, + period=1) + except RuntimeError as ex: + if not "Timed out after" in str(ex): + raise ex - log.info("There was no PGs affected by osd being marked in") - return None + log.info("There was no PGs affected by osd being marked in") + return None - new_event = self._get_osd_in_out_events('in')[0] + new_event = self._get_osd_in_out_events('in')[0] return new_event def _no_events_anywhere(self): @@ -243,10 +258,22 @@ class TestProgress(MgrTestCase): assert ev_id in live_ids return False + def _is_inprogress_or_complete(self, ev_id): + for ev in self._events_in_progress(): + if ev['id'] == ev_id: + return ev['progress'] > 0 + # check if the event completed + return self._is_complete(ev_id) + def tearDown(self): if self.POOL in self.mgr_cluster.mon_manager.pools: self.mgr_cluster.mon_manager.remove_pool(self.POOL) + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'unset', 'nobackfill') + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'unset', 'norecover') + osd_map = self.mgr_cluster.mon_manager.get_osd_dump_json() for osd in osd_map['osds']: if osd['weight'] == 0.0: @@ -280,7 +307,7 @@ class TestProgress(MgrTestCase): # Event should complete promptly self.wait_until_true(lambda: self._is_complete(ev['id']), - timeout=self.EVENT_CREATION_PERIOD) + timeout=self.RECOVERY_PERIOD) 
self.assertEqual(self._osd_in_out_events_count(), 0) def test_osd_came_back(self): @@ -301,57 +328,6 @@ class TestProgress(MgrTestCase): self.assertEqual(self._osd_in_out_events_count(), 0) - def test_osd_cannot_recover(self): - """ - When the cluster cannot recover from a lost OSD, e.g. - because there is no suitable new placement for it. - (a size=3 pool when there are only 2 OSDs left) - (a size=3 pool when the remaining osds are only on 2 hosts) - - Progress event should not be created. - """ - - pool_size = 3 - - self._setup_pool(size=pool_size) - self._write_some_data(self.WRITE_PERIOD) - - # Fail enough OSDs so there are less than N_replicas OSDs - # available. - osd_count = self._osd_count() - - # First do some failures that will result in a normal rebalance - # (Assumption: we're in a test environment that is configured - # not to require replicas be on different hosts, like teuthology) - for osd_id in range(0, osd_count - pool_size): - self.mgr_cluster.mon_manager.raw_cluster_cmd( - 'osd', 'out', str(osd_id)) - - # We should see an event for each of the OSDs we took out - self.wait_until_equal( - lambda: self._osd_in_out_events_count('out'), - osd_count - pool_size, - timeout=self.EVENT_CREATION_PERIOD*(osd_count - pool_size)) - - # Those should complete cleanly - self.wait_until_equal( - lambda: self._osd_in_out_completed_events_count('out'), - osd_count - pool_size, - timeout=self.RECOVERY_PERIOD*(osd_count - pool_size) - ) - - # Fail one last OSD, at the point the PGs have nowhere to go - victim_osd = osd_count - pool_size - self.mgr_cluster.mon_manager.raw_cluster_cmd( - 'osd', 'out', str(victim_osd)) - - # Check that no event is created - time.sleep(self.EVENT_CREATION_PERIOD) - - self.assertEqual( - self._osd_in_out_completed_events_count('out'), - osd_count - pool_size) - def test_turn_off_module(self): """ When the the module is turned off, there should not @@ -364,18 +340,19 @@ class TestProgress(MgrTestCase): pool_size = 3 self._setup_pool(size=pool_size) self._write_some_data(self.WRITE_PERIOD) - self.mgr_cluster.mon_manager.raw_cluster_cmd("progress", "off") - self.mgr_cluster.mon_manager.raw_cluster_cmd( - 'osd', 'out', '0') + with self.recovery_backfill_disabled(): + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'out', '0') - time.sleep(self.EVENT_CREATION_PERIOD) + time.sleep(self.EVENT_CREATION_PERIOD/2) - self.mgr_cluster.mon_manager.raw_cluster_cmd( + with self.recovery_backfill_disabled(): + self.mgr_cluster.mon_manager.raw_cluster_cmd( 'osd', 'in', '0') - time.sleep(self.EVENT_CREATION_PERIOD) + time.sleep(self.EVENT_CREATION_PERIOD/2) self.assertTrue(self._no_events_anywhere()) @@ -383,18 +360,21 @@ class TestProgress(MgrTestCase): self._write_some_data(self.WRITE_PERIOD) - self.mgr_cluster.mon_manager.raw_cluster_cmd( - 'osd', 'out', '0') + with self.recovery_backfill_disabled(): - # Wait for a progress event to pop up - self.wait_until_equal(lambda: self._osd_in_out_events_count('out'), 1, - timeout=self.EVENT_CREATION_PERIOD*2, - period=1) + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'out', '0') + + # Wait for a progress event to pop up + self.wait_until_equal(lambda: self._osd_in_out_events_count('out'), 1, + timeout=self.EVENT_CREATION_PERIOD, + period=1) ev1 = self._get_osd_in_out_events('out')[0] log.info(json.dumps(ev1, indent=1)) self.wait_until_true(lambda: self._is_complete(ev1['id']), + check_fn=lambda: self._is_inprogress_or_complete(ev1['id']), timeout=self.RECOVERY_PERIOD) self.assertTrue(self._is_quiet()) diff --git 
a/ceph/qa/tasks/pykmip.py b/ceph/qa/tasks/pykmip.py index d0b72f69e..3491babc5 100644 --- a/ceph/qa/tasks/pykmip.py +++ b/ceph/qa/tasks/pykmip.py @@ -147,7 +147,7 @@ def setup_venv(ctx, config): assert isinstance(config, dict) log.info('Setting up virtualenv for pykmip...') for (client, _) in config.items(): - run_in_pykmip_dir(ctx, client, ['virtualenv', '.pykmipenv']) + run_in_pykmip_dir(ctx, client, ['python3', '-m', 'venv', '.pykmipenv']) run_in_pykmip_venv(ctx, client, ['pip', 'install', 'pytz', '-e', get_pykmip_dir(ctx)]) yield diff --git a/ceph/qa/tasks/python.py b/ceph/qa/tasks/python.py new file mode 100644 index 000000000..4ddb14f71 --- /dev/null +++ b/ceph/qa/tasks/python.py @@ -0,0 +1,45 @@ +import logging +from teuthology import misc as teuthology +from tasks.vip import subst_vip + +log = logging.getLogger(__name__) + + +def task(ctx, config): + """ + Execute some python code. + + tasks: + - python: + host.a: | + import boto3 + c = boto3.resource(...) + + The provided dict is normally indexed by role. You can also include a + 'sudo: false' key to run the code without sudo. + + tasks: + - python: + sudo: false + host.b: | + import boto3 + c = boto3.resource(...) + """ + assert isinstance(config, dict), "task python got invalid config" + + testdir = teuthology.get_testdir(ctx) + + sudo = config.pop('sudo', True) + + for role, code in config.items(): + (remote,) = ctx.cluster.only(role).remotes.keys() + log.info('Running python on role %s host %s', role, remote.name) + log.info(code) + args=[ + 'TESTDIR={tdir}'.format(tdir=testdir), + 'python3', + ] + if sudo: + args = ['sudo'] + args + remote.run(args=args, stdin=subst_vip(ctx, code)) + diff --git a/ceph/qa/tasks/s3a_hadoop.py b/ceph/qa/tasks/s3a_hadoop.py index f06c9acfe..cfccf65e3 100644 --- a/ceph/qa/tasks/s3a_hadoop.py +++ b/ceph/qa/tasks/s3a_hadoop.py @@ -144,7 +144,9 @@ def setup_user_bucket(client, dns_name, access_key, secret_key, bucket_name, tes ) client.run( args=[ - 'virtualenv', + 'python3', + '-m', + 'venv', '{testdir}/venv'.format(testdir=testdir), run.Raw('&&'), run.Raw('{testdir}/venv/bin/pip'.format(testdir=testdir)), diff --git a/ceph/qa/tasks/tox.py b/ceph/qa/tasks/tox.py index 36c226d0b..61c5b7411 100644 --- a/ceph/qa/tasks/tox.py +++ b/ceph/qa/tasks/tox.py @@ -29,14 +29,14 @@ def task(ctx, config): log.info('Deploying tox from pip...') for (client, _) in config.items(): # yup, we have to deploy tox first. The packaged one, available - # on Sepia's Ubuntu machines, is outdated for Keystone/Tempest. + # on Sepia's Ubuntu machines, is outdated for Keystone/Tempest. 
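The new `python` task above ships the inline script to the remote host by passing it on stdin to `python3`. The same pattern, sketched locally with `subprocess` (illustration only; the task itself goes through teuthology's `remote.run`):

    import subprocess

    code = "import sys; print(sys.version_info[:2])"
    # the program text arrives on stdin, exactly as the task feeds it to python3
    subprocess.run(['python3'], input=code, text=True, check=True)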
tvdir = get_toxvenv_dir(ctx) - ctx.cluster.only(client).run(args=[ 'virtualenv', '-p', 'python3', tvdir ]) - ctx.cluster.only(client).run(args= - [ 'source', '{tvdir}/bin/activate'.format(tvdir=tvdir), - run.Raw('&&'), - 'pip', 'install', 'tox==3.15.0' - ]) + ctx.cluster.only(client).run(args=['python3', '-m', 'venv', tvdir]) + ctx.cluster.only(client).run(args=[ + 'source', '{tvdir}/bin/activate'.format(tvdir=tvdir), + run.Raw('&&'), + 'pip', 'install', 'tox==3.15.0' + ]) # export the path Keystone and Tempest ctx.tox = argparse.Namespace() diff --git a/ceph/qa/tasks/vip.py b/ceph/qa/tasks/vip.py index d4d27b85e..52114b104 100644 --- a/ceph/qa/tasks/vip.py +++ b/ceph/qa/tasks/vip.py @@ -62,6 +62,7 @@ def exec(ctx, config): 'sudo', 'TESTDIR={tdir}'.format(tdir=testdir), 'bash', + '-ex', '-c', subst_vip(ctx, c)], ) diff --git a/ceph/qa/tasks/vstart_runner.py b/ceph/qa/tasks/vstart_runner.py index 61ca07615..db0cb41cf 100644 --- a/ceph/qa/tasks/vstart_runner.py +++ b/ceph/qa/tasks/vstart_runner.py @@ -1233,6 +1233,12 @@ class LocalCluster(object): def only(self, requested): return self.__class__(rolename=requested) + def run(self, *args, **kwargs): + r = [] + for remote in self.remotes.keys(): + r.append(remote.run(*args, **kwargs)) + return r + class LocalContext(object): def __init__(self): diff --git a/ceph/qa/workunits/cephadm/test_dashboard_e2e.sh b/ceph/qa/workunits/cephadm/test_dashboard_e2e.sh index bd37154d9..e0f542e0f 100755 --- a/ceph/qa/workunits/cephadm/test_dashboard_e2e.sh +++ b/ceph/qa/workunits/cephadm/test_dashboard_e2e.sh @@ -93,23 +93,10 @@ find cypress # List all specs cypress_run "orchestrator/01-hosts.e2e-spec.ts" -ceph orch apply rgw foo --placement=3 -sleep 15 -ceph orch device ls --refresh -ceph orch ps --refresh -sleep 10 # the previous call is asynchronous -ceph orch device ls --format=json | tee cypress/fixtures/orchestrator/inventory.json -ceph orch ps --format=json | tee cypress/fixtures/orchestrator/services.json - -cypress_run "orchestrator/01-hosts-force-maintenance.e2e-spec.ts" - # Hosts are removed and added in the previous step. Do a refresh again. -ceph orch rm rgw.foo ceph orch device ls --refresh -ceph orch ps --refresh sleep 10 ceph orch device ls --format=json | tee cypress/fixtures/orchestrator/inventory.json -ceph orch ps --format=json | tee cypress/fixtures/orchestrator/services.json cypress_run "orchestrator/02-hosts-inventory.e2e-spec.ts" cypress_run "orchestrator/03-inventory.e2e-spec.ts" diff --git a/ceph/qa/workunits/fs/cephfs_mirror_ha_gen.sh b/ceph/qa/workunits/fs/cephfs_mirror_ha_gen.sh new file mode 100755 index 000000000..35ee9d4c7 --- /dev/null +++ b/ceph/qa/workunits/fs/cephfs_mirror_ha_gen.sh @@ -0,0 +1,69 @@ +#!/bin/bash -ex +# +# cephfs_mirror_ha_gen.sh - generate workload to synchronize +# + +. 
$(dirname $0)/cephfs_mirror_helpers.sh
+
+cleanup()
+{
+    for i in `seq 1 $NR_DIRECTORIES`
+    do
+        local repo_name="${REPO_PATH_PFX}_$i"
+        for j in `seq 1 $NR_SNAPSHOTS`
+        do
+            snap_name=$repo_name/.snap/snap_$j
+            if test -d $snap_name; then
+                rmdir $snap_name
+            fi
+        done
+    done
+    exit 1
+}
+trap cleanup EXIT
+
+configure_peer()
+{
+    ceph mgr module enable mirroring
+    ceph fs snapshot mirror enable $PRIMARY_FS
+    ceph fs snapshot mirror peer_add $PRIMARY_FS client.mirror_remote@ceph $BACKUP_FS
+
+    for i in `seq 1 $NR_DIRECTORIES`
+    do
+        local repo_name="${REPO_PATH_PFX}_$i"
+        ceph fs snapshot mirror add $PRIMARY_FS "$MIRROR_SUBDIR/$repo_name"
+    done
+}
+
+create_snaps()
+{
+    for i in `seq 1 $NR_DIRECTORIES`
+    do
+        local repo_name="${REPO_PATH_PFX}_$i"
+        for j in `seq 1 $NR_SNAPSHOTS`
+        do
+            snap_name=$repo_name/.snap/snap_$j
+            r=$(( $RANDOM % 100 + 5 ))
+            arr=($repo_name "reset" "--hard" "HEAD~$r")
+            exec_git_cmd "${arr[@]}"
+            mkdir $snap_name
+            store_checksum $snap_name
+        done
+    done
+}
+
+unset CEPH_CLI_TEST_DUP_COMMAND
+
+echo "running generator on primary file system..."
+
+# setup git repos to be used as data set
+setup_repos
+
+# turn on mirroring, add peers...
+configure_peer
+
+# snapshots on primary
+create_snaps
+
+# do not cleanup when exiting on success..
+trap - EXIT
diff --git a/ceph/qa/workunits/fs/cephfs_mirror_ha_verify.sh b/ceph/qa/workunits/fs/cephfs_mirror_ha_verify.sh
new file mode 100755
index 000000000..8d8b3859c
--- /dev/null
+++ b/ceph/qa/workunits/fs/cephfs_mirror_ha_verify.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -ex
+#
+# cephfs_mirror_ha_verify.sh - verify synchronized snapshots
+#
+
+. $(dirname $0)/cephfs_mirror_helpers.sh
+
+echo "running verifier on secondary file system..."
+
+for i in `seq 1 $NR_DIRECTORIES`
+do
+    repo_name="${REPO_PATH_PFX}_$i"
+    for j in `seq 1 $NR_SNAPSHOTS`
+    do
+        for s in 1 1 2 4 4 4 4 4 8 8 8 8 16 16 32 64 64 128 128
+        do
+            sleep $s
+            snap_name=$repo_name/.snap/snap_$j
+            if test -d $repo_name; then
+                echo "checking snapshot [$snap_name] in $repo_name"
+                if test -d $snap_name; then
+                    echo "generating hash for $snap_name"
+                    cksum=''
+                    calc_checksum $snap_name cksum
+                    ret=$(compare_checksum $cksum $snap_name)
+                    if [ $ret -ne 0 ]; then
+                        echo "checksum failed $snap_name ($cksum)"
+                        exit $ret
+                    else
+                        echo "checksum matched $snap_name ($cksum)"
+                        break
+                    fi
+                fi
+            fi
+        done
+        echo "couldn't complete verification for: $snap_name"
+    done
+done
+
+echo "verify done!"
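The verifier above retries each snapshot check over a widening sleep schedule instead of a single fixed timeout, which tolerates mirror daemons being thrashed mid-sync. The same backoff-poll pattern, sketched in Python for illustration (the workunit itself stays in bash):

    import time

    def wait_until(predicate, delays=(1, 1, 2, 4, 8, 16, 32, 64, 128)):
        """Poll predicate() after each sleep in the schedule; True on success."""
        for delay in delays:
            time.sleep(delay)
            if predicate():
                return True
        return False  # caller decides whether exhausting the schedule is fatal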
diff --git a/ceph/qa/workunits/fs/cephfs_mirror_helpers.sh b/ceph/qa/workunits/fs/cephfs_mirror_helpers.sh new file mode 100644 index 000000000..69f1c6f3d --- /dev/null +++ b/ceph/qa/workunits/fs/cephfs_mirror_helpers.sh @@ -0,0 +1,66 @@ +PRIMARY_FS='dc' +BACKUP_FS='dc-backup' + +REPO=ceph-qa-suite +REPO_DIR=ceph_repo +REPO_PATH_PFX="$REPO_DIR/$REPO" + +NR_DIRECTORIES=4 +NR_SNAPSHOTS=4 +MIRROR_SUBDIR='/mirror' + +calc_checksum() +{ + local path=$1 + local -n ref=$2 + ref=`find -L $path -type f -exec md5sum {} + | awk '{ print $1 }' | md5sum | awk '{ print $1 }'` +} + +store_checksum() +{ + local path=$1 + local cksum='' #something invalid + local fhash=`echo -n $path | md5sum | awk '{ print $1 }'` + calc_checksum $path cksum + echo -n $cksum > "/tmp/primary-$fhash" +} + +compare_checksum() +{ + local ret=0 + local cksum=$1 + local path=$2 + local fhash=`echo -n $path | md5sum | awk '{ print $1 }'` + local cksum_ondisk=`cat /tmp/primary-$fhash` + if [ $cksum != $cksum_ondisk ]; then + echo "$cksum <> $cksum_ondisk" + ret=1 + fi + echo $ret +} + +exec_git_cmd() +{ + local arg=("$@") + local repo_name=${arg[0]} + local cmd=${arg[@]:1} + git --git-dir "$repo_name/.git" $cmd +} + +clone_repo() +{ + local repo_name=$1 + git clone --branch giant "http://github.com/ceph/$REPO" $repo_name +} + +setup_repos() +{ + mkdir "$REPO_DIR" + + for i in `seq 1 $NR_DIRECTORIES` + do + local repo_name="${REPO_PATH_PFX}_$i" + mkdir $repo_name + clone_repo $repo_name + done +} diff --git a/ceph/qa/workunits/mon/pg_autoscaler.sh b/ceph/qa/workunits/mon/pg_autoscaler.sh index 3d24b1a6c..215b58707 100755 --- a/ceph/qa/workunits/mon/pg_autoscaler.sh +++ b/ceph/qa/workunits/mon/pg_autoscaler.sh @@ -45,6 +45,56 @@ ceph osd pool set b pg_autoscale_mode on # get num pools again since we created more pools NUM_POOLS=$(ceph osd pool ls | wc -l) +# get profiles of pool a and b +PROFILE1=$(ceph osd pool autoscale-status | grep 'a' | grep -o -m 1 'scale-up\|scale-down' || true) +PROFILE2=$(ceph osd pool autoscale-status | grep 'b' | grep -o -m 1 'scale-up\|scale-down' || true) + +# evaluate the default profile a +if [[ $PROFILE1 = "scale-up" ]] +then + echo "Success: pool a PROFILE is scale-up" +else + echo "Error: a PROFILE is scale-down" + exit 1 +fi + +# evaluate the default profile of pool b +if [[ $PROFILE2 = "scale-up" ]] +then + echo "Success: pool b PROFILE is scale-up" +else + echo "Error: b PROFILE is scale-down" + exit 1 +fi + +# This part of this code will now evaluate the accuracy of +# scale-down profile + +# change to scale-down profile +ceph osd pool set autoscale-profile scale-down + +# get profiles of pool a and b +PROFILE1=$(ceph osd pool autoscale-status | grep 'a' | grep -o -m 1 'scale-up\|scale-down' || true) +PROFILE2=$(ceph osd pool autoscale-status | grep 'b' | grep -o -m 1 'scale-up\|scale-down' || true) + +# evaluate that profile a is now scale-down +if [[ $PROFILE1 = "scale-down" ]] +then + echo "Success: pool a PROFILE is scale-down" +else + echo "Error: a PROFILE is scale-up" + exit 1 +fi + +# evaluate the profile of b is now scale-down +if [[ $PROFILE2 = "scale-down" ]] +then + echo "Success: pool b PROFILE is scale-down" +else + echo "Error: b PROFILE is scale-up" + exit 1 +fi + # get pool size POOL_SIZE_A=$(ceph osd pool get a size| grep -Eo '[0-9]{1,4}') POOL_SIZE_B=$(ceph osd pool get b size| grep -Eo '[0-9]{1,4}') diff --git a/ceph/qa/workunits/rbd/rbd_mirror_bootstrap.sh b/ceph/qa/workunits/rbd/rbd_mirror_bootstrap.sh index e0a096ee8..fb77c0d9b 100755 --- 
a/ceph/qa/workunits/rbd/rbd_mirror_bootstrap.sh +++ b/ceph/qa/workunits/rbd/rbd_mirror_bootstrap.sh @@ -27,7 +27,7 @@ testlog "TEST: verify rx-only direction" [ "$(rbd --cluster ${CLUSTER1} --pool ${POOL} mirror pool info --format xml | ${XMLSTARLET} sel -t -v '//mirror/peers/peer[1]/uuid')" = "" ] -create_image ${CLUSTER1} ${POOL} image1 +create_image_and_enable_mirror ${CLUSTER1} ${POOL} image1 wait_for_image_replay_started ${CLUSTER2} ${POOL} image1 write_image ${CLUSTER1} ${POOL} image1 100 diff --git a/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh b/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh index 82299715e..ca715d854 100755 --- a/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh +++ b/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh @@ -549,6 +549,8 @@ status() echo "${cluster} ${image_pool} ${image_ns} rbd_mirroring omap vals" rados --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" listomapvals rbd_mirroring + echo "${cluster} ${image_pool} ${image_ns} rbd_mirror_leader omap vals" + rados --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" listomapvals rbd_mirror_leader echo done done @@ -1096,6 +1098,20 @@ unprotect_snapshot() rbd --cluster ${cluster} snap unprotect ${pool}/${image}@${snap} } +unprotect_snapshot_retry() +{ + local cluster=$1 + local pool=$2 + local image=$3 + local snap=$4 + + for s in 0 1 2 4 8 16 32; do + sleep ${s} + unprotect_snapshot ${cluster} ${pool} ${image} ${snap} && return 0 + done + return 1 +} + wait_for_snap_present() { local cluster=$1 @@ -1292,6 +1308,8 @@ enable_mirror() local mode=${4:-${MIRROR_IMAGE_MODE}} rbd --cluster=${cluster} mirror image enable ${pool}/${image} ${mode} + # Display image info including the global image id for debugging purpose + rbd --cluster=${cluster} info ${pool}/${image} } test_image_present() @@ -1389,6 +1407,58 @@ get_clone_format() }' } +list_omap_keys() +{ + local cluster=$1 + local pool=$2 + local obj_name=$3 + + rados --cluster ${cluster} -p ${pool} listomapkeys ${obj_name} +} + +count_omap_keys_with_filter() +{ + local cluster=$1 + local pool=$2 + local obj_name=$3 + local filter=$4 + + list_omap_keys ${cluster} ${pool} ${obj_name} | grep -c ${filter} +} + +wait_for_omap_keys() +{ + local cluster=$1 + local pool=$2 + local obj_name=$3 + local filter=$4 + + for s in 0 1 2 2 4 4 8 8 8 16 16 32; do + sleep $s + + set +e + test "$(count_omap_keys_with_filter ${cluster} ${pool} ${obj_name} ${filter})" = 0 + error_code=$? 
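The wait_for_omap_keys helper being added in this hunk is the synchronization point for the OMAP-leak checks introduced in the mirror suites below: it polls listomapkeys through count_omap_keys_with_filter on a fixed backoff schedule and succeeds only once no key matches the filter (the set +e/set -e bracket keeps the intermediate non-zero status from tripping the workunit's -e). A minimal Python sketch of the same poll-until-gone loop — the function names are mine, and it assumes the rados CLI is on PATH:

    import subprocess
    import time

    def count_omap_keys(cluster, pool, obj, substring):
        # count OMAP keys on `obj` whose name contains `substring`
        out = subprocess.run(
            ['rados', '--cluster', cluster, '-p', pool, 'listomapkeys', obj],
            check=True, capture_output=True, text=True).stdout
        return sum(substring in key for key in out.splitlines())

    def wait_for_omap_keys_gone(cluster, pool, obj, substring):
        # poll until no matching keys remain, mirroring wait_for_omap_keys
        for s in (0, 1, 2, 2, 4, 4, 8, 8, 8, 16, 16, 32):
            time.sleep(s)
            if count_omap_keys(cluster, pool, obj, substring) == 0:
                return True
        return False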
+ set -e + + if [ $error_code -eq 0 ]; then + return 0 + fi + done + + return 1 +} + +wait_for_image_in_omap() +{ + local cluster=$1 + local pool=$2 + + wait_for_omap_keys ${cluster} ${pool} rbd_mirroring status_global + wait_for_omap_keys ${cluster} ${pool} rbd_mirroring image_ + wait_for_omap_keys ${cluster} ${pool} rbd_mirror_leader image_map +} + # # Main # diff --git a/ceph/qa/workunits/rbd/rbd_mirror_journal.sh b/ceph/qa/workunits/rbd/rbd_mirror_journal.sh index 84fd2424f..56a8b13a9 100755 --- a/ceph/qa/workunits/rbd/rbd_mirror_journal.sh +++ b/ceph/qa/workunits/rbd/rbd_mirror_journal.sh @@ -119,6 +119,8 @@ if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then all_admin_daemons ${CLUSTER1} rbd mirror status fi +remove_image_retry ${CLUSTER2} ${POOL} ${image1} + testlog "TEST: test image rename" new_name="${image}_RENAMED" rename_image ${CLUSTER2} ${POOL} ${image} ${new_name} @@ -138,6 +140,18 @@ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' trash_restore ${CLUSTER2} ${POOL} ${image_id} wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image} +testlog "TEST: check if removed images' OMAP are removed (with rbd-mirror on one cluster)" +remove_image_retry ${CLUSTER2} ${POOL} ${image} + +wait_for_image_in_omap ${CLUSTER1} ${POOL} +wait_for_image_in_omap ${CLUSTER2} ${POOL} + +create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} +wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image} +write_image ${CLUSTER2} ${POOL} ${image} 100 +wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image} +wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' + testlog "TEST: failover and failback" start_mirrors ${CLUSTER2} @@ -216,6 +230,8 @@ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+stopp wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped' write_image ${CLUSTER1} ${POOL} ${force_promote_image} 100 write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100 +remove_image_retry ${CLUSTER1} ${POOL} ${force_promote_image} +remove_image_retry ${CLUSTER2} ${POOL} ${force_promote_image} testlog "TEST: cloned images" testlog " - default" @@ -240,6 +256,7 @@ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image} wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${clone_image} wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${clone_image} 'up+replaying' 'primary_position' compare_images ${POOL} ${clone_image} +remove_image_retry ${CLUSTER2} ${POOL} ${clone_image} testlog " - clone v1" clone_image ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}1 @@ -249,6 +266,10 @@ clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \ test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v1) = 1 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v1 test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v1) = 1 +remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v1 +remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}1 +unprotect_snapshot_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} +remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} testlog " - clone v2" parent_snap=snap_v2 @@ -277,6 +298,7 @@ test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}_v2 wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} 
${parent_image} ${parent_snap} +remove_image_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} testlog "TEST: data pool" dp_image=test_data_pool @@ -295,6 +317,7 @@ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${dp_image} 'up+replaying' 'prim compare_images ${POOL} ${dp_image}@snap1 compare_images ${POOL} ${dp_image}@snap2 compare_images ${POOL} ${dp_image} +remove_image_retry ${CLUSTER2} ${POOL} ${dp_image} testlog "TEST: disable mirroring / delete non-primary image" image2=test2 @@ -379,6 +402,12 @@ for i in `seq 1 20`; do done wait_for_snap_present ${CLUSTER1} ${POOL} ${image2} "${snap_name}_${i}" +unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap1' +unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap2' +for i in ${image2} ${image4}; do + remove_image_retry ${CLUSTER2} ${POOL} ${i} +done + testlog "TEST: disable mirror while daemon is stopped" stop_mirrors ${CLUSTER1} stop_mirrors ${CLUSTER2} @@ -415,6 +444,7 @@ remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${image} disable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image} wait_for_image_present ${CLUSTER1} ${POOL}/${NS1} ${image} 'deleted' wait_for_image_present ${CLUSTER1} ${POOL}/${NS2} ${image} 'deleted' +remove_image_retry ${CLUSTER2} ${POOL}/${NS2} ${image} testlog " - data pool" dp_image=test_data_pool @@ -428,6 +458,7 @@ write_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 100 wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${dp_image} wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${dp_image} 'up+replaying' 'primary_position' compare_images ${POOL}/${NS1} ${dp_image} +remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${dp_image} testlog "TEST: simple image resync" request_resync_image ${CLUSTER1} ${POOL} ${image} image_id @@ -460,6 +491,7 @@ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present' wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image} wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position' compare_images ${POOL} ${image} +remove_image_retry ${CLUSTER2} ${POOL} ${image} testlog "TEST: client disconnect" image=laggy @@ -531,6 +563,7 @@ disconnect_image ${CLUSTER2} ${POOL} ${image} test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})" wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image} wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected' +remove_image_retry ${CLUSTER2} ${POOL} ${image} testlog "TEST: split-brain" image=split-brain @@ -544,6 +577,12 @@ demote_image ${CLUSTER1} ${POOL} ${image} wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain' request_resync_image ${CLUSTER1} ${POOL} ${image} image_id wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position' +remove_image_retry ${CLUSTER2} ${POOL} ${image} + +testlog "TEST: check if removed images' OMAP are removed" +start_mirrors ${CLUSTER2} +wait_for_image_in_omap ${CLUSTER1} ${POOL} +wait_for_image_in_omap ${CLUSTER2} ${POOL} if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then # teuthology will trash the daemon diff --git a/ceph/qa/workunits/rbd/rbd_mirror_snapshot.sh b/ceph/qa/workunits/rbd/rbd_mirror_snapshot.sh index 645273902..0060440fb 100755 --- a/ceph/qa/workunits/rbd/rbd_mirror_snapshot.sh +++ b/ceph/qa/workunits/rbd/rbd_mirror_snapshot.sh @@ -122,6 +122,8 @@ if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then all_admin_daemons ${CLUSTER1} rbd mirror status fi +remove_image_retry ${CLUSTER2} ${POOL} ${image1} + testlog "TEST: test image rename" new_name="${image}_RENAMED" rename_image 
${CLUSTER2} ${POOL} ${image} ${new_name} @@ -144,6 +146,18 @@ trash_restore ${CLUSTER2} ${POOL} ${image_id} enable_mirror ${CLUSTER2} ${POOL} ${image} snapshot wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image} +testlog "TEST: check if removed images' OMAP are removed (with rbd-mirror on one cluster)" +remove_image_retry ${CLUSTER2} ${POOL} ${image} + +wait_for_image_in_omap ${CLUSTER1} ${POOL} +wait_for_image_in_omap ${CLUSTER2} ${POOL} + +create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} +wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image} +write_image ${CLUSTER2} ${POOL} ${image} 100 +wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image} +wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' + testlog "TEST: failover and failback" start_mirrors ${CLUSTER2} @@ -222,6 +236,8 @@ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+stopp wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped' write_image ${CLUSTER1} ${POOL} ${force_promote_image} 100 write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100 +remove_image_retry ${CLUSTER1} ${POOL} ${force_promote_image} +remove_image_retry ${CLUSTER2} ${POOL} ${force_promote_image} testlog "TEST: cloned images" testlog " - default" @@ -246,6 +262,7 @@ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image} wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${clone_image} wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${clone_image} 'up+replaying' compare_images ${POOL} ${clone_image} +remove_image_retry ${CLUSTER2} ${POOL} ${clone_image} testlog " - clone v1" clone_image_and_enable_mirror ${CLUSTER1} ${PARENT_POOL} ${parent_image} \ @@ -256,6 +273,10 @@ clone_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} \ test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v1) = 1 wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v1 test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v1) = 1 +remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v1 +remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}1 +unprotect_snapshot_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} +remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} testlog " - clone v2" parent_snap=snap_v2 @@ -288,6 +309,7 @@ mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}_v2 wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} +remove_image_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} testlog "TEST: data pool" dp_image=test_data_pool @@ -306,6 +328,7 @@ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${dp_image} 'up+replaying' compare_images ${POOL} ${dp_image}@snap1 compare_images ${POOL} ${dp_image}@snap2 compare_images ${POOL} ${dp_image} +remove_image_retry ${CLUSTER2} ${POOL} ${dp_image} testlog "TEST: disable mirroring / delete non-primary image" image2=test2 @@ -354,6 +377,12 @@ done mirror_image_snapshot ${CLUSTER2} ${POOL} ${image2} wait_for_snap_present ${CLUSTER1} ${POOL} ${image2} "${snap_name}_${i}" +unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap1' +unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap2' +for i in ${image2} ${image4}; do + remove_image_retry ${CLUSTER2} ${POOL} ${i} +done + testlog "TEST: disable mirror while daemon is stopped" stop_mirrors ${CLUSTER1} 
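One pattern runs through both mirror suites above: every teardown step that can race with the remote rbd-mirror daemon (remove_image_retry, unprotect_snapshot_retry) repeats the operation over the same fixed backoff schedule, 0 1 2 4 8 16 32, retrying until the peer drops its reference. Reduced to a sketch — the helper below and the operation passed to it are illustrative, not part of the suite:

    import time

    def retry_with_backoff(op, schedule=(0, 1, 2, 4, 8, 16, 32)):
        # generic form of unprotect_snapshot_retry: retry `op` over a
        # fixed schedule, reporting success as soon as one attempt works
        for s in schedule:
            time.sleep(s)
            try:
                op()
                return True
            except Exception:
                continue
        return False

    # e.g. retry_with_backoff(lambda: unprotect(cluster2, parent_pool,
    #                                           parent_image, parent_snap))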
stop_mirrors ${CLUSTER2} @@ -387,6 +416,7 @@ remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${image} disable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image} wait_for_image_present ${CLUSTER1} ${POOL}/${NS1} ${image} 'deleted' wait_for_image_present ${CLUSTER1} ${POOL}/${NS2} ${image} 'deleted' +remove_image_retry ${CLUSTER2} ${POOL}/${NS2} ${image} testlog " - data pool" dp_image=test_data_pool @@ -400,6 +430,7 @@ write_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 100 wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${dp_image} wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${dp_image} 'up+replaying' compare_images ${POOL}/${NS1} ${dp_image} +remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${dp_image} testlog "TEST: simple image resync" request_resync_image ${CLUSTER1} ${POOL} ${image} image_id @@ -432,6 +463,7 @@ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present' wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image} wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' compare_images ${POOL} ${image} +remove_image_retry ${CLUSTER2} ${POOL} ${image} testlog "TEST: split-brain" image=split-brain @@ -445,6 +477,12 @@ demote_image ${CLUSTER1} ${POOL} ${image} wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain' request_resync_image ${CLUSTER1} ${POOL} ${image} image_id wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' +remove_image_retry ${CLUSTER2} ${POOL} ${image} + +testlog "TEST: check if removed images' OMAP are removed" +start_mirrors ${CLUSTER2} +wait_for_image_in_omap ${CLUSTER1} ${POOL} +wait_for_image_in_omap ${CLUSTER2} ${POOL} if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then # teuthology will trash the daemon diff --git a/ceph/qa/workunits/rbd/rbd_mirror_stress.sh b/ceph/qa/workunits/rbd/rbd_mirror_stress.sh index a17ad75e1..cb79aba7e 100755 --- a/ceph/qa/workunits/rbd/rbd_mirror_stress.sh +++ b/ceph/qa/workunits/rbd/rbd_mirror_stress.sh @@ -214,3 +214,8 @@ do purge_snapshots ${CLUSTER2} ${POOL} ${image} remove_image_retry ${CLUSTER2} ${POOL} ${image} done + +testlog "TEST: check if removed images' OMAP are removed" + +wait_for_image_in_omap ${CLUSTER1} ${POOL} +wait_for_image_in_omap ${CLUSTER2} ${POOL} diff --git a/ceph/src/.git_version b/ceph/src/.git_version index 43145fdd5..1d4b77a74 100644 --- a/ceph/src/.git_version +++ b/ceph/src/.git_version @@ -1,2 +1,2 @@ -ee28fb57e47e9f88813e24bbf4c14496ca299d31 -16.2.6 +dd0603118f56ab514f133c8d2e3adfc983942503 +16.2.7 diff --git a/ceph/src/CMakeLists.txt b/ceph/src/CMakeLists.txt index b6fa63366..cc53f9a72 100644 --- a/ceph/src/CMakeLists.txt +++ b/ceph/src/CMakeLists.txt @@ -526,6 +526,7 @@ if(WITH_BLUESTORE_PMEM OR WITH_RBD_RWL) if(WITH_RBD_RWL) find_package(pmem REQUIRED COMPONENTS pmemobj) endif() + find_package(pmem 1.10 REQUIRED COMPONENTS ${pmem_COMPONENTS}) else() include(Buildpmem) build_pmem() diff --git a/ceph/src/auth/cephx/CephxKeyServer.cc b/ceph/src/auth/cephx/CephxKeyServer.cc index adfe74d2b..ac83b7f3e 100644 --- a/ceph/src/auth/cephx/CephxKeyServer.cc +++ b/ceph/src/auth/cephx/CephxKeyServer.cc @@ -72,8 +72,11 @@ bool KeyServerData::get_service_secret(CephContext *cct, uint32_t service_id, uint64_t secret_id, CryptoKey& secret) const { auto iter = rotating_secrets.find(service_id); - if (iter == rotating_secrets.end()) + if (iter == rotating_secrets.end()) { + ldout(cct, 10) << __func__ << " no rotating_secrets for service " << service_id + << " " << ceph_entity_type_name(service_id) << dendl; return false; + } const 
RotatingSecrets& secrets = iter->second; auto riter = secrets.secrets.find(secret_id); @@ -146,31 +149,13 @@ int KeyServer::start_server() { std::scoped_lock l{lock}; - _check_rotating_secrets(); _dump_rotating_secrets(); return 0; } -bool KeyServer::_check_rotating_secrets() +void KeyServer::dump() { - ldout(cct, 10) << "_check_rotating_secrets" << dendl; - - int added = 0; - added += _rotate_secret(CEPH_ENTITY_TYPE_AUTH); - added += _rotate_secret(CEPH_ENTITY_TYPE_MON); - added += _rotate_secret(CEPH_ENTITY_TYPE_OSD); - added += _rotate_secret(CEPH_ENTITY_TYPE_MDS); - added += _rotate_secret(CEPH_ENTITY_TYPE_MGR); - - if (added) { - ldout(cct, 10) << __func__ << " added " << added << dendl; - data.rotating_ver++; - //data.next_rotating_time = ceph_clock_now(cct); - //data.next_rotating_time += std::min(cct->_conf->auth_mon_ticket_ttl, cct->_conf->auth_service_ticket_ttl); - _dump_rotating_secrets(); - return true; - } - return false; + _dump_rotating_secrets(); } void KeyServer::_dump_rotating_secrets() @@ -189,9 +174,9 @@ void KeyServer::_dump_rotating_secrets() } } -int KeyServer::_rotate_secret(uint32_t service_id) +int KeyServer::_rotate_secret(uint32_t service_id, KeyServerData &pending_data) { - RotatingSecrets& r = data.rotating_secrets[service_id]; + RotatingSecrets& r = pending_data.rotating_secrets[service_id]; int added = 0; utime_t now = ceph_clock_now(); double ttl = service_id == CEPH_ENTITY_TYPE_AUTH ? cct->_conf->auth_mon_ticket_ttl : cct->_conf->auth_service_ticket_ttl; @@ -356,19 +341,30 @@ void KeyServer::encode_plaintext(bufferlist &bl) bl.append(os.str()); } -bool KeyServer::updated_rotating(bufferlist& rotating_bl, version_t& rotating_ver) +bool KeyServer::prepare_rotating_update(bufferlist& rotating_bl) { std::scoped_lock l{lock}; + ldout(cct, 20) << __func__ << " before: data.rotating_ver=" << data.rotating_ver + << dendl; - _check_rotating_secrets(); + KeyServerData pending_data(nullptr); + pending_data.rotating_ver = data.rotating_ver + 1; + pending_data.rotating_secrets = data.rotating_secrets; - if (data.rotating_ver <= rotating_ver) + int added = 0; + added += _rotate_secret(CEPH_ENTITY_TYPE_AUTH, pending_data); + added += _rotate_secret(CEPH_ENTITY_TYPE_MON, pending_data); + added += _rotate_secret(CEPH_ENTITY_TYPE_OSD, pending_data); + added += _rotate_secret(CEPH_ENTITY_TYPE_MDS, pending_data); + added += _rotate_secret(CEPH_ENTITY_TYPE_MGR, pending_data); + if (!added) { return false; - - data.encode_rotating(rotating_bl); - - rotating_ver = data.rotating_ver; + } + ldout(cct, 20) << __func__ << " after: pending_data.rotating_ver=" + << pending_data.rotating_ver + << dendl; + pending_data.encode_rotating(rotating_bl); return true; } diff --git a/ceph/src/auth/cephx/CephxKeyServer.h b/ceph/src/auth/cephx/CephxKeyServer.h index 3576c42c2..945a7f4dc 100644 --- a/ceph/src/auth/cephx/CephxKeyServer.h +++ b/ceph/src/auth/cephx/CephxKeyServer.h @@ -195,8 +195,7 @@ class KeyServer : public KeyStore { KeyServerData data; mutable ceph::mutex lock; - int _rotate_secret(uint32_t service_id); - bool _check_rotating_secrets(); + int _rotate_secret(uint32_t service_id, KeyServerData &pending_data); void _dump_rotating_secrets(); int _build_session_auth_info(uint32_t service_id, const AuthTicket& parent_ticket, @@ -215,6 +214,8 @@ public: int start_server(); void rotate_timeout(double timeout); + void dump(); + int build_session_auth_info(uint32_t service_id, const AuthTicket& parent_ticket, CephXSessionAuthInfo& info); @@ -297,7 +298,7 @@ public: } } - bool 
updated_rotating(ceph::buffer::list& rotating_bl, version_t& rotating_ver); + bool prepare_rotating_update(ceph::buffer::list& rotating_bl); bool get_rotating_encrypted(const EntityName& name, ceph::buffer::list& enc_bl) const; diff --git a/ceph/src/ceph-volume/CMakeLists.txt b/ceph/src/ceph-volume/CMakeLists.txt index 4224499c4..9166553dc 100644 --- a/ceph/src/ceph-volume/CMakeLists.txt +++ b/ceph/src/ceph-volume/CMakeLists.txt @@ -13,7 +13,7 @@ set(CEPH_VOLUME_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/ceph-volume-virtualenv) add_custom_command( OUTPUT ${CEPH_VOLUME_VIRTUALENV}/bin/python - COMMAND ${CMAKE_SOURCE_DIR}/src/tools/setup-virtualenv.sh --python=${Python_EXECUTABLE} ${CEPH_VOLUME_VIRTUALENV} + COMMAND ${CMAKE_SOURCE_DIR}/src/tools/setup-virtualenv.sh --python=${Python3_EXECUTABLE} ${CEPH_VOLUME_VIRTUALENV} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/ceph-volume COMMENT "ceph-volume venv is being created") diff --git a/ceph/src/ceph-volume/ceph_volume/api/lvm.py b/ceph/src/ceph-volume/ceph_volume/api/lvm.py index 0f38249e1..e5551206e 100644 --- a/ceph/src/ceph-volume/ceph_volume/api/lvm.py +++ b/ceph/src/ceph-volume/ceph_volume/api/lvm.py @@ -376,8 +376,12 @@ class PVolume(object): self.set_tag(k, v) # after setting all the tags, refresh them for the current object, use the # pv_* identifiers to filter because those shouldn't change - pv_object = self.get_first_pv(filter={'pv_name': self.pv_name, - 'pv_uuid': self.pv_uuid}) + pv_object = self.get_single_pv(filter={'pv_name': self.pv_name, + 'pv_uuid': self.pv_uuid}) + + if not pv_object: + raise RuntimeError('No PV was found.') + self.tags = pv_object.tags def set_tag(self, key, value): @@ -471,15 +475,21 @@ def get_pvs(fields=PV_FIELDS, filters='', tags=None): return [PVolume(**pv_report) for pv_report in pvs_report] -def get_first_pv(fields=PV_FIELDS, filters=None, tags=None): +def get_single_pv(fields=PV_FIELDS, filters=None, tags=None): """ - Wrapper of get_pv meant to be a convenience method to avoid the phrase:: + Wrapper of get_pvs() meant to be a convenience method to avoid the phrase:: pvs = get_pvs() if len(pvs) >= 1: pv = pvs[0] """ pvs = get_pvs(fields=fields, filters=filters, tags=tags) - return pvs[0] if len(pvs) > 0 else [] + + if len(pvs) == 0: + return None + if len(pvs) > 1: + raise RuntimeError('Filters {} matched more than 1 PV present on this host.'.format(str(filters))) + + return pvs[0] ################################ @@ -650,7 +660,7 @@ def create_vg(devices, name=None, name_prefix=None): name] + devices ) - return get_first_vg(filters={'vg_name': name}) + return get_single_vg(filters={'vg_name': name}) def extend_vg(vg, devices): @@ -674,7 +684,7 @@ def extend_vg(vg, devices): vg.name] + devices ) - return get_first_vg(filters={'vg_name': vg.name}) + return get_single_vg(filters={'vg_name': vg.name}) def reduce_vg(vg, devices): @@ -696,7 +706,7 @@ def reduce_vg(vg, devices): vg.name] + devices ) - return get_first_vg(filter={'vg_name': vg.name}) + return get_single_vg(filter={'vg_name': vg.name}) def remove_vg(vg_name): @@ -742,15 +752,21 @@ def get_vgs(fields=VG_FIELDS, filters='', tags=None): return [VolumeGroup(**vg_report) for vg_report in vgs_report] -def get_first_vg(fields=VG_FIELDS, filters=None, tags=None): +def get_single_vg(fields=VG_FIELDS, filters=None, tags=None): """ - Wrapper of get_vg meant to be a convenience method to avoid the phrase:: + Wrapper of get_vgs() meant to be a convenience method to avoid the phrase:: vgs = get_vgs() if len(vgs) >= 1: vg = vgs[0] """ vgs = get_vgs(fields=fields, 
filters=filters, tags=tags) - return vgs[0] if len(vgs) > 0 else [] + + if len(vgs) == 0: + return None + if len(vgs) > 1: + raise RuntimeError('Filters {} matched more than 1 VG present on this host.'.format(str(filters))) + + return vgs[0] def get_device_vgs(device, name_prefix=''): @@ -970,7 +986,7 @@ def create_lv(name_prefix, ] process.run(command) - lv = get_first_lv(filters={'lv_name': name, 'vg_name': vg.vg_name}) + lv = get_single_lv(filters={'lv_name': name, 'vg_name': vg.vg_name}) if tags is None: tags = { @@ -1095,15 +1111,21 @@ def get_lvs(fields=LV_FIELDS, filters='', tags=None): return [Volume(**lv_report) for lv_report in lvs_report] -def get_first_lv(fields=LV_FIELDS, filters=None, tags=None): +def get_single_lv(fields=LV_FIELDS, filters=None, tags=None): """ - Wrapper of get_lv meant to be a convenience method to avoid the phrase:: + Wrapper of get_lvs() meant to be a convenience method to avoid the phrase:: lvs = get_lvs() if len(lvs) >= 1: lv = lvs[0] """ lvs = get_lvs(fields=fields, filters=filters, tags=tags) - return lvs[0] if len(lvs) > 0 else [] + + if len(lvs) == 0: + return None + if len(lvs) > 1: + raise RuntimeError('Filters {} matched more than 1 LV present on this host.'.format(str(filters))) + + return lvs[0] def get_lv_by_name(name): diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/activate.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/activate.py index c864b0e9f..70fceeab6 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/activate.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/activate.py @@ -245,7 +245,7 @@ class Activate(object): terminal.warning('Verify OSDs are present with "ceph-volume lvm list"') return for osd_fsid, osd_id in osds.items(): - if systemctl.osd_is_active(osd_id): + if not args.no_systemd and systemctl.osd_is_active(osd_id): terminal.warning( 'OSD ID %s FSID %s process is active. 
Skipping activation' % (osd_id, osd_fsid) ) diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py index e64e4b64e..c1b23b448 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py @@ -112,35 +112,55 @@ def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, ar requested_size = get_size_fct(lv_format=False) ret = [] - for dev in devices: - if not dev.available_lvm: - continue - # any LV present is considered a taken slot - occupied_slots = len(dev.lvs) - # this only looks at the first vg on device, unsure if there is a better - # way - dev_size = dev.vg_size[0] - abs_size = disk.Size(b=int(dev_size / requested_slots)) - free_size = dev.vg_free[0] - relative_size = int(abs_size) / dev_size - if requested_size: - if requested_size <= abs_size: - abs_size = requested_size - relative_size = int(abs_size) / dev_size - else: - mlogger.error( - '{} was requested for {}, but only {} can be fulfilled'.format( - requested_size, - '{}_size'.format(type_), - abs_size, - )) - exit(1) - while abs_size <= free_size and len(ret) < new_osds and occupied_slots < fast_slots_per_device: - free_size -= abs_size.b - occupied_slots += 1 - ret.append((dev.path, relative_size, abs_size, requested_slots)) + vg_device_map = group_devices_by_vg(devices) + for vg_devices in vg_device_map.values(): + for dev in vg_devices: + if not dev.available_lvm: + continue + # any LV present is considered a taken slot + occupied_slots = len(dev.lvs) + # prior to v15.2.8, db/wal deployments were grouping multiple fast devices into single VGs - we need to + # multiply requested_slots (per device) by the number of devices in the VG in order to ensure that + # abs_size is calculated correctly from vg_size + slots_for_vg = len(vg_devices) * requested_slots + dev_size = dev.vg_size[0] + # this only looks at the first vg on device, unsure if there is a better + # way + abs_size = disk.Size(b=int(dev_size / slots_for_vg)) + free_size = dev.vg_free[0] + relative_size = int(abs_size) / dev_size + if requested_size: + if requested_size <= abs_size: + abs_size = requested_size + relative_size = int(abs_size) / dev_size + else: + mlogger.error( + '{} was requested for {}, but only {} can be fulfilled'.format( + requested_size, + '{}_size'.format(type_), + abs_size, + )) + exit(1) + while abs_size <= free_size and len(ret) < new_osds and occupied_slots < fast_slots_per_device: + free_size -= abs_size.b + occupied_slots += 1 + ret.append((dev.path, relative_size, abs_size, requested_slots)) return ret +def group_devices_by_vg(devices): + result = dict() + result['unused_devices'] = [] + for dev in devices: + if len(dev.vgs) > 0: + # already using assumption that a PV only belongs to single VG in other places + vg_name = dev.vgs[0].name + if vg_name in result: + result[vg_name].append(dev) + else: + result[vg_name] = [dev] + else: + result['unused_devices'].append(dev) + return result def get_lvm_fast_allocs(lvs): return [("{}/{}".format(d.vg_name, d.lv_name), 100.0, @@ -319,6 +339,7 @@ class Batch(object): nargs='*', default=[], help='Reuse existing OSD ids', + type=common.valid_osd_id ) self.args = parser.parse_args(argv) self.parser = parser diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/common.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/common.py index 06369e479..752f354f3 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/common.py +++ 
b/ceph/src/ceph-volume/ceph_volume/devices/lvm/common.py @@ -3,6 +3,8 @@ from ceph_volume import process, conf from ceph_volume import terminal import argparse +def valid_osd_id(val): + return str(int(val)) def rollback_osd(args, osd_id=None): """ @@ -56,6 +58,7 @@ common_args = { '--osd-id': { 'help': 'Reuse an existing OSD id', 'default': None, + 'type': valid_osd_id, }, '--osd-fsid': { 'help': 'Reuse an existing OSD fsid', diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py index 886b9f7b4..dc982f153 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py @@ -8,6 +8,7 @@ from ceph_volume.util.device import Device from ceph_volume import decorators, terminal, process from ceph_volume.api import lvm as api from ceph_volume.systemd import systemctl +from ceph_volume.devices.lvm.common import valid_osd_id logger = logging.getLogger(__name__) @@ -275,7 +276,7 @@ class Migrate(object): # (in the order of precedence, stop on the first match) # if source list has DB volume - target device replaces it. # if source list has WAL volume - target device replace it. - # if source list has slow volume only - operation isn’t permitted, + # if source list has slow volume only - operation isn't permitted, # requires explicit allocation via new-db/new-wal command.detects which def get_target_type_by_source(self, devices): ret = None @@ -413,7 +414,7 @@ class Migrate(object): target_lv = api.get_lv_by_fullname(self.args.target) if not target_lv: mlogger.error( - 'Target path "{}" is not a Logical Volume'.formaat( + 'Target path "{}" is not a Logical Volume'.format( self.args.target)) raise SystemExit( 'Unable to migrate to : {}'.format(self.args.target)) @@ -447,6 +448,7 @@ class Migrate(object): '--osd-id', required=True, help='Specify an OSD ID to detect associated devices for zapping', + type=valid_osd_id ) parser.add_argument( @@ -545,6 +547,7 @@ class NewVolume(object): '--osd-id', required=True, help='Specify an OSD ID to attach new volume to', + type=valid_osd_id, ) parser.add_argument( diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py index f0c3959a6..2f715fdba 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py @@ -151,8 +151,8 @@ class Prepare(object): try: vg_name, lv_name = device_name.split('/') - lv = api.get_first_lv(filters={'lv_name': lv_name, - 'vg_name': vg_name}) + lv = api.get_single_lv(filters={'lv_name': lv_name, + 'vg_name': vg_name}) except ValueError: lv = None @@ -240,8 +240,8 @@ class Prepare(object): try: vgname, lvname = self.args.data.split('/') - lv = api.get_first_lv(filters={'lv_name': lvname, - 'vg_name': vgname}) + lv = api.get_single_lv(filters={'lv_name': lvname, + 'vg_name': vgname}) except ValueError: lv = None @@ -325,7 +325,7 @@ class Prepare(object): try: vg_name, lv_name = self.args.data.split('/') - data_lv = api.get_first_lv(filters={'lv_name': lv_name, + data_lv = api.get_single_lv(filters={'lv_name': lv_name, 'vg_name': vg_name}) except ValueError: data_lv = None @@ -340,8 +340,8 @@ class Prepare(object): data_lv.set_tags(tags) if not journal_device.startswith('/'): # we got a journal lv, set rest of the tags - api.get_first_lv(filters={'lv_name': lv_name, - 'vg_name': vg_name}).set_tags(tags) + api.get_single_lv(filters={'lv_name': lv_name, + 'vg_name': 
vg_name}).set_tags(tags) prepare_filestore( data_lv.lv_path, @@ -354,8 +354,8 @@ class Prepare(object): elif self.args.bluestore: try: vg_name, lv_name = self.args.data.split('/') - block_lv = api.get_first_lv(filters={'lv_name': lv_name, - 'vg_name': vg_name}) + block_lv = api.get_single_lv(filters={'lv_name': lv_name, + 'vg_name': vg_name}) except ValueError: block_lv = None diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py index 21b54b6c0..20023a27c 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py @@ -10,6 +10,7 @@ from ceph_volume.api import lvm as api from ceph_volume.util import system, encryption, disk, arg_validators, str_to_int, merge_dict from ceph_volume.util.device import Device from ceph_volume.systemd import systemctl +from ceph_volume.devices.lvm.common import valid_osd_id logger = logging.getLogger(__name__) mlogger = terminal.MultiLogger(__name__) @@ -166,8 +167,8 @@ class Zap(object): Device examples: vg-name/lv-name, /dev/vg-name/lv-name Requirements: Must be a logical volume (LV) """ - lv = api.get_first_lv(filters={'lv_name': device.lv_name, 'vg_name': - device.vg_name}) + lv = api.get_single_lv(filters={'lv_name': device.lv_name, 'vg_name': + device.vg_name}) self.unmount_lv(lv) wipefs(device.abspath) @@ -231,7 +232,7 @@ class Zap(object): mlogger.info('Zapping lvm member {}. lv_path is {}'.format(device.abspath, lv.lv_path)) self.zap_lv(Device(lv.lv_path)) else: - vg = api.get_first_vg(filters={'vg_name': lv.vg_name}) + vg = api.get_single_vg(filters={'vg_name': lv.vg_name}) if vg: mlogger.info('Found empty VG {}, removing'.format(vg.vg_name)) api.remove_vg(vg.vg_name) @@ -376,6 +377,7 @@ class Zap(object): parser.add_argument( '--osd-id', + type=valid_osd_id, help='Specify an OSD ID to detect associated devices for zapping', ) diff --git a/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py b/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py index 34da0962b..70e5256d2 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py @@ -80,7 +80,7 @@ class Scan(object): device = os.readlink(path) else: device = path - lvm_device = lvm.get_first_lv(filters={'lv_path': device}) + lvm_device = lvm.get_single_lv(filters={'lv_path': device}) if lvm_device: device_uuid = lvm_device.lv_uuid else: diff --git a/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py b/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py index f01ceb4f3..16e586e02 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py @@ -191,23 +191,23 @@ class TestCreateLV(object): @patch('ceph_volume.api.lvm.process.run') @patch('ceph_volume.api.lvm.process.call') - @patch('ceph_volume.api.lvm.get_first_lv') - def test_uses_size(self, m_get_first_lv, m_call, m_run, monkeypatch): - m_get_first_lv.return_value = self.foo_volume + @patch('ceph_volume.api.lvm.get_single_lv') + def test_uses_size(self, m_get_single_lv, m_call, m_run, monkeypatch): + m_get_single_lv.return_value = self.foo_volume api.create_lv('foo', 0, vg=self.foo_group, size=419430400, tags={'ceph.type': 'data'}) expected = ['lvcreate', '--yes', '-l', '100', '-n', 'foo-0', 'foo_group'] m_run.assert_called_with(expected) @patch('ceph_volume.api.lvm.process.run') @patch('ceph_volume.api.lvm.process.call') - @patch('ceph_volume.api.lvm.get_first_lv') - def 
test_uses_size_adjust_if_1percent_over(self, m_get_first_lv, m_call, m_run, monkeypatch): + @patch('ceph_volume.api.lvm.get_single_lv') + def test_uses_size_adjust_if_1percent_over(self, m_get_single_lv, m_call, m_run, monkeypatch): foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='') foo_group = api.VolumeGroup(vg_name='foo_group', vg_extent_size="4194304", vg_extent_count="1000", vg_free_count="1000") - m_get_first_lv.return_value = foo_volume + m_get_single_lv.return_value = foo_volume # 423624704 should be just under 1% off of the available size 419430400 api.create_lv('foo', 0, vg=foo_group, size=4232052736, tags={'ceph.type': 'data'}) expected = ['lvcreate', '--yes', '-l', '1000', '-n', 'foo-0', 'foo_group'] @@ -215,17 +215,17 @@ class TestCreateLV(object): @patch('ceph_volume.api.lvm.process.run') @patch('ceph_volume.api.lvm.process.call') - @patch('ceph_volume.api.lvm.get_first_lv') - def test_uses_size_too_large(self, m_get_first_lv, m_call, m_run, monkeypatch): - m_get_first_lv.return_value = self.foo_volume + @patch('ceph_volume.api.lvm.get_single_lv') + def test_uses_size_too_large(self, m_get_single_lv, m_call, m_run, monkeypatch): + m_get_single_lv.return_value = self.foo_volume with pytest.raises(RuntimeError): api.create_lv('foo', 0, vg=self.foo_group, size=5368709120, tags={'ceph.type': 'data'}) @patch('ceph_volume.api.lvm.process.run') @patch('ceph_volume.api.lvm.process.call') - @patch('ceph_volume.api.lvm.get_first_lv') - def test_uses_extents(self, m_get_first_lv, m_call, m_run, monkeypatch): - m_get_first_lv.return_value = self.foo_volume + @patch('ceph_volume.api.lvm.get_single_lv') + def test_uses_extents(self, m_get_single_lv, m_call, m_run, monkeypatch): + m_get_single_lv.return_value = self.foo_volume api.create_lv('foo', 0, vg=self.foo_group, extents='50', tags={'ceph.type': 'data'}) expected = ['lvcreate', '--yes', '-l', '50', '-n', 'foo-0', 'foo_group'] m_run.assert_called_with(expected) @@ -235,18 +235,18 @@ class TestCreateLV(object): (3, 33),]) @patch('ceph_volume.api.lvm.process.run') @patch('ceph_volume.api.lvm.process.call') - @patch('ceph_volume.api.lvm.get_first_lv') - def test_uses_slots(self, m_get_first_lv, m_call, m_run, monkeypatch, test_input, expected): - m_get_first_lv.return_value = self.foo_volume + @patch('ceph_volume.api.lvm.get_single_lv') + def test_uses_slots(self, m_get_single_lv, m_call, m_run, monkeypatch, test_input, expected): + m_get_single_lv.return_value = self.foo_volume api.create_lv('foo', 0, vg=self.foo_group, slots=test_input, tags={'ceph.type': 'data'}) expected = ['lvcreate', '--yes', '-l', str(expected), '-n', 'foo-0', 'foo_group'] m_run.assert_called_with(expected) @patch('ceph_volume.api.lvm.process.run') @patch('ceph_volume.api.lvm.process.call') - @patch('ceph_volume.api.lvm.get_first_lv') - def test_uses_all(self, m_get_first_lv, m_call, m_run, monkeypatch): - m_get_first_lv.return_value = self.foo_volume + @patch('ceph_volume.api.lvm.get_single_lv') + def test_uses_all(self, m_get_single_lv, m_call, m_run, monkeypatch): + m_get_single_lv.return_value = self.foo_volume api.create_lv('foo', 0, vg=self.foo_group, tags={'ceph.type': 'data'}) expected = ['lvcreate', '--yes', '-l', '100%FREE', '-n', 'foo-0', 'foo_group'] m_run.assert_called_with(expected) @@ -254,9 +254,9 @@ class TestCreateLV(object): @patch('ceph_volume.api.lvm.process.run') @patch('ceph_volume.api.lvm.process.call') @patch('ceph_volume.api.lvm.Volume.set_tags') - @patch('ceph_volume.api.lvm.get_first_lv') - def 
test_calls_to_set_tags_default(self, m_get_first_lv, m_set_tags, m_call, m_run, monkeypatch): - m_get_first_lv.return_value = self.foo_volume + @patch('ceph_volume.api.lvm.get_single_lv') + def test_calls_to_set_tags_default(self, m_get_single_lv, m_set_tags, m_call, m_run, monkeypatch): + m_get_single_lv.return_value = self.foo_volume api.create_lv('foo', 0, vg=self.foo_group) tags = { "ceph.osd_id": "null", @@ -269,9 +269,9 @@ class TestCreateLV(object): @patch('ceph_volume.api.lvm.process.run') @patch('ceph_volume.api.lvm.process.call') @patch('ceph_volume.api.lvm.Volume.set_tags') - @patch('ceph_volume.api.lvm.get_first_lv') - def test_calls_to_set_tags_arg(self, m_get_first_lv, m_set_tags, m_call, m_run, monkeypatch): - m_get_first_lv.return_value = self.foo_volume + @patch('ceph_volume.api.lvm.get_single_lv') + def test_calls_to_set_tags_arg(self, m_get_single_lv, m_set_tags, m_call, m_run, monkeypatch): + m_get_single_lv.return_value = self.foo_volume api.create_lv('foo', 0, vg=self.foo_group, tags={'ceph.type': 'data'}) tags = { "ceph.type": "data", @@ -283,10 +283,10 @@ class TestCreateLV(object): @patch('ceph_volume.api.lvm.process.call') @patch('ceph_volume.api.lvm.get_device_vgs') @patch('ceph_volume.api.lvm.create_vg') - @patch('ceph_volume.api.lvm.get_first_lv') - def test_create_vg(self, m_get_first_lv, m_create_vg, m_get_device_vgs, m_call, + @patch('ceph_volume.api.lvm.get_single_lv') + def test_create_vg(self, m_get_single_lv, m_create_vg, m_get_device_vgs, m_call, m_run, monkeypatch): - m_get_first_lv.return_value = self.foo_volume + m_get_single_lv.return_value = self.foo_volume m_get_device_vgs.return_value = [] api.create_lv('foo', 0, device='dev/foo', size='5G', tags={'ceph.type': 'data'}) m_create_vg.assert_called_with('dev/foo', name_prefix='ceph') @@ -377,19 +377,19 @@ class TestExtendVG(object): self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='') def test_uses_single_device_in_list(self, monkeypatch, fake_run): - monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True) api.extend_vg(self.foo_volume, ['/dev/sda']) expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda'] assert fake_run.calls[0]['args'][0] == expected def test_uses_single_device(self, monkeypatch, fake_run): - monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True) api.extend_vg(self.foo_volume, '/dev/sda') expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda'] assert fake_run.calls[0]['args'][0] == expected def test_uses_multiple_devices(self, monkeypatch, fake_run): - monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True) api.extend_vg(self.foo_volume, ['/dev/sda', '/dev/sdb']) expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb'] assert fake_run.calls[0]['args'][0] == expected @@ -401,19 +401,19 @@ class TestReduceVG(object): self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='') def test_uses_single_device_in_list(self, monkeypatch, fake_run): - monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True) api.reduce_vg(self.foo_volume, ['/dev/sda']) expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda'] assert fake_run.calls[0]['args'][0] == expected def test_uses_single_device(self, monkeypatch, fake_run): - monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + 
monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True) api.reduce_vg(self.foo_volume, '/dev/sda') expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda'] assert fake_run.calls[0]['args'][0] == expected def test_uses_multiple_devices(self, monkeypatch, fake_run): - monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True) api.reduce_vg(self.foo_volume, ['/dev/sda', '/dev/sdb']) expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb'] assert fake_run.calls[0]['args'][0] == expected @@ -425,28 +425,28 @@ class TestCreateVG(object): self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='') def test_no_name(self, monkeypatch, fake_run): - monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True) api.create_vg('/dev/sda') result = fake_run.calls[0]['args'][0] assert '/dev/sda' in result assert result[-2].startswith('ceph-') def test_devices_list(self, monkeypatch, fake_run): - monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True) api.create_vg(['/dev/sda', '/dev/sdb'], name='ceph') result = fake_run.calls[0]['args'][0] expected = ['vgcreate', '--force', '--yes', 'ceph', '/dev/sda', '/dev/sdb'] assert result == expected def test_name_prefix(self, monkeypatch, fake_run): - monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True) api.create_vg('/dev/sda', name_prefix='master') result = fake_run.calls[0]['args'][0] assert '/dev/sda' in result assert result[-2].startswith('master-') def test_specific_name(self, monkeypatch, fake_run): - monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True) api.create_vg('/dev/sda', name='master') result = fake_run.calls[0]['args'][0] assert '/dev/sda' in result @@ -780,91 +780,106 @@ class TestGetLVs(object): assert api.get_lvs() == [] -class TestGetFirstPV(object): +class TestGetSinglePV(object): - def test_get_first_pv(self, monkeypatch): - pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={}, - vg_name='vg1') - pv2 = api.PVolume(pv_name='/dev/sdb', pv_uuid='0001', pv_tags={}, - vg_name='vg2') - stdout = ['{};{};{};{};;'.format(pv1.pv_name, pv1.pv_tags, pv1.pv_uuid, pv1.vg_name), - '{};{};{};{};;'.format(pv2.pv_name, pv2.pv_tags, pv2.pv_uuid, pv2.vg_name)] - monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + @patch('ceph_volume.devices.lvm.prepare.api.get_pvs') + def test_get_single_pv_multiple_matches_raises_runtimeerror(self, m_get_pvs): + fake_pvs = [] + fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={})) + fake_pvs.append(api.PVolume(pv_name='/dev/sdb', pv_tags={})) - pv_ = api.get_first_pv() - assert isinstance(pv_, api.PVolume) - assert pv_.pv_name == pv1.pv_name + m_get_pvs.return_value = fake_pvs - def test_get_first_pv_single_pv(self, monkeypatch): - pv = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={}, - vg_name='vg1') - stdout = ['{};;;;;;'.format(pv.pv_name)] - monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + with pytest.raises(RuntimeError) as e: + api.get_single_pv() + assert "matched more than 1 PV present on this host." 
in str(e.value) - pv_ = api.get_first_pv() - assert isinstance(pv_, api.PVolume) - assert pv_.pv_name == pv.pv_name + @patch('ceph_volume.devices.lvm.prepare.api.get_pvs') + def test_get_single_pv_no_match_returns_none(self, m_get_pvs): + m_get_pvs.return_value = [] - def test_get_first_pv_empty(self, monkeypatch): - monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0)) - assert api.get_first_pv() == [] + pv = api.get_single_pv() + assert pv == None + @patch('ceph_volume.devices.lvm.prepare.api.get_pvs') + def test_get_single_pv_one_match(self, m_get_pvs): + fake_pvs = [] + fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={})) + m_get_pvs.return_value = fake_pvs -class TestGetFirstVG(object): + pv = api.get_single_pv() - def test_get_first_vg(self, monkeypatch): - vg1 = api.VolumeGroup(vg_name='vg1') - vg2 = api.VolumeGroup(vg_name='vg2') - stdout = ['{};;;;;;'.format(vg1.vg_name), '{};;;;;;'.format(vg2.vg_name)] - monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + assert isinstance(pv, api.PVolume) + assert pv.name == '/dev/sda' - vg_ = api.get_first_vg() - assert isinstance(vg_, api.VolumeGroup) - assert vg_.vg_name == vg1.vg_name - def test_get_first_vg_single_vg(self, monkeypatch): - vg = api.VolumeGroup(vg_name='vg') - stdout = ['{};;;;;;'.format(vg.vg_name)] - monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) +class TestGetSingleVG(object): - vg_ = api.get_first_vg() - assert isinstance(vg_, api.VolumeGroup) - assert vg_.vg_name == vg.vg_name + @patch('ceph_volume.devices.lvm.prepare.api.get_vgs') + def test_get_single_vg_multiple_matches_raises_runtimeerror(self, m_get_vgs): + fake_vgs = [] + fake_vgs.append(api.VolumeGroup(vg_name='vg1')) + fake_vgs.append(api.VolumeGroup(vg_name='vg2')) - def test_get_first_vg_empty(self, monkeypatch): - monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0)) - vg_ = api.get_first_vg() - assert vg_ == [] + m_get_vgs.return_value = fake_vgs + with pytest.raises(RuntimeError) as e: + api.get_single_vg() + assert "matched more than 1 VG present on this host." 
in str(e.value) -class TestGetFirstLV(object): + @patch('ceph_volume.devices.lvm.prepare.api.get_vgs') + def test_get_single_vg_no_match_returns_none(self, m_get_vgs): + m_get_vgs.return_value = [] - def test_get_first_lv(self, monkeypatch): - lv1 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg1/lv1', - lv_name='lv1', vg_name='vg1') - lv2 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg2/lv2', - lv_name='lv2', vg_name='vg2') - stdout = ['{};{};{};{}'.format(lv1.lv_tags, lv1.lv_path, lv1.lv_name, - lv1.vg_name), - '{};{};{};{}'.format(lv2.lv_tags, lv2.lv_path, lv2.lv_name, - lv2.vg_name)] - monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + vg = api.get_single_vg() + assert vg == None - lv_ = api.get_first_lv() - assert isinstance(lv_, api.Volume) - assert lv_.lv_name == lv1.lv_name + @patch('ceph_volume.devices.lvm.prepare.api.get_vgs') + def test_get_single_vg_one_match(self, m_get_vgs): + fake_vgs = [] + fake_vgs.append(api.VolumeGroup(vg_name='vg1')) + m_get_vgs.return_value = fake_vgs - def test_get_first_lv_single_lv(self, monkeypatch): - stdout = ['ceph.type=data;/dev/vg/lv;lv;vg'] - monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) - lv = api.Volume(lv_tags='ceph.type=data', - lv_path='/dev/vg/lv', - lv_name='lv', vg_name='vg') + vg = api.get_single_vg() - lv_ = api.get_first_lv() - assert isinstance(lv_, api.Volume) - assert lv_.lv_name == lv.lv_name + assert isinstance(vg, api.VolumeGroup) + assert vg.name == 'vg1' - def test_get_first_lv_empty(self, monkeypatch): - monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0)) - assert api.get_lvs() == [] +class TestGetSingleLV(object): + + @patch('ceph_volume.devices.lvm.prepare.api.get_lvs') + def test_get_single_lv_multiple_matches_raises_runtimeerror(self, m_get_lvs): + fake_lvs = [] + fake_lvs.append(api.Volume(lv_name='lv1', + lv_path='/dev/vg1/lv1', + vg_name='vg1', + lv_tags='', + lv_uuid='fake-uuid')) + fake_lvs.append(api.Volume(lv_name='lv1', + lv_path='/dev/vg2/lv1', + vg_name='vg2', + lv_tags='', + lv_uuid='fake-uuid')) + m_get_lvs.return_value = fake_lvs + + with pytest.raises(RuntimeError) as e: + api.get_single_lv() + assert "matched more than 1 LV present on this host" in str(e.value) + + @patch('ceph_volume.devices.lvm.prepare.api.get_lvs') + def test_get_single_lv_no_match_returns_none(self, m_get_lvs): + m_get_lvs.return_value = [] + + lv = api.get_single_lv() + assert lv == None + + @patch('ceph_volume.devices.lvm.prepare.api.get_lvs') + def test_get_single_lv_one_match(self, m_get_lvs): + fake_lvs = [] + fake_lvs.append(api.Volume(lv_name='lv1', lv_path='/dev/vg1/lv1', vg_name='vg1', lv_tags='', lv_uuid='fake-uuid')) + m_get_lvs.return_value = fake_lvs + + lv_ = api.get_single_lv() + + assert isinstance(lv_, api.Volume) + assert lv_.name == 'lv1' diff --git a/ceph/src/ceph-volume/ceph_volume/tests/conftest.py b/ceph/src/ceph-volume/ceph_volume/tests/conftest.py index 149afbbc6..8a7d5326d 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/conftest.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/conftest.py @@ -63,6 +63,9 @@ def mock_lv_device_generator(): def mock_devices_available(): dev = create_autospec(device.Device) dev.path = '/dev/foo' + dev.vg_name = 'vg_foo' + dev.lv_name = 'lv_foo' + dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)] dev.available_lvm = True dev.vg_size = [21474836480] dev.vg_free = dev.vg_size @@ -73,6 +76,9 @@ def mock_device_generator(): def mock_device(): dev = create_autospec(device.Device) 
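The TestGetSingle* classes above pin down the behavior change behind the get_first_* to get_single_* rename: zero matches now comes back as None rather than an empty list, and an ambiguous filter raises RuntimeError instead of silently returning the first hit. The contract, compressed into a hypothetical sketch (names mine):

    def get_single(matches, kind, filters):
        # contract shared by get_single_pv/get_single_vg/get_single_lv:
        # None for no match, RuntimeError for an ambiguous match,
        # the single object otherwise
        if not matches:
            return None
        if len(matches) > 1:
            raise RuntimeError(
                'Filters {} matched more than 1 {} present on this host.'
                .format(filters, kind))
        return matches[0]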
dev.path = '/dev/foo' + dev.vg_name = 'vg_foo' + dev.lv_name = 'lv_foo' + dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)] dev.available_lvm = True dev.vg_size = [21474836480] dev.vg_free = dev.vg_size @@ -287,7 +293,7 @@ def device_info(monkeypatch, patch_bluestore_label): monkeypatch.setattr("ceph_volume.sys_info.devices", {}) monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda: devices) if not devices: - monkeypatch.setattr("ceph_volume.util.device.lvm.get_first_lv", lambda filters: lv) + monkeypatch.setattr("ceph_volume.util.device.lvm.get_single_lv", lambda filters: lv) else: monkeypatch.setattr("ceph_volume.util.device.lvm.get_device_lvs", lambda path: [lv]) diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py index 46d7c3c83..9b8fcbebe 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py @@ -51,7 +51,7 @@ class TestActivate(object): volumes = [] volumes.append(FooVolume) monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: []) - monkeypatch.setattr(api, 'get_first_lv', lambda **kwargs: []) + monkeypatch.setattr(api, 'get_single_lv', lambda **kwargs: []) monkeypatch.setattr(activate, 'activate_filestore', capture) args = Args(osd_id=None, osd_fsid='2222') @@ -357,7 +357,7 @@ class TestActivateAll(object): assert 'a8789a96ce8b process is active. Skipping activation' in err assert 'b8218eaa1634 process is active. Skipping activation' in err - def test_detects_osds_to_activate(self, is_root, capture, monkeypatch): + def test_detects_osds_to_activate_systemd(self, is_root, capture, monkeypatch): monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report) monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: False) args = ['--all'] @@ -370,6 +370,18 @@ class TestActivateAll(object): assert calls[1]['kwargs']['osd_id'] == '1' assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b' + def test_detects_osds_to_activate_no_systemd(self, is_root, capture, monkeypatch): + monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report) + args = ['--all', '--no-systemd'] + activation = activate.Activate(args) + activation.activate = capture + activation.main() + calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id']) + assert calls[0]['kwargs']['osd_id'] == '0' + assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634' + assert calls[1]['kwargs']['osd_id'] == '1' + assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b' + # # Activate All fixture # diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py index 4bf026ae1..2a334dad9 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py @@ -15,6 +15,10 @@ class TestBatch(object): b = batch.Batch([]) b.main() + def test_invalid_osd_ids_passed(self): + with pytest.raises(SystemExit): + batch.Batch(argv=['--osd-ids', '1', 'foo']).main() + def test_disjoint_device_lists(self, factory): device1 = factory(used_by_ceph=False, available=True, abspath="/dev/sda") device2 = factory(used_by_ceph=False, available=True, abspath="/dev/sdb") diff --git 
a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py index cf4b68c71..30d91b20f 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py @@ -106,7 +106,7 @@ class TestFullReport(object): lv_path='/dev/VolGroup/lv', vg_name='VolGroup') volumes = [] volumes.append(osd) - monkeypatch.setattr(lvm.listing.api, 'get_first_pv', lambda **kwargs: pv) + monkeypatch.setattr(lvm.listing.api, 'get_single_pv', lambda **kwargs: pv) monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: volumes) @@ -126,7 +126,7 @@ class TestFullReport(object): volumes = [] volumes.append(osd) volumes.append(journal) - monkeypatch.setattr(lvm.listing.api,'get_first_pv',lambda **kwargs:pv) + monkeypatch.setattr(lvm.listing.api,'get_single_pv',lambda **kwargs:pv) monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: volumes) diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py index 6c2027350..74d658950 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py @@ -992,13 +992,17 @@ class TestNew(object): class TestMigrate(object): + def test_invalid_osd_id_passed(self, is_root): + with pytest.raises(SystemExit): + migrate.Migrate(argv=['--osd-fsid', '123', '--from', 'data', '--target', 'foo', '--osd-id', 'foo']).main() + mock_volume = None def mock_get_lv_by_fullname(self, *args, **kwargs): return self.mock_volume mock_process_input = [] def mock_process(self, *args, **kwargs): - self.mock_process_input.append(args[0]); + self.mock_process_input.append(args[0]) return ('', '', 0) mock_single_volumes = {} diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py index 70915a0fe..fcbc276f0 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py @@ -121,10 +121,10 @@ class TestPrepare(object): assert result == ('', '', {'ceph.type': 'data'}) @patch('ceph_volume.api.lvm.Volume.set_tags') - @patch('ceph_volume.devices.lvm.prepare.api.get_first_lv') - def test_setup_device_lv_passed(self, m_get_first_lv, m_set_tags): + @patch('ceph_volume.devices.lvm.prepare.api.get_single_lv') + def test_setup_device_lv_passed(self, m_get_single_lv, m_set_tags): fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid') - m_get_first_lv.return_value = fake_volume + m_get_single_lv.return_value = fake_volume result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='vg_foo/lv_foo', tags={'ceph.type': 'data'}, size=0, slots=None) assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data', @@ -147,9 +147,9 @@ class TestPrepare(object): 'ceph.data_device': '/fake-path'}) @patch('ceph_volume.devices.lvm.prepare.Prepare.get_ptuuid') - @patch('ceph_volume.devices.lvm.prepare.api.get_first_lv') - def test_setup_device_partition_passed(self, m_get_first_lv, m_get_ptuuid): - m_get_first_lv.side_effect = ValueError() + @patch('ceph_volume.devices.lvm.prepare.api.get_single_lv') + def test_setup_device_partition_passed(self, m_get_single_lv, m_get_ptuuid): + m_get_single_lv.side_effect = ValueError() 
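The test_invalid_osd_id_passed cases threaded through these test modules (batch and migrate above, zap and prepare in the following hunks) all lean on one argparse property: a type= callable that raises ValueError makes parse_args() report an error and raise SystemExit. valid_osd_id is just str(int(val)), so a non-numeric ID fails before any command logic runs. A self-contained sketch of the mechanism, reusing the same validator:

    import argparse

    def valid_osd_id(val):
        # same normalization as ceph_volume.devices.lvm.common.valid_osd_id
        return str(int(val))

    parser = argparse.ArgumentParser()
    parser.add_argument('--osd-id', type=valid_osd_id)

    print(parser.parse_args(['--osd-id', '3']))   # Namespace(osd_id='3')
    # parser.parse_args(['--osd-id', 'foo'])      # error + SystemExit(2)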
m_get_ptuuid.return_value = 'fake-uuid' result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None) @@ -158,6 +158,10 @@ class TestPrepare(object): 'ceph.data_uuid': 'fake-uuid', 'ceph.data_device': '/dev/sdx'}) + def test_invalid_osd_id_passed(self): + with pytest.raises(SystemExit): + lvm.prepare.Prepare(argv=['--osd-id', 'foo']).main() + class TestActivate(object): diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py index 1fa22e5b6..eff187228 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py @@ -7,6 +7,11 @@ from ceph_volume.api import lvm as api from ceph_volume.devices.lvm import zap +class TestZap(object): + def test_invalid_osd_id_passed(self): + with pytest.raises(SystemExit): + zap.Zap(argv=['--osd-id', 'foo']).main() + class TestFindAssociatedDevices(object): def test_no_lvs_found_that_match_id(self, monkeypatch, device_info): diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile b/ceph/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile index 8f8b31b7e..f5425165a 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile @@ -390,7 +390,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| # always make /dev/sd{a/b/c/d} so that CI can ensure that # virtualbox and libvirt will have the same devices to use for OSDs (0..3).each do |d| - lv.storage :file, :device => "sd#{driverletters[d]}", :size => '12G' + lv.storage :file, :device => "sd#{driverletters[d]}", :size => '100G' end lv.memory = MEMORY lv.random_hostname = true diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml index 8b155d97d..5460fdd0d 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml +++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml @@ -1,4 +1,4 @@ - +--- - hosts: osds become: yes tasks: @@ -119,8 +119,8 @@ - /opt/vdd/loop0_nvme0 - /opt/vde/loop1_nvme1 - - name: create 11GB sparse files for NVMe - command: "fallocate -l 11G {{ item }}" + - name: create 20GB sparse files for NVMe + command: "fallocate -l 20G {{ item }}" loop: - /opt/vdd/loop0_nvme0 - /opt/vde/loop1_nvme1 diff --git a/ceph/src/ceph-volume/ceph_volume/tests/util/test_prepare.py b/ceph/src/ceph-volume/ceph_volume/tests/util/test_prepare.py index ced5d49e7..080823307 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/util/test_prepare.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/util/test_prepare.py @@ -33,16 +33,7 @@ class TestOSDIDAvailable(object): stdout = ['', json.dumps(stdout)] monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0)) result = prepare.osd_id_available(1) - assert not result - - def test_invalid_osd_id(self, monkeypatch): - stdout = dict(nodes=[ - dict(id=0), - ]) - stdout = ['', json.dumps(stdout)] - monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0)) - result = prepare.osd_id_available("foo") - assert not result + assert result def test_returns_true_when_id_is_destroyed(self, monkeypatch): stdout = dict(nodes=[ diff --git a/ceph/src/ceph-volume/ceph_volume/util/device.py 
b/ceph/src/ceph-volume/ceph_volume/util/device.py index 9a455883e..010aad65d 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/device.py +++ b/ceph/src/ceph-volume/ceph_volume/util/device.py @@ -162,11 +162,11 @@ class Device(object): # if the path is not absolute, we have 'vg/lv', let's use LV name # to get the LV. if self.path[0] == '/': - lv = lvm.get_first_lv(filters={'lv_path': self.path}) + lv = lvm.get_single_lv(filters={'lv_path': self.path}) else: vgname, lvname = self.path.split('/') - lv = lvm.get_first_lv(filters={'lv_name': lvname, - 'vg_name': vgname}) + lv = lvm.get_single_lv(filters={'lv_name': lvname, + 'vg_name': vgname}) if lv: self.lv_api = lv self.lvs = [lv] diff --git a/ceph/src/ceph-volume/ceph_volume/util/prepare.py b/ceph/src/ceph-volume/ceph_volume/util/prepare.py index 85b7033c2..2c0bdc049 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/prepare.py +++ b/ceph/src/ceph-volume/ceph_volume/util/prepare.py @@ -183,6 +183,7 @@ def osd_id_available(osd_id): """ if osd_id is None: return False + bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster stdout, stderr, returncode = process.call( [ @@ -202,7 +203,7 @@ def osd_id_available(osd_id): output = json.loads(''.join(stdout).strip()) osds = output['nodes'] osd = [osd for osd in osds if str(osd['id']) == str(osd_id)] - if osd and osd[0].get('status') == "destroyed": + if not osd or (osd and osd[0].get('status') == "destroyed"): return True return False diff --git a/ceph/src/cephadm/cephadm b/ceph/src/cephadm/cephadm index 13f0f6e6a..9fe5fb376 100755 --- a/ceph/src/cephadm/cephadm +++ b/ceph/src/cephadm/cephadm @@ -31,7 +31,7 @@ from contextlib import redirect_stdout import ssl from enum import Enum -from typing import Dict, List, Tuple, Optional, Union, Any, NoReturn, Callable, IO, Sequence, TypeVar, cast, Set +from typing import Dict, List, Tuple, Optional, Union, Any, NoReturn, Callable, IO, Sequence, TypeVar, cast, Set, Iterable import re import uuid @@ -175,6 +175,9 @@ class ContainerEngine: def EXE(cls) -> str: raise NotImplementedError() + def __str__(self) -> str: + return f'{self.EXE} ({self.path})' + class Podman(ContainerEngine): EXE = 'podman' @@ -193,6 +196,10 @@ class Podman(ContainerEngine): out, _, _ = call_throws(ctx, [self.path, 'version', '--format', '{{.Client.Version}}']) self._version = _parse_podman_version(out) + def __str__(self) -> str: + version = '.'.join(map(str, self.version)) + return f'{self.EXE} ({self.path}) version {version}' + class Docker(ContainerEngine): EXE = 'docker' @@ -207,7 +214,7 @@ logging_config = { 'disable_existing_loggers': True, 'formatters': { 'cephadm': { - 'format': '%(asctime)s %(levelname)s %(message)s' + 'format': '%(asctime)s %(thread)x %(levelname)s %(message)s' }, }, 'handlers': { @@ -549,7 +556,7 @@ class CephIscsi(object): mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z' mounts[os.path.join(data_dir, 'iscsi-gateway.cfg')] = '/etc/ceph/iscsi-gateway.cfg:z' mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config' - mounts[log_dir] = '/var/log/rbd-target-api:z' + mounts[log_dir] = '/var/log:z' mounts['/dev'] = '/dev' return mounts @@ -1286,28 +1293,28 @@ if sys.version_info < (3, 8): def __init__(self) -> None: self._pid_counter = itertools.count(0) - self._threads = {} + self._threads: Dict[Any, Any] = {} - def is_active(self): + def is_active(self) -> bool: return True - def close(self): + def close(self) -> None: self._join_threads() - def _join_threads(self): + def _join_threads(self) -> None: """Internal: 
Join all non-daemon threads""" threads = [thread for thread in list(self._threads.values()) if thread.is_alive() and not thread.daemon] for thread in threads: thread.join() - def __enter__(self): + def __enter__(self) -> Any: return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: pass - def __del__(self, _warn=warnings.warn): + def __del__(self, _warn: Any = warnings.warn) -> None: threads = [thread for thread in list(self._threads.values()) if thread.is_alive()] if threads: @@ -1315,7 +1322,7 @@ if sys.version_info < (3, 8): ResourceWarning, source=self) - def add_child_handler(self, pid, callback, *args): + def add_child_handler(self, pid: Any, callback: Any, *args: Any) -> None: loop = events.get_event_loop() thread = threading.Thread(target=self._do_waitpid, name=f'waitpid-{next(self._pid_counter)}', @@ -1324,16 +1331,16 @@ if sys.version_info < (3, 8): self._threads[pid] = thread thread.start() - def remove_child_handler(self, pid): + def remove_child_handler(self, pid: Any) -> bool: # asyncio never calls remove_child_handler() !!! # The method is no-op but is implemented because # abstract base classe requires it return True - def attach_loop(self, loop): + def attach_loop(self, loop: Any) -> None: pass - def _do_waitpid(self, loop, expected_pid, callback, args): + def _do_waitpid(self, loop: Any, expected_pid: Any, callback: Any, args: Any) -> None: assert expected_pid > 0 try: @@ -1408,8 +1415,6 @@ def call(ctx: CephadmContext, prefix += ': ' timeout = timeout or ctx.timeout - logger.debug('Running command: %s' % ' '.join(command)) - async def tee(reader: asyncio.StreamReader) -> str: collected = StringIO() async for line in reader: @@ -1994,13 +1999,12 @@ def find_container_engine(ctx: CephadmContext) -> Optional[ContainerEngine]: for i in CONTAINER_PREFERENCE: try: return i() - except Exception as e: - logger.debug('Could not locate %s: %s' % (i.EXE, e)) + except Exception: + pass return None -def check_container_engine(ctx): - # type: (CephadmContext) -> None +def check_container_engine(ctx: CephadmContext) -> ContainerEngine: engine = ctx.container_engine if not isinstance(engine, CONTAINER_PREFERENCE): # See https://github.com/python/mypy/issues/8993 @@ -2010,6 +2014,7 @@ def check_container_engine(ctx): engine.get_version(ctx) if engine.version < MIN_PODMAN_VERSION: raise Error('podman version %d.%d.%d or later is required' % MIN_PODMAN_VERSION) + return engine def get_unit_name(fsid, daemon_type, daemon_id=None): @@ -2310,6 +2315,8 @@ def get_config_and_keyring(ctx): d = get_parm(ctx.config_json) config = d.get('config') keyring = d.get('keyring') + if config and keyring: + return config, keyring if 'config' in ctx and ctx.config: try: @@ -2806,7 +2813,7 @@ def deploy_daemon_units( f.write(' '.join(CephIscsi.configfs_mount_umount(data_dir, mount=True)) + '\n') ceph_iscsi = CephIscsi.init(ctx, fsid, daemon_id) tcmu_container = ceph_iscsi.get_tcmu_runner_container() - _write_container_cmd_to_bash(ctx, f, tcmu_container, 'iscsi tcmu-runnter container', background=True) + _write_container_cmd_to_bash(ctx, f, tcmu_container, 'iscsi tcmu-runner container', background=True) _write_container_cmd_to_bash(ctx, f, c, '%s.%s' % (daemon_type, str(daemon_id))) @@ -3433,12 +3440,12 @@ def command_version(ctx): def command_pull(ctx): # type: (CephadmContext) -> int - _pull_image(ctx, ctx.image) + _pull_image(ctx, ctx.image, ctx.insecure) return command_inspect_image(ctx) -def _pull_image(ctx, image): - # type: 
(CephadmContext, str) -> None +def _pull_image(ctx, image, insecure=False): + # type: (CephadmContext, str, bool) -> None logger.info('Pulling container image %s...' % image) ignorelist = [ @@ -3448,8 +3455,12 @@ def _pull_image(ctx, image): ] cmd = [ctx.container_engine.path, 'pull', image] - if isinstance(ctx.container_engine, Podman) and os.path.exists('/etc/ceph/podman-auth.json'): - cmd.append('--authfile=/etc/ceph/podman-auth.json') + if isinstance(ctx.container_engine, Podman): + if insecure: + cmd.append('--tls-verify=false') + + if os.path.exists('/etc/ceph/podman-auth.json'): + cmd.append('--authfile=/etc/ceph/podman-auth.json') cmd_str = ' '.join(cmd) for sleep_secs in [1, 4, 25]: @@ -3458,12 +3469,12 @@ def _pull_image(ctx, image): return if not any(pattern in err for pattern in ignorelist): - raise RuntimeError('Failed command: %s' % cmd_str) + raise Error('Failed command: %s' % cmd_str) logger.info('`%s` failed transiently. Retrying. waiting %s seconds...' % (cmd_str, sleep_secs)) time.sleep(sleep_secs) - raise RuntimeError('Failed command: %s: maximum retries reached' % cmd_str) + raise Error('Failed command: %s: maximum retries reached' % cmd_str) ################################## @@ -4036,10 +4047,10 @@ def prepare_bootstrap_config( logger.info('Adjusting default settings to suit single-host cluster...') # replicate across osds, not hosts if ( - not cp.has_option('global', 'osd_crush_choose_leaf_type') - and not cp.has_option('global', 'osd crush choose leaf type') + not cp.has_option('global', 'osd_crush_chooseleaf_type') + and not cp.has_option('global', 'osd crush chooseleaf type') ): - cp.set('global', 'osd_crush_choose_leaf_type', '0') + cp.set('global', 'osd_crush_chooseleaf_type', '0') # replica 2x if ( not cp.has_option('global', 'osd_pool_default_size') @@ -4125,6 +4136,65 @@ def finish_bootstrap_config( pass +# funcs to process spec file for apply spec +def _parse_yaml_docs(f: Iterable[str]) -> List[List[str]]: + docs = [] + current_doc = [] # type: List[str] + for line in f: + if '---' in line: + if current_doc: + docs.append(current_doc) + current_doc = [] + else: + current_doc.append(line.rstrip()) + if current_doc: + docs.append(current_doc) + return docs + + +def _parse_yaml_obj(doc: List[str]) -> Dict[str, str]: + # note: this only parses the first layer of yaml + obj = {} # type: Dict[str, str] + current_key = '' + for line in doc: + if line.startswith(' '): + obj[current_key] += line.strip() + elif line.endswith(':'): + current_key = line.strip(':') + obj[current_key] = '' + else: + current_key, val = line.split(':') + obj[current_key] = val.strip() + return obj + + +def parse_yaml_objs(f: Iterable[str]) -> List[Dict[str, str]]: + objs = [] + for d in _parse_yaml_docs(f): + objs.append(_parse_yaml_obj(d)) + return objs + + +def _distribute_ssh_keys(ctx: CephadmContext, host_spec: Dict[str, str], bootstrap_hostname: str) -> int: + # copy ssh key to hosts in host spec (used for apply spec) + ssh_key = '/etc/ceph/ceph.pub' + if ctx.ssh_public_key: + ssh_key = ctx.ssh_public_key.name + + if bootstrap_hostname != host_spec['hostname']: + if 'addr' in host_spec: + addr = host_spec['addr'] + else: + addr = host_spec['hostname'] + out, err, code = call(ctx, ['sudo', '-u', ctx.ssh_user, 'ssh-copy-id', '-f', '-i', ssh_key, '-o StrictHostKeyChecking=no', '%s@%s' % (ctx.ssh_user, addr)]) + if code: + logger.info('\nCopying ssh key to host %s at address %s failed!\n' % (host_spec['hostname'], addr)) + return 1 + else: + logger.info('Added ssh key to host %s at 
address %s\n' % (host_spec['hostname'], addr)) + return 0 + + @default_image def command_bootstrap(ctx): # type: (CephadmContext) -> int @@ -4339,25 +4409,22 @@ def command_bootstrap(ctx): if ctx.apply_spec: logger.info('Applying %s to cluster' % ctx.apply_spec) - + # copy ssh key to hosts in spec file with open(ctx.apply_spec) as f: - for line in f: - if 'hostname:' in line: - line = line.replace('\n', '') - split = line.split(': ') - if split[1] != hostname: - logger.info('Adding ssh key to %s' % split[1]) - - ssh_key = '/etc/ceph/ceph.pub' - if ctx.ssh_public_key: - ssh_key = ctx.ssh_public_key.name - out, err, code = call_throws(ctx, ['sudo', '-u', ctx.ssh_user, 'ssh-copy-id', '-f', '-i', ssh_key, '-o StrictHostKeyChecking=no', '%s@%s' % (ctx.ssh_user, split[1])]) + try: + for spec in parse_yaml_objs(f): + if spec.get('service_type') == 'host': + _distribute_ssh_keys(ctx, spec, hostname) + except ValueError: + logger.info('Unable to parse %s succesfully' % ctx.apply_spec) mounts = {} - mounts[pathify(ctx.apply_spec)] = '/tmp/spec.yml:z' - - out = cli(['orch', 'apply', '-i', '/tmp/spec.yml'], extra_mounts=mounts) - logger.info(out) + mounts[pathify(ctx.apply_spec)] = '/tmp/spec.yml:ro' + try: + out = cli(['orch', 'apply', '-i', '/tmp/spec.yml'], extra_mounts=mounts) + logger.info(out) + except Exception: + logger.info('\nApplying %s to cluster failed!\n' % ctx.apply_spec) logger.info('You can access the Ceph CLI with:\n\n' '\tsudo %s shell --fsid %s -c %s -k %s\n' % ( @@ -4631,10 +4698,12 @@ def command_shell(ctx): mount = pathify(split_src_dst[0]) filename = os.path.basename(split_src_dst[0]) if len(split_src_dst) > 1: - dst = split_src_dst[1] + ':z' if len(split_src_dst) == 3 else split_src_dst[1] + dst = split_src_dst[1] + if len(split_src_dst) == 3: + dst = '{}:{}'.format(dst, split_src_dst[2]) mounts[mount] = dst else: - mounts[mount] = '/mnt/{}:z'.format(filename) + mounts[mount] = '/mnt/{}'.format(filename) if ctx.command: command = ctx.command else: @@ -5770,14 +5839,12 @@ def check_time_sync(ctx, enabler=None): def command_check_host(ctx: CephadmContext) -> None: - container_path = ctx.container_engine.path - errors = [] commands = ['systemctl', 'lvcreate'] try: - check_container_engine(ctx) - logger.info('podman|docker (%s) is present' % container_path) + engine = check_container_engine(ctx) + logger.info(f'{engine} is present') except Error as e: errors.append(str(e)) @@ -6104,6 +6171,7 @@ class YumDnf(Packager): 'rocky': ('centos', 'el'), 'almalinux': ('centos', 'el'), 'fedora': ('fedora', 'fc'), + 'mariner': ('mariner', 'cm'), } def __init__(self, ctx: CephadmContext, @@ -6120,6 +6188,8 @@ class YumDnf(Packager): if (self.distro_code == 'fc' and self.major >= 30) or \ (self.distro_code == 'el' and self.major >= 8): self.tool = 'dnf' + elif (self.distro_code == 'cm'): + self.tool = 'tdnf' else: self.tool = 'yum' @@ -6494,6 +6564,7 @@ class HostFacts(): _disk_vendor_workarounds = { '0x1af4': 'Virtio Block Device' } + _excluded_block_devices = ('sr', 'zram', 'dm-') def __init__(self, ctx: CephadmContext): self.ctx: CephadmContext = ctx @@ -6533,7 +6604,7 @@ class HostFacts(): # type: () -> List[str] """Determine the list of block devices by looking at /sys/block""" return [dev for dev in os.listdir('/sys/block') - if not dev.startswith('dm')] + if not dev.startswith(HostFacts._excluded_block_devices)] def _get_devs_by_type(self, rota='0'): # type: (str) -> List[str] @@ -7680,27 +7751,31 @@ def command_exporter(ctx: CephadmContext) -> None: ################################## 
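The `--apply-spec` helpers added above intentionally avoid a PyYAML dependency and parse only the first layer of each YAML document, so nested lists are flattened into a single string. Below is a minimal standalone sketch of that behaviour (the helper bodies mirror the diff; the two-host sample spec is made up). The same flattening shows up in the `TestApplySpec.test_parse_yaml` expectation further below, and a value containing an extra ':' makes the two-way unpack raise ValueError, which `command_bootstrap` catches above.

def parse_docs(f):                      # mirrors _parse_yaml_docs
    docs, current = [], []
    for line in f:
        if '---' in line:               # document separator
            if current:
                docs.append(current)
            current = []
        else:
            current.append(line.rstrip())
    if current:
        docs.append(current)
    return docs

def parse_obj(doc):                     # mirrors _parse_yaml_obj
    obj, key = {}, ''
    for line in doc:
        if line.startswith(' '):        # continuation, e.g. a list item
            obj[key] += line.strip()
        elif line.endswith(':'):        # key introducing a nested block
            key = line.strip(':')
            obj[key] = ''
        else:                           # plain "key: value" pair
            key, val = line.split(':')
            obj[key] = val.strip()
    return obj

spec = '''service_type: host
hostname: vm-00
labels:
  - example1
  - example2
---
service_type: host
hostname: vm-01
'''

print([parse_obj(d) for d in parse_docs(spec.splitlines())])
# [{'service_type': 'host', 'hostname': 'vm-00', 'labels': '- example1- example2'},
#  {'service_type': 'host', 'hostname': 'vm-01'}]
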
-def systemd_target_state(target_name: str, subsystem: str = 'ceph') -> bool: +def systemd_target_state(ctx: CephadmContext, target_name: str, subsystem: str = 'ceph') -> bool: # TODO: UNITTEST return os.path.exists( os.path.join( - UNIT_DIR, + ctx.unit_dir, f'{subsystem}.target.wants', target_name ) ) +def target_exists(ctx: CephadmContext) -> bool: + return os.path.exists(ctx.unit_dir + '/ceph.target') + + @infer_fsid def command_maintenance(ctx: CephadmContext) -> str: if not ctx.fsid: - raise Error('must pass --fsid to specify cluster') + raise Error('failed - must pass --fsid to specify cluster') target = f'ceph-{ctx.fsid}.target' if ctx.maintenance_action.lower() == 'enter': logger.info('Requested to place host into maintenance') - if systemd_target_state(target): + if systemd_target_state(ctx, target): _out, _err, code = call(ctx, ['systemctl', 'disable', target], verbosity=CallVerbosity.DEBUG) @@ -7723,8 +7798,14 @@ def command_maintenance(ctx: CephadmContext) -> str: else: logger.info('Requested to exit maintenance state') + # if we've never deployed a daemon on this host there will be no systemd + # target to disable so attempting a disable will fail. We still need to + # return success here or host will be permanently stuck in maintenance mode + # as no daemons can be deployed so no systemd target will ever exist to disable. + if not target_exists(ctx): + return 'skipped - systemd target not present on this host. Host removed from maintenance mode.' # exit maintenance request - if not systemd_target_state(target): + if not systemd_target_state(ctx, target): _out, _err, code = call(ctx, ['systemctl', 'enable', target], verbosity=CallVerbosity.DEBUG) @@ -7813,6 +7894,11 @@ def _get_parser(): parser_pull = subparsers.add_parser( 'pull', help='pull latest image version') parser_pull.set_defaults(func=command_pull) + parser_pull.add_argument( + '--insecure', + action='store_true', + help=argparse.SUPPRESS, + ) parser_inspect_image = subparsers.add_parser( 'inspect-image', help='inspect local container image') @@ -8438,7 +8524,7 @@ def cephadm_init(args: List[str]) -> CephadmContext: for handler in logger.handlers: if handler.name == 'console': handler.setLevel(logging.DEBUG) - + logger.debug('%s\ncephadm %s' % ('-' * 80, args)) return ctx @@ -8461,7 +8547,13 @@ def main() -> None: # podman or docker? 
ctx.container_engine = find_container_engine(ctx) if ctx.func not in \ - [command_check_host, command_prepare_host, command_add_repo, command_install]: + [ + command_check_host, + command_prepare_host, + command_add_repo, + command_rm_repo, + command_install + ]: check_container_engine(ctx) # command handler r = ctx.func(ctx) diff --git a/ceph/src/cephadm/tests/test_cephadm.py b/ceph/src/cephadm/tests/test_cephadm.py index 40afdd422..5d45fdb40 100644 --- a/ceph/src/cephadm/tests/test_cephadm.py +++ b/ceph/src/cephadm/tests/test_cephadm.py @@ -12,6 +12,7 @@ import threading import unittest from http.server import HTTPServer +from textwrap import dedent from urllib.request import Request, urlopen from urllib.error import HTTPError @@ -25,7 +26,6 @@ from .fixtures import ( with_cephadm_ctx, ) - with mock.patch('builtins.open', create=True): from importlib.machinery import SourceFileLoader cd = SourceFileLoader('cephadm', 'cephadm').load_module() @@ -893,6 +893,7 @@ iMN28C2bKGao5UHvdER1rGy7 class TestMaintenance: systemd_target = "ceph.00000000-0000-0000-0000-000000c0ffee.target" + fsid = '0ea8cdd0-1bbf-11ec-a9c7-5254002763fa' def test_systemd_target_OK(self, tmp_path): base = tmp_path @@ -900,14 +901,16 @@ class TestMaintenance: wants.mkdir() target = wants / TestMaintenance.systemd_target target.touch() - cd.UNIT_DIR = str(base) + ctx = cd.CephadmContext() + ctx.unit_dir = str(base) - assert cd.systemd_target_state(target.name) + assert cd.systemd_target_state(ctx, target.name) def test_systemd_target_NOTOK(self, tmp_path): base = tmp_path - cd.UNIT_DIR = str(base) - assert not cd.systemd_target_state(TestMaintenance.systemd_target) + ctx = cd.CephadmContext() + ctx.unit_dir = str(base) + assert not cd.systemd_target_state(ctx, TestMaintenance.systemd_target) def test_parser_OK(self): args = cd._parse_args(['host-maintenance', 'enter']) @@ -917,6 +920,58 @@ class TestMaintenance: with pytest.raises(SystemExit): cd._parse_args(['host-maintenance', 'wah']) + @mock.patch('os.listdir', return_value=[]) + @mock.patch('cephadm.call') + @mock.patch('cephadm.systemd_target_state') + def test_enter_failure_1(self, _target_state, _call, _listdir): + _call.return_value = '', '', 999 + _target_state.return_value = True + ctx: cd.CephadmContext = cd.cephadm_init_ctx( + ['host-maintenance', 'enter', '--fsid', TestMaintenance.fsid]) + ctx.container_engine = mock_podman() + retval = cd.command_maintenance(ctx) + assert retval.startswith('failed') + + @mock.patch('os.listdir', return_value=[]) + @mock.patch('cephadm.call') + @mock.patch('cephadm.systemd_target_state') + def test_enter_failure_2(self, _target_state, _call, _listdir): + _call.side_effect = [('', '', 0), ('', '', 999)] + _target_state.return_value = True + ctx: cd.CephadmContext = cd.cephadm_init_ctx( + ['host-maintenance', 'enter', '--fsid', TestMaintenance.fsid]) + ctx.container_engine = mock_podman() + retval = cd.command_maintenance(ctx) + assert retval.startswith('failed') + + @mock.patch('os.listdir', return_value=[]) + @mock.patch('cephadm.call') + @mock.patch('cephadm.systemd_target_state') + @mock.patch('cephadm.target_exists') + def test_exit_failure_1(self, _target_exists, _target_state, _call, _listdir): + _call.return_value = '', '', 999 + _target_state.return_value = False + _target_exists.return_value = True + ctx: cd.CephadmContext = cd.cephadm_init_ctx( + ['host-maintenance', 'exit', '--fsid', TestMaintenance.fsid]) + ctx.container_engine = mock_podman() + retval = cd.command_maintenance(ctx) + assert 
retval.startswith('failed') + + @mock.patch('os.listdir', return_value=[]) + @mock.patch('cephadm.call') + @mock.patch('cephadm.systemd_target_state') + @mock.patch('cephadm.target_exists') + def test_exit_failure_2(self, _target_exists, _target_state, _call, _listdir): + _call.side_effect = [('', '', 0), ('', '', 999)] + _target_state.return_value = False + _target_exists.return_value = True + ctx: cd.CephadmContext = cd.cephadm_init_ctx( + ['host-maintenance', 'exit', '--fsid', TestMaintenance.fsid]) + ctx.container_engine = mock_podman() + retval = cd.command_maintenance(ctx) + assert retval.startswith('failed') + class TestMonitoring(object): @mock.patch('cephadm.call') @@ -1016,6 +1071,9 @@ class TestBootstrap(object): *args, ] + +###############################################3 + def test_config(self, cephadm_fs): conf_file = 'foo' cmd = self._get_cmd( @@ -1329,6 +1387,29 @@ class TestShell(object): assert retval == 0 assert ctx.keyring == 'foo' + @mock.patch('cephadm.CephContainer') + def test_mount_no_dst(self, m_ceph_container, cephadm_fs): + cmd = ['shell', '--mount', '/etc/foo'] + with with_cephadm_ctx(cmd) as ctx: + retval = cd.command_shell(ctx) + assert retval == 0 + assert m_ceph_container.call_args.kwargs['volume_mounts']['/etc/foo'] == '/mnt/foo' + + @mock.patch('cephadm.CephContainer') + def test_mount_with_dst_no_opt(self, m_ceph_container, cephadm_fs): + cmd = ['shell', '--mount', '/etc/foo:/opt/foo/bar'] + with with_cephadm_ctx(cmd) as ctx: + retval = cd.command_shell(ctx) + assert retval == 0 + assert m_ceph_container.call_args.kwargs['volume_mounts']['/etc/foo'] == '/opt/foo/bar' + + @mock.patch('cephadm.CephContainer') + def test_mount_with_dst_and_opt(self, m_ceph_container, cephadm_fs): + cmd = ['shell', '--mount', '/etc/foo:/opt/foo/bar:Z'] + with with_cephadm_ctx(cmd) as ctx: + retval = cd.command_shell(ctx) + assert retval == 0 + assert m_ceph_container.call_args.kwargs['volume_mounts']['/etc/foo'] == '/opt/foo/bar:Z' class TestCephVolume(object): @@ -1418,8 +1499,7 @@ class TestIscsi: ctx.config_json = json.dumps(config_json) ctx.fsid = fsid cd.get_parm.return_value = config_json - iscsi = cd.CephIscsi(ctx, '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'daemon_id', config_json) - c = iscsi.get_tcmu_runner_container() + c = cd.get_container(ctx, fsid, 'iscsi', 'daemon_id') cd.make_data_dir(ctx, fsid, 'iscsi', 'daemon_id') cd.deploy_daemon_units( @@ -1435,14 +1515,14 @@ class TestIscsi: with open('/var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/unit.run') as f: assert f.read() == """set -e if ! grep -qs /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs /proc/mounts; then mount -t configfs none /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs; fi -# iscsi tcmu-runnter container +# iscsi tcmu-runner container ! /usr/bin/podman rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.daemon_id-tcmu 2> /dev/null ! 
/usr/bin/podman rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu 2> /dev/null -/usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/bin/tcmu-runner --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log/rbd-target-api:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph & +/usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/bin/tcmu-runner --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph & # iscsi.daemon_id -! /usr/bin/podman rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.daemon_id-tcmu 2> /dev/null -! /usr/bin/podman rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu 2> /dev/null -/usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/bin/tcmu-runner --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log/rbd-target-api:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph +! /usr/bin/podman rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.daemon_id 2> /dev/null +! 
/usr/bin/podman rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id 2> /dev/null +/usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/bin/rbd-target-api --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph """ def test_get_container(self): @@ -1460,4 +1540,177 @@ if ! grep -qs /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id assert c.old_cname == 'ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.something' +class TestCheckHost: + + @mock.patch('cephadm.find_executable', return_value='foo') + @mock.patch('cephadm.check_time_sync', return_value=True) + def test_container_engine(self, find_executable, check_time_sync): + ctx = cd.CephadmContext() + + ctx.container_engine = None + err = r'No container engine binary found' + with pytest.raises(cd.Error, match=err): + cd.command_check_host(ctx) + + ctx.container_engine = mock_podman() + cd.command_check_host(ctx) + + ctx.container_engine = mock_docker() + cd.command_check_host(ctx) + + +class TestRmRepo: + + @pytest.mark.parametrize('os_release', + [ + # Apt + dedent(""" + NAME="Ubuntu" + VERSION="20.04 LTS (Focal Fossa)" + ID=ubuntu + ID_LIKE=debian + PRETTY_NAME="Ubuntu 20.04 LTS" + VERSION_ID="20.04" + HOME_URL="https://www.ubuntu.com/" + SUPPORT_URL="https://help.ubuntu.com/" + BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" + PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" + VERSION_CODENAME=focal + UBUNTU_CODENAME=focal + """), + + # YumDnf + dedent(""" + NAME="CentOS Linux" + VERSION="8 (Core)" + ID="centos" + ID_LIKE="rhel fedora" + VERSION_ID="8" + PLATFORM_ID="platform:el8" + PRETTY_NAME="CentOS Linux 8 (Core)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:centos:centos:8" + HOME_URL="https://www.centos.org/" + BUG_REPORT_URL="https://bugs.centos.org/" + + CENTOS_MANTISBT_PROJECT="CentOS-8" + CENTOS_MANTISBT_PROJECT_VERSION="8" + REDHAT_SUPPORT_PRODUCT="centos" + REDHAT_SUPPORT_PRODUCT_VERSION="8" + """), + + # Zypper + dedent(""" + NAME="openSUSE Tumbleweed" + # VERSION="20210810" + ID="opensuse-tumbleweed" + ID_LIKE="opensuse suse" + VERSION_ID="20210810" + PRETTY_NAME="openSUSE Tumbleweed" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:opensuse:tumbleweed:20210810" + BUG_REPORT_URL="https://bugs.opensuse.org" + HOME_URL="https://www.opensuse.org/" + DOCUMENTATION_URL="https://en.opensuse.org/Portal:Tumbleweed" + LOGO="distributor-logo" + """), + ]) + @mock.patch('cephadm.find_executable', return_value='foo') + def test_container_engine(self, find_executable, os_release, cephadm_fs): + cephadm_fs.create_file('/etc/os-release', contents=os_release) + ctx = cd.CephadmContext() + + ctx.container_engine = None + cd.command_rm_repo(ctx) + + ctx.container_engine = mock_podman() + cd.command_rm_repo(ctx) + + ctx.container_engine = mock_docker() + cd.command_rm_repo(ctx) 
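
TestPull, added just below, pins down the retry contract of `_pull_image` from earlier in this patch: a zero exit code returns immediately, a non-transient error raises at once, and transient errors are retried on a 1, 4, 25 second backoff before failing with 'maximum retries reached'. A minimal sketch of that loop follows; the transient-pattern list is illustrative (only the TLS-handshake entry is taken from the test, upstream keeps its own ignorelist), and RuntimeError stands in for cephadm's own Error type.

import subprocess
import time

TRANSIENT_PATTERNS = ['net/http: TLS handshake timeout']  # illustrative

def pull_with_retries(cmd):
    for sleep_secs in [1, 4, 25]:
        proc = subprocess.run(cmd, capture_output=True, text=True)
        if proc.returncode == 0:
            return
        if not any(p in proc.stderr for p in TRANSIENT_PATTERNS):
            # non-transient failure: give up immediately
            raise RuntimeError('Failed command: %s' % ' '.join(cmd))
        time.sleep(sleep_secs)  # transient failure: back off and retry
    raise RuntimeError('Failed command: %s: maximum retries reached' % ' '.join(cmd))

# e.g. pull_with_retries(['podman', 'pull', 'quay.io/ceph/ceph:v16.2.7'])
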
+ + +class TestPull: + + @mock.patch('time.sleep') + @mock.patch('cephadm.call', return_value=('', '', 0)) + @mock.patch('cephadm.get_image_info_from_inspect', return_value={}) + def test_error(self, get_image_info_from_inspect, call, sleep): + ctx = cd.CephadmContext() + ctx.container_engine = mock_podman() + ctx.insecure = False + + call.return_value = ('', '', 0) + retval = cd.command_pull(ctx) + assert retval == 0 + + err = 'maximum retries reached' + + call.return_value = ('', 'foobar', 1) + with pytest.raises(cd.Error) as e: + cd.command_pull(ctx) + assert err not in str(e.value) + + call.return_value = ('', 'net/http: TLS handshake timeout', 1) + with pytest.raises(cd.Error) as e: + cd.command_pull(ctx) + assert err in str(e.value) + + +class TestApplySpec: + + def test_parse_yaml(self, cephadm_fs): + yaml = '''service_type: host +hostname: vm-00 +addr: 192.168.122.44 +labels: + - example1 + - example2 +--- +service_type: host +hostname: vm-01 +addr: 192.168.122.247 +labels: + - grafana +--- +service_type: host +hostname: vm-02 +addr: 192.168.122.165''' + + cephadm_fs.create_file('spec.yml', contents=yaml) + + retdic = [{'service_type': 'host', 'hostname': 'vm-00', 'addr': '192.168.122.44', 'labels': '- example1- example2'}, + {'service_type': 'host', 'hostname': 'vm-01', 'addr': '192.168.122.247', 'labels': '- grafana'}, + {'service_type': 'host', 'hostname': 'vm-02', 'addr': '192.168.122.165'}] + + with open('spec.yml') as f: + dic = cd.parse_yaml_objs(f) + assert dic == retdic + + @mock.patch('cephadm.call', return_value=('', '', 0)) + def test_distribute_ssh_keys(self, call): + ctx = cd.CephadmContext() + ctx.ssh_public_key = None + ctx.ssh_user = 'root' + + host_spec = {'service_type': 'host', 'hostname': 'vm-02', 'addr': '192.168.122.165'} + + retval = cd._distribute_ssh_keys(ctx, host_spec, 'bootstrap_hostname') + + assert retval == 0 + + call.return_value = ('', '', 1) + + retval = cd._distribute_ssh_keys(ctx, host_spec, 'bootstrap_hostname') + + assert retval == 1 + + + + + + + + + diff --git a/ceph/src/client/Client.cc b/ceph/src/client/Client.cc index 3f344c3fa..86936de67 100644 --- a/ceph/src/client/Client.cc +++ b/ceph/src/client/Client.cc @@ -5456,6 +5456,7 @@ void Client::handle_cap_grant(MetaSession *session, Inode *in, Cap *cap, const M mds_rank_t mds = session->mds_num; int used = get_caps_used(in); int wanted = in->caps_wanted(); + int flags = 0; const unsigned new_caps = m->get_caps(); const bool was_stale = session->cap_gen > cap->gen; @@ -5569,11 +5570,14 @@ void Client::handle_cap_grant(MetaSession *session, Inode *in, Cap *cap, const M !_flush(in, new C_Client_FlushComplete(this, in))) { // waitin' for flush } else if (used & revoked & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) { - if (_release(in)) - check = true; + if (_release(in)) { + check = true; + flags = CHECK_CAPS_NODELAY; + } } else { cap->wanted = 0; // don't let check_caps skip sending a response to MDS check = true; + flags = CHECK_CAPS_NODELAY; } } else if (cap->issued == new_caps) { ldout(cct, 10) << " caps unchanged at " << ccap_string(cap->issued) << dendl; @@ -5596,7 +5600,7 @@ void Client::handle_cap_grant(MetaSession *session, Inode *in, Cap *cap, const M } if (check) - check_caps(in, 0); + check_caps(in, flags); // wake up waiters if (new_caps) @@ -6735,6 +6739,16 @@ void Client::collect_and_send_global_metrics() { } message.push_back(metric); + // read io sizes + metric = ClientMetricMessage(ReadIoSizesPayload(total_read_ops, + total_read_size)); + message.push_back(metric); + + // 
write io sizes + metric = ClientMetricMessage(WriteIoSizesPayload(total_write_ops, + total_write_size)); + message.push_back(metric); + session->con->send_message2(make_message(std::move(message))); } @@ -9967,6 +9981,7 @@ retry: success: ceph_assert(rc >= 0); + update_read_io_size(bl->length()); if (movepos) { // adjust fd pos f->pos = start_pos + rc; @@ -10014,6 +10029,9 @@ Client::C_Readahead::~C_Readahead() { void Client::C_Readahead::finish(int r) { lgeneric_subdout(client->cct, client, 20) << "client." << client->get_nodeid() << " " << "C_Readahead on " << f->inode << dendl; client->put_cap_ref(f->inode.get(), CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE); + if (r > 0) { + client->update_read_io_size(r); + } } int Client::_read_async(Fh *f, uint64_t off, uint64_t len, bufferlist *bl) @@ -10049,6 +10067,7 @@ int Client::_read_async(Fh *f, uint64_t off, uint64_t len, bufferlist *bl) r = onfinish.wait(); client_lock.lock(); put_cap_ref(in, CEPH_CAP_FILE_CACHE); + update_read_io_size(bl->length()); } if(f->readahead.get_min_readahead_size() > 0) { @@ -10424,6 +10443,7 @@ int64_t Client::_write(Fh *f, int64_t offset, uint64_t size, const char *buf, // if we get here, write was successful, update client metadata success: + update_write_io_size(size); // time lat = ceph_clock_now(); lat -= start; diff --git a/ceph/src/client/Client.h b/ceph/src/client/Client.h index 2884cee55..4a30c1ba7 100644 --- a/ceph/src/client/Client.h +++ b/ceph/src/client/Client.h @@ -808,6 +808,16 @@ public: void tick(); void start_tick_thread(); + void update_read_io_size(size_t size) { + total_read_ops++; + total_read_size += size; + } + + void update_write_io_size(size_t size) { + total_write_ops++; + total_write_size += size; + } + void inc_dentry_nr() { ++dentry_nr; } @@ -1553,6 +1563,12 @@ private: uint64_t pinned_icaps = 0; uint64_t opened_inodes = 0; + uint64_t total_read_ops = 0; + uint64_t total_read_size = 0; + + uint64_t total_write_ops = 0; + uint64_t total_write_size = 0; + ceph::spinlock delay_i_lock; std::map delay_i_release; }; diff --git a/ceph/src/cls/rbd/cls_rbd.cc b/ceph/src/cls/rbd/cls_rbd.cc index 33910b7df..870383d3d 100644 --- a/ceph/src/cls/rbd/cls_rbd.cc +++ b/ceph/src/cls/rbd/cls_rbd.cc @@ -5059,7 +5059,22 @@ int image_status_set(cls_method_context_t hctx, const string &global_image_id, ondisk_status.up = false; ondisk_status.last_update = ceph_clock_now(); - int r = cls_get_request_origin(hctx, &ondisk_status.origin); + std::string global_id_key = global_key(global_image_id); + std::string image_id; + int r = read_key(hctx, global_id_key, &image_id); + if (r < 0) { + return 0; + } + cls::rbd::MirrorImage mirror_image; + r = image_get(hctx, image_id, &mirror_image); + if (r < 0) { + return 0; + } + if (mirror_image.state != cls::rbd::MIRROR_IMAGE_STATE_ENABLED) { + return 0; + } + + r = cls_get_request_origin(hctx, &ondisk_status.origin); ceph_assert(r == 0); bufferlist bl; @@ -6366,7 +6381,6 @@ int mirror_image_status_set(cls_method_context_t hctx, bufferlist *in, * Output: * @returns 0 on success, negative error code on failure * - * NOTE: deprecated - remove this method after Octopus is unsupported */ int mirror_image_status_remove(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { diff --git a/ceph/src/cls/rbd/cls_rbd_client.cc b/ceph/src/cls/rbd/cls_rbd_client.cc index cefa1fed7..535de35ef 100644 --- a/ceph/src/cls/rbd/cls_rbd_client.cc +++ b/ceph/src/cls/rbd/cls_rbd_client.cc @@ -2310,6 +2310,20 @@ int mirror_image_status_get_summary_finish( return 0; } +int 
mirror_image_status_remove(librados::IoCtx *ioctx, + const std::string &global_image_id) { + librados::ObjectWriteOperation op; + mirror_image_status_remove(&op, global_image_id); + return ioctx->operate(RBD_MIRRORING, &op); +} + +void mirror_image_status_remove(librados::ObjectWriteOperation *op, + const std::string &global_image_id) { + bufferlist bl; + encode(global_image_id, bl); + op->exec("rbd", "mirror_image_status_remove", bl); +} + int mirror_image_status_remove_down(librados::IoCtx *ioctx) { librados::ObjectWriteOperation op; mirror_image_status_remove_down(&op); diff --git a/ceph/src/cls/rbd/cls_rbd_client.h b/ceph/src/cls/rbd/cls_rbd_client.h index 12b34c483..9bb5a41d8 100644 --- a/ceph/src/cls/rbd/cls_rbd_client.h +++ b/ceph/src/cls/rbd/cls_rbd_client.h @@ -478,6 +478,10 @@ void mirror_image_status_get_summary_start( int mirror_image_status_get_summary_finish( ceph::buffer::list::const_iterator *iter, std::map *states); +int mirror_image_status_remove(librados::IoCtx *ioctx, + const std::string &global_image_id); +void mirror_image_status_remove(librados::ObjectWriteOperation *op, + const std::string &global_image_id); int mirror_image_status_remove_down(librados::IoCtx *ioctx); void mirror_image_status_remove_down(librados::ObjectWriteOperation *op); diff --git a/ceph/src/common/Timer.cc b/ceph/src/common/Timer.cc index eab46661c..b96d9d2ff 100644 --- a/ceph/src/common/Timer.cc +++ b/ceph/src/common/Timer.cc @@ -24,18 +24,19 @@ using std::pair; using ceph::operator <<; -class SafeTimerThread : public Thread { - SafeTimer *parent; +template +class CommonSafeTimerThread : public Thread { + CommonSafeTimer *parent; public: - explicit SafeTimerThread(SafeTimer *s) : parent(s) {} + explicit CommonSafeTimerThread(CommonSafeTimer *s) : parent(s) {} void *entry() override { parent->timer_thread(); return NULL; } }; - -SafeTimer::SafeTimer(CephContext *cct_, ceph::mutex &l, bool safe_callbacks) +template +CommonSafeTimer::CommonSafeTimer(CephContext *cct_, Mutex &l, bool safe_callbacks) : cct(cct_), lock(l), safe_callbacks(safe_callbacks), thread(NULL), @@ -43,19 +44,22 @@ SafeTimer::SafeTimer(CephContext *cct_, ceph::mutex &l, bool safe_callbacks) { } -SafeTimer::~SafeTimer() +template +CommonSafeTimer::~CommonSafeTimer() { ceph_assert(thread == NULL); } -void SafeTimer::init() +template +void CommonSafeTimer::init() { ldout(cct,10) << "init" << dendl; - thread = new SafeTimerThread(this); + thread = new CommonSafeTimerThread(this); thread->create("safe_timer"); } -void SafeTimer::shutdown() +template +void CommonSafeTimer::shutdown() { ldout(cct,10) << "shutdown" << dendl; if (thread) { @@ -71,7 +75,8 @@ void SafeTimer::shutdown() } } -void SafeTimer::timer_thread() +template +void CommonSafeTimer::timer_thread() { std::unique_lock l{lock}; ldout(cct,10) << "timer_thread starting" << dendl; @@ -115,12 +120,14 @@ void SafeTimer::timer_thread() ldout(cct,10) << "timer_thread exiting" << dendl; } -Context* SafeTimer::add_event_after(double seconds, Context *callback) +template +Context* CommonSafeTimer::add_event_after(double seconds, Context *callback) { return add_event_after(ceph::make_timespan(seconds), callback); } -Context* SafeTimer::add_event_after(ceph::timespan duration, Context *callback) +template +Context* CommonSafeTimer::add_event_after(ceph::timespan duration, Context *callback) { ceph_assert(ceph_mutex_is_locked(lock)); @@ -128,7 +135,8 @@ Context* SafeTimer::add_event_after(ceph::timespan duration, Context *callback) return add_event_at(when, callback); } -Context* 
SafeTimer::add_event_at(SafeTimer::clock_t::time_point when, Context *callback) +template +Context* CommonSafeTimer::add_event_at(CommonSafeTimer::clock_t::time_point when, Context *callback) { ceph_assert(ceph_mutex_is_locked(lock)); ldout(cct,10) << __func__ << " " << when << " -> " << callback << dendl; @@ -153,10 +161,11 @@ Context* SafeTimer::add_event_at(SafeTimer::clock_t::time_point when, Context *c return callback; } -bool SafeTimer::cancel_event(Context *callback) +template +bool CommonSafeTimer::cancel_event(Context *callback) { ceph_assert(ceph_mutex_is_locked(lock)); - + auto p = events.find(callback); if (p == events.end()) { ldout(cct,10) << "cancel_event " << callback << " not found" << dendl; @@ -171,7 +180,8 @@ bool SafeTimer::cancel_event(Context *callback) return true; } -void SafeTimer::cancel_all_events() +template +void CommonSafeTimer::cancel_all_events() { ldout(cct,10) << "cancel_all_events" << dendl; ceph_assert(ceph_mutex_is_locked(lock)); @@ -185,7 +195,8 @@ void SafeTimer::cancel_all_events() } } -void SafeTimer::dump(const char *caller) const +template +void CommonSafeTimer::dump(const char *caller) const { if (!caller) caller = ""; @@ -196,3 +207,6 @@ void SafeTimer::dump(const char *caller) const ++s) ldout(cct,10) << " " << s->first << "->" << s->second << dendl; } + +template class CommonSafeTimer; +template class CommonSafeTimer; diff --git a/ceph/src/common/Timer.h b/ceph/src/common/Timer.h index f543be68a..f22956a34 100644 --- a/ceph/src/common/Timer.h +++ b/ceph/src/common/Timer.h @@ -19,19 +19,23 @@ #include "include/common_fwd.h" #include "ceph_time.h" #include "ceph_mutex.h" +#include "fair_mutex.h" +#include class Context; -class SafeTimerThread; -class SafeTimer +template class CommonSafeTimerThread; + +template +class CommonSafeTimer { CephContext *cct; - ceph::mutex& lock; - ceph::condition_variable cond; + Mutex& lock; + std::condition_variable_any cond; bool safe_callbacks; - friend class SafeTimerThread; - SafeTimerThread *thread; + friend class CommonSafeTimerThread; + class CommonSafeTimerThread *thread; void timer_thread(); void _shutdown(); @@ -47,8 +51,8 @@ class SafeTimer public: // This class isn't supposed to be copied - SafeTimer(const SafeTimer&) = delete; - SafeTimer& operator=(const SafeTimer&) = delete; + CommonSafeTimer(const CommonSafeTimer&) = delete; + CommonSafeTimer& operator=(const CommonSafeTimer&) = delete; /* Safe callbacks determines whether callbacks are called with the lock * held. @@ -60,8 +64,8 @@ public: * If you are able to relax requirements on cancelled callbacks, then * setting safe_callbacks = false eliminates the lock cycle issue. * */ - SafeTimer(CephContext *cct, ceph::mutex &l, bool safe_callbacks=true); - virtual ~SafeTimer(); + CommonSafeTimer(CephContext *cct, Mutex &l, bool safe_callbacks=true); + virtual ~CommonSafeTimer(); /* Call with the event_lock UNLOCKED. 
* @@ -96,4 +100,8 @@ public: }; +extern template class CommonSafeTimer; +extern template class CommonSafeTimer; +using SafeTimer = class CommonSafeTimer; + #endif diff --git a/ceph/src/common/config.cc b/ceph/src/common/config.cc index 491685867..e15c99e7d 100644 --- a/ceph/src/common/config.cc +++ b/ceph/src/common/config.cc @@ -1029,6 +1029,16 @@ void md_config_t::get_config_bl( } } +std::optional md_config_t::get_val_default(std::string_view key) +{ + std::string val; + const Option *opt = find_option(key); + if (opt && (conf_stringify(_get_val_default(*opt), &val) == 0)) { + return std::make_optional(std::move(val)); + } + return std::nullopt; +} + int md_config_t::get_val(const ConfigValues& values, const std::string_view key, char **buf, int len) const { diff --git a/ceph/src/common/config.h b/ceph/src/common/config.h index ef7d5b34f..989f5029e 100644 --- a/ceph/src/common/config.h +++ b/ceph/src/common/config.h @@ -191,6 +191,9 @@ public: /// get encoded map of compiled-in defaults void get_defaults_bl(const ConfigValues& values, ceph::buffer::list *bl); + /// Get the default value of a configuration option + std::optional get_val_default(std::string_view key); + // Get a configuration value. // No metavariables will be returned (they will have already been expanded) int get_val(const ConfigValues& values, const std::string_view key, char **buf, int len) const; diff --git a/ceph/src/common/config_proxy.h b/ceph/src/common/config_proxy.h index cb30a2d7f..e43a7c6dd 100644 --- a/ceph/src/common/config_proxy.h +++ b/ceph/src/common/config_proxy.h @@ -344,6 +344,9 @@ public: const std::string& get_conf_path() const { return config.get_conf_path(); } + std::optional get_val_default(std::string_view key) { + return config.get_val_default(key); + } }; } diff --git a/ceph/src/common/fair_mutex.h b/ceph/src/common/fair_mutex.h new file mode 100644 index 000000000..9baa04400 --- /dev/null +++ b/ceph/src/common/fair_mutex.h @@ -0,0 +1,80 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- + +#pragma once + +#include "common/ceph_mutex.h" + +#include +#include + +namespace ceph { +/// a FIFO mutex +class fair_mutex { +public: + fair_mutex(const std::string& name) + : mutex{ceph::make_mutex(name)} + {} + ~fair_mutex() = default; + fair_mutex(const fair_mutex&) = delete; + fair_mutex& operator=(const fair_mutex&) = delete; + + void lock() + { + std::unique_lock lock(mutex); + const unsigned my_id = next_id++; + cond.wait(lock, [&] { + return my_id == unblock_id; + }); + _set_locked_by(); + } + + bool try_lock() + { + std::lock_guard lock(mutex); + if (is_locked()) { + return false; + } + ++next_id; + _set_locked_by(); + return true; + } + + void unlock() + { + std::lock_guard lock(mutex); + ++unblock_id; + _reset_locked_by(); + cond.notify_all(); + } + + bool is_locked() const + { + return next_id != unblock_id; + } + +#ifdef CEPH_DEBUG_MUTEX + bool is_locked_by_me() const { + return is_locked() && locked_by == std::this_thread::get_id(); + } +private: + void _set_locked_by() { + locked_by = std::this_thread::get_id(); + } + void _reset_locked_by() { + locked_by = {}; + } +#else + void _set_locked_by() {} + void _reset_locked_by() {} +#endif + +private: + unsigned next_id = 0; + unsigned unblock_id = 0; + ceph::condition_variable cond; + ceph::mutex mutex; +#ifdef CEPH_DEBUG_MUTEX + std::thread::id locked_by = {}; +#endif +}; +} // namespace ceph diff --git a/ceph/src/common/legacy_config_opts.h b/ceph/src/common/legacy_config_opts.h index f2610f33a..d22061f8a 100644 --- 
a/ceph/src/common/legacy_config_opts.h +++ b/ceph/src/common/legacy_config_opts.h @@ -1036,6 +1036,7 @@ OPTION(bluestore_debug_fsck_abort, OPT_BOOL) OPTION(bluestore_debug_omit_kv_commit, OPT_BOOL) OPTION(bluestore_debug_permit_any_bdev_label, OPT_BOOL) OPTION(bluestore_debug_random_read_err, OPT_DOUBLE) +OPTION(bluestore_debug_legacy_omap, OPT_BOOL) OPTION(bluestore_debug_inject_bug21040, OPT_BOOL) OPTION(bluestore_debug_inject_csum_err_probability, OPT_FLOAT) OPTION(bluestore_fsck_error_on_no_per_pool_stats, OPT_BOOL) @@ -1405,6 +1406,7 @@ OPTION(rgw_usage_max_user_shards, OPT_INT) OPTION(rgw_enable_ops_log, OPT_BOOL) // enable logging every rgw operation OPTION(rgw_enable_usage_log, OPT_BOOL) // enable logging bandwidth usage OPTION(rgw_ops_log_rados, OPT_BOOL) // whether ops log should go to rados +OPTION(rgw_ops_log_file_path, OPT_STR) // path to file where ops log can go OPTION(rgw_ops_log_socket_path, OPT_STR) // path to unix domain socket where ops log can go OPTION(rgw_ops_log_data_backlog, OPT_INT) // max data backlog for ops log OPTION(rgw_fcgi_socket_backlog, OPT_INT) // socket backlog for fcgi diff --git a/ceph/src/common/options.cc b/ceph/src/common/options.cc index bdec3edaf..2f28123ba 100644 --- a/ceph/src/common/options.cc +++ b/ceph/src/common/options.cc @@ -3123,12 +3123,6 @@ std::vector
"+this.getTitle(e)+"
'+this.getBody(t)+"