From 2a845540123ad00df2e55947b8080306ebdcf410 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Sat, 1 Oct 2022 10:42:36 +0200 Subject: [PATCH] import ceph quincy 17.2.4 Signed-off-by: Thomas Lamprecht --- ceph/.readthedocs.yml | 12 +- ceph/CMakeLists.txt | 12 +- ceph/PendingReleaseNotes | 10 + ceph/admin/rtd-checkout-main | 10 + ceph/ceph.spec | 75 +- ceph/ceph.spec.in | 69 +- ceph/changelog.upstream | 6 + ceph/cmake/modules/AddCephTest.cmake | 2 +- ceph/cmake/modules/BuildBoost.cmake | 20 +- ceph/cmake/modules/Buildpmem.cmake | 3 +- ceph/cmake/modules/Distutils.cmake | 4 +- ceph/debian/control | 7 +- ceph/doc/cephadm/host-management.rst | 108 +- ceph/doc/cephadm/operations.rst | 21 +- ceph/doc/cephadm/services/index.rst | 72 +- ceph/doc/cephadm/services/monitoring.rst | 32 + ceph/doc/cephadm/services/osd.rst | 12 + ceph/doc/cephadm/services/rgw.rst | 37 +- ceph/doc/cephadm/upgrade.rst | 6 + ceph/doc/cephfs/cephfs-top.png | Bin 18369 -> 13928 bytes ceph/doc/cephfs/cephfs-top.rst | 2 +- ceph/doc/cephfs/fs-volumes.rst | 127 +- ceph/doc/cephfs/mds-state-diagram.dot | 2 + ceph/doc/cephfs/quota.rst | 12 + ceph/doc/dev/continuous-integration.rst | 17 +- ceph/doc/dev/crimson/crimson.rst | 39 + ceph/doc/dev/delayed-delete.rst | 15 +- .../dev/developer_guide/basic-workflow.rst | 50 +- ceph/doc/dev/developer_guide/essentials.rst | 38 +- ceph/doc/dev/developer_guide/merging.rst | 34 +- .../osd_internals/mclock_wpq_cmp_study.rst | 6 +- ceph/doc/dev/release-process.rst | 304 +- ceph/doc/index.rst | 7 + ceph/doc/man/8/cephadm.rst | 11 + ceph/doc/man/8/cephfs-top.rst | 35 +- ceph/doc/man/8/rbd.rst | 3 +- ceph/doc/mgr/dashboard.rst | 614 +- ceph/doc/mgr/dashboard_plugins/debug.inc.rst | 25 +- .../dashboard_plugins/feature_toggles.inc.rst | 18 +- ceph/doc/mgr/dashboard_plugins/motd.inc.rst | 18 +- ceph/doc/mgr/orchestrator.rst | 106 +- ceph/doc/mgr/telemetry.rst | 1 + .../rados/configuration/mclock-config-ref.rst | 145 +- ceph/doc/rados/operations/operating.rst | 86 +- 
.../doc/rados/operations/placement-groups.rst | 13 +- ceph/doc/radosgw/STS.rst | 11 +- ceph/doc/radosgw/STSLite.rst | 2 +- ceph/doc/radosgw/config-ref.rst | 16 + ceph/doc/radosgw/encryption.rst | 24 +- ceph/doc/radosgw/vault.rst | 14 +- ceph/doc/rbd/rbd-mirroring.rst | 2 +- ceph/doc/start/documenting-ceph.rst | 24 +- ceph/doc/start/hardware-recommendations.rst | 106 +- ceph/doc/start/intro.rst | 10 +- ceph/install-deps.sh | 19 +- ceph/monitoring/ceph-mixin/CMakeLists.txt | 15 +- ceph/monitoring/ceph-mixin/Makefile | 2 +- ceph/monitoring/ceph-mixin/README.md | 15 +- ceph/monitoring/ceph-mixin/alerts.jsonnet | 1 + ceph/monitoring/ceph-mixin/alerts.libsonnet | 3 +- ceph/monitoring/ceph-mixin/config.libsonnet | 12 +- .../ceph-mixin/dashboards.libsonnet | 10 + .../ceph-mixin/dashboards/cephfs.libsonnet | 180 +- .../dashboards/dashboards.libsonnet | 6 - .../ceph-mixin/dashboards/host.libsonnet | 1230 +-- .../ceph-mixin/dashboards/osd.libsonnet | 1069 ++- .../ceph-mixin/dashboards/pool.libsonnet | 1093 ++- .../ceph-mixin/dashboards/rbd.libsonnet | 607 +- .../ceph-mixin/dashboards/rgw.libsonnet | 1429 +-- .../ceph-mixin/dashboards/utils.libsonnet | 172 +- .../ceph-mixin/dashboards_out/.lint | 5 + .../dashboards_out/cephfs-overview.json | 55 +- .../dashboards_out/host-details.json | 91 +- .../dashboards_out/hosts-overview.json | 70 +- .../dashboards_out/osd-device-details.json | 74 +- .../dashboards_out/osds-overview.json | 99 +- .../dashboards_out/pool-detail.json | 77 +- .../dashboards_out/pool-overview.json | 105 +- .../dashboards_out/radosgw-detail.json | 70 +- .../dashboards_out/radosgw-overview.json | 146 +- .../dashboards_out/radosgw-sync-overview.json | 82 +- .../dashboards_out/rbd-details.json | 76 +- .../dashboards_out/rbd-overview.json | 60 +- ceph/monitoring/ceph-mixin/jsonnet-build.sh | 10 - ceph/monitoring/ceph-mixin/mixin.libsonnet | 2 +- .../ceph-mixin/prometheus_alerts.libsonnet | 718 ++ .../ceph-mixin/prometheus_alerts.yml | 1454 ++- 
ceph/monitoring/ceph-mixin/test-jsonnet.sh | 4 + .../ceph-mixin/tests_alerts/test_alerts.yml | 461 +- .../features/host-details.feature | 110 +- .../features/hosts_overview.feature | 4 +- .../features/osd-device-details.feature | 28 +- .../features/radosgw-detail.feature | 18 +- .../features/radosgw_overview.feature | 76 +- .../ceph-mixin/tests_dashboards/util.py | 5 + ceph/monitoring/ceph-mixin/tox.ini | 5 +- .../misc/test-mclock-profile-switch.sh | 198 + .../qa/suites/fs/full/tasks/mgr-osd-full.yaml | 12 +- .../tasks/snap_schedule_snapdir.yaml | 30 + .../permission/tasks/cfuse_workunit_misc.yaml | 1 + .../qa/suites/orch/cephadm/workunits/0-distro | 1 + .../workloads/dynamic_features_no_cache.yaml | 1 + .../rbd/persistent-writeback-cache/1-base | 1 - .../4-pool/big-cache.yaml | 15 - .../4-pool/cache.yaml | 15 - .../c_api_tests_with_defaults.yaml | 1 - .../6-workloads/recovery.yaml | 10 - .../.qa | 0 .../home}/% | 0 .../2-cluster => pwl-cache/home}/.qa | 0 ceph/qa/suites/rbd/pwl-cache/home/1-base | 1 + .../home}/2-cluster/+ | 0 .../home/2-cluster}/.qa | 0 .../home}/2-cluster/fix-2.yaml | 0 .../home}/2-cluster/openstack.yaml | 0 .../home}/3-supported-random-distro$ | 0 .../rbd/pwl-cache/home/4-cache-path.yaml | 13 + .../rbd/pwl-cache/home/5-cache-mode/.qa | 1 + .../home}/5-cache-mode/rwl.yaml | 0 .../home}/5-cache-mode/ssd.yaml | 0 .../rbd/pwl-cache/home/6-cache-size/.qa | 1 + .../rbd/pwl-cache/home/6-cache-size/1G.yaml | 5 + .../rbd/pwl-cache/home/6-cache-size/8G.yaml | 5 + .../suites/rbd/pwl-cache/home/7-workloads/.qa | 1 + .../c_api_tests_with_defaults.yaml | 1 + .../home/7-workloads}/fio.yaml | 0 .../pwl-cache/home/7-workloads/recovery.yaml | 9 + .../suites/rbd/pwl-cache/tmpfs/%} | 0 ceph/qa/suites/rbd/pwl-cache/tmpfs/.qa | 1 + ceph/qa/suites/rbd/pwl-cache/tmpfs/1-base | 1 + .../qa/suites/rbd/pwl-cache/tmpfs/2-cluster/+ | 0 .../suites/rbd/pwl-cache/tmpfs/2-cluster/.qa | 1 + .../rbd/pwl-cache/tmpfs/2-cluster/fix-2.yaml | 3 + 
.../pwl-cache/tmpfs/2-cluster/openstack.yaml | 4 + .../tmpfs/3-supported-random-distro$ | 1 + .../rbd/pwl-cache/tmpfs/4-cache-path.yaml | 22 + .../rbd/pwl-cache/tmpfs/5-cache-mode/.qa | 1 + .../rbd/pwl-cache/tmpfs/5-cache-mode/rwl.yaml | 5 + .../rbd/pwl-cache/tmpfs/5-cache-mode/ssd.yaml | 5 + .../rbd/pwl-cache/tmpfs/6-cache-size/.qa | 1 + .../rbd/pwl-cache/tmpfs/6-cache-size/1G.yaml | 5 + .../rbd/pwl-cache/tmpfs/6-cache-size/5G.yaml | 5 + .../rbd/pwl-cache/tmpfs/7-workloads/.qa | 1 + .../tmpfs/7-workloads}/qemu_xfstests.yaml | 0 .../suites/rgw/crypt/2-kms/vault_transit.yaml | 6 + ceph/qa/tasks/ceph_manager.py | 48 +- ceph/qa/tasks/cephfs/filesystem.py | 5 + ceph/qa/tasks/cephfs/kernel_mount.py | 39 +- ceph/qa/tasks/cephfs/mount.py | 31 +- ceph/qa/tasks/cephfs/test_failover.py | 19 + ceph/qa/tasks/cephfs/test_nfs.py | 7 +- ceph/qa/tasks/cephfs/test_readahead.py | 3 +- ceph/qa/tasks/cephfs/test_snap_schedules.py | 71 +- ceph/qa/tasks/cephfs/test_strays.py | 41 +- ceph/qa/tasks/cephfs/test_volumes.py | 1388 ++- .../tasks/mgr/dashboard/test_orchestrator.py | 2 +- ceph/qa/tasks/mgr/dashboard/test_rbd.py | 10 +- ceph/qa/tasks/mgr/dashboard/test_rgw.py | 4 +- ceph/qa/tasks/rbd_pwl_cache_recovery.py | 96 + ceph/qa/tasks/rgw.py | 7 +- ceph/qa/tasks/s3tests.py | 7 + ceph/qa/workunits/cephadm/test_repos.sh | 2 +- ceph/qa/workunits/fs/full/subvolume_clone.sh | 114 + ceph/qa/workunits/fs/full/subvolume_rm.sh | 10 + .../fs/full/subvolume_snapshot_rm.sh | 86 + ceph/qa/workunits/fs/misc/dac_override.sh | 19 + ceph/qa/workunits/mon/config.sh | 11 + ceph/qa/workunits/rbd/cli_generic.sh | 119 +- ceph/src/.git_version | 4 +- ceph/src/CMakeLists.txt | 3 + ceph/src/SimpleRADOSStriper.cc | 5 +- ceph/src/auth/Crypto.cc | 10 + ceph/src/blk/kernel/KernelDevice.cc | 11 + ceph/src/ceph-volume/ceph_volume/api/lvm.py | 11 + .../ceph-volume/ceph_volume/configuration.py | 5 +- .../ceph_volume/devices/lvm/batch.py | 18 +- .../ceph_volume/devices/lvm/listing.py | 5 +- 
.../ceph_volume/devices/lvm/zap.py | 28 +- .../ceph_volume/devices/raw/list.py | 50 +- .../ceph_volume/devices/simple/scan.py | 8 +- ceph/src/ceph-volume/ceph_volume/main.py | 4 +- .../ceph-volume/ceph_volume/tests/conftest.py | 62 +- .../tests/devices/lvm/test_batch.py | 10 + .../tests/devices/lvm/test_migrate.py | 12 +- .../ceph_volume/tests/devices/lvm/test_zap.py | 6 +- .../tests/devices/raw/test_list.py | 39 +- .../tests/devices/simple/test_activate.py | 48 +- .../tests/devices/simple/test_scan.py | 27 +- .../ceph_volume/tests/test_inventory.py | 12 +- .../ceph_volume/tests/test_main.py | 10 +- .../tests/util/test_arg_validators.py | 24 +- .../ceph_volume/tests/util/test_device.py | 282 +- .../ceph_volume/tests/util/test_disk.py | 143 +- .../ceph_volume/tests/util/test_encryption.py | 6 +- .../ceph_volume/tests/util/test_system.py | 60 +- .../ceph-volume/ceph_volume/util/device.py | 190 +- ceph/src/ceph-volume/ceph_volume/util/disk.py | 173 +- .../ceph_volume/util/encryption.py | 13 +- .../ceph-volume/ceph_volume/util/prepare.py | 9 +- .../ceph-volume/ceph_volume/util/system.py | 159 +- ceph/src/ceph-volume/tox.ini | 1 + ceph/src/ceph-volume/tox_install_command.sh | 2 +- ceph/src/cephadm/cephadm | 383 +- .../cephadm/containers/keepalived/Dockerfile | 24 + .../src/cephadm/containers/keepalived/LICENSE | 21 + .../cephadm/containers/keepalived/README.md | 233 + .../containers/keepalived/skel/init.sh | 22 + ceph/src/cephadm/tests/test_cephadm.py | 113 +- ceph/src/cephadm/tox.ini | 4 +- ceph/src/client/Client.cc | 240 +- ceph/src/client/Client.h | 25 +- ceph/src/client/Inode.h | 2 +- ceph/src/client/MetaRequest.h | 44 +- ceph/src/client/fuse_ll.cc | 279 +- ceph/src/client/hypertable/CephBroker.cc | 2 +- ceph/src/cls/rbd/cls_rbd_types.cc | 16 +- ceph/src/common/ceph_crypto.cc | 9 + ceph/src/common/ceph_crypto.h | 9 + ceph/src/common/ceph_mutex.h | 23 +- ceph/src/common/dout.h | 5 +- ceph/src/common/openssl_opts_handler.cc | 7 + ceph/src/common/options/CMakeLists.txt 
| 1 + ceph/src/common/options/build_options.cc | 2 + ceph/src/common/options/ceph-exporter.yaml.in | 54 + ceph/src/common/options/mds-client.yaml.in | 19 +- ceph/src/common/options/mds.yaml.in | 6 + ceph/src/common/options/rbd.yaml.in | 2 +- ceph/src/common/options/rgw.yaml.in | 149 + ceph/src/common/subsys.h | 1 + ceph/src/crimson/admin/osd_admin.cc | 3 +- ceph/src/crimson/os/seastore/logging.h | 22 - .../onode_manager/staged-fltree/node_types.h | 34 + ceph/src/crimson/os/seastore/seastore.cc | 40 + .../os/seastore/segment_manager/block.cc | 24 + ceph/src/dokan/ceph_dokan.cc | 2 +- ceph/src/dokan/ceph_dokan.h | 2 +- ceph/src/dokan/dbg.cc | 2 +- ceph/src/exporter/CMakeLists.txt | 10 + ceph/src/exporter/DaemonMetricCollector.cc | 391 + ceph/src/exporter/DaemonMetricCollector.h | 104 + ceph/src/exporter/ceph_exporter.cc | 65 + ceph/src/exporter/http_server.cc | 169 + ceph/src/exporter/http_server.h | 5 + ceph/src/exporter/util.cc | 48 + ceph/src/exporter/util.h | 22 + ceph/src/include/buffer.h | 5 +- ceph/src/include/cephfs/ceph_ll_client.h | 15 +- ceph/src/include/cephfs/libcephfs.h | 6 +- ceph/src/include/cephfs/metrics/Types.h | 113 +- ceph/src/include/err.h | 9 +- ceph/src/include/interval_set.h | 30 + ceph/src/kv/RocksDBStore.cc | 7 + ceph/src/librados/RadosClient.cc | 2 +- ceph/src/librados/librados_c.cc | 4 +- ceph/src/librbd/ImageWatcher.cc | 7 +- ceph/src/librbd/api/Mirror.cc | 25 +- ceph/src/librbd/cache/pwl/AbstractWriteLog.cc | 44 +- ceph/src/librbd/cache/pwl/AbstractWriteLog.h | 4 +- ceph/src/librbd/cache/pwl/ImageCacheState.cc | 7 +- ceph/src/librbd/cache/pwl/ImageCacheState.h | 3 +- ceph/src/librbd/cache/pwl/rwl/WriteLog.cc | 9 +- ceph/src/librbd/cache/pwl/ssd/WriteLog.cc | 9 +- ceph/src/librbd/deep_copy/ImageCopyRequest.cc | 43 +- ceph/src/librbd/deep_copy/ImageCopyRequest.h | 2 +- ceph/src/librbd/image/RefreshRequest.cc | 88 +- ceph/src/librbd/image/RefreshRequest.h | 58 +- ceph/src/librbd/mirror/PromoteRequest.cc | 3 +- 
.../mirror/snapshot/CreatePrimaryRequest.cc | 2 +- ceph/src/mds/Beacon.cc | 6 + ceph/src/mds/CDir.cc | 35 +- ceph/src/mds/CDir.h | 1 + ceph/src/mds/CInode.cc | 8 +- ceph/src/mds/Locker.cc | 111 +- ceph/src/mds/Locker.h | 6 +- ceph/src/mds/MDCache.cc | 7 +- ceph/src/mds/MDLog.cc | 57 +- ceph/src/mds/MDLog.h | 6 +- ceph/src/mds/MDSDaemon.cc | 4 +- ceph/src/mds/MDSMap.cc | 56 +- ceph/src/mds/MDSMap.h | 7 +- ceph/src/mds/MDSPerfMetricTypes.h | 48 +- ceph/src/mds/MDSRank.cc | 11 +- ceph/src/mds/MDSRank.h | 2 +- ceph/src/mds/MetricAggregator.cc | 36 + ceph/src/mds/MetricsHandler.cc | 21 +- ceph/src/mds/OpenFileTable.cc | 14 +- ceph/src/mds/OpenFileTable.h | 1 - ceph/src/mds/Server.cc | 107 +- ceph/src/mds/Server.h | 1 + ceph/src/mds/SimpleLock.h | 5 +- ceph/src/mds/cephfs_features.h | 6 + ceph/src/messages/MMDSBeacon.h | 2 + ceph/src/messages/MMgrUpdate.h | 84 + ceph/src/mgr/BaseMgrModule.cc | 6 + ceph/src/mgr/CMakeLists.txt | 1 + ceph/src/mgr/DaemonServer.cc | 58 +- ceph/src/mgr/DaemonServer.h | 2 + ceph/src/mgr/MDSPerfMetricTypes.cc | 30 + ceph/src/mgr/MDSPerfMetricTypes.h | 12 + ceph/src/mgr/MgrClient.cc | 43 + ceph/src/mgr/MgrClient.h | 6 + ceph/src/mon/ConfigMonitor.cc | 6 +- ceph/src/mon/Elector.cc | 69 +- ceph/src/mon/LogMonitor.cc | 2 +- ceph/src/mon/MDSMonitor.cc | 63 +- ceph/src/mon/MonCap.cc | 3 + ceph/src/mon/MonClient.cc | 2 +- ceph/src/mon/MonClient.h | 2 +- ceph/src/mon/Monitor.cc | 23 +- ceph/src/mon/Monitor.h | 2 + ceph/src/mon/MonmapMonitor.cc | 2 + ceph/src/mon/OSDMonitor.cc | 8 + ceph/src/mrun | 2 +- ceph/src/msg/Message.cc | 5 + ceph/src/msg/Message.h | 3 + ceph/src/msg/MessageRef.h | 1 + ceph/src/msg/async/ProtocolV1.cc | 6 +- ceph/src/msg/async/ProtocolV2.cc | 22 +- ceph/src/msg/msg_types.h | 31 +- ceph/src/mypy-constrains.txt | 1 + ceph/src/neorados/RADOSImpl.cc | 2 +- ceph/src/os/bluestore/BlueStore.cc | 87 +- ceph/src/os/bluestore/BlueStore.h | 2 + ceph/src/os/bluestore/bluestore_types.cc | 33 +- ceph/src/os/bluestore/bluestore_types.h | 43 +- 
ceph/src/osd/OSD.cc | 28 +- ceph/src/osd/PG.cc | 19 +- ceph/src/osd/PGLog.cc | 89 +- ceph/src/osd/PGLog.h | 32 +- ceph/src/osd/PeeringState.cc | 4 +- ceph/src/osd/PrimaryLogPG.cc | 29 +- ceph/src/osd/PrimaryLogPG.h | 2 +- ceph/src/osd/SnapMapper.cc | 40 +- ceph/src/osd/SnapMapper.h | 5 + ceph/src/osd/osd_types.cc | 17 +- ceph/src/osd/osd_types_fmt.h | 11 + ceph/src/osd/scheduler/mClockScheduler.cc | 18 +- ceph/src/osd/scrubber/pg_scrubber.cc | 14 +- ceph/src/pybind/cephfs/cephfs.pyx | 15 +- ceph/src/pybind/mgr/cephadm/inventory.py | 209 +- ceph/src/pybind/mgr/cephadm/migrations.py | 1 + ceph/src/pybind/mgr/cephadm/module.py | 103 +- ceph/src/pybind/mgr/cephadm/registry.py | 6 +- ceph/src/pybind/mgr/cephadm/schedule.py | 34 +- ceph/src/pybind/mgr/cephadm/serve.py | 50 +- .../mgr/cephadm/services/cephadmservice.py | 10 +- .../pybind/mgr/cephadm/services/ingress.py | 70 +- .../pybind/mgr/cephadm/services/monitoring.py | 91 +- .../services/ingress/keepalived.conf.j2 | 12 +- .../templates/services/promtail.yml.j2 | 8 +- ceph/src/pybind/mgr/cephadm/tests/fixtures.py | 3 +- .../pybind/mgr/cephadm/tests/test_cephadm.py | 177 +- .../mgr/cephadm/tests/test_scheduling.py | 230 +- .../pybind/mgr/cephadm/tests/test_services.py | 299 +- .../mgr/cephadm/tests/test_tuned_profiles.py | 220 + ceph/src/pybind/mgr/cephadm/tuned_profiles.py | 80 + ceph/src/pybind/mgr/cephadm/upgrade.py | 8 +- ceph/src/pybind/mgr/dashboard/__init__.py | 8 + .../mgr/dashboard/cherrypy_backports.py | 27 +- .../dashboard/ci/cephadm/bootstrap-cluster.sh | 11 +- .../ci/cephadm/run-cephadm-e2e-tests.sh | 13 +- .../mgr/dashboard/ci/cephadm/start-cluster.sh | 31 +- ceph/src/pybind/mgr/dashboard/constraints.txt | 2 +- .../pybind/mgr/dashboard/controllers/home.py | 3 +- .../pybind/mgr/dashboard/controllers/nfs.py | 38 +- .../mgr/dashboard/controllers/orchestrator.py | 4 +- .../pybind/mgr/dashboard/controllers/osd.py | 123 +- .../pybind/mgr/dashboard/controllers/pool.py | 6 + 
.../mgr/dashboard/controllers/prometheus.py | 7 +- .../pybind/mgr/dashboard/controllers/rbd.py | 117 +- .../dashboard/controllers/rbd_mirroring.py | 102 +- .../pybind/mgr/dashboard/controllers/rgw.py | 4 +- .../mgr/dashboard/controllers/service.py | 27 +- .../mgr/dashboard/frontend/.gherkin-lintrc | 33 + .../cypress/fixtures/block-rbd-status.json | 1 + .../integration/block/mirroring.e2e-spec.ts | 6 +- .../integration/cluster/create-cluster.po.ts | 8 +- .../cypress/integration/cluster/hosts.po.ts | 18 +- .../integration/cluster/logs.e2e-spec.ts | 8 +- .../integration/cluster/mgr-modules.po.ts | 18 +- .../cypress/integration/cluster/osds.po.ts | 1 + .../integration/cluster/services.po.ts | 24 +- .../common/01-global.feature.po.ts | 188 + .../create-cluster.feature.po.ts | 12 + .../integration/common/grafana.feature.po.ts | 86 + .../cypress/integration/common/urls.po.ts | 44 + .../orchestrator/grafana/grafana.feature | 63 + ...01-create-cluster-welcome-page.e2e-spec.ts | 19 - .../01-create-cluster-welcome.feature | 26 + .../02-create-cluster-add-host.e2e-spec.ts | 66 - .../02-create-cluster-add-host.feature | 74 + ...create-cluster-create-services.e2e-spec.ts | 6 +- .../04-create-cluster-create-osds.e2e-spec.ts | 8 +- .../05-create-cluster-review.e2e-spec.ts | 4 +- .../workflow/06-cluster-check.e2e-spec.ts | 45 +- .../orchestrator/workflow/07-osds.e2e-spec.ts | 4 +- .../workflow/08-hosts.e2e-spec.ts | 22 +- .../workflow/09-services.e2e-spec.ts | 12 +- .../workflow/10-nfs-exports.e2e-spec.ts | 8 +- .../workflow/nfs/nfs-export.po.ts | 4 +- .../integration/ui/api-docs.e2e-spec.ts | 2 +- .../cypress/integration/ui/navigation.po.ts | 9 +- .../frontend/cypress/plugins/index.js | 16 + .../frontend/cypress/support/commands.ts | 20 +- .../dist/en-US/281.7c1918629ff8b413cc76.js | 1 + .../dist/en-US/281.cd14092ccedeaf2d7d79.js | 1 - .../dist/en-US/330.4192d10f1b1db19145cc.js | 1 - .../frontend/dist/en-US/3rdpartylicenses.txt | 2665 +++++- 
.../dist/en-US/483.43ef92bcd845cb24eae3.js | 1 + ...9fdfd7f.js => 585.7d0bcf3a0ac0c40fef3b.js} | 2 +- .../dashboard/frontend/dist/en-US/index.html | 4 +- .../dist/en-US/main.30fafaca6a3d4e1868e0.js | 3 - .../dist/en-US/main.86799889c70942fa9a19.js | 3 + .../en-US/runtime.ab6c27cac6d7501e18e8.js | 1 + .../en-US/runtime.d9a3c3d3ac8fa3cc7c93.js | 1 - .../en-US/styles.e6093c94066da7ab35c7.css | 20 - .../en-US/styles.ffb7f665775e3c191fa3.css | 20 + .../mgr/dashboard/frontend/package-lock.json | 8368 ++++++++++++----- .../mgr/dashboard/frontend/package.json | 18 +- .../frontend/src/app/app-routing.module.ts | 10 +- .../src/app/ceph/block/block.module.ts | 37 +- .../iscsi-target-form.component.spec.ts | 5 +- .../iscsi-target-form.component.ts | 7 +- .../edit-site-name-modal.component.html | 39 - .../edit-site-name-modal.component.spec.ts | 73 - .../edit-site-name-modal.component.ts | 58 - .../image-list/image-list.component.html | 17 +- .../image-list/image-list.component.ts | 22 +- .../ceph/block/mirroring/mirroring.module.ts | 2 - .../overview/overview.component.html | 44 +- .../overview/overview.component.spec.ts | 30 + .../mirroring/overview/overview.component.ts | 54 +- .../pool-edit-mode-modal.component.html | 3 +- .../pool-edit-mode-modal.component.spec.ts | 17 +- .../pool-edit-mode-modal.component.ts | 11 +- .../pool-list/pool-list.component.html | 1 + .../pool-list/pool-list.component.ts | 15 +- .../rbd-details/rbd-details.component.html | 2 + .../rbd-form/rbd-form-edit-request.model.ts | 6 + .../block/rbd-form/rbd-form.component.html | 65 +- .../block/rbd-form/rbd-form.component.spec.ts | 63 +- .../ceph/block/rbd-form/rbd-form.component.ts | 115 +- .../app/ceph/block/rbd-form/rbd-form.model.ts | 6 + .../block/rbd-list/rbd-list.component.html | 26 +- .../block/rbd-list/rbd-list.component.spec.ts | 61 +- .../ceph/block/rbd-list/rbd-list.component.ts | 204 +- .../rbd-performance.component.html | 1 + .../rbd-snapshot-form-modal.component.html | 5 +- 
.../rbd-snapshot-form-modal.component.ts | 7 +- .../rbd-snapshot-actions.model.ts | 34 +- .../rbd-snapshot-list.component.ts | 7 +- .../cephfs-tabs/cephfs-tabs.component.html | 1 + .../create-cluster-review.component.html | 2 - .../create-cluster.component.html | 10 +- .../create-cluster.component.scss | 24 +- .../create-cluster.component.spec.ts | 1 + .../create-cluster.component.ts | 62 +- .../host-details/host-details.component.html | 1 + .../hosts/host-form/host-form.component.html | 3 +- .../hosts/host-form/host-form.component.ts | 3 +- .../ceph/cluster/hosts/hosts.component.html | 1 + .../app/ceph/cluster/hosts/hosts.component.ts | 41 +- .../app/ceph/cluster/logs/logs.component.html | 23 + .../app/ceph/cluster/logs/logs.component.ts | 20 + .../osd-creation-preview-modal.component.ts | 5 +- .../osd-details/osd-details.component.html | 1 + .../osd/osd-form/osd-form.component.html | 301 +- .../osd/osd-form/osd-form.component.spec.ts | 88 +- .../osd/osd-form/osd-form.component.ts | 89 +- .../osd/osd-list/osd-list.component.html | 1 + .../active-alert-list.component.ts | 38 +- .../prometheus-tabs.component.html | 6 +- .../prometheus-tabs.component.spec.ts | 4 +- .../prometheus-tabs.component.ts | 4 +- .../rules-list/rules-list.component.ts | 37 +- .../service-form/service-form.component.html | 74 +- .../service-form/service-form.component.ts | 128 +- .../nfs-form-client.component.ts | 2 +- .../ceph/nfs/nfs-form/nfs-form.component.html | 9 +- .../nfs/nfs-form/nfs-form.component.spec.ts | 31 +- .../ceph/nfs/nfs-form/nfs-form.component.ts | 16 +- .../pool-details/pool-details.component.html | 1 + .../pool/pool-list/pool-list.component.html | 1 + .../rgw-daemon-details.component.html | 1 + .../rgw-daemon-list.component.html | 2 + .../rgw-user-form.component.html | 3 + .../rgw-user-form/rgw-user-form.component.ts | 2 +- .../src/app/core/error/error.component.html | 85 +- .../src/app/core/error/error.component.scss | 29 - .../app/core/error/error.component.spec.ts | 4 
+- .../src/app/core/error/error.component.ts | 43 +- .../navigation/navigation.component.html | 6 +- .../app/shared/api/ceph-service.service.ts | 14 + .../src/app/shared/api/nfs.service.ts | 7 +- .../shared/api/orchestrator.service.spec.ts | 4 +- .../app/shared/api/orchestrator.service.ts | 2 +- .../src/app/shared/api/osd.service.spec.ts | 11 +- .../src/app/shared/api/osd.service.ts | 13 +- .../frontend/src/app/shared/api/rbd.model.ts | 1 + .../src/app/shared/api/rbd.service.spec.ts | 7 +- .../src/app/shared/api/rbd.service.ts | 42 +- .../form-button-panel.component.html | 1 + .../components/grafana/grafana.component.html | 21 + .../grafana/grafana.component.spec.ts | 23 + .../components/grafana/grafana.component.ts | 24 +- .../submit-button.component.html | 3 +- .../submit-button/submit-button.component.ts | 3 + .../components/wizard/wizard.component.scss | 4 + .../src/app/shared/constants/app.constants.ts | 11 +- .../table-actions.component.html | 5 +- .../datatable/table/table.component.html | 36 +- .../shared/datatable/table/table.component.ts | 118 +- .../src/app/shared/enum/cell-template.enum.ts | 7 +- .../models/cd-table-fetch-data-context.ts | 27 + .../src/app/shared/models/cd-table-paging.ts | 20 + .../src/app/shared/models/cd-user-config.ts | 2 + .../shared/models/osd-deployment-options.ts | 24 + .../pipes/ceph-release-name.pipe.spec.ts | 4 +- .../shared/pipes/ceph-release-name.pipe.ts | 4 +- .../rxjs/operators/page-visibilty.operator.ts | 20 + .../cd-table-server-side.service.spec.ts | 16 + .../services/cd-table-server-side.service.ts | 14 + .../module-status-guard.service.spec.ts | 2 +- .../services/module-status-guard.service.ts | 14 +- .../services/motd-notification.service.ts | 4 +- .../services/prometheus-alert.service.ts | 14 + .../app/shared/services/task-list.service.ts | 12 +- .../src/app/shared/services/timer.service.ts | 4 +- .../src/styles/ceph-custom/_basics.scss | 39 + .../src/styles/ceph-custom/_forms.scss | 4 + 
ceph/src/pybind/mgr/dashboard/module.py | 27 +- ceph/src/pybind/mgr/dashboard/openapi.yaml | 217 +- .../mgr/dashboard/plugins/feature_toggles.py | 4 +- .../src/pybind/mgr/dashboard/requirements.txt | 1 + .../mgr/dashboard/run-backend-api-tests.sh | 2 +- .../src/pybind/mgr/dashboard/services/auth.py | 1 - .../mgr/dashboard/services/custom_banner.py | 27 - .../mgr/dashboard/services/exception.py | 15 +- .../mgr/dashboard/services/iscsi_config.py | 2 +- .../mgr/dashboard/services/orchestrator.py | 6 +- ceph/src/pybind/mgr/dashboard/services/osd.py | 25 + ceph/src/pybind/mgr/dashboard/services/rbd.py | 254 +- .../pybind/mgr/dashboard/tests/test_nfs.py | 12 +- .../mgr/dashboard/tests/test_orchestrator.py | 2 +- .../pybind/mgr/dashboard/tests/test_osd.py | 122 +- .../mgr/dashboard/tests/test_rbd_mirroring.py | 26 +- .../mgr/dashboard/tests/test_rbd_service.py | 72 +- .../pybind/mgr/dashboard/tests/test_rgw.py | 10 +- ceph/src/pybind/mgr/iostat/module.py | 14 +- ceph/src/pybind/mgr/mgr_module.py | 25 + ceph/src/pybind/mgr/mgr_util.py | 1 + ceph/src/pybind/mgr/nfs/cluster.py | 7 + .../src/pybind/mgr/orchestrator/_interface.py | 34 +- ceph/src/pybind/mgr/orchestrator/module.py | 91 +- .../orchestrator/tests/test_orchestrator.py | 44 +- ceph/src/pybind/mgr/pg_autoscaler/module.py | 7 +- ceph/src/pybind/mgr/prometheus/module.py | 9 + .../rbd_support/mirror_snapshot_schedule.py | 223 +- .../mgr/rbd_support/trash_purge_schedule.py | 87 +- ceph/src/pybind/mgr/requirements-required.txt | 2 +- .../mgr/snap_schedule/fs/schedule_client.py | 26 +- ceph/src/pybind/mgr/stats/fs/perf_stats.py | 10 +- ceph/src/pybind/mgr/status/module.py | 2 +- ceph/src/pybind/mgr/telemetry/module.py | 174 +- ceph/src/pybind/mgr/tox.ini | 3 +- .../src/pybind/mgr/volumes/fs/async_cloner.py | 11 +- ceph/src/pybind/mgr/volumes/fs/fs_util.py | 22 + .../pybind/mgr/volumes/fs/operations/group.py | 164 +- .../fs/operations/versions/__init__.py | 5 +- .../operations/versions/metadata_manager.py | 134 +- 
.../fs/operations/versions/subvolume_base.py | 54 +- .../fs/operations/versions/subvolume_v1.py | 131 +- .../fs/operations/versions/subvolume_v2.py | 8 +- .../mgr/volumes/fs/operations/volume.py | 34 +- ceph/src/pybind/mgr/volumes/fs/vol_spec.py | 8 + ceph/src/pybind/mgr/volumes/fs/volume.py | 156 +- ceph/src/pybind/mgr/volumes/module.py | 63 +- .../ceph/deployment/drive_group.py | 14 +- .../python-common/ceph/deployment/hostspec.py | 2 +- .../ceph/deployment/service_spec.py | 152 +- .../ceph/deployment/translate.py | 3 + ceph/src/rbd_fuse/rbd-fuse.cc | 27 +- ceph/src/rgw/rgw_admin.cc | 18 +- ceph/src/rgw/rgw_auth.cc | 19 +- ceph/src/rgw/rgw_auth.h | 28 +- ceph/src/rgw/rgw_auth_filters.h | 4 + ceph/src/rgw/rgw_auth_keystone.cc | 15 +- ceph/src/rgw/rgw_auth_keystone.h | 3 +- ceph/src/rgw/rgw_auth_s3.h | 16 +- ceph/src/rgw/rgw_bucket_encryption.cc | 22 +- ceph/src/rgw/rgw_bucket_encryption.h | 14 +- ceph/src/rgw/rgw_common.cc | 38 + ceph/src/rgw/rgw_common.h | 10 + ceph/src/rgw/rgw_crypt.cc | 383 +- ceph/src/rgw/rgw_crypt.h | 5 +- ceph/src/rgw/rgw_data_sync.cc | 24 +- ceph/src/rgw/rgw_datalog.cc | 6 +- ceph/src/rgw/rgw_dencoder.cc | 12 + ceph/src/rgw/rgw_kms.cc | 347 +- ceph/src/rgw/rgw_kms.h | 12 + ceph/src/rgw/rgw_lc.cc | 2 +- ceph/src/rgw/rgw_log.cc | 20 +- ceph/src/rgw/rgw_log.h | 18 +- ceph/src/rgw/rgw_main.cc | 14 +- ceph/src/rgw/rgw_op.cc | 34 +- ceph/src/rgw/rgw_opa.cc | 16 +- ceph/src/rgw/rgw_rados.cc | 7 + ceph/src/rgw/rgw_rest_s3.cc | 172 +- ceph/src/rgw/rgw_rest_sts.h | 9 + ceph/src/rgw/rgw_rest_swift.cc | 8 +- ceph/src/rgw/rgw_rest_user_policy.cc | 18 +- ceph/src/rgw/rgw_sal.h | 4 +- ceph/src/rgw/rgw_swift_auth.cc | 10 +- ceph/src/rgw/rgw_swift_auth.h | 10 +- ceph/src/rgw/rgw_sync_policy.cc | 4 + ceph/src/rgw/store/dbstore/CMakeLists.txt | 1 + ceph/src/script/build-integration-branch | 2 +- .../crimson/seastore/onode_tree/test_value.h | 22 +- ceph/src/test/encoding.cc | 36 +- ceph/src/test/lazy-omap-stats/CMakeLists.txt | 2 +- 
.../lazy-omap-stats/lazy_omap_stats_test.cc | 147 +- .../lazy-omap-stats/lazy_omap_stats_test.h | 14 +- ceph/src/test/libcephfs/test.cc | 6 +- ceph/src/test/librados/aio_cxx.cc | 44 + ceph/src/test/librados/misc.cc | 2 +- ceph/src/test/librados/tier_cxx.cc | 3 + .../cache/pwl/test_mock_ReplicatedWriteLog.cc | 5 +- .../librbd/cache/pwl/test_mock_SSDWriteLog.cc | 5 +- .../deep_copy/test_mock_ImageCopyRequest.cc | 106 + .../librbd/image/test_mock_RefreshRequest.cc | 202 +- .../librbd/migration/test_mock_HttpClient.cc | 22 +- .../test_mock_CreatePrimaryRequest.cc | 6 +- ceph/src/test/librbd/test_mirroring.cc | 18 +- ceph/src/test/objectstore/CMakeLists.txt | 7 + .../src/test/objectstore/run_test_deferred.sh | 52 + .../test/objectstore/test_bluestore_types.cc | 83 + ceph/src/test/objectstore/test_deferred.cc | 146 + ceph/src/test/osd/TestPGLog.cc | 8 +- .../journal/test_mock_PrepareReplayRequest.cc | 130 +- .../snapshot/test_mock_Replayer.cc | 868 +- .../test_mock_BootstrapRequest.cc | 127 +- ceph/src/test/rgw/test_http_manager.cc | 43 +- ceph/src/test/rgw/test_rgw_kms.cc | 21 +- ceph/src/test/test_snap_mapper.cc | 31 + ceph/src/tools/ceph-dencoder/common_types.h | 3 + ceph/src/tools/ceph-dencoder/rgw_types.h | 3 + ceph/src/tools/ceph_objectstore_tool.cc | 179 +- ceph/src/tools/ceph_objectstore_tool.h | 2 +- ceph/src/tools/cephfs/CMakeLists.txt | 7 +- ceph/src/tools/cephfs/shell/CMakeLists.txt | 7 + .../src/tools/cephfs/{ => shell}/cephfs-shell | 4 +- ceph/src/tools/cephfs/{ => shell}/setup.py | 0 ceph/src/tools/cephfs/{ => shell}/tox.ini | 0 ceph/src/tools/cephfs/top/cephfs-top | 68 +- ceph/src/tools/cephfs_mirror/Mirror.cc | 2 +- ceph/src/tools/cephfs_mirror/PeerReplayer.cc | 14 +- ceph/src/tools/rbd/Shell.cc | 25 +- ceph/src/tools/rbd/Utils.cc | 3 +- ceph/src/tools/rbd/Utils.h | 3 +- ceph/src/tools/rbd/action/Group.cc | 2 +- ceph/src/tools/rbd/action/List.cc | 2 +- ceph/src/tools/rbd/action/MirrorPool.cc | 22 +- ceph/src/tools/rbd/action/Namespace.cc | 6 +- 
ceph/src/tools/rbd/action/Perf.cc | 16 +- ceph/src/tools/rbd/action/Pool.cc | 4 +- ceph/src/tools/rbd/action/Trash.cc | 4 +- ceph/src/tools/rbd_mirror/ImageReplayer.cc | 4 +- .../image_replayer/BootstrapRequest.cc | 30 +- .../PrepareRemoteImageRequest.cc | 14 +- .../rbd_mirror/image_replayer/StateBuilder.cc | 26 +- .../rbd_mirror/image_replayer/StateBuilder.h | 5 +- .../journal/PrepareReplayRequest.cc | 11 - .../journal/PrepareReplayRequest.h | 8 +- .../image_replayer/journal/StateBuilder.cc | 4 +- .../snapshot/PrepareReplayRequest.h | 8 +- .../image_replayer/snapshot/Replayer.cc | 37 +- .../image_replayer/snapshot/StateBuilder.cc | 4 +- ceph/win32_deps_build.sh | 6 +- 678 files changed, 33063 insertions(+), 11324 deletions(-) create mode 100755 ceph/admin/rtd-checkout-main create mode 100644 ceph/monitoring/ceph-mixin/alerts.jsonnet create mode 100644 ceph/monitoring/ceph-mixin/dashboards.libsonnet delete mode 100644 ceph/monitoring/ceph-mixin/dashboards/dashboards.libsonnet create mode 100644 ceph/monitoring/ceph-mixin/dashboards_out/.lint delete mode 100755 ceph/monitoring/ceph-mixin/jsonnet-build.sh create mode 100644 ceph/monitoring/ceph-mixin/prometheus_alerts.libsonnet create mode 100644 ceph/qa/standalone/misc/test-mclock-profile-switch.sh create mode 100644 ceph/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml create mode 120000 ceph/qa/suites/orch/cephadm/workunits/0-distro delete mode 120000 ceph/qa/suites/rbd/persistent-writeback-cache/1-base delete mode 100644 ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/big-cache.yaml delete mode 100644 ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/cache.yaml delete mode 120000 ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/c_api_tests_with_defaults.yaml delete mode 100644 ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/recovery.yaml rename ceph/qa/suites/rbd/{persistent-writeback-cache => pwl-cache}/.qa (100%) rename ceph/qa/suites/rbd/{persistent-writeback-cache => 
pwl-cache/home}/% (100%) rename ceph/qa/suites/rbd/{persistent-writeback-cache/2-cluster => pwl-cache/home}/.qa (100%) create mode 120000 ceph/qa/suites/rbd/pwl-cache/home/1-base rename ceph/qa/suites/rbd/{persistent-writeback-cache => pwl-cache/home}/2-cluster/+ (100%) rename ceph/qa/suites/rbd/{persistent-writeback-cache/6-workloads => pwl-cache/home/2-cluster}/.qa (100%) rename ceph/qa/suites/rbd/{persistent-writeback-cache => pwl-cache/home}/2-cluster/fix-2.yaml (100%) rename ceph/qa/suites/rbd/{persistent-writeback-cache => pwl-cache/home}/2-cluster/openstack.yaml (100%) rename ceph/qa/suites/rbd/{persistent-writeback-cache => pwl-cache/home}/3-supported-random-distro$ (100%) create mode 100644 ceph/qa/suites/rbd/pwl-cache/home/4-cache-path.yaml create mode 120000 ceph/qa/suites/rbd/pwl-cache/home/5-cache-mode/.qa rename ceph/qa/suites/rbd/{persistent-writeback-cache => pwl-cache/home}/5-cache-mode/rwl.yaml (100%) rename ceph/qa/suites/rbd/{persistent-writeback-cache => pwl-cache/home}/5-cache-mode/ssd.yaml (100%) create mode 120000 ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/.qa create mode 100644 ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/1G.yaml create mode 100644 ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/8G.yaml create mode 120000 ceph/qa/suites/rbd/pwl-cache/home/7-workloads/.qa create mode 120000 ceph/qa/suites/rbd/pwl-cache/home/7-workloads/c_api_tests_with_defaults.yaml rename ceph/qa/suites/rbd/{persistent-writeback-cache/6-workloads => pwl-cache/home/7-workloads}/fio.yaml (100%) create mode 100644 ceph/qa/suites/rbd/pwl-cache/home/7-workloads/recovery.yaml rename ceph/{src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/edit-site-name-modal/edit-site-name-modal.component.scss => qa/suites/rbd/pwl-cache/tmpfs/%} (100%) create mode 120000 ceph/qa/suites/rbd/pwl-cache/tmpfs/.qa create mode 120000 ceph/qa/suites/rbd/pwl-cache/tmpfs/1-base create mode 100644 ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/+ create mode 120000 
ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/.qa create mode 100644 ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/fix-2.yaml create mode 100644 ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/openstack.yaml create mode 120000 ceph/qa/suites/rbd/pwl-cache/tmpfs/3-supported-random-distro$ create mode 100644 ceph/qa/suites/rbd/pwl-cache/tmpfs/4-cache-path.yaml create mode 120000 ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/.qa create mode 100644 ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/rwl.yaml create mode 100644 ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/ssd.yaml create mode 120000 ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/.qa create mode 100644 ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/1G.yaml create mode 100644 ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/5G.yaml create mode 120000 ceph/qa/suites/rbd/pwl-cache/tmpfs/7-workloads/.qa rename ceph/qa/suites/rbd/{persistent-writeback-cache/6-workloads => pwl-cache/tmpfs/7-workloads}/qemu_xfstests.yaml (100%) create mode 100644 ceph/qa/tasks/rbd_pwl_cache_recovery.py create mode 100755 ceph/qa/workunits/fs/full/subvolume_clone.sh create mode 100755 ceph/qa/workunits/fs/full/subvolume_snapshot_rm.sh create mode 100755 ceph/qa/workunits/fs/misc/dac_override.sh create mode 100644 ceph/src/cephadm/containers/keepalived/Dockerfile create mode 100644 ceph/src/cephadm/containers/keepalived/LICENSE create mode 100644 ceph/src/cephadm/containers/keepalived/README.md create mode 100755 ceph/src/cephadm/containers/keepalived/skel/init.sh create mode 100644 ceph/src/common/options/ceph-exporter.yaml.in create mode 100644 ceph/src/exporter/CMakeLists.txt create mode 100644 ceph/src/exporter/DaemonMetricCollector.cc create mode 100644 ceph/src/exporter/DaemonMetricCollector.h create mode 100644 ceph/src/exporter/ceph_exporter.cc create mode 100644 ceph/src/exporter/http_server.cc create mode 100644 ceph/src/exporter/http_server.h create mode 100644 ceph/src/exporter/util.cc create mode 100644 
ceph/src/exporter/util.h create mode 100644 ceph/src/messages/MMgrUpdate.h create mode 100644 ceph/src/pybind/mgr/cephadm/tests/test_tuned_profiles.py create mode 100644 ceph/src/pybind/mgr/cephadm/tuned_profiles.py create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/.gherkin-lintrc create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/fixtures/block-rbd-status.json create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/01-global.feature.po.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/create-cluster/create-cluster.feature.po.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/grafana.feature.po.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/urls.po.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/grafana/grafana.feature delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome-page.e2e-spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome.feature delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.e2e-spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.feature create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/281.7c1918629ff8b413cc76.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/281.cd14092ccedeaf2d7d79.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/330.4192d10f1b1db19145cc.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/483.43ef92bcd845cb24eae3.js rename ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/{585.764bfab2e2f489fdfd7f.js => 585.7d0bcf3a0ac0c40fef3b.js} 
(87%) delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.30fafaca6a3d4e1868e0.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.86799889c70942fa9a19.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.ab6c27cac6d7501e18e8.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.d9a3c3d3ac8fa3cc7c93.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/styles.e6093c94066da7ab35c7.css create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/styles.ffb7f665775e3c191fa3.css delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/edit-site-name-modal/edit-site-name-modal.component.html delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/edit-site-name-modal/edit-site-name-modal.component.spec.ts delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/edit-site-name-modal/edit-site-name-modal.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/models/cd-table-paging.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/models/osd-deployment-options.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/rxjs/operators/page-visibilty.operator.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/services/cd-table-server-side.service.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/services/cd-table-server-side.service.ts delete mode 100644 ceph/src/pybind/mgr/dashboard/services/custom_banner.py create mode 100644 ceph/src/pybind/mgr/dashboard/services/osd.py create mode 100755 ceph/src/test/objectstore/run_test_deferred.sh create mode 100644 ceph/src/test/objectstore/test_deferred.cc create mode 100644 ceph/src/tools/cephfs/shell/CMakeLists.txt rename ceph/src/tools/cephfs/{ => shell}/cephfs-shell (99%) rename 
ceph/src/tools/cephfs/{ => shell}/setup.py (100%) rename ceph/src/tools/cephfs/{ => shell}/tox.ini (100%) diff --git a/ceph/.readthedocs.yml b/ceph/.readthedocs.yml index ce7de68ae..361c664fa 100644 --- a/ceph/.readthedocs.yml +++ b/ceph/.readthedocs.yml @@ -1,15 +1,23 @@ --- # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details +# +# The pre_build command checks if we're building a named branch (i.e., not a PR). +# If so, check out doc/releases from the main branch before building so +# it's always up to date on docs.ceph.com/en/*. version: 2 formats: [] build: - image: latest + os: ubuntu-22.04 + tools: + python: "3.8" apt_packages: - ditaa + jobs: + pre_build: + - bash admin/rtd-checkout-main python: - version: 3.8 install: - requirements: admin/doc-requirements.txt - requirements: admin/doc-read-the-docs.txt diff --git a/ceph/CMakeLists.txt b/ceph/CMakeLists.txt index 28133cc65..5fb3601bd 100644 --- a/ceph/CMakeLists.txt +++ b/ceph/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required(VERSION 3.16) project(ceph - VERSION 17.2.3 + VERSION 17.2.4 LANGUAGES CXX C ASM) cmake_policy(SET CMP0028 NEW) @@ -39,7 +39,15 @@ if(WIN32) # the targeted Windows version. The availability of certain functions and # structures will depend on it. set(WIN32_WINNT "0x0A00" CACHE STRING "Targeted Windows version.") - add_definitions(-D_WIN32_WINNT=${WIN32_WINNT}) + # In order to avoid known winpthread issues, we're using the boost + # shared mutex implementation. 
+ # https://github.com/msys2/MINGW-packages/issues/3319 + add_definitions( + -D_WIN32_WINNT=${WIN32_WINNT} + -DBOOST_THREAD_PROVIDES_GENERIC_SHARED_MUTEX_ON_WIN + -DBOOST_THREAD_V2_SHARED_MUTEX + ) + set(Boost_THREADAPI "win32") endif() if(MINGW) diff --git a/ceph/PendingReleaseNotes b/ceph/PendingReleaseNotes index 3854ccb56..873c3e7ca 100644 --- a/ceph/PendingReleaseNotes +++ b/ceph/PendingReleaseNotes @@ -1,3 +1,13 @@ +>=17.2.4 +-------- + +* Cephfs: The 'AT_NO_ATTR_SYNC' macro is deprecated, please use the standard + 'AT_STATX_DONT_SYNC' macro. The 'AT_NO_ATTR_SYNC' macro will be removed in + the future. + +* OSD: The issue of high CPU utilization during recovery/backfill operations + has been fixed. For more details, see: https://tracker.ceph.com/issues/56530. + >=17.2.1 * The "BlueStore zero block detection" feature (first introduced to Quincy in diff --git a/ceph/admin/rtd-checkout-main b/ceph/admin/rtd-checkout-main new file mode 100755 index 000000000..829d7c384 --- /dev/null +++ b/ceph/admin/rtd-checkout-main @@ -0,0 +1,10 @@ +# See .readthedocs.yml +set -ex +re='^[0-9]+$' +if [[ $READTHEDOCS_VERSION =~ $re ]]; then + echo "Building docs for PR $READTHEDOCS_VERSION. Will not check out doc/releases from main branch." +else + echo "Building docs for $READTHEDOCS_VERSION branch. Will check out doc/releases from main branch." 
+ git checkout origin/main -- doc/releases +fi +git status diff --git a/ceph/ceph.spec b/ceph/ceph.spec index 4c5e78556..f2cac36f4 100644 --- a/ceph/ceph.spec +++ b/ceph/ceph.spec @@ -39,8 +39,12 @@ %if 0%{?rhel} < 9 %bcond_with system_pmdk %else +%ifarch s390x aarch64 +%bcond_with system_pmdk +%else %bcond_without system_pmdk %endif +%endif %bcond_without selinux %if 0%{?rhel} >= 8 %bcond_with cephfs_java @@ -118,6 +122,7 @@ %{!?python3_pkgversion: %global python3_pkgversion 3} %{!?python3_version_nodots: %global python3_version_nodots 3} %{!?python3_version: %global python3_version 3} +%{!?gts_prefix: %global gts_prefix gcc-toolset-11} %if ! 0%{?suse_version} # use multi-threaded xz compression: xz level 7 using ncpus threads @@ -145,18 +150,19 @@ %endif %endif -%if 0%{with seastar} -# disable -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1, as gcc-toolset-{9,10}-annobin +# disable -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1, as gcc-toolset-{10,11}-annobin # do not provide gcc-annobin.so anymore, despite that they provide annobin.so. but # redhat-rpm-config still passes -fplugin=gcc-annobin to the compiler. 
%undefine _annotated_build +%if 0%{?rhel} == 8 && 0%{?enable_devtoolset11:1} +%enable_devtoolset11 %endif ################################################################################# # main package definition ################################################################################# Name: ceph -Version: 17.2.3 +Version: 17.2.4 Release: 0%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 @@ -172,7 +178,7 @@ License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD- Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-17.2.3.tar.bz2 +Source0: %{?_remote_tarball_prefix}ceph-17.2.4.tar.bz2 %if 0%{?suse_version} # _insert_obs_source_lines_here ExclusiveArch: x86_64 aarch64 ppc64le s390x @@ -196,10 +202,18 @@ BuildRequires: selinux-policy-devel BuildRequires: gperf BuildRequires: cmake > 3.5 BuildRequires: fuse-devel -%if 0%{with seastar} && 0%{?rhel} -BuildRequires: gcc-toolset-9-gcc-c++ >= 9.2.1-2.3 -%else -BuildRequires: gcc-c++ +%if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} == 9 +BuildRequires: gcc-c++ >= 11 +%endif +%if 0%{?rhel} == 8 +BuildRequires: %{gts_prefix}-gcc-c++ +BuildRequires: %{gts_prefix}-build +%ifarch aarch64 +BuildRequires: %{gts_prefix}-libatomic-devel +%endif +%endif +%if 0%{?fedora} || 0%{?rhel} == 9 +BuildRequires: libatomic %endif %if 0%{with tcmalloc} # libprofiler did not build on ppc64le until 2.7.90 @@ -268,6 +282,8 @@ BuildRequires: python%{python3_pkgversion}-dateutil BuildRequires: python%{python3_pkgversion}-coverage BuildRequires: python%{python3_pkgversion}-pyOpenSSL BuildRequires: socat +BuildRequires: python%{python3_pkgversion}-asyncssh +BuildRequires: python%{python3_pkgversion}-natsort %endif %if 0%{with zbd} BuildRequires: libzbd-devel @@ -311,13 +327,12 @@ BuildRequires: systemtap-sdt-devel %if 0%{?fedora} BuildRequires: libubsan BuildRequires: libasan -BuildRequires: libatomic %endif -%if 0%{?rhel} -BuildRequires: gcc-toolset-9-annobin -BuildRequires: 
gcc-toolset-9-libubsan-devel -BuildRequires: gcc-toolset-9-libasan-devel -BuildRequires: gcc-toolset-9-libatomic-devel +%if 0%{?rhel} == 8 +BuildRequires: %{gts_prefix}-annobin +BuildRequires: %{gts_prefix}-annobin-plugin-gcc +BuildRequires: %{gts_prefix}-libubsan-devel +BuildRequires: %{gts_prefix}-libasan-devel %endif %endif ################################################################################# @@ -347,6 +362,7 @@ BuildRequires: rdma-core-devel BuildRequires: liblz4-devel >= 1.7 # for prometheus-alerts BuildRequires: golang-github-prometheus-prometheus +BuildRequires: jsonnet %endif %if 0%{?fedora} || 0%{?rhel} Requires: systemd @@ -388,6 +404,7 @@ BuildRequires: python%{python3_pkgversion}-pyOpenSSL %endif %if 0%{?suse_version} BuildRequires: golang-github-prometheus-prometheus +BuildRequires: jsonnet BuildRequires: libxmlsec1-1 BuildRequires: libxmlsec1-nss1 BuildRequires: libxmlsec1-openssl1 @@ -581,6 +598,7 @@ Group: System/Filesystems Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} Requires: ceph-grafana-dashboards = %{_epoch_prefix}%{version}-%{release} Requires: ceph-prometheus-alerts = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-setuptools %if 0%{?fedora} || 0%{?rhel} Requires: python%{python3_pkgversion}-cherrypy Requires: python%{python3_pkgversion}-jwt @@ -717,6 +735,15 @@ Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release} %description -n cephfs-mirror Daemon for mirroring CephFS snapshots between Ceph clusters. +%package -n ceph-exporter +Summary: Daemon for exposing perf counters as Prometheus metrics +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +%description -n ceph-exporter +Daemon for exposing perf counters as Prometheus metrics + %package -n rbd-fuse Summary: Ceph fuse-based client %if 0%{?suse_version} @@ -1239,7 +1266,7 @@ This package provides Ceph default alerts for Prometheus. 
# common ################################################################################# %prep -%autosetup -p1 -n ceph-17.2.3 +%autosetup -p1 -n ceph-17.2.4 %build # Disable lto on systems that do not support symver attribute @@ -1248,10 +1275,6 @@ This package provides Ceph default alerts for Prometheus. %define _lto_cflags %{nil} %endif -%if 0%{with seastar} && 0%{?rhel} -. /opt/rh/gcc-toolset-9/enable -%endif - %if 0%{with cephfs_java} # Find jni.h for i in /usr/{lib64,lib}/jvm/java/include{,/linux}; do @@ -1296,6 +1319,9 @@ cmake .. \ -DWITH_MANPAGE:BOOL=ON \ -DWITH_PYTHON3:STRING=%{python3_version} \ -DWITH_MGR_DASHBOARD_FRONTEND:BOOL=OFF \ +%if 0%{?suse_version} + -DWITH_RADOSGW_SELECT_PARQUET:BOOL=OFF \ +%endif %if 0%{without ceph_test_package} -DWITH_TESTS:BOOL=OFF \ %endif @@ -1368,6 +1394,10 @@ cmake .. \ %endif %if 0%{with system_utf8proc} -DWITH_SYSTEM_UTF8PROC:BOOL=ON \ +%endif +%if 0%{with seastar} + -DWITH_SEASTAR:BOOL=ON \ + -DWITH_JAEGER:BOOL=OFF \ %endif -DWITH_GRAFANA:BOOL=ON @@ -1394,6 +1424,7 @@ popd %install + pushd %{_vpath_builddir} %make_install # we have dropped sysvinit bits @@ -1575,8 +1606,7 @@ exit 0 %if ! 
0%{?suse_version} %postun -n cephadm -userdel -r cephadm || true -exit 0 +[ $1 -ne 0 ] || userdel cephadm || : %endif %files -n cephadm @@ -1962,6 +1992,9 @@ if [ $1 -ge 1 ] ; then fi fi +%files -n ceph-exporter +%{_bindir}/ceph-exporter + %files -n rbd-fuse %{_bindir}/rbd-fuse %{_mandir}/man8/rbd-fuse.8* diff --git a/ceph/ceph.spec.in b/ceph/ceph.spec.in index 9f522a750..5c5e390f4 100644 --- a/ceph/ceph.spec.in +++ b/ceph/ceph.spec.in @@ -39,8 +39,12 @@ %if 0%{?rhel} < 9 %bcond_with system_pmdk %else +%ifarch s390x aarch64 +%bcond_with system_pmdk +%else %bcond_without system_pmdk %endif +%endif %bcond_without selinux %if 0%{?rhel} >= 8 %bcond_with cephfs_java @@ -118,6 +122,7 @@ %{!?python3_pkgversion: %global python3_pkgversion 3} %{!?python3_version_nodots: %global python3_version_nodots 3} %{!?python3_version: %global python3_version 3} +%{!?gts_prefix: %global gts_prefix gcc-toolset-11} %if ! 0%{?suse_version} # use multi-threaded xz compression: xz level 7 using ncpus threads @@ -145,11 +150,12 @@ %endif %endif -%if 0%{with seastar} -# disable -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1, as gcc-toolset-{9,10}-annobin +# disable -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1, as gcc-toolset-{10,11}-annobin # do not provide gcc-annobin.so anymore, despite that they provide annobin.so. but # redhat-rpm-config still passes -fplugin=gcc-annobin to the compiler. 
%undefine _annotated_build +%if 0%{?rhel} == 8 && 0%{?enable_devtoolset11:1} +%enable_devtoolset11 %endif ################################################################################# @@ -196,10 +202,18 @@ BuildRequires: selinux-policy-devel BuildRequires: gperf BuildRequires: cmake > 3.5 BuildRequires: fuse-devel -%if 0%{with seastar} && 0%{?rhel} -BuildRequires: gcc-toolset-9-gcc-c++ >= 9.2.1-2.3 -%else -BuildRequires: gcc-c++ +%if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} == 9 +BuildRequires: gcc-c++ >= 11 +%endif +%if 0%{?rhel} == 8 +BuildRequires: %{gts_prefix}-gcc-c++ +BuildRequires: %{gts_prefix}-build +%ifarch aarch64 +BuildRequires: %{gts_prefix}-libatomic-devel +%endif +%endif +%if 0%{?fedora} || 0%{?rhel} == 9 +BuildRequires: libatomic %endif %if 0%{with tcmalloc} # libprofiler did not build on ppc64le until 2.7.90 @@ -268,6 +282,8 @@ BuildRequires: python%{python3_pkgversion}-dateutil BuildRequires: python%{python3_pkgversion}-coverage BuildRequires: python%{python3_pkgversion}-pyOpenSSL BuildRequires: socat +BuildRequires: python%{python3_pkgversion}-asyncssh +BuildRequires: python%{python3_pkgversion}-natsort %endif %if 0%{with zbd} BuildRequires: libzbd-devel @@ -311,13 +327,12 @@ BuildRequires: systemtap-sdt-devel %if 0%{?fedora} BuildRequires: libubsan BuildRequires: libasan -BuildRequires: libatomic %endif -%if 0%{?rhel} -BuildRequires: gcc-toolset-9-annobin -BuildRequires: gcc-toolset-9-libubsan-devel -BuildRequires: gcc-toolset-9-libasan-devel -BuildRequires: gcc-toolset-9-libatomic-devel +%if 0%{?rhel} == 8 +BuildRequires: %{gts_prefix}-annobin +BuildRequires: %{gts_prefix}-annobin-plugin-gcc +BuildRequires: %{gts_prefix}-libubsan-devel +BuildRequires: %{gts_prefix}-libasan-devel %endif %endif ################################################################################# @@ -347,6 +362,7 @@ BuildRequires: rdma-core-devel BuildRequires: liblz4-devel >= 1.7 # for prometheus-alerts BuildRequires: golang-github-prometheus-prometheus 
+BuildRequires: jsonnet %endif %if 0%{?fedora} || 0%{?rhel} Requires: systemd @@ -388,6 +404,7 @@ BuildRequires: python%{python3_pkgversion}-pyOpenSSL %endif %if 0%{?suse_version} BuildRequires: golang-github-prometheus-prometheus +BuildRequires: jsonnet BuildRequires: libxmlsec1-1 BuildRequires: libxmlsec1-nss1 BuildRequires: libxmlsec1-openssl1 @@ -581,6 +598,7 @@ Group: System/Filesystems Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release} Requires: ceph-grafana-dashboards = %{_epoch_prefix}%{version}-%{release} Requires: ceph-prometheus-alerts = %{_epoch_prefix}%{version}-%{release} +Requires: python%{python3_pkgversion}-setuptools %if 0%{?fedora} || 0%{?rhel} Requires: python%{python3_pkgversion}-cherrypy Requires: python%{python3_pkgversion}-jwt @@ -717,6 +735,15 @@ Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release} %description -n cephfs-mirror Daemon for mirroring CephFS snapshots between Ceph clusters. +%package -n ceph-exporter +Summary: Daemon for exposing perf counters as Prometheus metrics +%if 0%{?suse_version} +Group: System/Filesystems +%endif +Requires: ceph-base = %{_epoch_prefix}%{version}-%{release} +%description -n ceph-exporter +Daemon for exposing perf counters as Prometheus metrics + %package -n rbd-fuse Summary: Ceph fuse-based client %if 0%{?suse_version} @@ -1248,10 +1275,6 @@ This package provides Ceph default alerts for Prometheus. %define _lto_cflags %{nil} %endif -%if 0%{with seastar} && 0%{?rhel} -. /opt/rh/gcc-toolset-9/enable -%endif - %if 0%{with cephfs_java} # Find jni.h for i in /usr/{lib64,lib}/jvm/java/include{,/linux}; do @@ -1296,6 +1319,9 @@ cmake .. \ -DWITH_MANPAGE:BOOL=ON \ -DWITH_PYTHON3:STRING=%{python3_version} \ -DWITH_MGR_DASHBOARD_FRONTEND:BOOL=OFF \ +%if 0%{?suse_version} + -DWITH_RADOSGW_SELECT_PARQUET:BOOL=OFF \ +%endif %if 0%{without ceph_test_package} -DWITH_TESTS:BOOL=OFF \ %endif @@ -1368,6 +1394,10 @@ cmake .. 
\ %endif %if 0%{with system_utf8proc} -DWITH_SYSTEM_UTF8PROC:BOOL=ON \ +%endif +%if 0%{with seastar} + -DWITH_SEASTAR:BOOL=ON \ + -DWITH_JAEGER:BOOL=OFF \ %endif -DWITH_GRAFANA:BOOL=ON @@ -1394,6 +1424,7 @@ popd %install + pushd %{_vpath_builddir} %make_install # we have dropped sysvinit bits @@ -1575,8 +1606,7 @@ exit 0 %if ! 0%{?suse_version} %postun -n cephadm -userdel -r cephadm || true -exit 0 +[ $1 -ne 0 ] || userdel cephadm || : %endif %files -n cephadm @@ -1962,6 +1992,9 @@ if [ $1 -ge 1 ] ; then fi fi +%files -n ceph-exporter +%{_bindir}/ceph-exporter + %files -n rbd-fuse %{_bindir}/rbd-fuse %{_mandir}/man8/rbd-fuse.8* diff --git a/ceph/changelog.upstream b/ceph/changelog.upstream index c23038092..71c362339 100644 --- a/ceph/changelog.upstream +++ b/ceph/changelog.upstream @@ -1,3 +1,9 @@ +ceph (17.2.4-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Wed, 28 Sep 2022 22:55:56 +0000 + ceph (17.2.3-1) stable; urgency=medium * New upstream release diff --git a/ceph/cmake/modules/AddCephTest.cmake b/ceph/cmake/modules/AddCephTest.cmake index 46d3a1b4c..2784567c6 100644 --- a/ceph/cmake/modules/AddCephTest.cmake +++ b/ceph/cmake/modules/AddCephTest.cmake @@ -98,7 +98,7 @@ function(add_tox_test name) CEPH_LIB=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} CEPH_BUILD_VIRTUALENV=${CEPH_BUILD_VIRTUALENV} LD_LIBRARY_PATH=${CMAKE_BINARY_DIR}/lib - PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}:${CMAKE_SOURCE_DIR}/src:$ENV{PATH} + PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}:${CMAKE_SOURCE_DIR}/src:${CMAKE_CURRENT_BINARY_DIR}:$ENV{PATH} PYTHONPATH=${CMAKE_SOURCE_DIR}/src/pybind) list(APPEND tox_test run-tox-${name}) endfunction() diff --git a/ceph/cmake/modules/BuildBoost.cmake b/ceph/cmake/modules/BuildBoost.cmake index bdda30f70..bd9497f1c 100644 --- a/ceph/cmake/modules/BuildBoost.cmake +++ b/ceph/cmake/modules/BuildBoost.cmake @@ -80,10 +80,20 @@ function(do_build_boost root_dir version) endforeach() list_replace(boost_with_libs "unit_test_framework" "test") 
string(REPLACE ";" "," boost_with_libs "${boost_with_libs}") + + if(CMAKE_CXX_COMPILER_ID STREQUAL GNU) + set(toolset gcc) + elseif(CMAKE_CXX_COMPILER_ID STREQUAL Clang) + set(toolset clang) + else() + message(SEND_ERROR "unknown compiler: ${CMAKE_CXX_COMPILER_ID}") + endif() + # build b2 and prepare the project-config.jam for boost set(configure_command ./bootstrap.sh --prefix= - --with-libraries=${boost_with_libs}) + --with-libraries=${boost_with_libs} + --with-toolset=${toolset}) set(b2 ./b2) if(BOOST_J) @@ -93,14 +103,6 @@ function(do_build_boost root_dir version) # suppress all debugging levels for b2 list(APPEND b2 -d0) - if(CMAKE_CXX_COMPILER_ID STREQUAL GNU) - set(toolset gcc) - elseif(CMAKE_CXX_COMPILER_ID STREQUAL Clang) - set(toolset clang) - else() - message(SEND_ERROR "unknown compiler: ${CMAKE_CXX_COMPILER_ID}") - endif() - set(user_config ${CMAKE_BINARY_DIR}/user-config.jam) # edit the user-config.jam so b2 will be able to use the specified # toolset and python diff --git a/ceph/cmake/modules/Buildpmem.cmake b/ceph/cmake/modules/Buildpmem.cmake index ead5c80ae..61c5ba601 100644 --- a/ceph/cmake/modules/Buildpmem.cmake +++ b/ceph/cmake/modules/Buildpmem.cmake @@ -21,6 +21,7 @@ function(build_pmem) set(PMDK_LIB_DIR "nondebug") endif() + set(pmdk_cflags "-Wno-error -fno-lto") include(ExternalProject) ExternalProject_Add(pmdk_ext ${source_dir_args} @@ -29,7 +30,7 @@ function(build_pmem) # build system tests statically linking to librbd (which uses # libpmemobj) will not link (because we don't build the ndctl # static library here). 
- BUILD_COMMAND ${make_cmd} CC=${CMAKE_C_COMPILER} NDCTL_ENABLE=n BUILD_EXAMPLES=n BUILD_BENCHMARKS=n DOC=n + BUILD_COMMAND ${make_cmd} CC=${CMAKE_C_COMPILER} "EXTRA_CFLAGS=${pmdk_cflags}" NDCTL_ENABLE=n BUILD_EXAMPLES=n BUILD_BENCHMARKS=n DOC=n BUILD_IN_SOURCE 1 BUILD_BYPRODUCTS "/src/${PMDK_LIB_DIR}/libpmem.a" "/src/${PMDK_LIB_DIR}/libpmemobj.a" INSTALL_COMMAND "") diff --git a/ceph/cmake/modules/Distutils.cmake b/ceph/cmake/modules/Distutils.cmake index 191636338..9d66ae979 100644 --- a/ceph/cmake/modules/Distutils.cmake +++ b/ceph/cmake/modules/Distutils.cmake @@ -69,7 +69,7 @@ function(distutils_add_cython_module target name src) # This little bit of magic wipes out __Pyx_check_single_interpreter() # Note: this is reproduced in distutils_install_cython_module list(APPEND PY_CPPFLAGS -D'void0=dead_function\(void\)') - list(APPEND PY_CPPFLAGS -D'__Pyx_check_single_interpreter\(ARG\)=ARG \#\# 0') + list(APPEND PY_CPPFLAGS -D'__Pyx_check_single_interpreter\(ARG\)=ARG\#\#0') set(PY_CC ${compiler_launcher} ${CMAKE_C_COMPILER} ${c_compiler_arg1}) set(PY_CXX ${compiler_launcher} ${CMAKE_CXX_COMPILER} ${cxx_compiler_arg1}) set(PY_LDSHARED ${link_launcher} ${CMAKE_C_COMPILER} ${c_compiler_arg1} "-shared") @@ -129,7 +129,7 @@ function(distutils_install_cython_module name) set(ENV{LDSHARED} \"${PY_LDSHARED}\") set(ENV{CPPFLAGS} \"-iquote${CMAKE_SOURCE_DIR}/src/include -D'void0=dead_function\(void\)' \ - -D'__Pyx_check_single_interpreter\(ARG\)=ARG \#\# 0' \ + -D'__Pyx_check_single_interpreter\(ARG\)=ARG\#\#0' \ ${CFLAG_DISABLE_VTA}\") set(ENV{LDFLAGS} \"-L${CMAKE_LIBRARY_OUTPUT_DIRECTORY}\") set(ENV{CYTHON_BUILD_DIR} \"${CMAKE_CURRENT_BINARY_DIR}\") diff --git a/ceph/debian/control b/ceph/debian/control index af0f351ee..cbb5ccaa4 100644 --- a/ceph/debian/control +++ b/ceph/debian/control @@ -24,6 +24,7 @@ Build-Depends: automake, hostname , javahelper, jq , + jsonnet , junit4, libarrow-dev , libparquet-dev , @@ -235,6 +236,8 @@ Depends: ceph-base (= ${binary:Version}), 
python3-pecan, python3-requests, python3-werkzeug, + libsqlite3-mod-ceph (= ${binary:Version}), + librados2 (= ${binary:Version}), ${misc:Depends}, ${python:Depends}, ${shlibs:Depends}, @@ -259,6 +262,7 @@ Depends: ceph-mgr (= ${binary:Version}), python3-cherrypy3, python3-jwt, python3-bcrypt, + python3-pkg-resources, python3-werkzeug, python3-routes, ${misc:Depends}, @@ -763,7 +767,8 @@ Description: RADOS distributed object store client C++ library (development file Package: libsqlite3-mod-ceph Architecture: any Section: libs -Depends: ${misc:Depends}, +Depends: librados2 (= ${binary:Version}), + ${misc:Depends}, ${shlibs:Depends}, Description: SQLite3 VFS for Ceph A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS diff --git a/ceph/doc/cephadm/host-management.rst b/ceph/doc/cephadm/host-management.rst index f342558fa..fee286e3a 100644 --- a/ceph/doc/cephadm/host-management.rst +++ b/ceph/doc/cephadm/host-management.rst @@ -161,7 +161,9 @@ The following host labels have a special meaning to cephadm. All start with ``_ bootstrap was originally run), and the ``client.admin`` key is set to be distributed to that host via the ``ceph orch client-keyring ...`` function. Adding this label to additional hosts will normally cause cephadm to deploy config and keyring files - in ``/etc/ceph``. + in ``/etc/ceph``. Starting from versions 16.2.10 (Pacific) and 17.2.1 (Quincy) in + addition to the default location ``/etc/ceph/`` cephadm also stores config and keyring + files in the ``/var/lib/ceph//config`` directory. Maintenance Mode ================ @@ -175,6 +177,21 @@ Where the force flag when entering maintenance allows the user to bypass warning See also :ref:`cephadm-fqdn` +Rescanning Host Devices +======================= + +Some servers and external enclosures may not register device removal or insertion with the +kernel. In these scenarios, you'll need to perform a host rescan. 
A rescan is typically +non-disruptive, and can be performed with the following CLI command.:: + + ceph orch host rescan [--with-summary] + +The ``with-summary`` flag provides a breakdown of the number of HBAs found and scanned, together +with any that failed.:: + + [ceph: root@rh9-ceph1 /]# ceph orch host rescan rh9-ceph1 --with-summary + Ok. 2 adapters detected: 2 rescanned, 0 skipped, 0 failed (0.32s) + Creating many hosts at once =========================== @@ -226,6 +243,95 @@ create a new CRUSH host located in the specified hierarchy. See also :ref:`crush_map_default_types`. +OS Tuning Profiles +================== + +Cephadm can manage operating system tuning profiles that apply a set of sysctl settings +to a given set of hosts. First create a YAML spec file in the following format + +.. code-block:: yaml + + profile_name: 23-mon-host-profile + placement: + hosts: + - mon-host-01 + - mon-host-02 + settings: + fs.file-max: 1000000 + vm.swappiness: '13' + +Then apply the tuning profile with:: + + ceph orch tuned-profile apply -i + +This profile will then be written to ``/etc/sysctl.d/`` on each host matching the +given placement and `sysctl --system` will be run on the host. + +.. note:: + + The exact filename the profile will be written to is within ``/etc/sysctl.d/`` is + ``-cephadm-tuned-profile.conf`` where + is the `profile_name` setting specified in the provided YAML spec. Since sysctl + settings are applied in lexicographical order by the filename the setting is + specified in, you may want to set the `profile_name` in your spec so + that it is applied before or after other conf files that may exist. + +.. note:: + + These settings are applied only at the host level, and are not specific + to any certain daemon or container + +.. note:: + + Applying tuned profiles is idempotent when the ``--no-overwrite`` option is passed. + In this case existing profiles with the same name are not overwritten. 
+ + +Viewing Profiles +---------------- + +To view all current profiles cephadm is managing:: + + ceph orch tuned-profile ls + +.. note:: + + If you'd like to make modifications and re-apply a profile passing `--format yaml` to the + ``tuned-profile ls`` command will present the profiles in a format where they can be copied + and re-applied. + + +Removing Profiles +----------------- + +If you no longer want one of the previously applied profiles, it can be removed with:: + + ceph orch tuned-profile rm + +When a profile is removed, cephadm will clean up the file previously written to /etc/sysctl.d + + +Modifying Profiles +------------------ + +While you can modify a profile by simply re-applying a YAML spec with the same profile name, +you may also want to adjust a setting within a given profile, so there are commands +for this purpose. + +To add or modify a setting for an existing profile:: + + ceph orch tuned-profile add-setting + +To remove a setting from an existing profile:: + + ceph orch tuned-profile rm-setting + +.. note:: + + Modifying the placement will require re-applying a profile with the same name. Keep + in mind that profiles are tracked by their name, so whenever a profile with the same + name as an existing profile is applied, it will overwrite the old profile. + SSH Configuration ================= diff --git a/ceph/doc/cephadm/operations.rst b/ceph/doc/cephadm/operations.rst index fb7d5481d..9ec8371c8 100644 --- a/ceph/doc/cephadm/operations.rst +++ b/ceph/doc/cephadm/operations.rst @@ -395,25 +395,34 @@ process is active within the cluster.* CEPHADM_CHECK_KERNEL_VERSION ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The OS kernel version (maj.min) is checked for consistency across the hosts. -The kernel version of the majority of the hosts is used as the basis for +The kernel version of the majority of the hosts is used as the basis for identifying anomalies. .. 
_client_keyrings_and_configs: Client keyrings and configs =========================== - Cephadm can distribute copies of the ``ceph.conf`` file and client keyring -files to hosts. It is usually a good idea to store a copy of the config and -``client.admin`` keyring on any host used to administer the cluster via the -CLI. By default, cephadm does this for any nodes that have the ``_admin`` -label (which normally includes the bootstrap host). +files to hosts. Starting from versions 16.2.10 (Pacific) and 17.2.1 (Quincy), +in addition to the default location ``/etc/ceph/`` cephadm also stores config +and keyring files in the ``/var/lib/ceph//config`` directory. It is usually +a good idea to store a copy of the config and ``client.admin`` keyring on any host +used to administer the cluster via the CLI. By default, cephadm does this for any +nodes that have the ``_admin`` label (which normally includes the bootstrap host). + +.. note:: Ceph daemons will still use files on ``/etc/ceph/``. The new configuration + location ``/var/lib/ceph//config`` is used by cephadm only. Having this config + directory under the fsid helps cephadm to load the configuration associated with + the cluster. 
+ When a client keyring is placed under management, cephadm will: - build a list of target hosts based on the specified placement spec (see :ref:`orchestrator-cli-placement-spec`) - store a copy of the ``/etc/ceph/ceph.conf`` file on the specified host(s) + - store a copy of the ``ceph.conf`` file at ``/var/lib/ceph//config/ceph.conf`` on the specified host(s) + - store a copy of the ``ceph.client.admin.keyring`` file at ``/var/lib/ceph//config/ceph.client.admin.keyring`` on the specified host(s) - store a copy of the keyring file on the specified host(s) - update the ``ceph.conf`` file as needed (e.g., due to a change in the cluster monitors) - update the keyring file if the entity's key is changed (e.g., via ``ceph diff --git a/ceph/doc/cephadm/services/index.rst b/ceph/doc/cephadm/services/index.rst index be9b3661a..d6520ea40 100644 --- a/ceph/doc/cephadm/services/index.rst +++ b/ceph/doc/cephadm/services/index.rst @@ -86,7 +86,20 @@ Service Specification ===================== A *Service Specification* is a data structure that is used to specify the -deployment of services. Here is an example of a service specification in YAML: +deployment of services. In addition to parameters such as `placement` or +`networks`, the user can set initial values of service configuration parameters +by means of the `config` section. For each param/value configuration pair, +cephadm calls the following command to set its value: + + .. prompt:: bash # + + ceph config set + +cephadm raises health warnings in case invalid configuration parameters are +found in the spec (`CEPHADM_INVALID_CONFIG_OPTION`) or if any error while +trying to apply the new configuration option(s) (`CEPHADM_FAILED_SET_OPTION`). + +Here is an example of a service specification in YAML: .. code-block:: yaml @@ -97,6 +110,10 @@ deployment of services. Here is an example of a service specification in YAML: - host1 - host2 - host3 + config: + param_1: val_1 + ... 
+ param_N: val_N unmanaged: false networks: - 192.169.142.0/24 @@ -414,7 +431,7 @@ Cephadm supports the deployment of multiple daemons on the same host: service_type: rgw placement: label: rgw - count-per-host: 2 + count_per_host: 2 The main reason for deploying multiple daemons per host is an additional performance benefit for running multiple RGW and MDS daemons on the same host. @@ -505,6 +522,57 @@ a spec like which would cause each mon daemon to be deployed with `--cpus=2`. +Custom Config Files +=================== + +Cephadm supports specifying miscellaneous config files for daemons. +To do so, users must provide both the content of the config file and the +location within the daemon's container at which it should be mounted. After +applying a YAML spec with custom config files specified and having cephadm +redeploy the daemons for which the config files are specified, these files will +be mounted within the daemon's container at the specified location. + +Example service spec: + +.. 
code-block:: yaml + + service_type: grafana + service_name: grafana + custom_configs: + - mount_path: /etc/example.conf + content: | + setting1 = value1 + setting2 = value2 + - mount_path: /usr/share/grafana/example.cert + content: | + -----BEGIN PRIVATE KEY----- + V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFt + ZXQsIGNvbnNldGV0dXIgc2FkaXBzY2luZyBlbGl0ciwgc2VkIGRpYW0gbm9udW15 + IGVpcm1vZCB0ZW1wb3IgaW52aWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWdu + YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg + ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8= + -----END PRIVATE KEY----- + -----BEGIN CERTIFICATE----- + V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFt + ZXQsIGNvbnNldGV0dXIgc2FkaXBzY2luZyBlbGl0ciwgc2VkIGRpYW0gbm9udW15 + IGVpcm1vZCB0ZW1wb3IgaW52aWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWdu + YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg + ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8= + -----END CERTIFICATE----- + +To make these new config files actually get mounted within the +containers for the daemons + +.. prompt:: bash + + ceph orch redeploy + +For example: + +.. prompt:: bash + + ceph orch redeploy grafana + .. _orch-rm: Removing a Service diff --git a/ceph/doc/cephadm/services/monitoring.rst b/ceph/doc/cephadm/services/monitoring.rst index a17beba6d..08ccd9482 100644 --- a/ceph/doc/cephadm/services/monitoring.rst +++ b/ceph/doc/cephadm/services/monitoring.rst @@ -83,6 +83,28 @@ steps below: ceph orch apply grafana +.. _cephadm-monitoring-centralized-logs: + +Centralized Logging in Ceph +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Ceph now provides centralized logging with Loki & Promtail. Centralized Log Management (CLM) consolidates all log data and pushes it to a central repository, +with an accessible and easy-to-use interface. Centralized logging is designed to make your life easier. +Some of the advantages are: + +#. 
**Linear event timeline**: it is easier to troubleshoot issues analyzing a single chain of events than thousands of different logs from a hundred nodes. +#. **Real-time live log monitoring**: it is impractical to follow logs from thousands of different sources. +#. **Flexible retention policies**: with per-daemon logs, log rotation is usually set to a short interval (1-2 weeks) to save disk usage. +#. **Increased security & backup**: logs can contain sensitive information and expose usage patterns. Additionally, centralized logging allows for HA, etc. + +Centralized Logging in Ceph is implemented using two new services - ``loki`` & ``promtail``. + +Loki: It is basically a log aggregation system and is used to query logs. It can be configured as a datasource in Grafana. + +Promtail: It acts as an agent that gathers logs from the system and makes them available to Loki. + +These two services are not deployed by default in a Ceph cluster. To enable the centralized logging you can follow the steps mentioned here :ref:`centralized-logging`. + .. _cephadm-monitoring-networks-ports: Networks and Ports @@ -195,6 +217,7 @@ set``: - ``services/grafana/ceph-dashboard.yml`` - ``services/grafana/grafana.ini`` - ``services/prometheus/prometheus.yml`` +- ``services/prometheus/alerting/custom_alerts.yml`` You can look up the file templates that are currently used by cephadm in ``src/pybind/mgr/cephadm/templates``: @@ -240,6 +263,15 @@ Example # reconfig the prometheus service ceph orch reconfig prometheus +.. code-block:: bash + + # set additional custom alerting rules for Prometheus + ceph config-key set mgr/cephadm/services/prometheus/alerting/custom_alerts.yml \ + -i $PWD/custom_alerts.yml + + # Note that custom alerting rules are not parsed by Jinja and hence escaping + # will not be an issue. 
+ Deploying monitoring without cephadm ------------------------------------ diff --git a/ceph/doc/cephadm/services/osd.rst b/ceph/doc/cephadm/services/osd.rst index c4260b84d..70a4ad0cd 100644 --- a/ceph/doc/cephadm/services/osd.rst +++ b/ceph/doc/cephadm/services/osd.rst @@ -245,6 +245,18 @@ Expected output:: OSDs that are not safe to destroy will be rejected. +.. note:: + After removing OSDs, if the drives the OSDs were deployed on once again + become available, cephadm may automatically try to deploy more OSDs + on these drives if they match an existing drivegroup spec. If you deployed + the OSDs you are removing with a spec and don't want any new OSDs deployed on + the drives after removal, it's best to modify the drivegroup spec before removal. + Either set ``unmanaged: true`` to stop it from picking up new drives at all, + or modify it in some way that it no longer matches the drives used for the + OSDs you wish to remove. Then re-apply the spec. For more info on drivegroup + specs see :ref:`drivegroups`. For more info on the declarative nature of + cephadm in reference to deploying OSDs, see :ref:`cephadm-osd-declarative` + Monitoring OSD State -------------------- diff --git a/ceph/doc/cephadm/services/rgw.rst b/ceph/doc/cephadm/services/rgw.rst index a0f130a8e..58318b727 100644 --- a/ceph/doc/cephadm/services/rgw.rst +++ b/ceph/doc/cephadm/services/rgw.rst @@ -65,14 +65,14 @@ example spec file: .. code-block:: yaml service_type: rgw - service_name: foo + service_id: foo placement: label: rgw - count-per-host: 2 + count_per_host: 2 networks: - 192.169.142.0/24 spec: - port: 8000 + rgw_frontend_port: 8080 Multisite zones @@ -224,6 +224,33 @@ It is a yaml format file with the following properties: ... -----END PRIVATE KEY----- +.. 
code-block:: yaml + + service_type: ingress + service_id: rgw.something # adjust to match your existing RGW service + placement: + hosts: + - host1 + - host2 + - host3 + spec: + backend_service: rgw.something # adjust to match your existing RGW service + virtual_ips_list: + - / # ex: 192.168.20.1/24 + - / # ex: 192.168.20.2/24 + - / # ex: 192.168.20.3/24 + frontend_port: # ex: 8080 + monitor_port: # ex: 1967, used by haproxy for load balancer status + virtual_interface_networks: [ ... ] # optional: list of CIDR networks + ssl_cert: | # optional: SSL certificate and key + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- + -----BEGIN PRIVATE KEY----- + ... + -----END PRIVATE KEY----- + + where the properties of this service specification are: * ``service_type`` @@ -237,6 +264,10 @@ where the properties of this service specification are: to match the nodes where RGW is deployed. * ``virtual_ip`` The virtual IP (and network) in CIDR format where the ingress service will be available. +* ``virtual_ips_list`` + The virtual IP address in CIDR format where the ingress service will be available. + Each virtual IP address will be primary on one node running the ingress service. The number + of virtual IP addresses must be less than or equal to the number of ingress nodes. * ``virtual_interface_networks`` A list of networks to identify which ethernet interface to use for the virtual IP. 
* ``frontend_port`` diff --git a/ceph/doc/cephadm/upgrade.rst b/ceph/doc/cephadm/upgrade.rst index b7f60b7fd..221f21244 100644 --- a/ceph/doc/cephadm/upgrade.rst +++ b/ceph/doc/cephadm/upgrade.rst @@ -119,6 +119,12 @@ You can stop the upgrade process at any time by running the following command: ceph orch upgrade stop +Post upgrade actions +==================== + +In case the new version is based on ``cephadm``, once done with the upgrade the user +has to update the ``cephadm`` package (or ceph-common package in case the user +doesn't use ``cephadm shell``) to a version compatible with the new version. Potential problems ================== diff --git a/ceph/doc/cephfs/cephfs-top.png b/ceph/doc/cephfs/cephfs-top.png index f65aea1d2b9011701999729c8c71f9f00344c845..807029d00703effe49032b6f7d2d6f46492f7cb6 100644 GIT binary patch literal 13928 zcmeHu_dnMC-*;zqo@q&l|8ak_SR4&Ari8(m6aVrMcI23O7_g& z+)tg?_j+9S^#|NP-Jc&M9mmJ~*aScPJU#+SnLc=|61PyJN@c z9a3UfZa8#J_84hUsI2k)&XTrla@O|OE8WR0cBCcg%9(#+CD<7xo}bO|SneSI8qmS@ zm7GiLx%ZVT3Xi<*-#_i0eKsxAW`9!jImH7O!;b7xHSU&ncA6gIFZ-4#!y`hi)2!=i z>T);*q)G5eaDCe9SD<*Twd2QK-jUsZe)3OV#Q6TlL{iTz!|J1z!FqanPD@j%2?@6i z46;p{6LRl3PJPesbD5JJUh;bI;B!;Fbf~-g)`mT)r;>)o1?N;RFA`GEBPUK&t&SDD zuJN9%bDl1`O43>ED&SeVGT$sO5SJq#CEVKDYHM$AbnQ-00hd7qnXGKB+s10ohkK&m z3LG6BBO@bCO-<|S>SktUY;(Ed&Rn>`(jMDGlzn7K86&GuDjh#AmN*R+N~?7+lSA{(L~=eWUtFDr)K@M2Y7o#ILu^4c9L$EJQ>^!qt=Xk{hHrq zL}7Jjqh-kkL#ou|CD+yYE06xc3R!mNrpCn?{QUCR*Vi|NhIsvkf`Wf&XlPN<27Z^} zus10w>Fd|8-@bk8(_bjqeqWoJyW`C zs;cNb4p4EFef#!A${|g&P=chh(4I~F`X17gr#&vpS`2P(ta4tHSwA5oCnx_>z;8B5NlB^gG9&Kbz$<<|Q7(ecBOL#lGn2Bt^lkrH`XBC>xVZTE z_&Pf~ai5iym2>CL?cM#bt4m!;sTs!vhwFk`rkIjaR%T{qQj!;0Ok(1(qemBideV{; z)3l1N4ST#Kh0vRa+}-mu75aG(0$%qw{&E zg@uK-b|0VhPT7AVnGp}#+uPgO85)xm7e~*~SzS?a+GC#Dj$guj17Q&a!M zL=C;*_3PI?$oO9rJIv5!os-JQ$nfwN*<5}UlQc15POR{y5kNVMlZ0}ay?XHAK|w*m zvTYA;r>UvQKb*LC_f8y1ZTF4sZeo)rnVg(lXg%zko}T{dRa>d&AvG1F)QHY6&yKM@ zIY^z5lHxMmE7bYuY;oLrx4|U;$?b-{W5?#6ji6kXTe%7pH?_3fX8OObEloF@d`mCc 
zymtMDq2XD=z>m+5=Is}6*GJZBw5aEpQbsVlt&S#Ah&P6H6*=>`&Q1;uwH4Z1&Tu;G z>+7qiv>#!Vd->`W$K(1a5$#3VA>~&0%@t2Fy2#`P6>nc!kva`IjUp#5v%t7P$`nee zef##&d6;*+e^5QRy1H6WSjeXJF(gNHMB3=el`9nDH2gM_SZOn}LZ<~&PR=ZL{cp=F zE487VSLw-vf`gmw-RfR*J*ytf&CXV!82s_$qSb(;cMP{hSJ|MI^tEdS!_xhG>#-I= zFJGd4*iQEr`3bLjk!9MBE4SsaYJ9kCq->K;PHaopj`H`vOo9#Iv!4o2PiG_0v$D#m z7hw-^(Y;8rpYH9?r*plTf7S2Qa9u=TF00w3%sW1(d1ET6Ob5mD#RJUdNAl^mm-POn z?@djLM6b7R&k@QhDvbNfo^o?@q2BK3~w*(vmwi>mW$< z%FX2_#ARe$S2ry8r7<%$4iR$B4GL=AL+hfTtu0UTHaU51R78d)V0g-pR)oPLBqXF^ zlHNnmaaK$skUm41zVh?u2V~?_RD32)anw9_Zrr%R%gg)Tjzm+E=V_W|?%^{Mcx_p8 zbKtqokB*#v z*75)ZaBT%MzYYuI3{_QA!|+^Nb}0@Dq9o8?yy#y)Jh!rr3!S-%S5@zY ze~{WlpuF(mr((Lgx}&3`wC?NoZe5)Zvn;Nxsi~>1j`j2@=2H%M_G~{P?!yO%zGBy{ z%{4Yc9G=zA&JKNj-r&uCE-o%L6?sWXV`Jmvym#Le7rV*H)h&#-3keEtZmvz|n6)e4 zxFHk9ZLzjEDJv@*%&HksVn?hPYfVnqDk^Z_+6)%nsbW>RXnEw%f^Eu+iashTD$2>R zH87xeWi)VdTE+2CNljf_7(Yf!TcTf1iW7PF*yU^29A(Vsn=k*m=aV09-jXZ$YHpT%FMr4Zi$`qI+U!tO3E zE*?I7cz)Eu&Mr4NI2gs1le2&oIOiR6B+GGb=-^3S1r-&Z)2A=flUGJDD^l1l|JL2J zXV1->H}~&<#%0##B}PZZ*Zs59Gc`3eUn4C({_IA4G2j~MDM3|f>FUvSy#4+&Ce9ItYw$YBIl)Vezbk2E9k8% zo9BD5z!M`QOlrWuks%>TX=$v^3%|FMcn)_$@}TxC@;LQs^STV{otjf`YsVuIi9i)44gvuDz1=cqpgCYQ%iZGTwGh|bTS zKhYMNTU%*(@BWAouwPx8uK!3uPEO~@G1=d*r=#=R)K>iJRq5M1uU)&gckf<|LHE(I zgoFe@Fa`#ObcLMw_`#{3!pB72Jwx8JmYiqL8g{(bO)a?2p>pH;^~%~>OkXBK+2_w! 
zm?##<+x_Y&o##hpswNK{Jg8Y<^EM!$sUt(5lamuQ+a1MhH*{e1_r*a=7o#EX?pzBL zUN9Oh>9y@u|J`*DWsI(tYu>4-sTmO(y3(qmE#$g-qpOKoC2e4C1wd$?Ew$M}Pz$|K zQLzb=a+KbaEFduO*Yq^)$&)S{s|$bcKYG#RGHUEgY3Yi~CIEgYiw~{HCJMH{#B*w5 z!Ug!n!^2~FdHI}D!o|U@^*MP1gMsh))?aTK>gg$}tDoFoa~CemFD$TJxL`ia4VD2coP=t6QMfjTe6zscJFn>IoQ_Zc(qQ+S$C?JHJSU~$zhv1f zI1_N@s&7N$53szF_1S03)>og9(!352zfk->I$9SzEIHY(Y4`5kHp@auv*t>5~_3he2>cz;C+R#o}TT`Q`s2IXr1M;}Jx#>Z6 z^=kS0x(hXRm|P@*=2XJ^^J65tfoRVJS5KmQqlEFoY|CFMnN57q2=^Xg;N{~(eY!5Rs`MW+tF5SbpOu9pV|eX6GxMH< zCu{)j^K=Bo=w3ucY2&1>E{y-Z^E18zFpdP{S3LW3edUyKL|tO=UsTbiK5+s#-{3-e zTG}ZJiuuw*BEMH$h+bfB&z@C}HYe^S{QdXekN!D${@l6I0je0Ui-X5-D9N9qGdw0n zgA|&1R*-OlXp$*vX+;JH4-XAV9E;?=7ZV>Z&P37H-X4ZhOjiHq`)BXpC{*R-ivWg0Gk`gGG<@QJya$cNBNl78?G&eS8np=vK3dCZSu^qwvUr352?h$|r z0Q#4lJf5Lnz7Ok5NB5`Dfa%>IJB^QzL$MhfANQjXP*zloiH*gkFzT-WZdvn&V~AX= z)Uvg;m6qPWVR@^&fxsztShvbAG#q0JJvxPk?u^V(!qn^6CjhTjr$O%<-7oR*RC{Uu z=aD-!X3&>FcXD%cm!|s?t&g(?J8!N#^0l+Ev00j%pX9eOF4vk#g6{C)9(Is=&w=B2 z$6DUT#l_*2%8kD~#-`=q?A+|Ga`UEb%WZJd^KLWEAkU(rqBn2mWMsS`(=#?!)zX?m zpENSEUzs%|O3KN}-MH~GFi=TgoQ0K@o0Id%Q!#*mGajfQY-pwGCZ;H+3#$1S`kwYj zhwEBeX^Dx6adBcsSMf=Mf`YJ9>Gn^uMPydk?YFR`$~SLj*m?O}46WBgUomu{`mlfM zkL=VCaj&|nDorClQ@8Y=UAuOnsDYk^-PS`K^HBzi)2c6nf_jRa?b}k+Jjk%W8*5AB zBO?{6#c64EV3pN_-hK8e*RShh|B5?iewAZGrAR94>%snxm24)BcT^{cA7mQdEv z;O}@;((?_|y9trn+S+|%tFyDSdk&qdi4?kp<*5l_$4uS%;0^jAZW3L>WUQ+@QztOp z!1M50mh;k7vPyc@=80R_P*c;f{;$vQn69p_fCu0R27YLs<`NSBMA!&9%}X*-)P?f} zl70C#gPK796mWdPL?`2TLOz!EygpjkzyO}=YZrmzOI*oD#jlz0@Nfga1bV9fI_F;i zqiKX)KD>EznuWyz``ORNqWbugs0GRF=tA>s#pUd*yWTS2$8Uge)ZUW&a0Q9^RpMHfu zAvMzCOueZ@)`QI$gsmeVVMJ zt4lBKPaFNX@9f!l45Z4r*kc&~|AK&#OGK}zs6K4#;oHaRDMPED{JC?(ZOBPapFU;V zm_@0V16^_296PP@uRQwa75@*u+$SxIX4+Tu?{7*cQ2)H!uvhbF-bUE(g@wVpyeCPG zzWGoE=(DLYC4xM<&DHYE%5$`N)6voKo|HqkX0$g8JF%sHo_yb9yteuki`8Ocwl@w` z?7MfbqX5VFY)1Ui;?|~1`kI=EB%RqNN|!J1APk_7wY9ZP7cE7u*B+ter8**Wn1bRu zGdnvw5Tde@5_&gKV_iw zz88C01ljFip>*fLNVKl?)$#P6@882CB7pZ6n&t`?J=L_eEp>J8jWk57ICr~hG7dDH 
zwfagUVE6sQy+;f2U^;248U3x?)U>p0TVE#MN*?ZNS!PvzVc?#rd_zlX6N;nTVy6*W zI=zQ^XXe@S=ie0+@MEt(d|37q*(5n|fZhYE0gZ)#Z6^?-^%+$?$HToy2#}QIBL(*! zIwWE}R0En5zu;o@t@rPx*tE{L#Tt3>iT9}t7GX9+#hrB>g;F8M^$xHJ^^6x&zD)A0OS)ClEITczMMnBorxvA|hl-I)NX7uQB#T)_<4cj3vrP zdFM4ic9RHHWyYc6fEJXOrwtxj#GIM7hR*`6tgW%}9;9rb^7ghiVcRhZ@$2Z=F!p`|69!|@v)9uXljI1Z;i~{3Y>k3Mw?Phxl?e`Lrl~Ye}nZ8U*o2m+8rfi{a6O!v1 zOR;_%p;=x~n(6z(fK95~k3Ja4kOBJnwjO$oo*ek_;PH$1W0KIzp*4XUyRI!Z3b2jG z1=*t-dLeuwBa)yV8ot>S540>y8Ej~Z|8w40LInYk4T)ohMv6>WuuA+)c zFyDSXp&mUN`!-@F4@I25rBqr6OoM>|10$njl$3+R!x*IWWF&Dk#@OGH4gYhHEl>cE zB`hT*zCmyTc!9t>8fsx;LUrQAHFxEEkj^o?LPCxcy-G^l)z!Pef(){+Wybt(u~s7O zgLx;%!Gq67MvRFSXvcllo4APU$%Q&44ds1LUWGyE8c!M-rVh+3Iejc6!BP%p*eFvQ~ z+w>cHr>u;O!Tghn2hO7!rY0urJUr7VfA}>}X|l37i{7I^{ru!HSzmLRfmhAV&Z?@Y zh>4}_TACki&d$EX$jG>7k03R z5J{5cJ1xAUdtFvgo{?6^AKcYxn<8*w{CLJ9`M6tI!2p=jUIGSvJkuCR>HMB800NXM z`T|fQt%#c-!2se&O5m%7_Y49x!F2}WWp!QjRC^xCk<3Bb(Ee~YT9UrNWn~s!$@e6m z-km$WvSv36@IA(LxIFMff6)j@#IB zy03(dMdjs%j}XfV1MS-G@gh;DJx&Tb_(yi`KgJp-vhqh^_l!yUqd1e3k)1Z`E@S4m z9y&mvXJYb-VOlLVrOMZX6gT>9eW6|Z*-;iip>25UaZC1m_d(X1G$i_=G9i03o2_<#$?HK`uXl@#q-%+slS&(OriH@yFiEx?S6El@DYqI)Z8pk{bm`zg{lC0QwSM{^P;(T3;^zk{7uS6oP1wf+qV3cx-|VBUA69W7U!=` zT}_eZzI171Y)mec<8e$9WTd;i;lO$g(XJZ{?a=I(NZ5@+QyEysb!F{B*di*uy&`g? zI7oL!kPSh7WsnK+fAQiG{vi<@CTW6_ig**dg5i92?y>aMzm)XQ|HbJ z`Q|Lj3$F|eN4ZCGTbvM*P)XC6nVr=WN)J>PFk_V7KL_aw2pB(p!7i#=uGwYa?;?7I zzbz=|=bCqXe7px4xE#^z#f!$Wr$?X|4QLm|#5^R+bXl>2o#G_Ey6gC=5*g78P8%UE zI{HDKjy4~C0o{=!M-p;4j7?3g@->KF;1I#pluyN=h~HM`;ozYAqlbSx+STk(U`k6% zo8XaSXCfPrMMt%({MQT#0i(&o|JyeSDq5!ZuRso{jvv3cRTsh@`~H0cG(SxBtG*}i zSy`!nbYP!*7adI?cnCktv;GlR8xK2Z&OwliD+lNI8Tg{47zv-MOK5?KvBfRJRlk7fmh3M+sh|Uv$FE^7WKZ5y0AX( z$0ln2M~D}m8`MmAOIGkhsV*tIf z#*X``ZI|S@0kHy%D1?QYnwmLAYBv3UbjQk(c;z@2K8Ty3oOsdDz<|s8Ot}Xccn-rS zWzW-_wT@hiunDR! 
z{8O3r~1H*apcv5+X>q{3QAd6uDk z33fNY46uP_vC9g4Ka0s1Fl(r(s5SuWaFRQrx$wy&vEIJ`NB+Zu(liExg7W-Yj854TwGjP3ly2JAJsAggzokq z@v&vyKqbz5TEN}_8Dj7NjeiOGWo*n1O$#ptxAq`I*Zgy~0=VzaE`1E&hDPpzs%AzJ z+$j+`wF%v>IWB%lYO@u!B2XyDJ;NT*CLOT$BKvH)50$=-&U=$)21?4H zZ5vQHh=+?y=w3fO@G?mAw~BhIHeW`N(fzN<*ZlSCAO`2o2R{I{VKl;ICojoev^$hR zr#Jmj)Td}UweMNs7e~!x`%=5&@{G1SCMKglekkBbH_MAy^nP?4tO^1s4HmI8iKsa- zv>hankDR~|G!L}J$a5BuuZT`IZnjo{Lu84vz;-x0uOUS;6a4VeqfvPh@LR@}xN=Ax zjeZUoSzz9oxw*XvlvrCY4%LR8^Db~*<0cs31kC?)h*lV~WbL38R&2B|=B!jQ6BAQK zM@LBcuf`HS>)8P*lck(PR2&{|^GY%@H2_x7`EdsvKx{c8BO02T+sdiaPe8Bnv9ZO` z=3!(>Jp55sd08_Q;;HI6-ej}Ce~Qi!+q%^D52j06{ZOT5OBIQ~dC6_v0WNUDD}R5o zp~6|vfP4fke5g3P=GPBU?P3)EtU9cSmI=WLTAGCK$r{DH<$u8NHQyK6Z<+FGD^;zq1s-JEq`?Ut%4Kz?IA`GErm3?j0dNXR*w%^tEpY2od_bn32r@PPVR9|0H z<1!k6b_fL-rXf3>(^|u{Z=c&5O93$&jEedE`H=GSQ+*{Rse!p+6JeHj{$*av&q=eI zdk><2ZR?hkU0mKVU_s~&_(|Ug4=*5frBG7(b+{XX+%;#U8kXDjfW0t-=d)4`_LGvP zB`1f8xC_%cJrl7dGBVvLaod1j0YzhJqBAxwZhoq8I@h+zF3@$Z_TUjwAHp}XlEb>! z1kz(m0^0XZKYecn|1GpG!o55C@T1y!8$#S_;pM&m~jCm z@?KuO3FF?VN>LRi@UGVgDWStcV!M0y?kz?Oo=%>=k&EnQAeJ-KGz`55?`h7YmVGAq zr!a{)RJ8gZ(d_f`050s_uxj{T2; zaVgz%*~Yoi{P2R{z(8xTxTRj_bnBS4@$?cTA{LJx(oT-d%g6u$t_QP}l+=lQQlnmR zZS(Bll9QQ=2Wxkoa^PP@EkeVLdgWyHc^!iyg?I1W%fI{c3oP1S{rwVtr&waiM86vr zbvgc*S(G$lE&kxLj*(G0|4d9w46x;6B4MLCJO%#E&Rx6eK~Of=SI{@@B<-cl0^-!N zZqKZ&V^N>eifA3)#YnMll-BW%NN4Q1dNOJ0TmNcDOQt5$$rhQn6=!KJ2Ksa%#zHo~ ze4318H`v3z&T+hIs3ugDgbK1KQWJisgh&X|uP&^hUE!2D2=+oA19=J^%(LjW+gP<_ zPyw6xUgVsoz(g^9y3GRmKdV}%N9j+*pf0en$zQ#CwQ8k04Hf;aX9o-%VGlsKUjS`zQ+^no=mVL$hzK`Rc5dS3Y_YvqB z8Oy(ZeM}bc@?{Iqb>-l;uLFpKWogQX>OP3=R7p|IFl{}b@JKbUa?lF;aY}Ubm1D@f zHBz4Q4G2(ladC+fcI|BwyPi3W`rf~9U&y0Im{b_CpVcc0WLJV;_Kl}%9xl)kP-EQX z^wzmfzRZ^}btj|R-R|?xyQhR*B)0}&f}{W`Pk6VfXRtC6X(h; z#0b7T-v8*)Ba|sn`Vj{9Fai5JE-r#m^-%)$tb{UX$o0eE-FqJs{{T48L$q?i(6reh zN(u|RRlPjtK_(zj2+!~kHTML>AqWqUu1a9sr+YCRCjLWu@;D7mL_mNQ#B&uD6{>t} zt3Grl#IZCq!t?U-e7seIPjZ{9T3O8^$50IWwlkk@d@Lm+%a>t%EMR*x4NOg8=U2qB zz}mp(2W@?#n>;DQ5g4##+?+teEhNO 
zN0M`JV1Q)z!#oZZgGom`Y(M~cLL8!J<-anHX5LZy?>^8W*H(F7Yb5ui^J#u4SX2;T<{X0$fj{+MkNWKnIhH6zpto|JOeY+5Dy?E&U8r5UwNayQa0vD_>d4 zFf_1j14~K}k&yQ=VYWVfDUfZGxwaWA8)m19YJxz=edf#=28P>s5Sl3FrYDDJBcq~9 zj2mO%YeDLPP!8c2Y7@+at);EaK6t=@IeQ6Fu8W-r1L2r0@9Q?Gf`f~k#0Law5sLjv zqky0Yf{X=-zcfYs>*K-Dg=JX0HdP2W4U-taH9gIX!B%}TqOEdw?_#6N+P9*x?ISFl zQEhFPfm<0?zv@9>!dd8ZU7~Td`l1eAWlCQ^j*`dRhCvBvhA9~`fBM1&AK^#o$;q5t zTtV((w{PDDQ-g%b;6Xt_fe0ZYfo1qeNWT<6Viyot!@fS+b0{I$&&P+}<0Txy!9iFT zjgJ}S!i`N#4)1%6j)R*ty-R~neNp!zCMaW=_8`h?x#xvynZx58I1q_4VaT z3W&U|$tpOUNd4v(6uhn;JjHL51cw*9dgjcw--1sBe{#E>C0hitW@GvuJSc?Fo`b^& zqTe~|;W`LrAMK~L^7ya7%4%2X`b7q zzT$h+4BIDDYGO8A6X7BxSW(PxFb1GQZpT!xEPf#$Fm47?i0W9uGj!7tO)u7a(1zhBSu3@2J0a{Ue#|^=EJD zY;+o9#34l^?K%!w)Xt8(SwB5I_x&GafQ`WvA3z!%XCJ9?m}ypSwKvr^b)YMv_}q|W zKpYlvi<)pg@Iy{s-emyv#@1HX^<{R>BB1e67<7@5awLsyZAYJqK`em+Ruta-(S55J zO6WG42ggF0JwB!2ge1tya1>`=ogFPA$f!;Pu#00qRaTyZ)Rv>L*-F_MCzo8*%uiMM z^OneK%f1ixfgS8+Jp?((3K#;6 zm;|~je*N|9*CjKRjS4EXTH&au zz`!$G4<6q{`p$sis}qTyP*+7sDGY!^rv#UQjB6P6ta9HUY^OwzILe|fX7B&#QR$m2 zk35J30zIJB%IqM{8iE}7_>!`+uyV|iav2Frm5<=hao=(S>iEXJG}&!GR6`}s=xy_a z=IGIHG9y#5FnW-4M(PD`N6Dc|0fkc@%P7~o)T(!lc^fMxw)UDt_ze6$I2oU3=|*RF z;2w{UW>=?p@>)+27PFx4Z#$mqE}##*ztKBCS1Ycg{zJ-&8l4jKS4+J7@+ z-zmRiG&HH{=}1c0L3jdqJxug@{u~y4y$O~X- zAUPEl76#$Kd11^@M@PrZ4800iVsvZ_z^7!sStCI{igioE1Jd68!!OOds7g3I1xX3G(TdH{`C{cBF{6`VZ zp@J+wKfi~<+Q@;%xyS!vVMraukowmRE_0`QlH)(cyeem$Al4NyIkU|)(%R%q{s(?e z?l273lzhSmIc9anuRTq3ZfPm_I))9uYW?`_x^O9NGQ`O*va^Tc=>ecCwl>9fN7Wl` zE-o#V05;sYGlgkw5YF7y+4 zIAWX@ajkB@e|*N_c&2g*;SpRgvfvjrjW&R-zhcwghUXa2i(nPPRs$riMkby-PfLGJ zbmvW@#6b-)!isU$Ad;6|GKUWY)f-;CoB4qa>PMB8l}L!zOdrWqh7D2i;*90@4?w*c znCvMjICab>u{GPf7Z6)nz&}nVEueiu(BUqE#|ljAbM+z^3SBljFLJlhLf1CTSGJOp zm#^>WP>ohNe(cy1;tFJch@!*u-8-+8yXD+eQ{xj94Bw0v1X5l#NlMR?N-v^QIB96} zhg{x&cu~H2lnjj$@mQ_AI}uNxO5r3cD3Hym2Di7yr70Z#FSmPS3e#jAFeirg|AFQG h|9{1f9R)A94(<8C5&ai=QpWarNQujdr4e-={vTkEGg8M={fqgWvw` zea_kY{ByYGqTYGonJ4ah-D|CT{A8qrpFJUXf`EYVOjJZb76IYmd+_}?l)u2|wYS`^ 
z;0vNPR8$@X1!ZPYdI9{1ZX>8{BWGb?W3O$ck057cYh$BtrRy>H7y*G4K~#WG-eGzN zZm5nWN{Fy8`t<{;{Dxq)#~0s8oS6spr;nbTJ}NJ<@Ap2Tf33qW_z~ThTmdQ9_sNsw zU%!069M5q*cI3^CTJEHRy&hk?JNhPJU~iBx6(28Zhc&#_>M+W|R{;|jdE5S-sOd&l zT)d~nM>-%t3>#eJ&Xa7$gFM`A4pXfM3662naSSQ6p}o`iqJVP?Fqk zzCYB+q@d2Im2%-4aVpNt6j3T;_j;t!^Hf~&^0wWij8wonVeiKp6zt! zX5*%*UDJAdb475xF}CO$8KE%yJeWLG#HQM)V2;!E@_0Afp}oN+J~Wh)k&)f(`~w*o z8N{#ko5b5g``uZ_H#E!F9JbRAkN97b9Uthn&-|7r2xM~*amU9u<`bvZbexTAJrlh~ zK)fIK7ca4n$EeE^8>*_ROG}tk(!>>3CpwsZ6zh<%Fxfip8)~W2?QE~^9M9BvcOpF| z6_g~xxvz%(M4{o~>w5kBg_$|T5Cw%Xlq80%&2P@qOUqgn;@CJi8?(@~*`ATaqqSd6 zH}*So5^1uWas%Vz<9TXvKjvy%ZsipeI2|{CMZbvW!cgNyf68>wIbY{`cXJgS$J=qM z-T%oK8D8yr8;2a*CxoI=NxJFMEGjPk?bF6n@ApmJ%jNM+=+yPnyK!$ZAKbr=Qht^C zSz@wQz2b!ST8G8Y9 zVO(o76?@um?@um*QGh?XiHXT{AzV0PPMH+@y+&dQ#fMp%mc(gWfpLn){ipqa!2R z+$a$?V$t>-{PN%i9h#P=U+GuL+Vt#DVyml`JQ$Arnug|`V|Qga*TK;lKozGen;LtKc@|e?W=cK{8?9$3K4D$A>%n`aoo4X4xhpx_L=9n&Pva)2g=}hOWW0qgiK*4i##Dw8xvO>pZlTmXeqE z6&({RB`PxVh*pB{V}5@A+{9XBXy|QwU(Rc6G1cje%1KYVDWO2(^z`xDu&=|zhsDMI zDkT}GAz?dY)Hx|BF{g`Ir9=I!URz%|$MU!l^-R#0zRS*3Q2K=h{w4p_y4(H1F5*uP7Pa z8y3JG85>iGkx(=ec?k8D!ixyo8v4p`YqDZ#?RFi__c`&}`p>ca;*LjKR(7&vPD~IP z8QR^$D1d^46T$6voSRfsRFqVdR)t>^>V5RW+ZQFTKL57o&X{GJSC0w5h81a^@ ziRC3sk8itgUo25e6P-t&b_HxPqnn#Q+r@Wji5eCx;uYLTRzMQex(!xJvukZ@Lx+Z9 zpdmk9XLmZ@-%5C(EM{nhDO?*P@ae2-YMDrv}QoGpx};37T4P*Gv5GQPpYz@QjzsiCib z;%ysbs1A>p-d&!Hljakk4UP>vhge#VAH0YC$q8;-UjEtHvg+zk1%<#=if?@e^Us3` zIL*+Z>u3R?sCZL47v0^oh4`PP?1)dxU8U^m<6O|bK5@n*GBss4zx^u~95@BKa&EWl z-f_E2Prt}>))ZL`XqkX2PB2KSI4&+Oyj}@E zTPfo>Yii6{MY<5=DkljE(R=xR{k0h0N|;C?*SSDFoS&+#EscZ2@#gBREgL@E9RBI8 zn0QaLM0$P#Zux3xR8$Y+>sEN265m6*zfT4(PDJCUhxdnK&@G9{_VRpveT{IO<2ixZ z=^{U8JOih8^2+*93*t3@PdDw0rw65IiIdyo%hac2o?>F%c01e>DUS{H^>u?o(eZFD zI(tUk_ZMy*9lcPuwv)IN*7 zxbo@j6zMBXm?gSN%}c?@KiDtbN((l|pX6~E9{y2#iJ~+dYtKLpPbT5bka<1MCg)%t zSW#LRJ@?Dze+PeUG4A4*V-SYCYv z7hcoRL7XrDBGc1m6;n`95ffv2y)LE8ud86&*Aa}r`pRbs7Nmv_3;0YoKu3MxYPIHa zyG{92=zc;GcJOEc7_*zM=sWtUrEHX%o?hK@=!KGGOBg9GD$4D8nVg+1q@m%(%sMeN 
z1U5=eR(|~3p=N_|?5|H|DvejFtce#FKVo7&Rjb*!U;Hq~&cO$|?Loc)k`9;0NT+#P zTjPuwGM(?Fmjna_S9{~T9OZw0iil*0RZ>#AV_EeA5b{Twwa@ht%+=7!?L1`e6tz1w zjk3l#iOGJqcYB};oW{WK&)#b__2vZcNVZys^H`VgA+}Z$#IW+bp1u) zuMaYcxZ~)5_T`uIP7l~12%#xui^#c{e`Xe8A(lb@XRLdrLKB)Y<==kw!7v2d$q0)5 zx1SJrv6(#o{w)G(gw&M%Cuc1z{`Km~$zXR$YV~!8$yk+`$}sW5dJiKlVj? zM5egtQ63*Po6D=IRpd1-`Q4~hb}Fiq?V~hWf{~H%AX@>?~B@yW@m)ZR*W59AFHW!4D7>iOE1_5Ty!x z$W#8e2tMW2Z8l@9#N#3>H-Zk;P6~Cg0w7MaYSSOR+Ig+%@tU=Aa%t)B>@o4H7y<=F z`FO}q!a40@vfq{`$MBl74(UR4tQ0M~v*&05EG*l77x3f9SZ~mwa?}H+Mn(cC0JE&G z8-ZG?k48jPMSCP&fwNk5YO0EXdUkk(xyXhsHa`3q*7!k=kJ|dpn>SL( zxg=FpvukUU!xm>swnWU$;n}=VGM(WJ8)!&K95ggtjj`HhY+Q77SDo|u@$=MyRNQ)* zQuGgogn4@AO zRx7xn^^*1rU}ji8eQLNtwr63lP`5m*cjx^n*JAge$$a&M5QetPTSUNKomI3kOj*zo zoS+?h#=21gwgq%PZps995@zX3{77 zV#o&JGS$_p`uaE#5eo5WTIS{&6Q{x|Dvpwpa8Ni5Gs$X{Nh1k5(nefz(9M)KSXx_) zOGzbU|75z8w6Q6(+nu&qXnbrIp#y{H?Q)y0Ew8N(XvF;dnVyt%8W4O97NaF{KsUIiQ(v{}3rD8%t1Qk0dNnWUm14U|c!^0kB zL^US?1Am{P07m)Lv@~=m>sT4ljHak)Q|IIY$U{GWPN$}Z<5?Emmj{|p9#^SpXkZZ% z28&SOs~AJLhDPNFy1Fc4n{~>HmFaN(vfVXk%Km9%>j-xt_Iq;^V`IXC5X^Pg8wL(& zeSzB7ik$3hEG^2kbeFe;pF9ZBA$?DuJ}t=3mJ)F|dbfjB&T&wF#DVc=bGVt0`umG{ zqVq#*b@zmNy1IUdq<(31C#lQK&%ZcK%x1hZHa1RrjCwTX7agr^Z0tCLeHNg3v^o>V z=~|gz-hXx3*V?MrFAgXRbGUo@2GJM5HIW8?<{|>YY!xLxNqn?PI-7N1(j8c2ZihfSObr+|GpXDQiP1vuibviN^nY7^5fP|sf-Z)k*Qwcc3K)kFwT9Zc1yrsgKqYj1{M zJnYe}C>BjU<;u;;!NS2YkCjzXS#jDVhjyBM@mubH#+<>pRGXTg|MOtO8u_PKlFZAy z>5zz+ot2FNYBdLCiqqZ?_NHcL0w|$fTJ;Wr&@{Gk72eYF@_L7ZP2Fi~B8@j@NyFNL zUTqRo;+lL6t8qRdAtA-Z&|P=lh9y-QFXVGC^-7!OSr-wTV{11=FR){pP##ldon z-Cm8h9!3O6NIW%WA^1i%cRGcG>8EFAwtuLvGt(=O@R2N3gb@653#0ctP8-ONT?GXd zV6Y~Do|A2cz9LHEU^}^7hEtL?O%_^urKfWSQq2yGDl18Iy?PZeAO$wWci+QAZvB^u z+~6pHVPZc~k(C9QDg5W=^yny>%V9}$?(}qZSpS+*fr>W|Ef)TTQG2iGhtWh_2ca!@ZbuQJp>CqaDhA$-iJ6_JAx&;+o=2k{i8B2(C+YXaR6Sv zt3zUt{#?B-{FG>`@J_3C0e^wS1054LncQ>JWa|R&_okEaRype&1m;+UbQKqbbpcxf z*t@N*y+EtM1u#H>z7iE@=!IA7`^N>VEO9z#_P3p;z_ zXNZ=ntT%e|Gn4`YD(bXKEjwcq6Nu%tdX3u42LFF$8=B+6Ym80Hg%DkleR$>M!`rD- 
zqDi+A-s6+4ib3la9S!4W;}dG73(}hNcu)JT>+|P7&(vIrtVOl{2}uovhiI~B!9DC| z29h@>Nw~ManvpuOT89Bo76& zCm=;Gk2l`PAnwgpf%RoGIeyE^8sUG~U05hW5NN<``ULv=)hjwOh^T-ko69+=F@6@v zi@($=o?RYq&NnbBV&Mj~>6F=}3xYwAHQA#>v7ZQP)^{wB87wX7Op5P_bx76%6yvb& zYyS=k(h6kXAUZX?^>^2DA$BgiC}{ zHre~L8E(0P6X8NTU{G>%<55=1+S@DPFct?*!v2lt?IWmp`Z;aL6KT_4Gjt7NL@kZq zL4=+g^DvlmR?gcwvDS&1hbOGlk!8e*$=(7yT~SeS;3rR@SQ@FPmGvPB1-(#?Lh;U~EoZ^1|9Cu5?Bsk!p^Et}s%gq8*=C^NqzgK@-V=9b= z6A}^tEd$j1<9YTUP>_)4vvTrA#>c-0yTkL1K7Pcw_Ijekr?aG^nQ$8y&iCzVZ@Lbj z(8aIP`RdjOZNs+AwnvG<>HO_#wGslz6U?UFDL&F@Bv#rbQz%KMO0F8}Xyl%?+&6~3 zamV_-PXdi#FqkoHLn0tMCuc@6p$VOLu~E!~;1BqdMEFAPKx5)pFO(Ua~@i= zeb-jEZXN88wkwP(?*u@yx*1eO#mh-Lg74eqQT2!A$bPt`R=kxcaDQx~jXVipVc zY`UqqXp_uIM>~2z1uysfJBWw zuRtMm#eGxmN^qoeSHJw=mJF~QYYeW-=@GMj9)QXX0Hh;2G%qiYQHv^w$aJop9c)!Q zPU7P;)S|LjzKj4&dfy3*=A1(aq(b5n9{^EOPX^6OJ7&_SWHu|sXB*7!rY0x9^|rE- z(zC`&5GQD7I}7FuJ+6MOV1@L*wEeebUp!r9y1D>tbAdCGvU1R6c*4a}Th0bERd(iU zOJw}8n1c(DoS;x>O2<1b)i5Rqe;Vk9^m9cTZx3CJ_GAcvJL z1iR6_Yb+FQy;$F=3tXWcW#0&YKi875wmQ7NfmRkUx%?D8en|aRo{f!7!|sfYWz>GQ z>T8$Yqq(QxR5s4BuGzkJ`_^<D$#%tGKk|nfAc$t2pWW zN6&V74Rm~XB4Ei4qN+Vl ziq+aF4-d~ymt4DFK)^F%vvT93&Hg<(d3nz^9e6d)+)5dMBS7l-Ao2ZneTexh+vwOx zeol@%0oEDhL>CY&o-$HWZpNZhouT~L*uNj4vB}BFIY#WKtEmZ4gn~xGoX=|e@oe@w}R8oZVNny?x98ujx>WNa9(LGS17f#YL( zef>62ovW%%;o~P3b@4e~MENO!^8mIUD+`Mq!_VT)wU%GsX9~E+Qb7|kaIRkRk zmNn{3l1K8(gHC&&c`~qSs!D=c@ORSO3Gv^rEru;yk#YGLdCUjiL;AL-!*T!((x$Vq zZXzZw-rd#3Nw4+Em}9dB?UuJ$uwkd|xW(s~fw8W<9JZH`^5lsX@^elG4K|Af(h;B& zNN=dja#+qV7D7+~*rZ!u3$9 z!#1-C229xDI@t&rKPN` zt*z+Lwl+$XBw`-DqNi4e#RZs$s%mPM?0YT&b9JunJJ}XivQMB!t-ei_X?Qre%Zh#y zRQ8T|8L6oXN)yNI%*Og9SWEMT%JcxMZuA`<`XisEM1*4(I_p_d);ao|aE;^g)j1wL zCmZe9@41-@z@6eG;dN%Dbs8icX>U)rH6@ah8&=HI^nla;`b9cFJ*_G)pPZC5-qpEG zTnflkE9BH8VWI+3`*ZH4u~?9mke{>E?cy}*6e;Gb_EwEl+^-%QNh=U)88z&TjbRYy zRVz!&EKs$b%653pEF9au}4Gsx^o09~Jz`P%E@{2Lku)(u0JW|z0r z0+l{X75LW;SAq@jw$g|(vFl5z!eieWG*eU4Gm_b&kpoi0$Y9nII1-=@tgMP_IfJ4a z8w~*uR}xUF~h1vbgV_{)&xFj+oa8 zmza2rXbd-5R!vp);dE+Mm183*V1Bf&Ev`x4yy-(B!wvI7Us6XZx+{#%t34g;!mV>Y 
zy9I<6N)q6#L8NN#Z?{WhXRr87%@jkgt!))$e-Kdl&{IY$4AS?e&#&MISngo>fd5H) zWo5J|?<-^lQfkYm>XV=Du6G7*$cT=wQq(1NS+PR=im96Sf- zT-A`#*jhB(5>m}zCYvt5EzwkuLiwu@$;oMlyHbLbLbzOr4>+`=IC|ZS^x}()b!MC? zrjhWka#Q|YDx2>Aiwjo|MO@FuWt7-zJcBS zkci*me%*qJco0Wil{2q>fV>0E1a1swuBN&?MPa3!#H@q7#?Q26Tc(;C8sA*EUw1~2 zF&TwyfTRj`Tb=V71d38KR#{*Fphre);zP>g0N>$hbCDz&RT=R$H9cn2>^%4}AYO?f zzM_(%EpLdNa|$$)3kzlJZ7kKd=`j6{w>MDDj=u*6c2GTRC3zanHQ+SoLg+c@ZOaAn z2IprSvaqf=!x8B!^;#lgeLb^JCMGZMbxfP}#x6iT16dUvh+M2FNd@&&{1k^^Nh|w) zKkv*OYTxZInDcd4q%-`FV)2%Q>RS*|1rj<7sbtoDNsd~VX=Hx8&jc;A7TO%@_&c{RvfZC8 z7)e*RSb^GJA*_xcXr-W1n`PACbshh=<(jbDhL|i(*psILk(CgC6XXLCF(!G12oSC~ zxXkRowwu=!7NQpfwcy|qOv%iDas8@nm>+A7HA7VTcH%C}wuhOPveD7exQK|nq^zL$-oQYuxmsg0BR$^$dxd~?n~&h+fcy`jaAn#um7Opi9$thV z(8JI^@K)YEBs<>WwTSo;Ze(uWX5xijT=;Ust!-t!hzD4I(%T{Rcx|og(h1xhntkWt zfNLfjxPq`Og?9AqkA1OO$lS^%t>D>w$fG@jgim%%!8tw-n~o`QuSwugS5UY)KWJco zVo%>G*6mM$NdtJ*I!(?xr;XHFXTa=;a38`K+5Nb83&*`aF1-XRG1b+12O*+fZ-ibC zkH8II!brv@*tu`lLJ@g*%qc0o%B2ro3YzYOE7vQ=zHVJ9iD!g#flwhN^ax0bK4?|j zaM#kTtQd!lziidT#l?XY0t<^DXcPVF>{VC|J^Fvy`0QSe7jRe`?!v8Yb`ochpR!1b zUVKQC>5x801jZXCj*3WfPb$fBCrlzW6_qxFk|Qfr?t%;Lh7^xmXDnKU^?9dLX4dhc zF|oU7GNXvX0vrpqkm$-6(AOJ%Tv1^#z&Oag#x{+m@u+v7KMln7ME^zxF_O6haq%UW zQ~Ix8ph%qSHYo)WEht}6ua*BvjJm3#ro23pQ}4d>$6&50^5>um1wId9A#VCNZ`K%y zD#YMJZ4$UxWTk+`0T*6*-a=e9n{KElbsxny!7W`N0^>Vrf#}`m;a+vR*#&niJ9a86 z8|)TyPEMCV{^~gm;X`3%Wvz3&vHP8w6rV*%eF&^Qv&e7k@=WkTfG_B4P__Z$~x1Kj0`A$3*rvQG9%uZpjX}%$!4fN363k_};4;xzeaB*>egzbG)mmkiFva?l? 
zl#rPH@-@Ad{Em$y;v8d`fz)Mp?3U+MFjp=HnueMBUroBi?9?!BzqRW=uAqs=VM@G_ za>KZEQVUy4t>eefBdV${kkR5QZRutke<^*`wgeuW2KK2XBL8`V{X0H}%a^tJd$SXg zOE>ws$>9+(_u3eTbsoRR9+30_r}{%y_My08kuu0*Gj*9AI4cpcYz3Q*FFbfqTGw>C zIgGUP(GoKkUT~T%0PRkc!U`A;Y9emdruFZ>tI#t_yZ1MAcW(|3qJVFKRuZMOu(ENt zsc;w-npy1~*4M#RxYKX`P+iI;X7BJdu)?e?wqyX_UpQ!Id}s*!bFYc{Iv!nu3Gv%lrq3myaJmZnMJS?6Gr_es%(24DggeoQ^*Ow~3E5 zAz>_)WbtP9eMMOL!13y|?t`#KjhxURDHeCILww=GZ?%>qVv>0YK%T|NS1Gp};b8vK z|4>Cx&`$_!cS=sOAc_r|xkF96L(Vve(M(N>?g(iiXd1-eUG?jb8|=I*C+aZ$w2>WP zJX0hU;w5$!02xYR;y77YY(R%L@!11o*)98RFb*Bwf$)V@-*4Ez_sAhltRiIwA>9i1%lJwESBaQjRQY~jb5nC(~I(l~6Ng~NQ_HlLBosK_I z=|YB(Q~Zm&0M@LXSoW75Zgps->ucsiMJa|$T2z`xoE>Wr4cve-y@P4ZI-Bx6Wn_)( z*oXM!Xv^F-vjuRXw+yzA*<$NlXKsUy6LIO|%!dbj$r!xRfBcC2ee~u%(b4e+Y6g#R z1x$_EVA~gF9t!9xi8Jk_L&PQ?a-ZNAwbm0tMnijp$xF#UC<7)KG&$|3OApXPV28|) z{^P$y04!i_$dgP?@-9Y#er5Gd@N*Zmli-)q2&g!JsIYxGlT3@wKDx}Td`h{__V!2d z7dx!VnO}k!<^ONx-@n~Cr~!1%RTn;Mt}yCgRFqmwj1`O53uqm-S`qa{g)?(Jzx5VL zFW`8zKwdh1jtXtA1v>8<#m`tYjy*g2*R4mt!sjP3tP(-zY$3XG8)Cn6D>?n9EBd>zuE&3r2AJ zmEfTwslZrfpl?tCN3h6b|5nb#?a z&;5s*yJ*}Spi#sAF$-JMuhMtzy56l07Z8>L#GKDv<^yioqeVnSMSpaq_TEWVRQe4I zD&=V|ZMoCDKA+y@iA_tTXwWwUxTdhUkfD?DvJ8!I*6~n_i+!%ndfs4lNf=ERsC^<- zVHXk!kEMDe>0@(J@~Tb;h@PJka+o&c&5Tn}r>PDoyG-3&mJ@AJP~dAYsc%m^PIfm> z1A%g{CByC5t2;qMI4(Gmj^^gf#njl?a@v}Oho>Yd=^YCTRFh9i^|A*o{?_7pGCaH# z;D&PN1*zY2z>c!;6^n$|fQ5|>Ugavy$0r5MdP+KHNkg81NnG2*yHaaKT6nheVOA>Si($MyGvMFF=!@6Q;x-Py6$o;fW8 znEUvo!SiPOSI5{$iFt;`5vTJ}WCsvRP{{aO=73_0irWw9mcN`QR6u<`AfVGMfO^)$ z0JdJPim7xOa3TQ-%YA3o;V9AvSOx&#P%htMpN*uC-@;qi-LQ>3>H*%!dMelH$RLFc z?>!`76VA2S9%rP~_ge0kYD&_~H+-(kN*yX9C8Y+W<|jUV9VSJj0brK!H~u={C0LoSs*JU+ zh*z>WoQ|srsG|OuZPIP?&dEqbfS+IYi+g5xcxq5S?w^f}A0dX%4*TBIh3e>t1PsK0 zc+=6y%}&2Q9~uEMBIBKKbe0^_#g9;1` zI5oK_68>Y2VghcnBh;N$0NR-LTRXGFb;6;G3$uXnZ3*LEFi zq2{K4;yh86loUDv>Fe|9jW^tJxt_hQq66uQg=G{S%Jp)hL^;>%NU0{yW~N^E&6=b# zB4VAs_;@1sV!7oAEwE$H4r4%3@p2P%D{9lykm!K+Y&z!>JCJB?Zopv4;i$b{_!T7y zW0ilCkbptsShcow_wpG#jshhxEyA*g%il-=7fl$|q@&X%$L{Qhq<%s&vP3F7~ib<+E{GuW*59|HtaPf 
zIaw<#HaR)D)AUf{vrc+u>Id>rFc~kfU3Pv>sEGG`L?L5~y;*9b1I3^*%OWU6<>lq| z_4Vz$J{Re4C4wC#9K5rUm2{fID1ZLxZ7WeE<9qv4t9!ez(ZMiB#a}I(TZGfAO_IN> zJZ5VsDSAzpD0=vjrHZG_Ab#+#0_1(s@4EKbuihg?`TC-aXIQVQYE>^xO-{b1rS*sv zaXCs%i{Z5mAnimTMneB+;Be;kF%2C%yO!8xPoOLrnSD-yD0>Kz^TZz|&CqsDC`*gR}Fbxi2d{pPY^k%YnHzKmXR-o-{FNK>^)<1}Tp^ zv`PmC({y!RZWWCrCohc|56hI#MhR>O>wq3Fg@>8k-P1EQ#ToY*(qIr@mz|9XU7wAi z1D>&Gk8A2}X9xg#2HQHGgkk))O1Hh#>u5E(s4o=(lHn-eJy>xQo!~}aQx1WXqZ$WIfo;HWc>9(=I*x@8HeJR_~p&cp$Rao1U zql!vZFK-2J8666{AqYISw$B3EpePE8irN;M($Pj$20O*&X?l8kH?EyH6{Nf6sH)QB z)@`e+k>9T3mGU)|Jk2wGvTp0C}iTvKr2M1-`k5-B7mzof%&(B)pxz5(!M31RyvEQzkLi{xP zrwxh|EzrNiQIe9Ib*RE93rO*-`}svgL{wDB*r`d_L(f-v#hKkV|B-lY#(OWRCU%B^Q=ci9Lx&pXdLjtPRQW_7dE2AOM6%} zhbx8`)EEZ4z2-3Vya-tv(ENG27m!C%T`R`KGz`?|YTc`^XIaJ)R9gVSm4_PZSN!Ot zkfO|z_xF5=sD{|tDFz&1i@2V#CB?;JP!GN&8D;uNz~U*2IVkG^FT|M`;{C)gpHd|{ zVaCx>QKr#X%=@BaVk9|Usdt`-`*Atq8tH+udSzBwfIJkioYmFYpl_nao}_SUyP_)# zgz$FPs|5ymPB!&VZADXg8dmMDHEIna6vOR(bX$Z3QZ<@O6o4_+IXhKRQRa0&JPJGT_afsfNLL9d=93r4UJc&;p8YxUpo0azFJrhG13(b4hJ=GNAA-~+noOaiKt@@m!ywTKAQ zWemaArZIIZ>S_7*znLt8TvugeRnwDmj~8ykfL{}2dGDw10^DOJi4>BDJg3a$H5A@Z zQ0U!b%;@{B6ft1WDANC+uRjJv$DP&wR)4SurKF^q-hdM!WvA)r*fwgsykRp0imh|T zna`+8FfQ1A(mvN_2AxB;wm6`W24+WIx8BaaH;wV!%L&t!1cb4eOY^-zA(g`0nHT`T zciG&^L|fZZ9M$^Tlh@PB3t(1L|W7PS8u-eLM9ER_~>r9=TApQo@Oy$)mmR^T3T-0B69uy zt#56OddkHC*q89ga7${t2`;*l^oQp@c3aGEdFgj|QP2WxZ0kxvEVLl0sGaq2mFkF* zz=fi)W8fXc%}*3e{Bwmq54r!lKjUv!z*e~7@v`BMAKxWol>qx8Bna_$y9RR7|7q^% zRR>*Bvy3$?tgMOe-SSII)qhcIjsV)4OdF@@F3@%Cj|{J zAn+~p6%OCIF_v`TB%04mFetyD8DQ@Ooer;jG=NcUcgH#@Gt(h)dQ+fUZ`X<%=s)N%!~K|ZwFLRRj7MT zn@=Qq;%C`uoPhNa*%~|ucyEcV=fKPku{Gr9kHbZH!SMuj+%6V-hM(V}XcQuVydClJ z(>SA&z#<=8!TsAULk_!w3wW*3i01I^W@dJlECD^|f_ZQz`hLwGcExzV-`2A`kMJ`e z#4+)fffIXep}BhPR1J_i5V&=9xd<_D2U z+cUZh?`MRQlo?3@TYS08puqpJySwA3>Mp$1z3}DWH8_v*6ia3;X827OIF8`)kUedu5yS;j| z(7-mkt6b(}%&(ggpVu-HjR!2f$+9%Hn3fm=7p^x>02h4A_vbG7O3+Aig!*g9X!(}i zdqLvU;KFSpL?4Kc?OkA1HOid)3hvT=EtuAvnMMBiaUB`KyJFSU-CRxDus*RBRUXT_^rWOKJfm<45WrHcp7#US8-1NG`*W${L~OyaynCOE@>s3ee%66 
z3SM9V*ca%JAi8)>E@1HxvPFW~8 zU26v$5`?5kok@z8v4~bJ1CsLc zQFeYl@W=}SV=z<%>haF}vXyX-ea4_svK3AZlj`Rp38M<`>F>|U&)*m)RaFhVjgAbx zn3wi+bDV`I{dx_mVqQc2*i&|3Iq3qOISO*X3jrtlX9ier?mQy7{&ONr(Kdk9@|E!O zn@x!i3JRi9`NG#dtw5{=sy?|VaQ0?d7**N~nI2%)fWs>Rs-Rv~{kV{&cwu1jDE{W?{-?F@lgms z*LfSB6x$Mv;Go}uK$CWsH8N=C!cKs^JTh(s)zyilX_eLldNf-Y>be)L4sitDM7 zC>s#`wO*tnmaz3J)6 zPBH=ko|h&}e(0{E(nM=}`zuUQA;q6^$W&ooH!`|4jvPLKqNx9x5S8Jsp>cELJQRVW zAav)VJd{s58)vdf3;JTevbSt)Zdy1XA3i21@TdgBK965-la=)msBNjGim@@0fSE`j zO;+&ManRRUR_uRTz}311YW^DeypzafAffi&V%I+|?ixg9_X=m$h7&Z%uu-#fT=`sp z)Ax98C#UH;C*wp;$0T#&SpjtytKBJAPob1Rd}f!sCZbdE3H$k2msh<$K-i+Fb3|^j z2Tt)A3C+^UUByoh=H^84QWi!=t&$Y=7X!n+iPit0DIWp@R;6B79OyhhOnm&5c$W9w zJ0MJ5+8+MyQk16^6chwqrvsy-1u7uD7|){1Fh41@GS%AUb+ddLzd6uC@jalND2!lf&|Yf+k$hX)EFq z5uT7R)gjYRS$VvdQsY{SNW$%qoRd*EVZ2q%8U$(!4J!ggrLj0JXUg}ZavB*gOYE2c(@X4G2n(nZyn*;vlE~jSyB^ofA(~Lq`YfdCULhTr2_4 z(6npjFpP?nN{D@ZWTw(05!jJ+X@uHcK`W}Y^%3xGfu-hlKOHW0*D|}2kyTI6JWr&? z2kmLY95w&M|wu=aeBsLCKYU+VX3zAvzCInLax$oI(PAWLz_3%6`3QXhW)rga4&%j}} z#B6CHFLFlDqt8v`b#@$USp%sUkat(DegADCSMcv;+@t?5W!!@$VjidLU z8qL0lN=oj$7bi4xJ0#y7TWGow7weG^k073PmsOKk*9~4VO5jj*97aVbfu5$%CjF+?OV)+pYEDj$NRLRKiYe%w5Q4!T?51L$g*S~& zPKKl38H{fmgC30rHz8G30*f#?RMY@9)#zpFYchIzJ)=oA@f^d`HFZAe7CR0F`S8f8 zfv~7OG6n{Ny|)d&e!Z|?K}5y*Dw)8<0HDF)Z_CLuHi>8>ni+nBZRI0S%gqNKx72s> zNlVv+fyIi9iP2D3ufA)1-hglh#!9a)u?2h61gEd-$XfO8x#1eA1-HrQ0`n5lGc*O7#}##D>yZpJfxc z{fJm9?|Sh3A{A3WRt5rCx@3&|apwlWyQH3=U-YO7qtf`ugmLc(sOg`AwMZu2p>lHY zz3n%yW3@csafOXpR?!&bAKIM~=17mqj+XZKcIK__~|?U@f2 zcmBDuYEV7lMupzSOV7d3mi*~yf;kADL0>}kz=nf{1`kLO*2PoX->#&UmA}_6Iu}(} zr{7m_gP&2G&AD)kF1>}c-0k}-+!kR$T*xg%9^f4h?BC}%IPH}BqU|N9O$4n(7;P_qo6PEi>~O$`xB$1 zrMfe>K*k0B=@V-^7{BZC)R8UGFANtAT{{rW$ B#U20v diff --git a/ceph/doc/cephfs/cephfs-top.rst b/ceph/doc/cephfs/cephfs-top.rst index e2ca72427..824fb9d06 100644 --- a/ceph/doc/cephfs/cephfs-top.rst +++ b/ceph/doc/cephfs/cephfs-top.rst @@ -29,7 +29,7 @@ metrics are for a particular MDS rank (e.g., number of subtrees handled by an MD Once enabled, Ceph 
Filesystem metrics can be fetched via:: $ ceph fs perf stats - {"version": 1, "global_counters": ["cap_hit", "read_latency", "write_latency", "metadata_latency", "dentry_lease"], "counters": [], "client_metadata": {"client.614146": {"IP": "10.1.1.100", "hostname" : "ceph-host1", "root": "/", "mount_point": "/mnt/cephfs", "valid_metrics": ["cap_hit", "read_latency", "write_latency", "metadata_latency", "dentry_lease"]}}, "global_metrics": {"client.614146": [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]}, "metrics": {"delayed_ranks": [], "mds.0": {"client.614146": []}}} + {"version": 1, "global_counters": ["cap_hit", "read_latency", "write_latency", "metadata_latency", "dentry_lease", "opened_files", "pinned_icaps", "opened_inodes", "avg_read_latency", "stdev_read_latency", "avg_write_latency", "stdev_write_latency", "avg_metadata_latency", "stdev_metadata_latency"], "counters": [], "client_metadata": {"client.324130": {"IP": "192.168.1.100", "hostname": "ceph-host1", "root": "/", "mount_point": "/mnt/cephfs", "valid_metrics": ["cap_hit", "read_latency", "write_latency", "metadata_latency", "dentry_lease, "opened_files", "pinned_icaps", "opened_inodes", "avg_read_latency", "stdev_read_latency", "avg_write_latency", "stdev_write_latency", "avg_metadata_latency", "stdev_metadata_latency"]}}, "global_metrics": {"client.324130": [[309785, 1280], [0, 0], [197, 519015022], [88, 279074768], [12, 70147], [0, 3], [3, 3], [0, 3], [0, 0], [0, 0], [0, 11699223], [0, 88245], [0, 6596951], [0, 9539]]}, "metrics": {"delayed_ranks": [], "mds.0": {"client.324130": []}}} Details of the JSON command output are as follows: diff --git a/ceph/doc/cephfs/fs-volumes.rst b/ceph/doc/cephfs/fs-volumes.rst index eb33b1f93..f52f0589e 100644 --- a/ceph/doc/cephfs/fs-volumes.rst +++ b/ceph/doc/cephfs/fs-volumes.rst @@ -97,18 +97,57 @@ The CephX IDs authorized to need to be reauthorized to on-going operations of the clients using these IDs may be disrupted. 
Mirroring is expected to be disabled on the volume. +Fetch the information of a CephFS volume using:: + + $ ceph fs volume info vol_name + { + "mon_addrs": [ + "192.168.1.7:40977" + ], + "pending_subvolume_deletions": 0, + "pools": { + "data": [ + { + "avail": 106288709632, + "name": "cephfs.vol_name.data", + "used": 4096 + } + ], + "metadata": [ + { + "avail": 106288709632, + "name": "cephfs.vol_name.meta", + "used": 155648 + } + ] + }, + "used_size": 0 + } + +The output format is json and contains fields as follows. + +* pools: Attributes of data and metadata pools + * avail: The amount of free space available in bytes + * used: The amount of storage consumed in bytes + * name: Name of the pool +* mon_addrs: List of monitor addresses +* used_size: Current used size of the CephFS volume in bytes +* pending_subvolume_deletions: Number of subvolumes pending deletion + FS Subvolume groups ------------------- Create a subvolume group using:: - $ ceph fs subvolumegroup create [--pool_layout ] [--uid ] [--gid ] [--mode ] + $ ceph fs subvolumegroup create [--size ] [--pool_layout ] [--uid ] [--gid ] [--mode ] The command succeeds even if the subvolume group already exists. When creating a subvolume group you can specify its data pool layout (see -:doc:`/cephfs/file-layouts`), uid, gid, and file mode in octal numerals. By default, the -subvolume group is created with an octal file mode '755', uid '0', gid '0' and data pool +:doc:`/cephfs/file-layouts`), uid, gid, file mode in octal numerals and +size in bytes. The size of the subvolume group is specified by setting +a quota on it (see :doc:`/cephfs/quota`). By default, the subvolume group +is created with an octal file mode '755', uid '0', gid '0' and data pool layout of its parent directory. @@ -131,6 +170,46 @@ List subvolume groups using:: .. 
note:: Subvolume group snapshot feature is no longer supported in mainline CephFS (existing group snapshots can still be listed and deleted) +Fetch the metadata of a subvolume group using:: + + $ ceph fs subvolumegroup info + +The output format is json and contains fields as follows. + +* atime: access time of subvolume group path in the format "YYYY-MM-DD HH:MM:SS" +* mtime: modification time of subvolume group path in the format "YYYY-MM-DD HH:MM:SS" +* ctime: change time of subvolume group path in the format "YYYY-MM-DD HH:MM:SS" +* uid: uid of subvolume group path +* gid: gid of subvolume group path +* mode: mode of subvolume group path +* mon_addrs: list of monitor addresses +* bytes_pcent: quota used in percentage if quota is set, else displays "undefined" +* bytes_quota: quota size in bytes if quota is set, else displays "infinite" +* bytes_used: current used size of the subvolume group in bytes +* created_at: time of creation of subvolume group in the format "YYYY-MM-DD HH:MM:SS" +* data_pool: data pool the subvolume group belongs to + +Check the presence of any subvolume group using:: + + $ ceph fs subvolumegroup exist + +The strings returned by the 'exist' command: + * "subvolumegroup exists": if any subvolumegroup is present + * "no subvolumegroup exists": if no subvolumegroup is present + +.. note:: It checks for the presence of custom groups and not the default one. To validate the emptiness of the volume, subvolumegroup existence check alone is not sufficient. The subvolume existence also needs to be checked as there might be subvolumes in the default group. + +Resize a subvolume group using:: + + $ ceph fs subvolumegroup resize [--no_shrink] + +The command resizes the subvolume group quota using the size specified by 'new_size'. +The '--no_shrink' flag prevents the subvolume group to shrink below the current used +size of the subvolume group. + +The subvolume group can be resized to an infinite size by passing 'inf' or 'infinite' +as the new_size. 
+ Remove a snapshot of a subvolume group using:: $ ceph fs subvolumegroup snapshot rm [--force] @@ -260,6 +339,14 @@ List subvolumes using:: .. note:: subvolumes that are removed but have snapshots retained, are also listed. +Check the presence of any subvolume using:: + + $ ceph fs subvolume exist [--group_name ] + +The strings returned by the 'exist' command: + * "subvolume exists": if any subvolume of given group_name is present + * "no subvolume exists": if no subvolume of given group_name is present + Set custom metadata on the subvolume as a key-value pair using:: $ ceph fs subvolume metadata set [--group_name ] @@ -312,7 +399,39 @@ The output format is json and contains fields as follows. * created_at: time of creation of snapshot in the format "YYYY-MM-DD HH:MM:SS:ffffff" * data_pool: data pool the snapshot belongs to * has_pending_clones: "yes" if snapshot clone is in progress otherwise "no" -* size: snapshot size in bytes +* pending_clones: list of in progress or pending clones and their target group if exist otherwise this field is not shown +* orphan_clones_count: count of orphan clones if snapshot has orphan clones otherwise this field is not shown + +Sample output if snapshot clones are in progress or pending state:: + + $ ceph fs subvolume snapshot info cephfs subvol snap + { + "created_at": "2022-06-14 13:54:58.618769", + "data_pool": "cephfs.cephfs.data", + "has_pending_clones": "yes", + "pending_clones": [ + { + "name": "clone_1", + "target_group": "target_subvol_group" + }, + { + "name": "clone_2" + }, + { + "name": "clone_3", + "target_group": "target_subvol_group" + } + ] + } + +Sample output if no snapshot clone is in progress or pending state:: + + $ ceph fs subvolume snapshot info cephfs subvol snap + { + "created_at": "2022-06-14 13:54:58.618769", + "data_pool": "cephfs.cephfs.data", + "has_pending_clones": "no" + } Set custom metadata on the snapshot as a key-value pair using:: diff --git a/ceph/doc/cephfs/mds-state-diagram.dot 
b/ceph/doc/cephfs/mds-state-diagram.dot index 3ad192ef1..8c9fa25d0 100644 --- a/ceph/doc/cephfs/mds-state-diagram.dot +++ b/ceph/doc/cephfs/mds-state-diagram.dot @@ -57,6 +57,7 @@ D0 -> N3 [color=red,penwidth=2.0]; // terminal (but not "in") node [shape=polygon,sides=6,color=black,peripheries=1]; D1 [label="down:damaged"] +S2 -> D1 [color=black,penwidth=2.0]; N3 -> D1 [color=black,penwidth=2.0]; N4 -> D1 [color=black,penwidth=2.0]; N5 -> D1 [color=black,penwidth=2.0]; @@ -69,5 +70,6 @@ D1 -> D0 [color=red,penwidth=2.0] node [shape=polygon,sides=6,color=purple,peripheries=1]; D3 [label="down:stopped"] S3 -> D3 [color=purple,penwidth=2.0]; +N6 -> D3 [color=purple,penwidth=2.0]; } diff --git a/ceph/doc/cephfs/quota.rst b/ceph/doc/cephfs/quota.rst index 951982d16..777298f91 100644 --- a/ceph/doc/cephfs/quota.rst +++ b/ceph/doc/cephfs/quota.rst @@ -41,6 +41,18 @@ Limitations the directory the client is restricted too (e.g., ``/home/user``) or something nested beneath it. + In case of a kernel client, it needs to have access to the parent + of the directory inode on which quotas are configured in order to + enforce them. If quota is configured on a directory path + (e.g., ``/home/volumes/group``), the kclient needs to have access + to the parent (e.g., ``/home/volumes``). + + An example command to create such an user is as below:: + + $ ceph auth get-or-create client.guest mds 'allow r path=/home/volumes, allow rw path=/home/volumes/group' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*' mon 'allow r' + + See also: https://tracker.ceph.com/issues/55090 + #. *Snapshot file data which has since been deleted or changed does not count towards the quota.* See also: http://tracker.ceph.com/issues/24284 diff --git a/ceph/doc/dev/continuous-integration.rst b/ceph/doc/dev/continuous-integration.rst index cfa44b60e..70b6da4bd 100644 --- a/ceph/doc/dev/continuous-integration.rst +++ b/ceph/doc/dev/continuous-integration.rst @@ -154,15 +154,26 @@ libboost packages `boost`_. 
The packages' names are changed from ``libboost-*`` to ``ceph-libboost-*``, and they are instead installed into ``/opt/ceph``, so they don't interfere with the official ``libboost`` packages shipped by - distro. Its build scripts are hosted at https://github.com/tchaikov/ceph-boost. + distro. Its build scripts are hosted at https://github.com/ceph/ceph-boost. + See https://github.com/ceph/ceph-boost/commit/2a8ae02932b2a1fd6a68072da8ca0df2b99b805c + for an example of how to bump the version number. The commands used to + build 1.79 on a vanilla Ubuntu Focal OS are below. .. prompt:: bash $ - tar xjf boost_1_76_0.tar.bz2 + sudo apt install debhelper dctrl-tools chrpath libbz2-dev libicu-dev bison \ + flex docbook-to-man help2man xsltproc doxygen dh-python python3-all-dev graphviz + wget http://download.ceph.com/qa/boost_1_79_0.tar.bz2 git clone https://github.com/ceph/ceph-boost - cp -ra ceph-boost/debian boost_1_76_0/ + tar xjf boost_1_79_0.tar.bz2 + cp -ra ceph-boost/debian boost_1_79_0/ + pushd boost_1_79_0 export DEB_BUILD_OPTIONS='parallel=6 nodoc' dpkg-buildpackage -us -uc -b + popd + BOOST_SHA=$(git ls-remote https://github.com/ceph/ceph-boost main | awk '{ print $1 }') + ls *.deb | chacractl binary create \ + libboost/master/$BOOST_SHA/ubuntu/focal/amd64/flavors/default libzbd packages `libzbd`_ . The upstream libzbd includes debian packaging already. diff --git a/ceph/doc/dev/crimson/crimson.rst b/ceph/doc/dev/crimson/crimson.rst index 8276bea34..89adc3429 100644 --- a/ceph/doc/dev/crimson/crimson.rst +++ b/ceph/doc/dev/crimson/crimson.rst @@ -24,6 +24,45 @@ cloned using git. .. _ASan: https://github.com/google/sanitizers/wiki/AddressSanitizer +Installing Crimson with ready-to-use images +=========================================== + +An alternative to building Crimson from source is to use container images built +by Ceph CI/CD and deploy them with one of the orchestrators: ``cephadm`` or ``Rook``. +In this chapter documents the ``cephadm`` way. 
+ +NOTE: We know that this procedure is suboptimal, but it has passed internal +external quality assurance.:: + + + $ curl -L https://raw.githubusercontent.com/ceph/ceph-ci/wip-bharat-crimson/src/cephadm/cephadm -o cephadm + $ cp cephadm /usr/sbin + $ vi /usr/sbin/cephadm + +In the file change ``DEFAULT_IMAGE = 'quay.ceph.io/ceph-ci/ceph:master'`` +to ``DEFAULT_IMAGE = 'quay.ceph.io/ceph-ci/ceph:-crimson`` where ```` +is the commit ID built by the Ceph CI/CD. You may use +https://shaman.ceph.com/builds/ceph/ to monitor branches built by Ceph's Jenkins +and to also discover those IDs. + +An example:: + + DEFAULT_IMAGE = 'quay.ceph.io/ceph-ci/ceph:1647216bf4ebac6bcf5ad7739e02b38569736cfd-crimson + +When the edition is finished:: + + chmod 777 cephadm + podman pull quay.ceph.io/ceph-ci/ceph:-crimson + cephadm bootstrap --mon-ip 10.1.172.208 --allow-fqdn-hostname + # Set "PermitRootLogin yes" for other nodes you want to use + echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config + systemctl restart sshd + + ssh-copy-id -f -i /etc/ceph/ceph.pub root@ + cephadm shell + ceph orch host add + ceph orch apply osd --all-available-devices + Running Crimson =============== diff --git a/ceph/doc/dev/delayed-delete.rst b/ceph/doc/dev/delayed-delete.rst index bf5f65a46..31f3e6b97 100644 --- a/ceph/doc/dev/delayed-delete.rst +++ b/ceph/doc/dev/delayed-delete.rst @@ -2,11 +2,12 @@ CephFS delayed deletion ========================= -When you delete a file, the data is not immediately removed. Each -object in the file needs to be removed independently, and sending -``size_of_file / stripe_size * replication_count`` messages would slow -the client down too much, and use a too much of the clients -bandwidth. Additionally, snapshots may mean some objects should not be -deleted. +The deletion of a file does not immediately remove its data. Each of the file's +underlying objects must be removed independently. 
If these objects were removed +immediately, the client would have to send ``size_of_file / stripe_size * +replication_count`` messages. This would consume significant bandwith and would +slow the client unacceptably. If snapshots exist, their existence can prevent +the deletion of objects associated with them. -Instead, the file is marked as deleted on the MDS, and deleted lazily. +In these cases, such files are (1) marked as deleted on the MDS and (2) deleted +lazily. diff --git a/ceph/doc/dev/developer_guide/basic-workflow.rst b/ceph/doc/dev/developer_guide/basic-workflow.rst index 6bd98ebb4..eddc2fe7b 100644 --- a/ceph/doc/dev/developer_guide/basic-workflow.rst +++ b/ceph/doc/dev/developer_guide/basic-workflow.rst @@ -1,3 +1,5 @@ +.. _basic workflow dev guide: + Basic Workflow ============== @@ -8,7 +10,7 @@ The following chart illustrates the basic Ceph development workflow: Upstream Code Your Local Environment /----------\ git clone /-------------\ - | Ceph | -------------------------> | ceph/master | + | Ceph | -------------------------> | ceph/main | \----------/ \-------------/ ^ | | | git branch fix_1 @@ -76,7 +78,7 @@ The procedure for making changes to the Ceph repository is as follows: #. Fix the bug - #. :ref:`Synchronize local master with upstream master`. + #. :ref:`Synchronize local main with upstream main`. #. :ref:`Create a bugfix branch` in your local working copy. @@ -142,6 +144,10 @@ to work on the Ceph repository.". 1. Configure your local git environment with your name and email address. + .. note:: + These commands will work only from within the ``ceph/`` directory + that was created when you cloned your fork. + .. prompt:: bash $ git config user.name "FIRST_NAME LAST_NAME" @@ -163,23 +169,23 @@ Fixing the Bug .. 
_synchronizing: -Synchronizing Local Master with Upstream Master -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Synchronizing Local Main with Upstream Main +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In your local git environment, there is a copy of the ``master`` branch in -``remotes/origin/master``. This is called "local master". This copy of the -master branch (https://github.com/your_github_id/ceph.git) is "frozen in time" +In your local git environment, there is a copy of the ``main`` branch in +``remotes/origin/main``. This is called "local main". This copy of the +main branch (https://github.com/your_github_id/ceph.git) is "frozen in time" at the moment that you cloned it, but the upstream repo (https://github.com/ceph/ceph.git, typically abbreviated to ``ceph/ceph.git``) that it was forked from is not frozen in time: the upstream repo is still being updated by other contributors. -Because upstream master is continually receiving updates from other +Because upstream main is continually receiving updates from other contributors, your fork will drift farther and farther from the state of the upstream repo when you cloned it. -You must keep your fork's master branch synchronized with upstream master in -order to reduce drift between your fork's master branch and the upstream master +You must keep your fork's main branch synchronized with upstream main in +order to reduce drift between your fork's main branch and the upstream main branch. Here are the commands for keeping your fork synchronized with the @@ -188,12 +194,12 @@ upstream repository: .. prompt:: bash $ git fetch ceph - git checkout master - git reset --hard ceph/master - git push -u origin master + git checkout main + git reset --hard ceph/main + git push -u origin main -This procedure should be followed often, in order to keep your local ``master`` -in sync with upstream ``master``. +This procedure should be followed often, in order to keep your local ``main`` +in sync with upstream ``main``. 
.. _bugfix_branch: @@ -204,12 +210,12 @@ Create a branch for your bugfix: .. prompt:: bash $ - git checkout master + git checkout main git checkout -b fix_1 git push -u origin fix_1 -The first command (git checkout master) makes sure that the bugfix branch -"fix_1" is created from the most recent state of the master branch of the +The first command (git checkout main) makes sure that the bugfix branch +"fix_1" is created from the most recent state of the main branch of the upstream repository. The second command (git checkout -b fix_1) creates a "bugfix branch" called @@ -319,7 +325,7 @@ the `Git Commit Good Practice`_ article at the `OpenStack Project Wiki`_. .. _`OpenStack Project Wiki`: https://wiki.openstack.org/wiki/Main_Page See also our own `Submitting Patches -`_ document. +`_ document. After your pull request (PR) has been opened, update the :ref:`issue-tracker` by adding a comment directing other contributors to your PR. The comment can be @@ -388,7 +394,7 @@ see how it behaves on real clusters running on physical or virtual hardware. Tests designed for this purpose live in the `ceph/qa sub-directory`_ and are run via the `teuthology framework`_. -.. _`ceph/qa sub-directory`: https://github.com/ceph/ceph/tree/master/qa/ +.. _`ceph/qa sub-directory`: https://github.com/ceph/ceph/tree/main/qa/ .. _`teuthology repository`: https://github.com/ceph/teuthology .. _`teuthology framework`: https://github.com/ceph/teuthology @@ -437,7 +443,7 @@ will need to force push your branch with: git push --force origin fix_1 Why do we take these extra steps instead of simply adding additional commits -the the PR? It is best practice for a PR to consist of a single commit; this +the PR? It is best practice for a PR to consist of a single commit; this makes for clean history, eases peer review of your changes, and facilitates merges. In rare circumstances it also makes it easier to cleanly revert changes. 
@@ -496,7 +502,7 @@ Another method of generating merge commits involves using Patrick Donnelly's **/ceph/src/script/ptl-tool.py**. Merge commits that have been generated by the **ptl-tool** have the following form:: - Merge PR #36257 into master + Merge PR #36257 into main * refs/pull/36257/head: client: move client_lock to _unmount() client: add timer_lock support diff --git a/ceph/doc/dev/developer_guide/essentials.rst b/ceph/doc/dev/developer_guide/essentials.rst index c265e655a..91ae42e9c 100644 --- a/ceph/doc/dev/developer_guide/essentials.rst +++ b/ceph/doc/dev/developer_guide/essentials.rst @@ -159,6 +159,10 @@ register an account. Registering gives you a unique IRC identity and allows you to access channels where unregistered users have been locked out for technical reasons. +See ``the official OFTC (Open and Free Technology Community) documentation's +registration instructions +`` to learn how to +register your IRC account. Channels ~~~~~~~~ @@ -187,7 +191,7 @@ file `CONTRIBUTING.rst`_ in the top-level directory of the source-code tree. There may be some overlap between this guide and that file. .. _`CONTRIBUTING.rst`: - https://github.com/ceph/ceph/blob/master/CONTRIBUTING.rst + https://github.com/ceph/ceph/blob/main/CONTRIBUTING.rst All newcomers are encouraged to read that file carefully. @@ -289,7 +293,7 @@ See :ref:`kubernetes-dev` Backporting ----------- -All bugfixes should be merged to the ``master`` branch before being +All bugfixes should be merged to the ``main`` branch before being backported. To flag a bugfix for backporting, make sure it has a `tracker issue`_ associated with it and set the ``Backport`` field to a comma-separated list of previous releases (e.g. "hammer,jewel") that you think @@ -300,6 +304,36 @@ The rest (including the actual backporting) will be taken care of by the .. _`tracker issue`: http://tracker.ceph.com/ .. 
_`Stable Releases and Backports`: http://tracker.ceph.com/projects/ceph-releases/wiki +Dependabot +---------- + +Dependabot is a GitHub bot that scans the dependencies in the repositories for +security vulnerabilities (CVEs). If a fix is available for a discovered CVE, +Dependabot creates a pull request to update the dependency. + +Dependabot also indicates the compatibility score of the upgrade. This score is +based on the number of CI failures that occur in other GitHub repositories +where the fix was applied. + +With some configuration, Dependabot can perform non-security updates (for +example, it can upgrade to the latest minor version or patch version). + +Dependabot supports `several languages and package managers +`_. +As of July 2022, the Ceph project receives alerts only from pip (based on the +`requirements.txt` files) and npm (`package*.json`). It is possible to extend +these alerts to git submodules, Golang, and Java. As of July 2022, there is no +support for C++ package managers such as vcpkg, conan, C++20 modules. + +Many of the dependencies discovered by Dependabot will best be updated +elsewhere than the Ceph Github repository (distribution packages, for example, +will be a better place to update some of the dependencies). Nonetheless, the +list of new and existing vulnerabilities generated by Dependabot will be +useful. + +`Here is an example of a Dependabot pull request. +`_ + Guidance for use of cluster log ------------------------------- diff --git a/ceph/doc/dev/developer_guide/merging.rst b/ceph/doc/dev/developer_guide/merging.rst index 076e5b62a..36e10fc84 100644 --- a/ceph/doc/dev/developer_guide/merging.rst +++ b/ceph/doc/dev/developer_guide/merging.rst @@ -18,10 +18,10 @@ What ? Where ? ^^^^^^^ -Features are merged to the *master* branch. Bug fixes should be merged to the +Features are merged to the *main* branch. Bug fixes should be merged to the corresponding named branch (e.g. *nautilus* for 14.0.z, *pacific* for 16.0.z, etc.). 
However, this is not mandatory - bug fixes and documentation -enhancements can be merged to the *master* branch as well, since the *master* +enhancements can be merged to the *main* branch as well, since the *main* branch is itself occasionally merged to the named branch during the development releases phase. In either case, if a bug fix is important it can also be flagged for backport to one or more previous stable releases. @@ -32,16 +32,16 @@ When ? After each stable release, candidate branches for previous releases enter phase 2 (see below). For example: the *jewel* named branch was created when the *infernalis* release candidates entered phase 2. From this point on, -*master* was no longer associated with *infernalis*. After he named branch of -the next stable release is created, *master* will be occasionally merged into +*main* was no longer associated with *infernalis*. After he named branch of +the next stable release is created, *main* will be occasionally merged into it. Branch merges ^^^^^^^^^^^^^ -* The latest stable release branch is merged periodically into master. -* The master branch is merged periodically into the branch of the stable release. -* The master is merged into the stable release branch +* The latest stable release branch is merged periodically into main. +* The main branch is merged periodically into the branch of the stable release. +* The main is merged into the stable release branch immediately after each development (x.0.z) release. Stable release candidates (i.e. x.1.z) phase 1 @@ -56,12 +56,12 @@ Where ? ^^^^^^^ The stable release branch (e.g. *jewel* for 10.0.z, *luminous* -for 12.0.z, etc.) or *master*. Bug fixes should be merged to the named +for 12.0.z, etc.) or *main*. Bug fixes should be merged to the named branch corresponding to the stable release candidate (e.g. *jewel* for -10.1.z) or to *master*. During this phase, all commits to *master* will be +10.1.z) or to *main*. 
During this phase, all commits to *main* will be merged to the named branch, and vice versa. In other words, it makes no difference whether a commit is merged to the named branch or to -*master* - it will make it into the next release candidate either way. +*main* - it will make it into the next release candidate either way. When ? ^^^^^^ @@ -72,9 +72,9 @@ x.1.0 tag is set in the release branch. Branch merges ^^^^^^^^^^^^^ -* The stable release branch is merged periodically into *master*. -* The *master* branch is merged periodically into the stable release branch. -* The *master* branch is merged into the stable release branch +* The stable release branch is merged periodically into *main*. +* The *main* branch is merged periodically into the stable release branch. +* The *main* branch is merged into the stable release branch immediately after each x.1.z release candidate. Stable release candidates (i.e. x.1.z) phase 2 @@ -90,7 +90,7 @@ Where ? The stable release branch (e.g. *mimic* for 13.0.z, *octopus* for 15.0.z ,etc.). During this phase, all commits to the named branch will be merged into -*master*. Cherry-picking to the named branch during release candidate phase 2 +*main*. Cherry-picking to the named branch during release candidate phase 2 is performed manually since the official backporting process begins only when the release is pronounced "stable". @@ -102,7 +102,7 @@ After Sage Weil announces that it is time for phase 2 to happen. Branch merges ^^^^^^^^^^^^^ -* The stable release branch is occasionally merged into master. +* The stable release branch is occasionally merged into main. Stable releases (i.e. x.2.z) ---------------------------- @@ -112,8 +112,8 @@ What ? 
* Bug fixes * Features are sometime accepted -* Commits should be cherry-picked from *master* when possible -* Commits that are not cherry-picked from *master* must pertain to a bug unique to +* Commits should be cherry-picked from *main* when possible +* Commits that are not cherry-picked from *main* must pertain to a bug unique to the stable release * See also the `backport HOWTO`_ document diff --git a/ceph/doc/dev/osd_internals/mclock_wpq_cmp_study.rst b/ceph/doc/dev/osd_internals/mclock_wpq_cmp_study.rst index eb2cfe355..88e350c86 100644 --- a/ceph/doc/dev/osd_internals/mclock_wpq_cmp_study.rst +++ b/ceph/doc/dev/osd_internals/mclock_wpq_cmp_study.rst @@ -164,7 +164,7 @@ Ceph. This profile is enabled by default. +------------------------+-------------+--------+-------+ | background recovery | 25% | 1 | 100% | +------------------------+-------------+--------+-------+ -| background best effort | 25% | 1 | MAX | +| background best effort | 25% | 2 | MAX | +------------------------+-------------+--------+-------+ balanced @@ -182,7 +182,7 @@ no competing services. +------------------------+-------------+--------+-------+ | background recovery | 40% | 1 | 150% | +------------------------+-------------+--------+-------+ -| background best effort | 20% | 1 | MAX | +| background best effort | 20% | 2 | MAX | +------------------------+-------------+--------+-------+ high_recovery_ops @@ -200,7 +200,7 @@ recoveries during non-peak hours. 
+------------------------+-------------+--------+-------+ | background recovery | 60% | 2 | 200% | +------------------------+-------------+--------+-------+ -| background best effort | 1 (MIN) | 1 | MAX | +| background best effort | 1 (MIN) | 2 | MAX | +------------------------+-------------+--------+-------+ custom diff --git a/ceph/doc/dev/release-process.rst b/ceph/doc/dev/release-process.rst index 5a1a316af..3750759b8 100644 --- a/ceph/doc/dev/release-process.rst +++ b/ceph/doc/dev/release-process.rst @@ -2,172 +2,224 @@ Ceph Release Process ====================== -1. Build environment -==================== +Prerequisites +============= -There are multiple build environments, debian based packages are built via pbuilder for multiple distributions. The build hosts are listed in the ``deb_hosts`` file, and the list of distributions are in ``deb_dist``. All distributions are build on each of the build hosts. Currently there is 1 64 bit and 1 32 bit build host. +Signing Machine +--------------- +The signing machine is a virtual machine in the `Sepia lab +`_. SSH access to the signing +machine is limited to the usual Infrastructure Admins along with a few other +component leads (e.g., nfs-ganesha, ceph-iscsi). -The RPM based packages are built natively, so one distribution per build host. The list of hosts is found in ``rpm_hosts``. +The ``ubuntu`` user on the machine has some `build scripts `_ that help with pulling, pushing, and signing packages. -Prior to building, it's necessary to update the pbuilder seed tarballs:: +The GPG signing key permanently lives on a `Nitrokey Pro `_ and is passed through to the VM via RHV. This helps to ensure that the key cannot be exported or leave the datacenter in any way. - ./update_all_pbuilders.sh +New Major Releases +------------------ +For each new major (alphabetical) release, you must create one ``ceph-release`` RPM for each RPM repo (e.g., one for el8 and one for el9). 
`chacra `_ is a python service we use to store DEB and RPM repos. The chacra repos are configured to include this ceph-release RPM, but it must be built separately. You must make sure that chacra is properly configured to include this RPM for each particular release. -2. Setup keyring for signing packages -===================================== +1. Update chacra so it is aware of the new Ceph release. See `this PR `_ for an example. +2. Redeploy chacra (e.g., ``ansible-playbook chacra.ceph.com.yml``) +3. Run https://jenkins.ceph.com/view/all/job/ceph-release-rpm/ -:: +Summarized build process +======================== - export GNUPGHOME= +1. QE finishes testing and finds a stopping point. That commit is pushed to the ``$release-release`` branch in ceph.git (e.g., ``quincy-release``). This allows work to continue in the working ``$release`` branch without having to freeze it during the release process. +2. The Ceph Council approves and notifies the "Build Lead". +3. The "Build Lead" starts the `Jenkins multijob `_, which triggers all builds. +4. Packages are pushed to chacra.ceph.com. +5. Packages are pulled from chacra.ceph.com to the Signer VM. +6. Packages are signed. +7. Packages are pushed to download.ceph.com. +8. Release containers are built and pushed to quay.io. - # verify it's accessible - gpg --list-keys +Hotfix Release Process Deviation +-------------------------------- -The release key should be present:: +A hotfix release has a couple differences. - pub 4096R/17ED316D 2012-05-20 - uid Ceph Release Key +1. Check out the most recent tag. For example, if we're releasing a hotfix on top of 17.2.3, ``git checkout -f -B quincy-release origin/v17.2.3`` +2. ``git cherry-pick -x`` the necessary hotfix commits +3. ``git push -f origin quincy-release`` +4. Notify the "Build Lead" to start the build. +5. The "Build Lead" should set ``RELEASE_TYPE=HOTFIX`` instead of ``STABLE``. +Security Release Process Deviation +---------------------------------- -3. 
Set up build area -==================== +A security/CVE release is similar to a hotfix release with two differences: -Clone the ceph and ceph-build source trees:: + 1. The fix should be pushed to the `ceph-private `_ repo instead of ceph.git (requires GitHub Admin Role). + 2. The tags (e.g., v17.2.4) must be manually pushed to ceph.git by the "Build Lead." - git clone http://github.com/ceph/ceph.git - git clone http://github.com/ceph/ceph-build.git +1. Check out the most recent tag. For example, if we're releasing a security fix on top of 17.2.3, ``git checkout -f -B quincy-release origin/v17.2.3`` +2. ``git cherry-pick -x`` the necessary security fix commits +3. ``git remote add security git@github.com:ceph/ceph-private.git`` +4. ``git push -f security quincy-release`` +5. Notify the "Build Lead" to start the build. +6. The "Build Lead" should set ``RELEASE_TYPE=SECURITY`` instead of ``STABLE``. +7. Finally, the `ceph-tag `_ steps need to be manually run by the "Build Lead" as close to the Announcement time as possible:: -In the ceph source directory, checkout next branch (for point releases use the {codename} branch):: + # Example using quincy pretending 17.2.4 is the security release version + # Add the ceph-releases repo (also requires GitHub Admin Role). The `ceph-setup `_ job will have already created and pushed the tag to ceph-releases.git. + git remote add releases git@github.com:ceph/ceph-releases.git + git fetch --all + # Check out the version commit + git checkout -f -B quincy-release releases/quincy-release + git push -f origin quincy-release + git push origin v17.2.4 + # Now create a Pull Request of quincy-release targeting quincy to merge the version commit and security fixes back into the quincy branch - git checkout next +1. 
Preparing the release branch +=============================== -Checkout the submodules:: +Once QE has determined a stopping point in the working (e.g., ``quincy``) branch, that commit should be pushed to the corresponding ``quincy-release`` branch. - git submodule update --force --init --recursive +Notify the "Build Lead" that the release branch is ready. -4. Update Build version numbers -================================ - -Substitute the ceph release number where indicated below by the string ``0.xx``. - -Edit configure.ac and update the version number. Example diff:: - - -AC_INIT([ceph], [0.54], [ceph-devel@vger.kernel.org]) - +AC_INIT([ceph], [0.55], [ceph-devel@vger.kernel.org]) - -Update the version number in the debian change log:: - - DEBEMAIL user@host dch -v 0.xx-1 - -Commit the changes:: - - git commit -a - -Tag the release:: - - ../ceph-build/tag-release v0.xx - - -5. Create Makefiles -=================== - -The actual configure options used to build packages are in the -``ceph.spec.in`` and ``debian/rules`` files. At this point we just -need to create a Makefile.:: - - ./do_autogen.sh - - -6. Run the release scripts -========================== - -This creates tarballs and copies them, with other needed files to -the build hosts listed in deb_hosts and rpm_hosts, runs a local build -script, then rsyncs the results back to the specified release directory.:: - - ../ceph-build/do_release.sh /tmp/release - - -7. Create RPM Repo -================== - -Copy the rpms to the destination repo:: - - mkdir /tmp/rpm-repo - ../ceph-build/push_to_rpm_repo.sh /tmp/release /tmp/rpm-repo 0.xx - -Next add any additional rpms to the repo that are needed such as leveldb. -See RPM Backports section +2. Starting the build +===================== -Finally, sign the rpms and build the repo indexes:: +We'll use a stable/regular 15.2.17 release of Octopus as an example throughout this document. - ../ceph-build/sign_and_index_rpm_repo.sh /tmp/release /tmp/rpm-repo 0.xx +1. 
Browse to https://jenkins.ceph.com/view/all/job/ceph/build?delay=0sec +2. Log in with GitHub OAuth +3. Set the parameters as necessary:: + BRANCH=octopus + TAG=checked + VERSION=15.2.17 + RELEASE_TYPE=STABLE + ARCHS=x86_64 arm64 -8. Create Debian repo -===================== +4. Use https://docs.ceph.com/en/latest/start/os-recommendations/?highlight=debian#platforms to determine the ``DISTROS`` parameter. For example, -The key-id used below is the id of the ceph release key from step 2:: + +-------------------+-------------------------------------------+ + | Release | Distro Codemap | + +===================+===========================================+ + | octopus (15.X.X) | ``focal bionic centos7 centos8 buster`` | + +-------------------+-------------------------------------------+ + | pacific (16.X.X) | ``focal bionic centos8 buster bullseye`` | + +-------------------+-------------------------------------------+ + | quincy (17.X.X) | ``focal centos8 centos9 bullseye`` | + +-------------------+-------------------------------------------+ - mkdir /tmp/debian-repo - ../ceph-build/gen_reprepro_conf.sh /tmp/debian-repo key-id - ../ceph-build/push_to_deb_repo.sh /tmp/release /tmp/debian-repo 0.xx main +5. Click ``Build``. +3. Release Notes +================ -Next add any addition debian packages that are needed such as leveldb. -See the Debian Backports section below. +Packages take hours to build. Use those hours to create the Release Notes and Announcements: -Debian packages are signed when added to the repo, so no further action is -needed. +1. ceph.git Release Notes (e.g., `v15.2.17's ceph.git (docs.ceph.com) PR `_) +2. ceph.io Release Notes (e.g., `v15.2.17's ceph.io.git (www.ceph.io) PR `_) +3. E-mail announcement +See `the Ceph Tracker wiki page that explains how to write the release notes `_. -9. Push repos to ceph.org -========================== +4. Signing and Publishing the Build +=================================== -For a development release:: +#. 
Obtain the sha1 of the version commit from the `build job `_ or the ``sha1`` file created by the `ceph-setup `_ job. - rcp ceph-0.xx.tar.bz2 ceph-0.xx.tar.gz \ - ceph_site@ceph.com:ceph.com/downloads/. - rsync -av /tmp/rpm-repo/0.xx/ ceph_site@ceph.com:ceph.com/rpm-testing - rsync -av /tmp/debian-repo/ ceph_site@ceph.com:ceph.com/debian-testing +#. Download the packages from chacra.ceph.com to the signing virtual machine. These packages get downloaded to ``/opt/repos`` where the `Sepia Lab Long Running (Ceph) Cluster `_ is mounted. -For a stable release, replace {CODENAME} with the release codename (e.g., ``argonaut`` or ``bobtail``):: + .. prompt:: bash $ - rcp ceph-0.xx.tar.bz2 \ - ceph_site@ceph.com:ceph.com/downloads/ceph-0.xx.tar.bz2 - rcp ceph-0.xx.tar.gz \ - ceph_site@ceph.com:ceph.com/downloads/ceph-0.xx.tar.gz - rsync -av /tmp/rpm-repo/0.xx/ ceph_site@ceph.com:ceph.com/rpm-{CODENAME} - rsync -auv /tmp/debian-repo/ ceph_site@ceph.com:ceph.com/debian-{CODENAME} + ssh ubuntu@signer.front.sepia.ceph.com + sync-pull ceph [pacific|quincy|etc] + + Example:: + + $ sync-pull ceph octopus 8a82819d84cf884bd39c17e3236e0632ac146dc4 + sync for: ceph octopus + ******************************************** + Found the most packages (332) in ubuntu/bionic. + No JSON object could be decoded + No JSON object could be decoded + ubuntu@chacra.ceph.com:/opt/repos/ceph/octopus/8a82819d84cf884bd39c17e3236e0632ac146dc4/ubuntu/bionic/flavors/default/* /opt/repos/ceph/octopus-15.2.17/debian/jessie/ + -------------------------------------------- + receiving incremental file list + db/ + db/checksums.db + 180.22K 100% 2.23MB/s 0:00:00 (xfr#1, to-chk=463/467) + db/contents.cache.db + 507.90K 100% 1.95MB/s 0:00:00 (xfr#2, to-chk=462/467) + db/packages.db + + etc... + +#. Sign the DEBs: + + .. 
prompt:: bash + + merfi gpg /opt/repos/ceph/octopus-15.2.17/debian + + Example:: + + $ merfi gpg /opt/repos/ceph/octopus-15.2.17/debian + --> Starting path collection, looking for files to sign + --> 18 matching paths found + --> will sign with the following commands: + --> gpg --batch --yes --armor --detach-sig --output Release.gpg Release + --> gpg --batch --yes --clearsign --output InRelease Release + --> signing: /opt/repos/ceph/octopus-15.2.17/debian/jessie/dists/bionic/Release + --> Running command: gpg --batch --yes --armor --detach-sig --output Release.gpg Release + --> Running command: gpg --batch --yes --clearsign --output InRelease Release + --> signing: /opt/repos/ceph/octopus-15.2.17/debian/jessie/dists/focal/Release + --> Running command: gpg --batch --yes --armor --detach-sig --output Release.gpg Release + --> Running command: gpg --batch --yes --clearsign --output InRelease Release + + etc... + +#. Sign the RPMs: + + .. prompt:: bash + + sign-rpms octopus + + Example:: + + $ sign-rpms octopus + Checking packages in: /opt/repos/ceph/octopus-15.2.17/centos/7 + signing: /opt/repos/ceph/octopus-15.2.17/centos/7/SRPMS/ceph-release-1-1.el7.src.rpm + /opt/repos/ceph/octopus-15.2.17/centos/7/SRPMS/ceph-release-1-1.el7.src.rpm: + signing: /opt/repos/ceph/octopus-15.2.17/centos/7/SRPMS/ceph-15.2.17-0.el7.src.rpm + /opt/repos/ceph/octopus-15.2.17/centos/7/SRPMS/ceph-15.2.17-0.el7.src.rpm: + signing: /opt/repos/ceph/octopus-15.2.17/centos/7/noarch/ceph-mgr-modules-core-15.2.17-0.el7.noarch.rpm + + etc... + +5. Publish the packages to download.ceph.com: + + .. prompt:: bash $ + + sync-push octopus + +5. Build Containers +=================== -10. Update Git -============== +Start the following two jobs: -Point release -------------- +#. https://2.jenkins.ceph.com/job/ceph-container-build-ceph-base-push-imgs/ +#. 
https://2.jenkins.ceph.com/job/ceph-container-build-ceph-base-push-imgs-arm64/ -For point releases just push the version number update to the -branch and the new tag:: +6. Announce the Release +======================= - git push origin {codename} - git push origin v0.xx +Version Commit PR +----------------- -Development and Stable releases -------------------------------- +The `ceph-tag Jenkins job `_ creates a Pull Request in ceph.git that targets the release branch. -For a development release, update tags for ``ceph.git``:: +If this was a regular release (not a hotfix release or a security release), the only commit in that Pull Request should be the version commit. For example, see `v15.2.17's version commit PR `_. - git push origin v0.xx - git push origin HEAD:last - git checkout master - git merge next - git push origin master - git push origin HEAD:next +Request a review and then merge the Pull Request. -Similarly, for a development release, for both ``teuthology.git`` and ``ceph-qa-suite.git``:: +Announcing +---------- - git checkout master - git reset --hard origin/master - git branch -f last origin/next - git push -f origin last - git push -f origin master:next +Publish the Release Notes on ceph.io before announcing the release by email, because the e-mail announcement references the ceph.io blog post. diff --git a/ceph/doc/index.rst b/ceph/doc/index.rst index cca7031cb..b9323c452 100644 --- a/ceph/doc/index.rst +++ b/ceph/doc/index.rst @@ -5,6 +5,13 @@ Ceph uniquely delivers **object, block, and file storage in one unified system**. +.. warning:: + + :ref:`If this is your first time using Ceph, read the "Basic Workflow" + page in the Ceph Developer Guide to learn how to contribute to the + Ceph project. (Click anywhere in this paragraph to read the "Basic + Workflow" page of the Ceph Developer Guide.) `. + .. container:: columns-3 .. 
container:: column diff --git a/ceph/doc/man/8/cephadm.rst b/ceph/doc/man/8/cephadm.rst index ebcc3a6a2..b34d5dc52 100644 --- a/ceph/doc/man/8/cephadm.rst +++ b/ceph/doc/man/8/cephadm.rst @@ -194,6 +194,17 @@ Arguments: * [--skip-firewalld] Do not configure firewalld * [--skip-pull] do not pull the latest image before adopting +Configuration: + +When starting the shell, cephadm looks for configuration in the following order. +Only the first values found are used: + +1. An explicit, user provided path to a config file (``-c/--config`` option) +2. Config file for daemon specified with ``--name`` parameter (``/var/lib/ceph///config``) +3. ``/var/lib/ceph//config/ceph.conf`` if it exists +4. The config file for a ``mon`` daemon (``/var/lib/ceph//mon./config``) if it exists +5. Finally: fallback to the default file ``/etc/ceph/ceph.conf`` + bootstrap --------- diff --git a/ceph/doc/man/8/cephfs-top.rst b/ceph/doc/man/8/cephfs-top.rst index 654633c75..c3719cd36 100644 --- a/ceph/doc/man/8/cephfs-top.rst +++ b/ceph/doc/man/8/cephfs-top.rst @@ -43,18 +43,6 @@ Descriptions of fields cap hit rate -.. describe:: rlat - - read latency - -.. describe:: wlat - - write latency - -.. describe:: mlat - - metadata latency - .. describe:: dlease dentry lease rate @@ -95,6 +83,29 @@ Descriptions of fields speed of write IOs compared with the last refresh +.. describe:: rlatavg + + average read latency + +.. describe:: rlatsd + + standard deviation (variance) for read latency + +.. describe:: wlatavg + + average write latency + +.. describe:: wlatsd + + standard deviation (variance) for write latency + +.. describe:: mlatavg + + average metadata latency + +.. 
describe:: mlatsd + + standard deviation (variance) for metadata latency Availability ============ diff --git a/ceph/doc/man/8/rbd.rst b/ceph/doc/man/8/rbd.rst index 27b176e1b..28ab9dcf9 100644 --- a/ceph/doc/man/8/rbd.rst +++ b/ceph/doc/man/8/rbd.rst @@ -830,7 +830,8 @@ Per mapping (block device) `rbd device map` options: * alloc_size - Minimum allocation unit of the underlying OSD object store backend (since 5.1, default is 64K bytes). This is used to round off and drop discards that are too small. For bluestore, the recommended setting is - bluestore_min_alloc_size (typically 64K for hard disk drives and 16K for + bluestore_min_alloc_size (currently set to 4K for all types of drives, + previously used to be set to 64K for hard disk drives and 16K for solid-state drives). For filestore with filestore_punch_hole = false, the recommended setting is image object size (typically 4M). diff --git a/ceph/doc/mgr/dashboard.rst b/ceph/doc/mgr/dashboard.rst index 5bfcced4c..3f3e92520 100644 --- a/ceph/doc/mgr/dashboard.rst +++ b/ceph/doc/mgr/dashboard.rst @@ -215,9 +215,11 @@ If you're building Ceph from source and want to start the dashboard from your development environment, please see the files ``README.rst`` and ``HACKING.rst`` in the source directory ``src/pybind/mgr/dashboard``. -Within a running Ceph cluster, the Ceph Dashboard is enabled with:: +Within a running Ceph cluster, the Ceph Dashboard is enabled with: - $ ceph mgr module enable dashboard +.. prompt:: bash $ + + ceph mgr module enable dashboard Configuration ------------- @@ -230,9 +232,11 @@ SSL/TLS Support All HTTP connections to the dashboard are secured with SSL/TLS by default. To get the dashboard up and running quickly, you can generate and install a -self-signed certificate:: +self-signed certificate: + +.. 
prompt:: bash $ - $ ceph dashboard create-self-signed-cert + ceph dashboard create-self-signed-cert Note that most web browsers will complain about self-signed certificates and require explicit confirmation before establishing a secure connection to the @@ -241,28 +245,36 @@ dashboard. To properly secure a deployment and to remove the warning, a certificate that is issued by a certificate authority (CA) should be used. -For example, a key pair can be generated with a command similar to:: +For example, a key pair can be generated with a command similar to: - $ openssl req -new -nodes -x509 \ - -subj "/O=IT/CN=ceph-mgr-dashboard" -days 3650 \ - -keyout dashboard.key -out dashboard.crt -extensions v3_ca +.. prompt:: bash $ + + openssl req -new -nodes -x509 \ + -subj "/O=IT/CN=ceph-mgr-dashboard" -days 3650 \ + -keyout dashboard.key -out dashboard.crt -extensions v3_ca The ``dashboard.crt`` file should then be signed by a CA. Once that is done, you -can enable it for Ceph manager instances by running the following commands:: +can enable it for Ceph manager instances by running the following commands: + +.. prompt:: bash $ - $ ceph dashboard set-ssl-certificate -i dashboard.crt - $ ceph dashboard set-ssl-certificate-key -i dashboard.key + ceph dashboard set-ssl-certificate -i dashboard.crt + ceph dashboard set-ssl-certificate-key -i dashboard.key If unique certificates are desired for each manager instance, the name of the instance can be included as follows (where ``$name`` is the name -of the ``ceph-mgr`` instance, usually the hostname):: +of the ``ceph-mgr`` instance, usually the hostname): - $ ceph dashboard set-ssl-certificate $name -i dashboard.crt - $ ceph dashboard set-ssl-certificate-key $name -i dashboard.key +.. 
prompt:: bash $ -SSL can also be disabled by setting this configuration value:: + ceph dashboard set-ssl-certificate $name -i dashboard.crt + ceph dashboard set-ssl-certificate-key $name -i dashboard.key - $ ceph config set mgr mgr/dashboard/ssl false +SSL can also be disabled by setting this configuration value: + +.. prompt:: bash $ + + ceph config set mgr mgr/dashboard/ssl false This might be useful if the dashboard will be running behind a proxy which does not support SSL for its upstream servers or other situations where SSL is not @@ -279,10 +291,12 @@ wanted or required. See :ref:`dashboard-proxy-configuration` for more details. You must restart Ceph manager processes after changing the SSL certificate and key. This can be accomplished by either running ``ceph mgr fail mgr`` or by disabling and re-enabling the dashboard module (which also - triggers the manager to respawn itself):: + triggers the manager to respawn itself): + + .. prompt:: bash $ - $ ceph mgr module disable dashboard - $ ceph mgr module enable dashboard + ceph mgr module disable dashboard + ceph mgr module enable dashboard .. _dashboard-host-name-and-port: @@ -298,19 +312,23 @@ If no specific address has been configured, the web app will bind to ``::``, which corresponds to all available IPv4 and IPv6 addresses. These defaults can be changed via the configuration key facility on a -cluster-wide level (so they apply to all manager instances) as follows:: +cluster-wide level (so they apply to all manager instances) as follows: + +.. prompt:: bash $ - $ ceph config set mgr mgr/dashboard/server_addr $IP - $ ceph config set mgr mgr/dashboard/server_port $PORT - $ ceph config set mgr mgr/dashboard/ssl_server_port $PORT + ceph config set mgr mgr/dashboard/server_addr $IP + ceph config set mgr mgr/dashboard/server_port $PORT + ceph config set mgr mgr/dashboard/ssl_server_port $PORT Since each ``ceph-mgr`` hosts its own instance of the dashboard, it may be necessary to configure them separately. 
The IP address and port for a specific -manager instance can be changed with the following commands:: +manager instance can be changed with the following commands: - $ ceph config set mgr mgr/dashboard/$name/server_addr $IP - $ ceph config set mgr mgr/dashboard/$name/server_port $PORT - $ ceph config set mgr mgr/dashboard/$name/ssl_server_port $PORT +.. prompt:: bash $ + + ceph config set mgr mgr/dashboard/$name/server_addr $IP + ceph config set mgr mgr/dashboard/$name/server_port $PORT + ceph config set mgr mgr/dashboard/$name/ssl_server_port $PORT Replace ``$name`` with the ID of the ceph-mgr instance hosting the dashboard. @@ -329,9 +347,11 @@ you can use. For more details please refer to the `User and Role Management`_ section. To create a user with the administrator role you can use the following -commands:: +commands: - $ ceph dashboard ac-user-create -i administrator +.. prompt:: bash $ + + ceph dashboard ac-user-create -i administrator Account Lock-out ^^^^^^^^^^^^^^^^ @@ -339,27 +359,33 @@ Account Lock-out It disables a user account if a user repeatedly enters the wrong credentials for multiple times. It is enabled by default to prevent brute-force or dictionary attacks. The user can get or set the default number of lock-out attempts using -these commands respectively:: +these commands respectively: + +.. prompt:: bash $ - $ ceph dashboard get-account-lockout-attempts - $ ceph dashboard set-account-lockout-attempts + ceph dashboard get-account-lockout-attempts + ceph dashboard set-account-lockout-attempts .. warning:: This feature can be disabled by setting the default number of lock-out attempts to 0. However, by disabling this feature, the account is more vulnerable to brute-force or - dictionary based attacks. This can be disabled by:: + dictionary based attacks. This can be disabled by: - $ ceph dashboard set-account-lockout-attempts 0 + .. 
prompt:: bash $ + + ceph dashboard set-account-lockout-attempts 0 Enable a Locked User ^^^^^^^^^^^^^^^^^^^^ If a user account is disabled as a result of multiple invalid login attempts, then it needs to be manually enabled by the administrator. This can be done by the following -command:: +command: + +.. prompt:: bash $ - $ ceph dashboard ac-user-enable + ceph dashboard ac-user-enable Accessing the Dashboard ^^^^^^^^^^^^^^^^^^^^^^^ @@ -378,28 +404,36 @@ Enabling the Object Gateway Management Frontend When RGW is deployed with cephadm, the RGW credentials used by the dashboard will be automatically configured. You can also manually force the -credentials to be set up with:: +credentials to be set up with: + +.. prompt:: bash $ - $ ceph dashboard set-rgw-credentials + ceph dashboard set-rgw-credentials This will create an RGW user with uid ``dashboard`` for each realm in the system. -If you've configured a custom 'admin' resource in your RGW admin API, you should set it here also:: +If you've configured a custom 'admin' resource in your RGW admin API, you should set it here also: - $ ceph dashboard set-rgw-api-admin-resource +.. prompt:: bash $ + + ceph dashboard set-rgw-api-admin-resource If you are using a self-signed certificate in your Object Gateway setup, you should disable certificate verification in the dashboard to avoid refused connections, e.g. caused by certificates signed by unknown CA or not matching -the host name:: +the host name: + +.. prompt:: bash $ - $ ceph dashboard set-rgw-api-ssl-verify False + ceph dashboard set-rgw-api-ssl-verify False If the Object Gateway takes too long to process requests and the dashboard runs -into timeouts, you can set the timeout value to your needs:: +into timeouts, you can set the timeout value to your needs: - $ ceph dashboard set-rest-requests-timeout +.. prompt:: bash $ + + ceph dashboard set-rest-requests-timeout The default value is 45 seconds. 
@@ -423,16 +457,20 @@ If the ``ceph-iscsi`` REST API is configured in HTTPS mode and its using a self- certificate, you need to configure the dashboard to avoid SSL certificate verification when accessing ceph-iscsi API. -To disable API SSL verification run the following command:: +To disable API SSL verification run the following command: + +.. prompt:: bash $ - $ ceph dashboard set-iscsi-api-ssl-verification false + ceph dashboard set-iscsi-api-ssl-verification false -The available iSCSI gateways must be defined using the following commands:: +The available iSCSI gateways must be defined using the following commands: - $ ceph dashboard iscsi-gateway-list - $ # Gateway URL format for a new gateway: ://:@[:port] - $ ceph dashboard iscsi-gateway-add -i [] - $ ceph dashboard iscsi-gateway-rm +.. prompt:: bash $ + + ceph dashboard iscsi-gateway-list + # Gateway URL format for a new gateway: ://:@[:port] + ceph dashboard iscsi-gateway-add -i [] + ceph dashboard iscsi-gateway-rm .. _dashboard-grafana: @@ -480,9 +518,11 @@ The following process describes how to configure Grafana and Prometheus manually. After you have installed Prometheus, Grafana, and the Node exporter on appropriate hosts, proceed with the following steps. -#. Enable the Ceph Exporter which comes as Ceph Manager module by running:: +#. Enable the Ceph Exporter which comes as Ceph Manager module by running: + + .. prompt:: bash $ - $ ceph mgr module enable prometheus + ceph mgr module enable prometheus More details can be found in the documentation of the :ref:`mgr-prometheus`. @@ -524,7 +564,9 @@ on appropriate hosts, proceed with the following steps. #. Add Prometheus as data source to Grafana `using the Grafana Web UI `_. -#. Install the `vonage-status-panel and grafana-piechart-panel` plugins using:: +#. Install the `vonage-status-panel and grafana-piechart-panel` plugins using: + + .. 
prompt:: bash $ grafana-cli plugins install vonage-status-panel grafana-cli plugins install grafana-piechart-panel @@ -532,16 +574,20 @@ on appropriate hosts, proceed with the following steps. #. Add Dashboards to Grafana: Dashboards can be added to Grafana by importing dashboard JSON files. - Use the following command to download the JSON files:: + Use the following command to download the JSON files: - wget https://raw.githubusercontent.com/ceph/ceph/master/monitoring/ceph-mixin/dashboards_out/.json + .. prompt:: bash $ + + wget https://raw.githubusercontent.com/ceph/ceph/main/monitoring/ceph-mixin/dashboards_out/.json You can find various dashboard JSON files `here `_ . + main/monitoring/ceph-mixin/dashboards_out>`_. + + For Example, for ceph-cluster overview you can use: - For Example, for ceph-cluster overview you can use:: + .. prompt:: bash $ - wget https://raw.githubusercontent.com/ceph/ceph/master/monitoring/ceph-mixin/dashboards_out/ceph-cluster.json + wget https://raw.githubusercontent.com/ceph/ceph/main/monitoring/ceph-mixin/dashboards_out/ceph-cluster.json You may also author your own dashboards. @@ -577,9 +623,11 @@ After you have set up Grafana and Prometheus, you will need to configure the connection information that the Ceph Dashboard will use to access Grafana. You need to tell the dashboard on which URL the Grafana instance is -running/deployed:: +running/deployed: - $ ceph dashboard set-grafana-api-url # default: '' +.. prompt:: bash $ + + ceph dashboard set-grafana-api-url # default: '' The format of the URL : `://:` @@ -596,18 +644,22 @@ The format of the URL : `://:` If you are using a self-signed certificate for Grafana, disable certificate verification in the dashboard to avoid refused connections, which can be a result of certificates signed by an unknown CA or that do not -match the host name:: +match the host name: + +.. 
prompt:: bash $ - $ ceph dashboard set-grafana-api-ssl-verify False + ceph dashboard set-grafana-api-ssl-verify False You can also access Grafana directly to monitor your cluster. .. note:: Ceph Dashboard configuration information can also be unset. For example, to - clear the Grafana API URL we configured above:: + clear the Grafana API URL we configured above: - $ ceph dashboard reset-grafana-api-url + .. prompt:: bash $ + + ceph dashboard reset-grafana-api-url Alternative URL for Browsers """""""""""""""""""""""""""" @@ -633,9 +685,11 @@ This setting won't ever be changed automatically, unlike the GRAFANA_API_URL which is set by :ref:`cephadm` (only if cephadm is used to deploy monitoring services). -To change the URL that is returned to the frontend issue the following command:: +To change the URL that is returned to the frontend issue the following command: + +.. prompt:: bash $ - $ ceph dashboard set-grafana-frontend-api-url + ceph dashboard set-grafana-frontend-api-url If no value is set for that option, it will simply fall back to the value of the GRAFANA_API_URL option. If set, it will instruct the browser to use this URL to @@ -659,9 +713,11 @@ process can be performed by an existing Identity Provider (IdP). Please ensure that this library is installed on your system, either by using your distribution's package management or via Python's `pip` installer. -To configure SSO on Ceph Dashboard, you should use the following command:: +To configure SSO on Ceph Dashboard, you should use the following command: + +.. prompt:: bash $ - $ ceph dashboard sso setup saml2 {} {} {} {} + ceph dashboard sso setup saml2 {} {} {} {} Parameters: @@ -675,25 +731,33 @@ Parameters: The issuer value of SAML requests will follow this pattern: ****/auth/saml2/metadata -To display the current SAML 2.0 configuration, use the following command:: +To display the current SAML 2.0 configuration, use the following command: - $ ceph dashboard sso show saml2 +.. 
prompt:: bash $ + + ceph dashboard sso show saml2 .. note:: For more information about `onelogin_settings`, please check the `onelogin documentation `_. -To disable SSO:: +To disable SSO: + +.. prompt:: bash $ + + ceph dashboard sso disable + +To check if SSO is enabled: - $ ceph dashboard sso disable +.. prompt:: bash $ -To check if SSO is enabled:: + ceph dashboard sso status - $ ceph dashboard sso status +To enable SSO: -To enable SSO:: +.. prompt:: bash $ - $ ceph dashboard sso enable saml2 + ceph dashboard sso enable saml2 .. _dashboard-alerting: @@ -768,25 +832,32 @@ in order to manage silences. #. Update a silence (which will recreate and expire it (default Alertmanager behaviour)) - To use it, specify the host and port of the Alertmanager server:: + To use it, specify the host and port of the Alertmanager server: + + .. prompt:: bash $ - $ ceph dashboard set-alertmanager-api-host # default: '' + ceph dashboard set-alertmanager-api-host # default: '' - For example:: + For example: + + .. prompt:: bash $ - $ ceph dashboard set-alertmanager-api-host 'http://localhost:9093' + ceph dashboard set-alertmanager-api-host 'http://localhost:9093' To be able to see all configured alerts, you will need to configure the URL to the Prometheus API. Using this API, the UI will also help you in verifying that a new silence will match a corresponding alert. - :: + + .. prompt:: bash $ - $ ceph dashboard set-prometheus-api-host # default: '' + ceph dashboard set-prometheus-api-host # default: '' - For example:: + For example: - $ ceph dashboard set-prometheus-api-host 'http://localhost:9090' + .. prompt:: bash $ + + ceph dashboard set-prometheus-api-host 'http://localhost:9090' After setting up the hosts, refresh your browser's dashboard window or tab. @@ -801,13 +872,17 @@ Alertmanager setup, you should disable certificate verification in the dashboard to avoid refused connections caused by certificates signed by an unknown CA or that do not match the host name. 
-- For Prometheus:: +- For Prometheus: + +.. prompt:: bash $ + + ceph dashboard set-prometheus-api-ssl-verify False - $ ceph dashboard set-prometheus-api-ssl-verify False +- For Alertmanager: -- For Alertmanager:: +.. prompt:: bash $ - $ ceph dashboard set-alertmanager-api-ssl-verify False + ceph dashboard set-alertmanager-api-ssl-verify False .. _dashboard-user-role-management: @@ -823,30 +898,38 @@ following checks: - Is the password longer than N characters? - Are the old and new password the same? -The password policy feature can be switched on or off completely:: +The password policy feature can be switched on or off completely: - $ ceph dashboard set-pwd-policy-enabled +.. prompt:: bash $ -The following individual checks can also be switched on or off:: + ceph dashboard set-pwd-policy-enabled - $ ceph dashboard set-pwd-policy-check-length-enabled - $ ceph dashboard set-pwd-policy-check-oldpwd-enabled - $ ceph dashboard set-pwd-policy-check-username-enabled - $ ceph dashboard set-pwd-policy-check-exclusion-list-enabled - $ ceph dashboard set-pwd-policy-check-complexity-enabled - $ ceph dashboard set-pwd-policy-check-sequential-chars-enabled - $ ceph dashboard set-pwd-policy-check-repetitive-chars-enabled +The following individual checks can also be switched on or off: + +.. prompt:: bash $ + + ceph dashboard set-pwd-policy-check-length-enabled + ceph dashboard set-pwd-policy-check-oldpwd-enabled + ceph dashboard set-pwd-policy-check-username-enabled + ceph dashboard set-pwd-policy-check-exclusion-list-enabled + ceph dashboard set-pwd-policy-check-complexity-enabled + ceph dashboard set-pwd-policy-check-sequential-chars-enabled + ceph dashboard set-pwd-policy-check-repetitive-chars-enabled Additionally the following options are available to configure password policy. -- Minimum password length (defaults to 8):: +- Minimum password length (defaults to 8): + +.. 
prompt:: bash $ + + ceph dashboard set-pwd-policy-min-length - $ ceph dashboard set-pwd-policy-min-length +- Minimum password complexity (defaults to 10): -- Minimum password complexity (defaults to 10):: + .. prompt:: bash $ - $ ceph dashboard set-pwd-policy-min-complexity + ceph dashboard set-pwd-policy-min-complexity Password complexity is calculated by classifying each character in the password. The complexity count starts by 0. A character is rated by @@ -859,9 +942,11 @@ policy. - Increase by 5 if the character has not been classified by one of the previous rules. - A list of comma separated words that are not allowed to be used in a - password:: + password: - $ ceph dashboard set-pwd-policy-exclusion-list [,...] + .. prompt:: bash $ + + ceph dashboard set-pwd-policy-exclusion-list [,...] User Accounts @@ -879,44 +964,60 @@ available to all ``ceph-mgr`` instances. We provide a set of CLI commands to manage user accounts: -- *Show User(s)*:: +- *Show User(s)*: + + .. prompt:: bash $ - $ ceph dashboard ac-user-show [] + ceph dashboard ac-user-show [] -- *Create User*:: +- *Create User*: + + .. prompt:: bash $ - $ ceph dashboard ac-user-create [--enabled] [--force-password] [--pwd_update_required] -i [] [] [] [] + ceph dashboard ac-user-create [--enabled] [--force-password] [--pwd_update_required] -i [] [] [] [] To bypass password policy checks use the `force-password` option. Add the option `pwd_update_required` so that a newly created user has to change their password after the first login. -- *Delete User*:: +- *Delete User*: + + .. prompt:: bash $ - $ ceph dashboard ac-user-delete + ceph dashboard ac-user-delete -- *Change Password*:: +- *Change Password*: - $ ceph dashboard ac-user-set-password [--force-password] -i + .. prompt:: bash $ -- *Change Password Hash*:: + ceph dashboard ac-user-set-password [--force-password] -i - $ ceph dashboard ac-user-set-password-hash -i +- *Change Password Hash*: + + .. 
prompt:: bash $ + + ceph dashboard ac-user-set-password-hash -i The hash must be a bcrypt hash and salt, e.g. ``$2b$12$Pt3Vq/rDt2y9glTPSV.VFegiLkQeIpddtkhoFetNApYmIJOY8gau2``. This can be used to import users from an external database. -- *Modify User (name, and email)*:: +- *Modify User (name, and email)*: + + .. prompt:: bash $ + + ceph dashboard ac-user-set-info - $ ceph dashboard ac-user-set-info +- *Disable User*: -- *Disable User*:: + .. prompt:: bash $ - $ ceph dashboard ac-user-disable + ceph dashboard ac-user-disable -- *Enable User*:: +- *Enable User*: - $ ceph dashboard ac-user-enable + .. prompt:: bash $ + + ceph dashboard ac-user-enable User Roles and Permissions ^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -991,42 +1092,58 @@ The list of system roles are: - **pool-manager**: allows full permissions for the *pool* scope. - **cephfs-manager**: allows full permissions for the *cephfs* scope. -The list of available roles can be retrieved with the following command:: +The list of available roles can be retrieved with the following command: + +.. prompt:: bash $ - $ ceph dashboard ac-role-show [] + ceph dashboard ac-role-show [] You can also use the CLI to create new roles. The available commands are the following: -- *Create Role*:: +- *Create Role*: + + .. prompt:: bash $ - $ ceph dashboard ac-role-create [] + ceph dashboard ac-role-create [] -- *Delete Role*:: +- *Delete Role*: - $ ceph dashboard ac-role-delete + .. prompt:: bash $ -- *Add Scope Permissions to Role*:: + ceph dashboard ac-role-delete - $ ceph dashboard ac-role-add-scope-perms [...] +- *Add Scope Permissions to Role*: -- *Delete Scope Permission from Role*:: + .. prompt:: bash $ - $ ceph dashboard ac-role-del-scope-perms + ceph dashboard ac-role-add-scope-perms [...] + +- *Delete Scope Permission from Role*: + + .. prompt:: bash $ + + ceph dashboard ac-role-del-scope-perms To assign roles to users, the following commands are available: -- *Set User Roles*:: +- *Set User Roles*: + + .. 
prompt:: bash $ + + ceph dashboard ac-user-set-roles [...] + +- *Add Roles To User*: - $ ceph dashboard ac-user-set-roles [...] + .. prompt:: bash $ -- *Add Roles To User*:: + ceph dashboard ac-user-add-roles [...] - $ ceph dashboard ac-user-add-roles [...] +- *Delete Roles from User*: -- *Delete Roles from User*:: + .. prompt:: bash $ - $ ceph dashboard ac-user-del-roles [...] + ceph dashboard ac-user-del-roles [...] Example of User and Custom Role Creation @@ -1036,19 +1153,25 @@ In this section we show a complete example of the commands that create a user account that can manage RBD images, view and create Ceph pools, and has read-only access to other scopes. -1. *Create the user*:: +1. *Create the user*: - $ ceph dashboard ac-user-create bob -i + .. prompt:: bash $ -2. *Create role and specify scope permissions*:: + ceph dashboard ac-user-create bob -i - $ ceph dashboard ac-role-create rbd/pool-manager - $ ceph dashboard ac-role-add-scope-perms rbd/pool-manager rbd-image read create update delete - $ ceph dashboard ac-role-add-scope-perms rbd/pool-manager pool read create +2. *Create role and specify scope permissions*: -3. *Associate roles to user*:: + .. prompt:: bash $ - $ ceph dashboard ac-user-set-roles bob rbd/pool-manager read-only + ceph dashboard ac-role-create rbd/pool-manager + ceph dashboard ac-role-add-scope-perms rbd/pool-manager rbd-image read create update delete + ceph dashboard ac-role-add-scope-perms rbd/pool-manager pool read create + +3. *Associate roles to user*: + + .. prompt:: bash $ + + ceph dashboard ac-user-set-roles bob rbd/pool-manager read-only .. _dashboard-proxy-configuration: @@ -1075,9 +1198,9 @@ you may wish to service it under a URL prefix. To get the dashboard to use hyperlinks that include your prefix, you can set the ``url_prefix`` setting: -:: +.. 
prompt:: bash $ - ceph config set mgr mgr/dashboard/url_prefix $PREFIX + ceph config set mgr mgr/dashboard/url_prefix $PREFIX so you can access the dashboard at ``http://$IP:$PORT/$PREFIX/``. @@ -1088,21 +1211,27 @@ If the dashboard is behind a load-balancing proxy like `HAProxy +.. prompt:: bash $ + + ceph dashboard set-audit-api-enabled If enabled, the following parameters are logged per each request: @@ -1166,9 +1297,11 @@ If enabled, the following parameters are logged per each request: * user - The name of the user, otherwise 'None' The logging of the request payload (the arguments and their values) is enabled -by default. Execute the following command to disable this behaviour:: +by default. Execute the following command to disable this behaviour: + +.. prompt:: bash $ - $ ceph dashboard set-audit-api-log-payload + ceph dashboard set-audit-api-log-payload A log entry may look like this:: @@ -1201,9 +1334,14 @@ Troubleshooting the Dashboard Locating the Dashboard ^^^^^^^^^^^^^^^^^^^^^^ -If you are unsure of the location of the Ceph Dashboard, run the following command:: +If you are unsure of the location of the Ceph Dashboard, run the following command: + +.. prompt:: bash $ + + ceph mgr services | jq .dashboard + +:: - $ ceph mgr services | jq .dashboard "https://host:port" The command returns the URL where the Ceph Dashboard is located: ``https://:/`` @@ -1221,9 +1359,11 @@ Accessing the Dashboard If you are unable to access the Ceph Dashboard, run the following commands: -#. Verify the Ceph Dashboard module is enabled:: +#. Verify the Ceph Dashboard module is enabled: + + .. prompt:: bash $ - $ ceph mgr module ls | jq .enabled_modules + ceph mgr module ls | jq .enabled_modules Ensure the Ceph Dashboard module is listed in the return value of the command. Example snipped output from the command above:: @@ -1234,42 +1374,62 @@ commands: "restful" ] -#. If it is not listed, activate the module with the following command:: +#. 
If it is not listed, activate the module with the following command: - $ ceph mgr module enable dashboard + .. prompt:: bash $ + + ceph mgr module enable dashboard #. Check the Ceph Dashboard and/or ``ceph-mgr`` log files for any errors. - * Check if ``ceph-mgr`` log messages are written to a file by:: + * Check if ``ceph-mgr`` log messages are written to a file by: + + .. prompt:: bash $ + + ceph config get mgr log_to_file + + :: - $ ceph config get mgr log_to_file true * Get the location of the log file (it's ``/var/log/ceph/-.log`` - by default):: + by default): + + .. prompt:: bash $ + + ceph config get mgr log_file + + :: - $ ceph config get mgr log_file /var/log/ceph/$cluster-$name.log #. Ensure the SSL/TSL support is configured properly: - * Check if the SSL/TSL support is enabled:: + * Check if the SSL/TSL support is enabled: + + .. prompt:: bash $ - $ ceph config get mgr mgr/dashboard/ssl + ceph config get mgr mgr/dashboard/ssl - * If the command returns ``true``, verify a certificate exists by:: + * If the command returns ``true``, verify a certificate exists by: - $ ceph config-key get mgr/dashboard/crt + .. prompt:: bash $ - and:: + ceph config-key get mgr/dashboard/crt - $ ceph config-key get mgr/dashboard/key + and: + + .. prompt:: bash $ + + ceph config-key get mgr/dashboard/key * If it doesn't return ``true``, run the following command to generate a self-signed certificate or follow the instructions outlined in - :ref:`dashboard-ssl-tls-support`:: + :ref:`dashboard-ssl-tls-support`: + + .. prompt:: bash $ - $ ceph dashboard create-self-signed-cert + ceph dashboard create-self-signed-cert Trouble Logging into the Dashboard @@ -1287,24 +1447,33 @@ error, run through the procedural checks below: and password, and ensure that your keyboard's caps lock is not enabled by accident. #. 
If your user credentials are correct, but you are experiencing the same - error, check that the user account exists:: + error, check that the user account exists: - $ ceph dashboard ac-user-show + .. prompt:: bash $ + + ceph dashboard ac-user-show This command returns your user data. If the user does not exist, it will print:: - $ Error ENOENT: User does not exist + Error ENOENT: User does not exist + +#. Check if the user is enabled: + + .. prompt:: bash $ -#. Check if the user is enabled:: + ceph dashboard ac-user-show | jq .enabled + + :: - $ ceph dashboard ac-user-show | jq .enabled true Check if ``enabled`` is set to ``true`` for your user. If not the user is - not enabled, run:: + not enabled, run: + + .. prompt:: bash $ - $ ceph dashboard ac-user-enable + ceph dashboard ac-user-enable Please see :ref:`dashboard-user-role-management` for more information. @@ -1334,9 +1503,11 @@ To enable this flag via the Ceph Dashboard, navigate from *Cluster* to *Manager modules*. Select *Dashboard module* and click the edit button. Click the *debug* checkbox and update. -To enable it via the CLI, run the following command:: +To enable it via the CLI, run the following command: - $ ceph dashboard debug enable +.. prompt:: bash $ + + ceph dashboard debug enable Setting Logging Level of Dashboard Module @@ -1345,29 +1516,74 @@ Setting Logging Level of Dashboard Module Setting the logging level to debug makes the log more verbose and helpful for debugging. -#. Increase the logging level of manager daemons:: +#. Increase the logging level of manager daemons: + + .. prompt:: bash $ - $ ceph tell mgr config set debug_mgr 20 + ceph tell mgr config set debug_mgr 20 #. Adjust the logging level of the Ceph Dashboard module via the Dashboard or CLI: * Navigate from *Cluster* to *Manager modules*. Select *Dashboard module* and click the edit button. Modify the ``log_level`` configuration. 
- * To adjust it via the CLI, run the following command:: + * To adjust it via the CLI, run the following command: - $ bin/ceph config set mgr mgr/dashboard/log_level debug + .. prompt:: bash $ -#. High log levels can result in considerable log volume, which can + bin/ceph config set mgr mgr/dashboard/log_level debug + +3. High log levels can result in considerable log volume, which can easily fill up your filesystem. Set a calendar reminder for an hour, a day, or a week in the future to revert this temporary logging increase. This looks -something like this:: +something like this: - $ ceph config log - ... - --- 11 --- 2020-11-07 11:11:11.960659 --- mgr.x/dashboard/log_level = debug --- - ... - $ ceph config reset 11 + .. prompt:: bash $ + + ceph config log + + :: + + ... + --- 11 --- 2020-11-07 11:11:11.960659 --- mgr.x/dashboard/log_level = debug --- + ... + + .. prompt:: bash $ + + ceph config reset 11 + +.. _centralized-logging: + +Enable Centralized Logging in Dashboard +""""""""""""""""""""""""""""""""""""""" + +To learn more about centralized logging, see :ref:`cephadm-monitoring-centralized-logs` + +1. Create the Loki service on any particular host using "Create Services" option. + +2. Similarly create the Promtail service which will be by default deployed + on all the running hosts. + +3. To see debug-level messages as well as info-level events, run the following command via CLI: + + .. prompt:: bash $ + + ceph config set mgr mgr/cephadm/log_to_cluster_level debug + +4. To enable logging to files, run the following commands via CLI: + + .. prompt:: bash $ + + ceph config set global log_to_file true + ceph config set global mon_cluster_log_to_file true + +5. Click on the Daemon Logs tab under Cluster -> Logs. + +6. You can find some pre-defined labels there on clicking the Log browser button such as filename, + job etc that can help you query the logs at one go. + +7. 
You can query the logs with LogQL for advanced search and perform some + calculations as well - https://grafana.com/docs/loki/latest/logql/. Reporting issues from Dashboard @@ -1382,11 +1598,15 @@ on the issue tracker. Under the ``my account`` tab in the Ceph Issue Tracker, the user can see their API access key. This key is used for authentication when creating a new issue. To store the Ceph API access key, in the CLI run: -``ceph dashboard set-issue-tracker-api-key -i `` +.. prompt:: bash $ + + ``ceph dashboard set-issue-tracker-api-key -i `` Then on successful update, you can create an issue using: -``ceph dashboard create issue `` +.. prompt:: bash $ + + ``ceph dashboard create issue `` The available projects to create an issue on are: #. dashboard diff --git a/ceph/doc/mgr/dashboard_plugins/debug.inc.rst b/ceph/doc/mgr/dashboard_plugins/debug.inc.rst index e11137cd2..883419cbf 100644 --- a/ceph/doc/mgr/dashboard_plugins/debug.inc.rst +++ b/ceph/doc/mgr/dashboard_plugins/debug.inc.rst @@ -4,13 +4,30 @@ Debug ^^^^^ This plugin allows to customize the behaviour of the dashboard according to the -debug mode. It can be enabled, disabled or checked with the following command:: +debug mode. It can be enabled, disabled or checked with the following command: + +.. prompt:: bash $ + + ceph dashboard debug status + +:: - $ ceph dashboard debug status Debug: 'disabled' - $ ceph dashboard debug enable + +.. prompt:: bash $ + + ceph dashboard debug enable + +:: + Debug: 'enabled' - $ ceph dashboard debug disable + +.. prompt:: bash $ + + ceph dashboard debug disable + +:: + Debug: 'disabled' By default, it's disabled. 
This is the recommended setting for production diff --git a/ceph/doc/mgr/dashboard_plugins/feature_toggles.inc.rst b/ceph/doc/mgr/dashboard_plugins/feature_toggles.inc.rst index 5153e7092..7c96b0faa 100644 --- a/ceph/doc/mgr/dashboard_plugins/feature_toggles.inc.rst +++ b/ceph/doc/mgr/dashboard_plugins/feature_toggles.inc.rst @@ -25,9 +25,14 @@ The list of features that can be enabled/disabled is: By default all features come enabled. -To retrieve a list of features and their current statuses:: +To retrieve a list of features and their current statuses: + +.. prompt:: bash $ + + ceph dashboard feature status + +:: - $ ceph dashboard feature status Feature 'cephfs': 'enabled' Feature 'iscsi': 'enabled' Feature 'mirroring': 'enabled' @@ -35,9 +40,14 @@ To retrieve a list of features and their current statuses:: Feature 'rgw': 'enabled' Feature 'nfs': 'enabled' -To enable or disable the status of a single or multiple features:: +To enable or disable the status of a single or multiple features: + +.. prompt:: bash $ + + ceph dashboard feature disable iscsi mirroring + +:: - $ ceph dashboard feature disable iscsi mirroring Feature 'iscsi': disabled Feature 'mirroring': disabled diff --git a/ceph/doc/mgr/dashboard_plugins/motd.inc.rst b/ceph/doc/mgr/dashboard_plugins/motd.inc.rst index b8464e1f3..0f9cc199a 100644 --- a/ceph/doc/mgr/dashboard_plugins/motd.inc.rst +++ b/ceph/doc/mgr/dashboard_plugins/motd.inc.rst @@ -12,17 +12,23 @@ syntax to specify the expiration time: `Ns|m|h|d|w` for seconds, minutes, hours, days and weeks. If the MOTD should expire after 2 hours, use `2h` or `5w` for 5 weeks. Use `0` to configure a MOTD that does not expire. -To configure a MOTD, run the following command:: +To configure a MOTD, run the following command: - $ ceph dashboard motd set +.. prompt:: bash $ -To show the configured MOTD:: + ceph dashboard motd set - $ ceph dashboard motd get +To show the configured MOTD: -To clear the configured MOTD run:: +.. 
prompt:: bash $ - $ ceph dashboard motd clear + ceph dashboard motd get + +To clear the configured MOTD run: + +.. prompt:: bash $ + + ceph dashboard motd clear A MOTD with a `info` or `warning` severity can be closed by the user. The `info` MOTD is not displayed anymore until the local storage cookies are diff --git a/ceph/doc/mgr/orchestrator.rst b/ceph/doc/mgr/orchestrator.rst index 2f20667f2..24704e5e1 100644 --- a/ceph/doc/mgr/orchestrator.rst +++ b/ceph/doc/mgr/orchestrator.rst @@ -5,14 +5,18 @@ Orchestrator CLI ================ -This module provides a command line interface (CLI) to orchestrator -modules (``ceph-mgr`` modules which interface with external orchestration services). +This module provides a command line interface (CLI) for orchestrator modules. +Orchestrator modules are ``ceph-mgr`` plugins that interface with external +orchestration services. -As the orchestrator CLI unifies multiple external orchestrators, a common nomenclature -for the orchestrator module is needed. +Definition of Terms +=================== + +The orchestrator CLI unifies multiple external orchestrators, so we need a +common nomenclature for the orchestrator module: +--------------------------------------+---------------------------------------+ -| *host* | hostname (not DNS name) of the | +| *host* | hostname (not the DNS name) of the | | | physical host. Not the podname, | | | container name, or hostname inside | | | the container. | @@ -20,7 +24,7 @@ for the orchestrator module is needed. | *service type* | The type of the service. e.g., nfs, | | | mds, osd, mon, rgw, mgr, iscsi | +--------------------------------------+---------------------------------------+ -| *service* | A logical service, Typically | +| *service* | A logical service. Typically | | | comprised of multiple service | | | instances on multiple hosts for HA | | | | @@ -34,29 +38,28 @@ for the orchestrator module is needed. 
| | like LIO or knfsd or whatever) | | | | | | This identifier should | -| | uniquely identify the instance | +| | uniquely identify the instance. | +--------------------------------------+---------------------------------------+ -The relation between the names is the following: - -* A *service* has a specific *service type* -* A *daemon* is a physical instance of a *service type* +Here is how the names relate: +* A *service* has a specific *service type*. +* A *daemon* is a physical instance of a *service type*. .. note:: - Orchestrator modules may only implement a subset of the commands listed below. - Also, the implementation of the commands may differ between modules. + Orchestrator modules might implement only a subset of the commands listed + below. The implementation of the commands may differ between modules. Status ====== -:: +.. prompt:: bash $ - ceph orch status [--detail] + ceph orch status [--detail] -Show current orchestrator mode and high-level status (whether the orchestrator -plugin is available and operational) +This command shows the current orchestrator mode and its high-level status +(whether the orchestrator plugin is available and operational). .. @@ -92,15 +95,20 @@ plugin is available and operational) Stateless services (MDS/RGW/NFS/rbd-mirror/iSCSI) ================================================= -(Please note: The orchestrator will not configure the services. Please look into the corresponding -documentation for service configuration details.) +.. note:: + + The orchestrator will not configure the services. See the relevant + documentation for details about how to configure particular services. + +The ``name`` parameter identifies the kind of the group of instances. The +following short list explains the meaning of the ``name`` parameter: -The ``name`` parameter is an identifier of the group of instances: +* A CephFS file system identifies a group of MDS daemons. +* A zone name identifies a group of RGWs. 
-* a CephFS file system for a group of MDS daemons, -* a zone name for a group of RGWs +Creating/growing/shrinking/removing services: -Creating/growing/shrinking/removing services:: +.. prompt:: bash $ ceph orch apply mds [--placement=] [--dry-run] ceph orch apply rgw [--realm=] [--zone=] [--port=] [--ssl] [--placement=] [--dry-run] @@ -111,33 +119,73 @@ where ``placement`` is a :ref:`orchestrator-cli-placement-spec`. e.g., ``ceph orch apply mds myfs --placement="3 host1 host2 host3"`` -Service Commands:: +Service Commands: + +.. prompt:: bash $ ceph orch +.. note:: These commands apply only to cephadm containerized daemons. + +Options +======= + +.. option:: start + + Start the daemon on the corresponding host. + +.. option:: stop + + Stop the daemon on the corresponding host. + +.. option:: restart + + Restart the daemon on the corresponding host. + +.. option:: redeploy + + Redeploy the ceph daemon on the corresponding host. This will recreate the daemon directory + structure under ``/var/lib/ceph//`` (if it doesn't exist), refresh its + configuration files, regenerate its unit-files and restarts the systemd daemon. + +.. option:: reconfig + + Reconfigure the daemon on the corresponding host. This will refresh configuration files then restart the daemon. + + .. note:: this command assumes the daemon directory ``/var/lib/ceph//`` already exists. Configuring the Orchestrator CLI ================================ -To enable the orchestrator, select the orchestrator module to use -with the ``set backend`` command:: +Enable the orchestrator by using the ``set backend`` command to select the orchestrator module that will be used: + +.. prompt:: bash $ ceph orch set backend -For example, to enable the Rook orchestrator module and use it with the CLI:: +Example - Configuring the Orchestrator CLI +------------------------------------------ + +For example, to enable the Rook orchestrator module and use it with the CLI: + +.. 
prompt:: bash $ ceph mgr module enable rook ceph orch set backend rook -Check the backend is properly configured:: +Confirm that the backend is properly configured: + +.. prompt:: bash $ ceph orch status Disable the Orchestrator ------------------------ -To disable the orchestrator, use the empty string ``""``:: +To disable the orchestrator, use the empty string ``""``: + +.. prompt:: bash $ ceph orch set backend "" ceph mgr module disable rook diff --git a/ceph/doc/mgr/telemetry.rst b/ceph/doc/mgr/telemetry.rst index dc817e8cf..bed07746d 100644 --- a/ceph/doc/mgr/telemetry.rst +++ b/ceph/doc/mgr/telemetry.rst @@ -191,6 +191,7 @@ List all collections with:: crash_base REPORTING Information about daemon crashes (daemon type and version, backtrace, etc.) device_base REPORTING Information about device health metrics ident_base NOT REPORTING: CHANNEL ident IS OFF User-provided identifying information about the cluster + perf_memory_metrics NOT REPORTING: NOT OPTED-IN, CHANNEL perf IS OFF Heap stats and mempools for mon and mds perf_perf NOT REPORTING: NOT OPTED-IN, CHANNEL perf IS OFF Information about performance counters of the cluster diff --git a/ceph/doc/rados/configuration/mclock-config-ref.rst b/ceph/doc/rados/configuration/mclock-config-ref.rst index da13a0cfa..1040b2e66 100644 --- a/ceph/doc/rados/configuration/mclock-config-ref.rst +++ b/ceph/doc/rados/configuration/mclock-config-ref.rst @@ -102,7 +102,7 @@ shows the resource control parameters set by the profile: +------------------------+-------------+--------+-------+ | background recovery | 25% | 1 | 100% | +------------------------+-------------+--------+-------+ -| background best-effort | 25% | 1 | MAX | +| background best-effort | 25% | 2 | MAX | +------------------------+-------------+--------+-------+ high_recovery_ops @@ -120,7 +120,7 @@ parameters set by the profile: +------------------------+-------------+--------+-------+ | background recovery | 60% | 2 | 200% | 
+------------------------+-------------+--------+-------+ -| background best-effort | 1 (MIN) | 1 | MAX | +| background best-effort | 1 (MIN) | 2 | MAX | +------------------------+-------------+--------+-------+ balanced @@ -139,7 +139,7 @@ within the OSD. +------------------------+-------------+--------+-------+ | background recovery | 40% | 1 | 150% | +------------------------+-------------+--------+-------+ -| background best-effort | 20% | 1 | MAX | +| background best-effort | 20% | 2 | MAX | +------------------------+-------------+--------+-------+ .. note:: Across the built-in profiles, internal background best-effort clients @@ -247,6 +247,145 @@ And that's it! You are ready to run workloads on the cluster and check if the QoS requirements are being met. +Switching Between Built-in and Custom Profiles +============================================== + +There may be situations requiring switching from a built-in profile to the +*custom* profile and vice-versa. The following sections outline the steps to +accomplish this. + +Steps to Switch From a Built-in to the Custom Profile +----------------------------------------------------- + +The following command can be used to switch to the *custom* profile: + + .. prompt:: bash # + + ceph config set osd osd_mclock_profile custom + +For example, to change the profile to *custom* on all OSDs, the following +command can be used: + + .. prompt:: bash # + + ceph config set osd osd_mclock_profile custom + +After switching to the *custom* profile, the desired mClock configuration +option may be modified. For example, to change the client reservation IOPS +allocation for a specific OSD (say osd.0), the following command can be used: + + .. prompt:: bash # + + ceph config set osd.0 osd_mclock_scheduler_client_res 3000 + +.. 
important:: Care must be taken to change the reservations of other services like + recovery and background best effort accordingly to ensure that the sum of the + reservations do not exceed the maximum IOPS capacity of the OSD. + +.. tip:: The reservation and limit parameter allocations are per-shard based on + the type of backing device (HDD/SSD) under the OSD. See + :confval:`osd_op_num_shards_hdd` and :confval:`osd_op_num_shards_ssd` for + more details. + +Steps to Switch From the Custom Profile to a Built-in Profile +------------------------------------------------------------- + +Switching from the *custom* profile to a built-in profile requires an +intermediate step of removing the custom settings from the central config +database for the changes to take effect. + +The following sequence of commands can be used to switch to a built-in profile: + +#. Set the desired built-in profile using: + + .. prompt:: bash # + + ceph config set osd + + For example, to set the built-in profile to ``high_client_ops`` on all + OSDs, run the following command: + + .. prompt:: bash # + + ceph config set osd osd_mclock_profile high_client_ops +#. Determine the existing custom mClock configuration settings in the central + config database using the following command: + + .. prompt:: bash # + + ceph config dump +#. Remove the custom mClock configuration settings determined in the previous + step from the central config database: + + .. prompt:: bash # + + ceph config rm osd + + For example, to remove the configuration option + :confval:`osd_mclock_scheduler_client_res` that was set on all OSDs, run the + following command: + + .. prompt:: bash # + + ceph config rm osd osd_mclock_scheduler_client_res +#. After all existing custom mClock configuration settings have been removed + from the central config database, the configuration settings pertaining to + ``high_client_ops`` will come into effect. For e.g., to verify the settings + on osd.0 use: + + .. 
prompt:: bash # + + ceph config show osd.0 + +Switch Temporarily Between mClock Profiles +------------------------------------------ + +To switch between mClock profiles on a temporary basis, the following commands +may be used to override the settings: + +.. warning:: This section is for advanced users or for experimental testing. The + recommendation is to not use the below commands on a running cluster as it + could have unexpected outcomes. + +.. note:: The configuration changes on an OSD using the below commands are + ephemeral and are lost when it restarts. It is also important to note that + the config options overridden using the below commands cannot be modified + further using the *ceph config set osd.N ...* command. The changes will not + take effect until a given OSD is restarted. This is intentional, as per the + config subsystem design. However, any further modification can still be made + ephemerally using the commands mentioned below. + +#. Run the *injectargs* command as shown to override the mclock settings: + + .. prompt:: bash # + + ceph tell osd.N injectargs '--=' + + For example, the following command overrides the + :confval:`osd_mclock_profile` option on osd.0: + + .. prompt:: bash # + + ceph tell osd.0 injectargs '--osd_mclock_profile=high_recovery_ops' + + +#. An alternate command that can be used is: + + .. prompt:: bash # + + ceph daemon osd.N config set + + For example, the following command overrides the + :confval:`osd_mclock_profile` option on osd.0: + + .. prompt:: bash # + + ceph daemon osd.0 config set osd_mclock_profile high_recovery_ops + +The individual QoS-related config options for the *custom* profile can also be +modified ephemerally using the above commands. 
+ + OSD Capacity Determination (Automated) ====================================== diff --git a/ceph/doc/rados/operations/operating.rst b/ceph/doc/rados/operations/operating.rst index 5a9353fdb..60855a8ea 100644 --- a/ceph/doc/rados/operations/operating.rst +++ b/ceph/doc/rados/operations/operating.rst @@ -10,87 +10,107 @@ Running Ceph with systemd For all distributions that support systemd (CentOS 7, Fedora, Debian Jessie 8 and later, SUSE), ceph daemons are now managed using native -systemd files instead of the legacy sysvinit scripts. For example:: +systemd files instead of the legacy sysvinit scripts. For example: - sudo systemctl start ceph.target # start all daemons - sudo systemctl status ceph-osd@12 # check status of osd.12 +.. prompt:: bash $ -To list the Ceph systemd units on a node, execute:: + sudo systemctl start ceph.target # start all daemons + sudo systemctl status ceph-osd@12 # check status of osd.12 - sudo systemctl status ceph\*.service ceph\*.target +To list the Ceph systemd units on a node, execute: + +.. prompt:: bash $ + + sudo systemctl status ceph\*.service ceph\*.target Starting all Daemons -------------------- To start all daemons on a Ceph Node (irrespective of type), execute the -following:: +following: + +.. prompt:: bash $ - sudo systemctl start ceph.target + sudo systemctl start ceph.target Stopping all Daemons -------------------- To stop all daemons on a Ceph Node (irrespective of type), execute the -following:: +following: - sudo systemctl stop ceph\*.service ceph\*.target +.. prompt:: bash $ + + sudo systemctl stop ceph\*.service ceph\*.target Starting all Daemons by Type ---------------------------- To start all daemons of a particular type on a Ceph Node, execute one of the -following:: +following: + +.. 
prompt:: bash $ - sudo systemctl start ceph-osd.target - sudo systemctl start ceph-mon.target - sudo systemctl start ceph-mds.target + sudo systemctl start ceph-osd.target + sudo systemctl start ceph-mon.target + sudo systemctl start ceph-mds.target Stopping all Daemons by Type ---------------------------- To stop all daemons of a particular type on a Ceph Node, execute one of the -following:: +following: - sudo systemctl stop ceph-mon\*.service ceph-mon.target - sudo systemctl stop ceph-osd\*.service ceph-osd.target - sudo systemctl stop ceph-mds\*.service ceph-mds.target +.. prompt:: bash $ + + sudo systemctl stop ceph-mon\*.service ceph-mon.target + sudo systemctl stop ceph-osd\*.service ceph-osd.target + sudo systemctl stop ceph-mds\*.service ceph-mds.target Starting a Daemon ----------------- To start a specific daemon instance on a Ceph Node, execute one of the -following:: +following: + +.. prompt:: bash $ + + sudo systemctl start ceph-osd@{id} + sudo systemctl start ceph-mon@{hostname} + sudo systemctl start ceph-mds@{hostname} - sudo systemctl start ceph-osd@{id} - sudo systemctl start ceph-mon@{hostname} - sudo systemctl start ceph-mds@{hostname} +For example: -For example:: +.. prompt:: bash $ - sudo systemctl start ceph-osd@1 - sudo systemctl start ceph-mon@ceph-server - sudo systemctl start ceph-mds@ceph-server + sudo systemctl start ceph-osd@1 + sudo systemctl start ceph-mon@ceph-server + sudo systemctl start ceph-mds@ceph-server Stopping a Daemon ----------------- To stop a specific daemon instance on a Ceph Node, execute one of the -following:: +following: + +.. prompt:: bash $ + + sudo systemctl stop ceph-osd@{id} + sudo systemctl stop ceph-mon@{hostname} + sudo systemctl stop ceph-mds@{hostname} - sudo systemctl stop ceph-osd@{id} - sudo systemctl stop ceph-mon@{hostname} - sudo systemctl stop ceph-mds@{hostname} +For example: -For example:: +.. 
prompt:: bash $ - sudo systemctl stop ceph-osd@1 - sudo systemctl stop ceph-mon@ceph-server - sudo systemctl stop ceph-mds@ceph-server + sudo systemctl stop ceph-osd@1 + sudo systemctl stop ceph-mon@ceph-server + sudo systemctl stop ceph-mds@ceph-server .. index:: sysvinit; operating a cluster diff --git a/ceph/doc/rados/operations/placement-groups.rst b/ceph/doc/rados/operations/placement-groups.rst index d8d1a532b..c471ff8bc 100644 --- a/ceph/doc/rados/operations/placement-groups.rst +++ b/ceph/doc/rados/operations/placement-groups.rst @@ -143,16 +143,21 @@ example, a pool that maps to OSDs of class `ssd` and a pool that maps to OSDs of class `hdd` will each have optimal PG counts that depend on the number of those respective device types. +In the case where a pool uses OSDs under two or more CRUSH roots, e.g., (shadow +trees with both `ssd` and `hdd` devices), the autoscaler will +issue a warning to the user in the manager log stating the name of the pool +and the set of roots that overlap each other. The autoscaler will not +scale any pools with overlapping roots because this can cause problems +with the scaling process. We recommend making each pool belong to only +one root (one OSD class) to get rid of the warning and ensure a successful +scaling process. + The autoscaler uses the `bulk` flag to determine which pool should start out with a full complement of PGs and only scales down when the usage ratio across the pool is not even. However, if the pool doesn't have the `bulk` flag, the pool will start out with minimal PGs and only when there is more usage in the pool. -The autoscaler identifies any overlapping roots and prevents the pools -with such roots from scaling because overlapping roots can cause problems -with the scaling process. 
- To create pool with `bulk` flag:: ceph osd pool create --bulk diff --git a/ceph/doc/radosgw/STS.rst b/ceph/doc/radosgw/STS.rst index b4cd67148..f0729680b 100644 --- a/ceph/doc/radosgw/STS.rst +++ b/ceph/doc/radosgw/STS.rst @@ -107,6 +107,15 @@ Examples those credentials. In this example, TESTER1 assumes a role created by TESTER, to access S3 resources owned by TESTER, according to the permission policy attached to the role. +.. code-block:: console + + radosgw-admin caps add --uid="TESTER" --caps="roles=*" + +2. The following is an example of the AssumeRole API call, which shows steps to create a role, assign a policy to it + (that allows access to S3 resources), assuming a role to get temporary credentials and accessing S3 resources using + those credentials. In this example, TESTER1 assumes a role created by TESTER, to access S3 resources owned by TESTER, + according to the permission policy attached to the role. + .. code-block:: python import boto3 @@ -286,4 +295,4 @@ Steps for integrating Radosgw with Keycloak can be found here STSLite ======= STSLite has been built on STS, and documentation for the same can be found here -:doc:`STSLite`. \ No newline at end of file +:doc:`STSLite`. diff --git a/ceph/doc/radosgw/STSLite.rst b/ceph/doc/radosgw/STSLite.rst index c78c14e50..f5dae7050 100644 --- a/ceph/doc/radosgw/STSLite.rst +++ b/ceph/doc/radosgw/STSLite.rst @@ -35,7 +35,7 @@ Parameters: **TokenCode** (String/ Optional): The value provided by the MFA device, if MFA is required. An administrative user needs to attach a policy to allow invocation of GetSessionToken API using its permanent -credentials and to allow subsequent s3 operations invocation using only the temporary credentials returned +credentials and to allow subsequent S3 operations invocation using only the temporary credentials returned by GetSessionToken. The user attaching the policy needs to have admin caps. 
For example:: diff --git a/ceph/doc/radosgw/config-ref.rst b/ceph/doc/radosgw/config-ref.rst index 295fa8ce2..9d127aa04 100644 --- a/ceph/doc/radosgw/config-ref.rst +++ b/ceph/doc/radosgw/config-ref.rst @@ -229,6 +229,22 @@ HashiCorp Vault Settings .. confval:: rgw_crypt_vault_secret_engine .. confval:: rgw_crypt_vault_namespace +SSE-S3 Settings +=============== + +.. confval:: rgw_crypt_sse_s3_backend +.. confval:: rgw_crypt_sse_s3_vault_secret_engine +.. confval:: rgw_crypt_sse_s3_key_template +.. confval:: rgw_crypt_sse_s3_vault_auth +.. confval:: rgw_crypt_sse_s3_vault_token_file +.. confval:: rgw_crypt_sse_s3_vault_addr +.. confval:: rgw_crypt_sse_s3_vault_prefix +.. confval:: rgw_crypt_sse_s3_vault_namespace +.. confval:: rgw_crypt_sse_s3_vault_verify_ssl +.. confval:: rgw_crypt_sse_s3_vault_ssl_cacert +.. confval:: rgw_crypt_sse_s3_vault_ssl_clientcert +.. confval:: rgw_crypt_sse_s3_vault_ssl_clientkey + QoS settings ------------ diff --git a/ceph/doc/radosgw/encryption.rst b/ceph/doc/radosgw/encryption.rst index 2b51e088f..7cada356d 100644 --- a/ceph/doc/radosgw/encryption.rst +++ b/ceph/doc/radosgw/encryption.rst @@ -25,13 +25,14 @@ keys and remember which key was used to encrypt each object. This is implemented in S3 according to the `Amazon SSE-C`_ specification. -As all key management is handled by the client, no special configuration is -needed to support this encryption mode. +As all key management is handled by the client, no special Ceph configuration +is needed to support this encryption mode. Key Management Service ====================== -This mode allows keys to be stored in a secure key management service and +In this mode, an administrator stores keys in a secure key management service. +These keys are then retrieved on demand by the Ceph Object Gateway to serve requests to encrypt or decrypt data. @@ -43,12 +44,26 @@ integration with `Barbican`_, `Vault`_, and `KMIP`_ are implemented. 
See `OpenStack Barbican Integration`_, `HashiCorp Vault Integration`_, and `KMIP Integration`_. +SSE-S3 +====== + +This makes key management invisible to the user. Keys are still stored +in vault, but they are automatically created and deleted by Ceph, and +retrieved as required to serve requests to encrypt +or decrypt data. + +This is implemented in S3 according to the `Amazon SSE-S3`_ specification. + +In principle, any key management service could be used here. Currently +only integration with `Vault`_ is implemented. + +See `HashiCorp Vault Integration`_. + +Bucket Encryption APIs ====================== Bucket Encryption APIs to support server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). -SSE-KMS implementation via BucketEncryption APIs is not supported yet. See `PutBucketEncryption`_, `GetBucketEncryption`_, `DeleteBucketEncryption`_ @@ -69,6 +84,7 @@ The configuration expects a base64-encoded 256 bit key. For example:: .. _Amazon SSE-C: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html .. _Amazon SSE-KMS: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html +.. _Amazon SSE-S3: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html .. _Barbican: https://wiki.openstack.org/wiki/Barbican .. _Vault: https://www.vaultproject.io/docs/ .. _KMIP: http://www.oasis-open.org/committees/kmip/ diff --git a/ceph/doc/radosgw/vault.rst b/ceph/doc/radosgw/vault.rst index e21b639e9..a5b51b9bd 100644 --- a/ceph/doc/radosgw/vault.rst +++ b/ceph/doc/radosgw/vault.rst @@ -111,7 +111,7 @@ Token policies for the object gateway All Vault tokens have powers as specified by the polices attached to that token. Multiple policies may be associated with one -token. You should only use the policy necessary for your +token. You should only use the policies necessary for your configuration. 
When using the kv secret engine with the object gateway:: @@ -156,6 +156,18 @@ transit secret engine, you might need the following policy:: } EOF +If you are using both sse-kms and sse-s3, then you should point +each to separate containers. You could either use separate +vault instances, or separately mounted +transit instances, or different branches under a common transit +mount point. If you are not using separate vault instances, you can +use these to point kms and sse-s3 to separate containers: +``rgw_crypt_vault_prefix`` +and/or +``rgw_crypt_sse_s3_vault_prefix``. +When granting vault permissions to sse-kms bucket owners, you should +not give them permission to muck around with sse-s3 keys; +only ceph itself should be doing that. Token authentication -------------------- diff --git a/ceph/doc/rbd/rbd-mirroring.rst b/ceph/doc/rbd/rbd-mirroring.rst index 464ed2ce9..74a2a364e 100644 --- a/ceph/doc/rbd/rbd-mirroring.rst +++ b/ceph/doc/rbd/rbd-mirroring.rst @@ -326,7 +326,7 @@ For example:: $ rbd --cluster site-a mirror image snapshot image-pool/image-1 -By default only ``3`` mirror-snapshots will be created per-image. The most +By default up to ``5`` mirror-snapshots will be created per-image. The most recent mirror-snapshot is automatically pruned if the limit is reached. The limit can be overridden via the ``rbd_mirroring_max_mirroring_snapshots`` configuration option if required. Additionally, mirror-snapshots are diff --git a/ceph/doc/start/documenting-ceph.rst b/ceph/doc/start/documenting-ceph.rst index 1ab3a8717..7df48d02c 100644 --- a/ceph/doc/start/documenting-ceph.rst +++ b/ceph/doc/start/documenting-ceph.rst @@ -30,12 +30,14 @@ repository. Python Sphinx renders the source into HTML and manpages. 
Viewing Old Ceph Documentation ============================== -The https://docs.ceph.com link displays the ``master`` branch by default, but -you can view the documentation for older versions of Ceph (e.g., ``mimic``) by -replacing the version name in the url (e.g. ``pacific`` in -`https://docs.ceph.com/en/pacific `_) with the -branch name you prefer (e.g. ``mimic``, to create a URL that reads -`https://docs.ceph.com/en/mimic/ `_). +The https://docs.ceph.com link displays the latest release branch by default +(for example, if "Quincy" is the most recent release, then by default +https://docs.ceph.com displays the documentation for Quincy), but you can view +the documentation for older versions of Ceph (for example, ``pacific``) by +replacing the version name in the url (for example, ``quincy`` in +`https://docs.ceph.com/en/pacific `_) with the +branch name you prefer (for example, ``pacific``, to create a URL that reads +`https://docs.ceph.com/en/pacific/ `_). .. _making_contributions: @@ -140,13 +142,13 @@ Select a Branch --------------- When you make small changes to the documentation, such as fixing typographical -errors or clarifying explanations, use the ``master`` branch (default). You -should also use the ``master`` branch when making contributions to features that -are in the current release. ``master`` is the most commonly used branch. : +errors or clarifying explanations, use the ``main`` branch (default). You +should also use the ``main`` branch when making contributions to features that +are in the current release. ``main`` is the most commonly used branch. : .. prompt:: bash $ - git checkout master + git checkout main When you make changes to documentation that affect an upcoming release, use the ``next`` branch. ``next`` is the second most commonly used branch. : @@ -158,7 +160,7 @@ the ``next`` branch. ``next`` is the second most commonly used branch. 
: When you are making substantial contributions such as new features that are not yet in the current release; if your contribution is related to an issue with a tracker ID; or, if you want to see your documentation rendered on the Ceph.com -website before it gets merged into the ``master`` branch, you should create a +website before it gets merged into the ``main`` branch, you should create a branch. To distinguish branches that include only documentation updates, we prepend them with ``wip-doc`` by convention, following the form ``wip-doc-{your-branch-name}``. If the branch relates to an issue filed in diff --git a/ceph/doc/start/hardware-recommendations.rst b/ceph/doc/start/hardware-recommendations.rst index b6c47502c..d922c03dc 100644 --- a/ceph/doc/start/hardware-recommendations.rst +++ b/ceph/doc/start/hardware-recommendations.rst @@ -21,21 +21,30 @@ data cluster (e.g., OpenStack, CloudStack, etc). CPU === -CephFS metadata servers are CPU intensive, so they should have significant -processing power (e.g., quad core or better CPUs) and benefit from higher clock -rate (frequency in GHz). Ceph OSDs run the :term:`RADOS` service, calculate -data placement with :term:`CRUSH`, replicate data, and maintain their own copy of the -cluster map. Therefore, OSD nodes should have a reasonable amount of processing -power. Requirements vary by use-case; a starting point might be one core per -OSD for light / archival usage, and two cores per OSD for heavy workloads such -as RBD volumes attached to VMs. Monitor / manager nodes do not have heavy CPU -demands so a modest processor can be chosen for them. Also consider whether the -host machine will run CPU-intensive processes in addition to Ceph daemons. For -example, if your hosts will run computing VMs (e.g., OpenStack Nova), you will -need to ensure that these other processes leave sufficient processing power for -Ceph daemons. 
We recommend running additional CPU-intensive processes on -separate hosts to avoid resource contention. - +CephFS metadata servers (MDS) are CPU-intensive. CephFS metadata servers (MDS) +should therefore have quad-core (or better) CPUs and high clock rates (GHz). OSD +nodes need enough processing power to run the RADOS service, to calculate data +placement with CRUSH, to replicate data, and to maintain their own copies of the +cluster map. + +The requirements of one Ceph cluster are not the same as the requirements of +another, but here are some general guidelines. + +In earlier versions of Ceph, we would make hardware recommendations based on +the number of cores per OSD, but this cores-per-OSD metric is no longer as +useful a metric as the number of cycles per IOP and the number of IOPs per OSD. +For example, for NVMe drives, Ceph can easily utilize five or six cores on real +clusters and up to about fourteen cores on single OSDs in isolation. So cores +per OSD are no longer as pressing a concern as they were. When selecting +hardware, select for IOPs per core. + +Monitor nodes and manager nodes have no heavy CPU demands and require only +modest processors. If your host machines will run CPU-intensive processes in +addition to Ceph daemons, make sure that you have enough processing power to +run both the CPU-intensive processes and the Ceph daemons. (OpenStack Nova is +one such example of a CPU-intensive process.) We recommend that you run +non-Ceph CPU-intensive processes on separate hosts (that is, on hosts that are +not your monitor and manager nodes) in order to avoid resource contention. RAM === @@ -340,34 +349,50 @@ multiple OSDs per host. Networks ======== -Provision at least 10Gbps+ networking in your racks. Replicating 1TB of data -across a 1Gbps network takes 3 hours, and 10TBs takes 30 hours! By contrast, -with a 10Gbps network, the replication times would be 20 minutes and 1 hour -respectively. 
In a petabyte-scale cluster, failure of an OSD drive is an -expectation, not an exception. System administrators will appreciate PGs -recovering from a ``degraded`` state to an ``active + clean`` state as rapidly -as possible, with price / performance tradeoffs taken into consideration. -Additionally, some deployment tools employ VLANs to make hardware and network -cabling more manageable. VLANs using 802.1q protocol require VLAN-capable NICs -and Switches. The added hardware expense may be offset by the operational cost -savings for network setup and maintenance. When using VLANs to handle VM +Provision at least 10 Gb/s networking in your racks. + +Speed +----- + +It takes three hours to replicate 1 TB of data across a 1 Gb/s network and it +takes thirty hours to replicate 10 TB across a 1 Gb/s network. But it takes only +twenty minutes to replicate 1 TB across a 10 Gb/s network, and it takes +only one hour to replicate 10 TB across a 10 Gb/s network. + +Cost +---- + +The larger the Ceph cluster, the more common OSD failures will be. +The faster that a placement group (PG) can recover from a ``degraded`` state to +an ``active + clean`` state, the better. Notably, fast recovery minimizes +the likelihood of multiple, overlapping failures that can cause data to become +temporarily unavailable or even lost. Of course, when provisioning your +network, you will have to balance price against performance. + +Some deployment tools employ VLANs to make hardware and network cabling more +manageable. VLANs that use the 802.1q protocol require VLAN-capable NICs and +switches. The added expense of this hardware may be offset by the operational +cost savings on network setup and maintenance. When using VLANs to handle VM traffic between the cluster and compute stacks (e.g., OpenStack, CloudStack, -etc.), there is additional value in using 10G Ethernet or better; 40Gb or -25/50/100 Gb networking as of 2020 is common for production clusters. 
+etc.), there is additional value in using 10 Gb/s Ethernet or better; 40 Gb/s or +25/50/100 Gb/s networking as of 2022 is common for production clusters. + +Top-of-rack (TOR) switches also need fast and redundant uplinks to +spine switches / routers, often at least 40 Gb/s. -Top-of-rack routers for each network also need to be able to communicate with -spine routers that have even faster throughput, often 40Gbp/s or more. +Baseboard Management Controller (BMC) +------------------------------------- -Your server hardware should have a Baseboard Management Controller (BMC). +Your server chassis should have a Baseboard Management Controller (BMC). +Well-known examples are iDRAC (Dell), CIMC (Cisco UCS), and iLO (HPE). Administration and deployment tools may also use BMCs extensively, especially -via IPMI or Redfish, so consider -the cost/benefit tradeoff of an out-of-band network for administration. -Hypervisor SSH access, VM image uploads, OS image installs, management sockets, -etc. can impose significant loads on a network. Running three networks may seem -like overkill, but each traffic path represents a potential capacity, throughput -and/or performance bottleneck that you should carefully consider before -deploying a large scale data cluster. +via IPMI or Redfish, so consider the cost/benefit tradeoff of an out-of-band +network for security and administration. Hypervisor SSH access, VM image uploads, +OS image installs, management sockets, etc. can impose significant loads on a network. +Running three networks may seem like overkill, but each traffic path represents +a potential capacity, throughput and/or performance bottleneck that you should +carefully consider before deploying a large scale data cluster. Failure Domains @@ -440,10 +465,11 @@ and development clusters can run successfully with modest hardware. - - +.. _block and block.db: https://docs.ceph.com/en/latest/rados/configuration/bluestore-config-ref/#block-and-block-db .. 
_Ceph blog: https://ceph.com/community/blog/ .. _Ceph Write Throughput 1: http://ceph.com/community/ceph-performance-part-1-disk-controller-write-throughput/ .. _Ceph Write Throughput 2: http://ceph.com/community/ceph-performance-part-2-write-throughput-without-ssd-journals/ .. _Mapping Pools to Different Types of OSDs: ../../rados/operations/crush-map#placing-different-pools-on-different-osds .. _OS Recommendations: ../os-recommendations +.. _Storage Networking Industry Association's Total Cost of Ownership calculator: https://www.snia.org/forums/cmsi/programs/TCOcalc +.. _Werner Fischer's blog post on partition alignment: https://www.thomas-krenn.com/en/wiki/Partition_Alignment_detailed_explanation diff --git a/ceph/doc/start/intro.rst b/ceph/doc/start/intro.rst index 1f1eb9b38..e0e944b87 100644 --- a/ceph/doc/start/intro.rst +++ b/ceph/doc/start/intro.rst @@ -34,7 +34,7 @@ required when running Ceph File System clients. `REST API`_. At least two managers are normally required for high availability. -- **Ceph OSDs**: A :term:`Ceph OSD` (object storage daemon, +- **Ceph OSDs**: An Object Storage Daemon (:term:`Ceph OSD`, ``ceph-osd``) stores data, handles data replication, recovery, rebalancing, and provides some monitoring information to Ceph Monitors and Managers by checking other Ceph OSD Daemons for a @@ -49,10 +49,10 @@ required when running Ceph File System clients. Ceph Storage Cluster. Ceph stores data as objects within logical storage pools. Using the -:term:`CRUSH` algorithm, Ceph calculates which placement group should -contain the object, and further calculates which Ceph OSD Daemon -should store the placement group. The CRUSH algorithm enables the -Ceph Storage Cluster to scale, rebalance, and recover dynamically. +:term:`CRUSH` algorithm, Ceph calculates which placement group (PG) should +contain the object, and which OSD should store the placement group. 
The +CRUSH algorithm enables the Ceph Storage Cluster to scale, rebalance, and +recover dynamically. .. _REST API: ../../mgr/restful diff --git a/ceph/install-deps.sh b/ceph/install-deps.sh index 6f4cd178f..d33815641 100755 --- a/ceph/install-deps.sh +++ b/ceph/install-deps.sh @@ -217,19 +217,6 @@ function install_libzbd_on_ubuntu { libzbd-dev } -function install_libpmem_on_ubuntu { - local codename=$1 - local project=pmem - local sha1=7c18b4b1413ae965ea8bcbfc69eb9784f9212319 - install_pkg_on_ubuntu \ - $project \ - $sha1 \ - $codename \ - check \ - libpmem-dev \ - libpmemobj-dev -} - function version_lt { test $1 != $(echo -e "$1\n$2" | sort -rV | head -n 1) } @@ -305,7 +292,6 @@ else [ $WITH_SEASTAR ] && with_seastar=true || with_seastar=false [ $WITH_JAEGER ] && with_jaeger=true || with_jaeger=false [ $WITH_ZBD ] && with_zbd=true || with_zbd=false - [ $WITH_PMEM ] && with_pmem=true || with_pmem=false source /etc/os-release case "$ID" in debian|ubuntu|devuan|elementary|softiron) @@ -322,7 +308,6 @@ else *Focal*) [ ! $NO_BOOST_PKGS ] && install_boost_on_ubuntu focal $with_zbd && install_libzbd_on_ubuntu focal - $with_pmem && install_libpmem_on_ubuntu focal ;; *) $SUDO apt-get install -y gcc @@ -372,7 +357,7 @@ else ;; centos|rhel|ol|virtuozzo) MAJOR_VERSION="$(echo $VERSION_ID | cut -d. -f1)" - $SUDO dnf install -y dnf-utils + $SUDO dnf install -y dnf-utils selinux-policy-targeted rpm --quiet --query epel-release || \ $SUDO dnf -y install --nogpgcheck https://dl.fedoraproject.org/pub/epel/epel-release-latest-$MAJOR_VERSION.noarch.rpm $SUDO rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-$MAJOR_VERSION @@ -396,7 +381,7 @@ else $SUDO $builddepcmd $DIR/ceph.spec 2>&1 | tee $DIR/yum-builddep.out [ ${PIPESTATUS[0]} -ne 0 ] && exit 1 IGNORE_YUM_BUILDEP_ERRORS="ValueError: SELinux policy is not managed or store cannot be accessed." 
- sed "/$IGNORE_YUM_BUILDEP_ERRORS/d" $DIR/yum-builddep.out | grep -qi "error:" && exit 1 + sed "/$IGNORE_YUM_BUILDEP_ERRORS/d" $DIR/yum-builddep.out | grep -i "error:" && exit 1 ;; opensuse*|suse|sles) echo "Using zypper to install dependencies" diff --git a/ceph/monitoring/ceph-mixin/CMakeLists.txt b/ceph/monitoring/ceph-mixin/CMakeLists.txt index 66e620f6c..e63c740b7 100644 --- a/ceph/monitoring/ceph-mixin/CMakeLists.txt +++ b/ceph/monitoring/ceph-mixin/CMakeLists.txt @@ -11,17 +11,6 @@ if(WITH_GRAFANA) include(AddCephTest) set(CEPH_BUILD_VIRTUALENV ${CMAKE_BINARY_DIR}) - add_test(NAME jsonnet-build - COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/jsonnet-build.sh ${CMAKE_CURRENT_BINARY_DIR}) - set_property(TEST jsonnet-build PROPERTY - FIXTURES_SETUP jsonnet) - add_test(NAME jsonnet-cleanup - COMMAND rm -rf go-jsonnet - ${CMAKE_CURRENT_BINARY_DIR}/jsonnet - ${CMAKE_CURRENT_BINARY_DIR}/jsonnetfmt) - set_property(TEST jsonnet-cleanup PROPERTY - FIXTURES_CLEANUP jsonnet) - add_test(NAME jsonnet-bundler-build COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/jsonnet-bundler-build.sh ${CMAKE_CURRENT_BINARY_DIR}) set_property(TEST jsonnet-bundler-build PROPERTY @@ -34,10 +23,10 @@ if(WITH_GRAFANA) add_tox_test(grafana-lint TOX_ENVS lint) add_tox_test(jsonnet-lint TOX_ENVS jsonnet-lint) set_property(TEST run-tox-jsonnet-lint PROPERTY - FIXTURES_REQUIRED venv-for-jsonnet-lint jsonnet) + FIXTURES_REQUIRED venv-for-jsonnet-lint) add_tox_test(jsonnet-check TOX_ENVS jsonnet-check) set_property(TEST run-tox-jsonnet-check PROPERTY - FIXTURES_REQUIRED venv-for-jsonnet-check jsonnet jsonnet-bundler) + FIXTURES_REQUIRED venv-for-jsonnet-check jsonnet-bundler) add_tox_test(alerts-check TOX_ENVS alerts-check) add_tox_test(alerts-lint TOX_ENVS alerts-lint) diff --git a/ceph/monitoring/ceph-mixin/Makefile b/ceph/monitoring/ceph-mixin/Makefile index 44575b77e..0cb7aa539 100644 --- a/ceph/monitoring/ceph-mixin/Makefile +++ b/ceph/monitoring/ceph-mixin/Makefile @@ -8,7 +8,7 @@ generate: dashboards_out vendor: 
jsonnetfile.lock.json tox -ejsonnet-bundler-install -dashboards_out: vendor $(JSONNETS_FILES) +dashboards_out: vendor dashboards tox -ejsonnet-fix lint: diff --git a/ceph/monitoring/ceph-mixin/README.md b/ceph/monitoring/ceph-mixin/README.md index fd0fe95ed..4b8b43035 100644 --- a/ceph/monitoring/ceph-mixin/README.md +++ b/ceph/monitoring/ceph-mixin/README.md @@ -5,7 +5,7 @@ All the Grafana dashboards are already generated in the `dashboards_out` directory and alerts in the `prometheus_alerts.yml` file. You can use the Grafana dashboards and alerts with Jsonnet like any other -prometheus mixin. You can find more ressources about mixins in general on +prometheus mixin. You can find more resources about mixins in general on [monitoring.mixins.dev](https://monitoring.mixins.dev/). ### Grafana dashboards for Ceph @@ -26,11 +26,20 @@ plugin](http://docs.ceph.com/en/latest/mgr/prometheus/) and the ### Prometheus alerts -In `prometheus_alerts.yml` you'll find a set of Prometheus +In `prometheus_alerts.libsonnet` you'll find a set of Prometheus alert rules that should provide a decent set of default alerts for a -Ceph cluster. Just put this file in a place according to your Prometheus +Ceph cluster. After building them with jsonnet, put this file in place according to your Prometheus configuration (wherever the `rules` configuration stanza points). +### Multi-cluster support +Ceph-mixin supports dashboards and alerts across multiple clusters. +To enable this feature you need to configure the following in `config.libsonnet`: + +``` +showMultiCluster: true, +clusterLabel: '', +``` + #### SNMP Ceph provides a MIB (CEPH-PROMETHEUS-ALERT-MIB.txt) to support sending Prometheus alerts through to an SNMP management platform. 
The translation from Prometheus diff --git a/ceph/monitoring/ceph-mixin/alerts.jsonnet b/ceph/monitoring/ceph-mixin/alerts.jsonnet new file mode 100644 index 000000000..43826eedd --- /dev/null +++ b/ceph/monitoring/ceph-mixin/alerts.jsonnet @@ -0,0 +1 @@ +std.manifestYamlDoc((import 'alerts.libsonnet').prometheusAlerts, indent_array_in_object=true, quote_keys=false) diff --git a/ceph/monitoring/ceph-mixin/alerts.libsonnet b/ceph/monitoring/ceph-mixin/alerts.libsonnet index 9c759938a..c2d39e2d3 100644 --- a/ceph/monitoring/ceph-mixin/alerts.libsonnet +++ b/ceph/monitoring/ceph-mixin/alerts.libsonnet @@ -1,3 +1,4 @@ { - prometheusAlerts+:: std.parseYaml(importstr 'prometheus_alerts.yml'), + prometheusAlerts+:: (import 'prometheus_alerts.libsonnet') + + { _config:: $._config }, } diff --git a/ceph/monitoring/ceph-mixin/config.libsonnet b/ceph/monitoring/ceph-mixin/config.libsonnet index 0967ef424..7ee1210b0 100644 --- a/ceph/monitoring/ceph-mixin/config.libsonnet +++ b/ceph/monitoring/ceph-mixin/config.libsonnet @@ -1 +1,11 @@ -{} +{ + _config+:: { + dashboardTags: ['ceph-mixin'], + + clusterLabel: 'cluster', + showMultiCluster: false, + + CephNodeNetworkPacketDropsThreshold: 0.005, + CephNodeNetworkPacketDropsPerSec: 10, + }, +} diff --git a/ceph/monitoring/ceph-mixin/dashboards.libsonnet b/ceph/monitoring/ceph-mixin/dashboards.libsonnet new file mode 100644 index 000000000..5cae18329 --- /dev/null +++ b/ceph/monitoring/ceph-mixin/dashboards.libsonnet @@ -0,0 +1,10 @@ +{ + grafanaDashboards+:: + (import 'dashboards/cephfs.libsonnet') + + (import 'dashboards/host.libsonnet') + + (import 'dashboards/osd.libsonnet') + + (import 'dashboards/pool.libsonnet') + + (import 'dashboards/rbd.libsonnet') + + (import 'dashboards/rgw.libsonnet') + + { _config:: $._config }, +} diff --git a/ceph/monitoring/ceph-mixin/dashboards/cephfs.libsonnet b/ceph/monitoring/ceph-mixin/dashboards/cephfs.libsonnet index 3d09a5453..d12d9f4dd 100644 --- 
a/ceph/monitoring/ceph-mixin/dashboards/cephfs.libsonnet +++ b/ceph/monitoring/ceph-mixin/dashboards/cephfs.libsonnet @@ -1,101 +1,89 @@ local g = import 'grafonnet/grafana.libsonnet'; -local u = import 'utils.libsonnet'; -{ - grafanaDashboards+:: { - 'cephfs-overview.json': - local CephfsOverviewGraphPanel(title, formatY1, labelY1, expr, legendFormat, x, y, w, h) = - u.graphPanelSchema({}, - title, - '', - 'null', - false, - formatY1, - 'short', - labelY1, - null, - 0, - 1, - '$datasource') - .addTargets( - [u.addTargetSchema(expr, legendFormat)] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; - - u.dashboardSchema( - 'MDS Performance', - '', - 'tbO9LAiZz', - 'now-1h', - '15s', - 16, - [], - '', - { - refresh_intervals: ['5s', '10s', '15s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], - time_options: ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d'], - } - ) - .addAnnotation( - u.addAnnotationSchema( - 1, - '-- Grafana --', - true, - true, - 'rgba(0, 211, 255, 1)', - 'Annotations & Alerts', - 'dashboard' - ) - ) - .addRequired( - type='grafana', id='grafana', name='Grafana', version='5.3.2' +(import 'utils.libsonnet') { + 'cephfs-overview.json': + $.dashboardSchema( + 'MDS Performance', + '', + 'tbO9LAiZz', + 'now-1h', + '30s', + 16, + $._config.dashboardTags, + '' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, + '-- Grafana --', + true, + true, + 'rgba(0, 211, 255, 1)', + 'Annotations & Alerts', + 'dashboard' ) - .addRequired( - type='panel', id='graph', name='Graph', version='5.0.0' - ) - .addTemplate( - g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') - ) - .addTemplate( - u.addTemplateSchema('mds_servers', - '$datasource', - 'label_values(ceph_mds_inodes, ceph_daemon)', - 1, - true, - 1, - 'MDS Server', - '') + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.3.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + 
g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addTemplate( + $.addTemplateSchema('mds_servers', + '$datasource', + 'label_values(ceph_mds_inodes{%(matchers)s}, ceph_daemon)' % $.matchers(), + 1, + true, + 1, + 'MDS Server', + '') + ) + .addPanels([ + $.addRowSchema(false, true, 'MDS Performance') + { gridPos: { x: 0, y: 0, w: 24, h: 1 } }, + $.simpleGraphPanel( + {}, + 'MDS Workload - $mds_servers', + '', + 'none', + 'Reads(-) / Writes (+)', + 0, + 'sum(rate(ceph_objecter_op_r{%(matchers)s, ceph_daemon=~"($mds_servers).*"}[$__rate_interval]))' % $.matchers(), + 'Read Ops', + 0, + 1, + 12, + 9 ) - .addPanels([ - u.addRowSchema(false, true, 'MDS Performance') + { gridPos: { x: 0, y: 0, w: 24, h: 1 } }, - CephfsOverviewGraphPanel( - 'MDS Workload - $mds_servers', - 'none', - 'Reads(-) / Writes (+)', - 'sum(rate(ceph_objecter_op_r{ceph_daemon=~"($mds_servers).*"}[1m]))', - 'Read Ops', - 0, - 1, - 12, - 9 - ) - .addTarget(u.addTargetSchema( - 'sum(rate(ceph_objecter_op_w{ceph_daemon=~"($mds_servers).*"}[1m]))', - 'Write Ops' - )) - .addSeriesOverride( - { alias: '/.*Reads/', transform: 'negative-Y' } - ), - CephfsOverviewGraphPanel( - 'Client Request Load - $mds_servers', - 'none', - 'Client Requests', - 'ceph_mds_server_handle_client_request{ceph_daemon=~"($mds_servers).*"}', - '{{ceph_daemon}}', - 12, - 1, - 12, - 9 - ), - ]), - }, + .addTarget($.addTargetSchema( + 'sum(rate(ceph_objecter_op_w{%(matchers)s, ceph_daemon=~"($mds_servers).*"}[$__rate_interval]))' % $.matchers(), + 'Write Ops' + )) + .addSeriesOverride( + { alias: '/.*Reads/', transform: 'negative-Y' } + ), + $.simpleGraphPanel( + {}, + 'Client Request Load - $mds_servers', + '', + 'none', + 'Client Requests', + 0, + 'ceph_mds_server_handle_client_request{%(matchers)s, ceph_daemon=~"($mds_servers).*"}' % $.matchers(), + '{{ceph_daemon}}', + 12, + 1, + 12, + 9 + ), + ]), } diff 
--git a/ceph/monitoring/ceph-mixin/dashboards/dashboards.libsonnet b/ceph/monitoring/ceph-mixin/dashboards/dashboards.libsonnet deleted file mode 100644 index 72ca48324..000000000 --- a/ceph/monitoring/ceph-mixin/dashboards/dashboards.libsonnet +++ /dev/null @@ -1,6 +0,0 @@ -(import 'cephfs.libsonnet') + -(import 'host.libsonnet') + -(import 'osd.libsonnet') + -(import 'pool.libsonnet') + -(import 'rbd.libsonnet') + -(import 'rgw.libsonnet') diff --git a/ceph/monitoring/ceph-mixin/dashboards/host.libsonnet b/ceph/monitoring/ceph-mixin/dashboards/host.libsonnet index 06cb66d0a..3e0b31f2c 100644 --- a/ceph/monitoring/ceph-mixin/dashboards/host.libsonnet +++ b/ceph/monitoring/ceph-mixin/dashboards/host.libsonnet @@ -1,549 +1,723 @@ local g = import 'grafonnet/grafana.libsonnet'; -local u = import 'utils.libsonnet'; -{ - grafanaDashboards+:: { - 'hosts-overview.json': - local HostsOverviewSingleStatPanel(format, - title, - description, - valueName, - expr, - instant, - x, - y, - w, - h) = - u.addSingleStatSchema(['#299c46', 'rgba(237, 129, 40, 0.89)', '#d44a3a'], - '$datasource', - format, - title, - description, - valueName, - false, - 100, - false, - false, - '') - .addTarget( - u.addTargetSchema(expr, '', 'time_series', 1, instant) - ) + { gridPos: { x: x, y: y, w: w, h: h } }; - - local HostsOverviewGraphPanel(title, description, formatY1, expr, legendFormat, x, y, w, h) = - u.graphPanelSchema( - {}, title, description, 'null', false, formatY1, 'short', null, null, 0, 1, '$datasource' - ) - .addTargets( - [u.addTargetSchema( - expr, legendFormat - )] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; - - u.dashboardSchema( - 'Host Overview', +(import 'utils.libsonnet') { + 'hosts-overview.json': + $.dashboardSchema( + 'Host Overview', + '', + 'y0KGL0iZz', + 'now-1h', + '30s', + 16, + $._config.dashboardTags, + '', + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.3.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', 
version='5.0.0' + ) + .addRequired( + type='panel', id='singlestat', name='Singlestat', version='5.0.0' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, + '-- Grafana --', + true, + true, + 'rgba(0, 211, 255, 1)', + 'Annotations & Alerts', + 'dashboard' + ) + ) + .addTemplate( + g.template.datasource('datasource', + 'prometheus', + 'default', + label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addTemplate( + $.addTemplateSchema('osd_hosts', + '$datasource', + 'label_values(ceph_disk_occupation{%(matchers)s}, exported_instance)' % $.matchers(), + 1, + true, + 1, + null, + '([^.]*).*') + ) + .addTemplate( + $.addTemplateSchema('mon_hosts', + '$datasource', + 'label_values(ceph_mon_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(), + 1, + true, + 1, + null, + 'mon.(.*)') + ) + .addTemplate( + $.addTemplateSchema('mds_hosts', + '$datasource', + 'label_values(ceph_mds_inodes{%(matchers)s}, ceph_daemon)' % $.matchers(), + 1, + true, + 1, + null, + 'mds.(.*)') + ) + .addTemplate( + $.addTemplateSchema('rgw_hosts', + '$datasource', + 'label_values(ceph_rgw_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(), + 1, + true, + 1, + null, + 'rgw.(.*)') + ) + .addPanels([ + $.simpleSingleStatPanel( + 'none', + 'OSD Hosts', '', - 'y0KGL0iZz', - 'now-1h', - '10s', + 'current', + 'count(sum by (hostname) (ceph_osd_metadata{%(matchers)s}))' % $.matchers(), + true, + 'time_series', + 0, + 0, + 4, + 5 + ), + $.simpleSingleStatPanel( + 'percentunit', + 'AVG CPU Busy', + 'Average CPU busy across all hosts (OSD, RGW, MON etc) within the cluster', + 'current', + ||| + avg(1 - ( + avg by(instance) ( + rate(node_cpu_seconds_total{mode='idle',instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}[$__rate_interval]) or + rate(node_cpu{mode='idle',instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}[$__rate_interval]) + ) + )) + |||, + true, + 'time_series', + 4, + 0, + 4, + 5 + ), + $.simpleSingleStatPanel( + 
'percentunit', + 'AVG RAM Utilization', + 'Average Memory Usage across all hosts in the cluster (excludes buffer/cache usage)', + 'current', + ||| + avg (( + ( + node_memory_MemTotal{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or + node_memory_MemTotal_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} + ) - (( + node_memory_MemFree{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or + node_memory_MemFree_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}) + + ( + node_memory_Cached{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or + node_memory_Cached_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} + ) + ( + node_memory_Buffers{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or + node_memory_Buffers_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} + ) + ( + node_memory_Slab{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or + node_memory_Slab_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} + ) + ) + ) / ( + node_memory_MemTotal{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or + node_memory_MemTotal_bytes{instance=~"($osd_hosts|$rgw_hosts|$mon_hosts|$mds_hosts).*"} + )) + |||, + true, + 'time_series', + 8, + 0, + 4, + 5 + ), + $.simpleSingleStatPanel( + 'none', + 'Physical IOPS', + 'IOPS Load at the device as reported by the OS on all OSD hosts', + 'current', + ||| + sum (( + rate(node_disk_reads_completed{instance=~"($osd_hosts).*"}[$__rate_interval]) or + rate(node_disk_reads_completed_total{instance=~"($osd_hosts).*"}[$__rate_interval]) + ) + ( + rate(node_disk_writes_completed{instance=~"($osd_hosts).*"}[$__rate_interval]) or + rate(node_disk_writes_completed_total{instance=~"($osd_hosts).*"}[$__rate_interval]) + )) + |||, + true, + 'time_series', + 12, + 0, + 4, + 5 + ), + $.simpleSingleStatPanel( + 'percent', + 'AVG Disk Utilization', + 'Average Disk utilization for all OSD data devices 
(i.e. excludes journal/WAL)', + 'current', + ||| + avg ( + label_replace( + (rate(node_disk_io_time_ms[$__rate_interval]) / 10 ) or + (rate(node_disk_io_time_seconds_total[$__rate_interval]) * 100), + "instance", "$1", "instance", "([^.:]*).*" + ) * on(instance, device) group_left(ceph_daemon) label_replace( + label_replace( + ceph_disk_occupation_human{%(matchers)s, instance=~"($osd_hosts).*"}, + "device", "$1", "device", "/dev/(.*)" + ), "instance", "$1", "instance", "([^.:]*).*" + ) + ) + ||| % $.matchers(), + true, + 'time_series', 16, - [], + 0, + 4, + 5 + ), + $.simpleSingleStatPanel( + 'bytes', + 'Network Load', + 'Total send/receive network load across all hosts in the ceph cluster', + 'current', + ||| + sum ( + ( + rate(node_network_receive_bytes{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[$__rate_interval]) or + rate(node_network_receive_bytes_total{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[$__rate_interval]) + ) unless on (device, instance) + label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)") + ) + + sum ( + ( + rate(node_network_transmit_bytes{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[$__rate_interval]) or + rate(node_network_transmit_bytes_total{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[$__rate_interval]) + ) unless on (device, instance) + label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)") + ) + |||, + true, + 'time_series', + 20, + 0, + 4, + 5 + ), + $.simpleGraphPanel( + {}, + 'CPU Busy - Top 10 Hosts', + 'Show the top 10 busiest hosts by cpu', + 'percent', + null, + 0, + ||| + topk(10, + 100 * ( + 1 - ( + avg by(instance) ( + rate(node_cpu_seconds_total{mode='idle',instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}[$__rate_interval]) or + rate(node_cpu{mode='idle',instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}[$__rate_interval]) + ) + ) + ) + ) + |||, + '{{instance}}', + 0, + 
5, + 12, + 9 + ), + $.simpleGraphPanel( + {}, + 'Network Load - Top 10 Hosts', + 'Top 10 hosts by network load', + 'Bps', + null, + 0, + ||| + topk(10, (sum by(instance) ( + ( + rate(node_network_receive_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval]) or + rate(node_network_receive_bytes_total{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval]) + ) + + ( + rate(node_network_transmit_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval]) or + rate(node_network_transmit_bytes_total{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[$__rate_interval]) + ) unless on (device, instance) + label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)")) + )) + |||, + '{{instance}}', + 12, + 5, + 12, + 9 + ), + ]), + 'host-details.json': + $.dashboardSchema( + 'Host Details', + '', + 'rtOg0AiWz', + 'now-1h', + '30s', + 16, + $._config.dashboardTags + ['overview'], + '' + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.3.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addRequired( + type='panel', id='singlestat', name='Singlestat', version='5.0.0' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard' + ) + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addTemplate( + $.addTemplateSchema('ceph_hosts', + '$datasource', + 'label_values({%(clusterMatcher)s}, instance)' % $.matchers(), + 1, + false, + 3, + 'Hostname', + '([^.:]*).*') + ) + .addPanels([ + $.addRowSchema(false, true, '$ceph_hosts System Overview') + { gridPos: { x: 0, y: 0, w: 24, h: 1 } }, + $.simpleSingleStatPanel( + 'none', + 'OSDs', '', + 'current', 
+ "count(sum by (ceph_daemon) (ceph_osd_metadata{%(matchers)s, hostname='$ceph_hosts'}))" % $.matchers(), + null, + 'time_series', + 0, + 1, + 3, + 5 + ), + $.simpleGraphPanel( { - refresh_intervals: ['5s', '10s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], - time_options: ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d'], - } - ) - .addRequired( - type='grafana', id='grafana', name='Grafana', version='5.3.2' - ) - .addRequired( - type='panel', id='graph', name='Graph', version='5.0.0' - ) - .addRequired( - type='panel', id='singlestat', name='Singlestat', version='5.0.0' - ) - .addAnnotation( - u.addAnnotationSchema( - 1, - '-- Grafana --', - true, - true, - 'rgba(0, 211, 255, 1)', - 'Annotations & Alerts', - 'dashboard' - ) + interrupt: '#447EBC', + steal: '#6D1F62', + system: '#890F02', + user: '#3F6833', + wait: '#C15C17', + }, + 'CPU Utilization', + "Shows the CPU breakdown. When multiple servers are selected, only the first host's cpu data is shown", + 'percent', + '% Utilization', + null, + ||| + sum by (mode) ( + rate(node_cpu{instance=~"($ceph_hosts)([\\\\.:].*)?", mode=~"(irq|nice|softirq|steal|system|user|iowait)"}[$__rate_interval]) or + rate(node_cpu_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?", mode=~"(irq|nice|softirq|steal|system|user|iowait)"}[$__rate_interval]) + ) / ( + scalar( + sum(rate(node_cpu{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) or + rate(node_cpu_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval])) + ) * 100 + ) + |||, + '{{mode}}', + 3, + 1, + 6, + 10 + ), + $.simpleGraphPanel( + { + Available: '#508642', + Free: '#508642', + Total: '#bf1b00', + Used: '#bf1b00', + total: '#bf1b00', + used: '#0a50a1', + }, + 'RAM Usage', + '', + 'bytes', + 'RAM used', + null, + ||| + node_memory_MemFree{instance=~"$ceph_hosts([\\\\.:].*)?"} or + node_memory_MemFree_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} + |||, + 'Free', + 9, + 1, + 6, + 10 ) - .addTemplate( - 
g.template.datasource('datasource', - 'prometheus', - 'default', - label='Data Source') + .addTargets( + [ + $.addTargetSchema( + ||| + node_memory_MemTotal{instance=~"$ceph_hosts([\\\\.:].*)?"} or + node_memory_MemTotal_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} + |||, + 'total' + ), + $.addTargetSchema( + ||| + ( + node_memory_Cached{instance=~"$ceph_hosts([\\\\.:].*)?"} or + node_memory_Cached_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} + ) + ( + node_memory_Buffers{instance=~"$ceph_hosts([\\\\.:].*)?"} or + node_memory_Buffers_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} + ) + ( + node_memory_Slab{instance=~"$ceph_hosts([\\\\.:].*)?"} or + node_memory_Slab_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} + ) + |||, + 'buffers/cache' + ), + $.addTargetSchema( + ||| + ( + node_memory_MemTotal{instance=~"$ceph_hosts([\\\\.:].*)?"} or + node_memory_MemTotal_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} + ) - ( + ( + node_memory_MemFree{instance=~"$ceph_hosts([\\\\.:].*)?"} or + node_memory_MemFree_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} + ) + ( + node_memory_Cached{instance=~"$ceph_hosts([\\\\.:].*)?"} or + node_memory_Cached_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} + ) + ( + node_memory_Buffers{instance=~"$ceph_hosts([\\\\.:].*)?"} or + node_memory_Buffers_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} + ) + + ( + node_memory_Slab{instance=~"$ceph_hosts([\\\\.:].*)?"} or + node_memory_Slab_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} + ) + ) + |||, + 'used' + ), + ] ) - .addTemplate( - u.addTemplateSchema('osd_hosts', - '$datasource', - 'label_values(ceph_disk_occupation, exported_instance)', - 1, - true, - 1, - null, - '([^.]*).*') + .addSeriesOverride( + { + alias: 'total', + color: '#bf1b00', + fill: 0, + linewidth: 2, + stack: false, + } + ), + $.simpleGraphPanel( + {}, + 'Network Load', + "Show the network load (rx,tx) across all interfaces (excluding loopback 'lo')", + 'decbytes', + 'Send (-) / Receive (+)', + null, + ||| + sum by (device) ( + rate( + 
node_network_receive_bytes{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[$__rate_interval]) or + rate(node_network_receive_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[$__rate_interval] + ) + ) + |||, + '{{device}}.rx', + 15, + 1, + 6, + 10 ) - .addTemplate( - u.addTemplateSchema('mon_hosts', - '$datasource', - 'label_values(ceph_mon_metadata, ceph_daemon)', - 1, - true, - 1, - null, - 'mon.(.*)') + .addTargets( + [ + $.addTargetSchema( + ||| + sum by (device) ( + rate(node_network_transmit_bytes{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[$__rate_interval]) or + rate(node_network_transmit_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[$__rate_interval]) + ) + |||, + '{{device}}.tx' + ), + ] ) - .addTemplate( - u.addTemplateSchema('mds_hosts', - '$datasource', - 'label_values(ceph_mds_inodes, ceph_daemon)', - 1, - true, - 1, - null, - 'mds.(.*)') + .addSeriesOverride( + { alias: '/.*tx/', transform: 'negative-Y' } + ), + $.simpleGraphPanel( + {}, + 'Network drop rate', + '', + 'pps', + 'Send (-) / Receive (+)', + null, + ||| + rate(node_network_receive_drop{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) or + rate(node_network_receive_drop_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) + |||, + '{{device}}.rx', + 21, + 1, + 3, + 5 ) - .addTemplate( - u.addTemplateSchema('rgw_hosts', - '$datasource', - 'label_values(ceph_rgw_metadata, ceph_daemon)', - 1, - true, - 1, - null, - 'rgw.(.*)') + .addTargets( + [ + $.addTargetSchema( + ||| + rate(node_network_transmit_drop{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) or + rate(node_network_transmit_drop_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) + |||, + '{{device}}.tx' + ), + ] ) - .addPanels([ - HostsOverviewSingleStatPanel( - 'none', - 'OSD Hosts', - '', - 'current', - 'count(sum by (hostname) (ceph_osd_metadata))', - true, - 0, - 0, - 4, - 5 - ), - HostsOverviewSingleStatPanel( - 'percentunit', - 'AVG CPU 
Busy', - 'Average CPU busy across all hosts (OSD, RGW, MON etc) within the cluster', - 'current', - 'avg(\n 1 - (\n avg by(instance) \n (irate(node_cpu_seconds_total{mode=\'idle\',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]) or\n irate(node_cpu{mode=\'idle\',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]))\n )\n )', - true, - 4, - 0, - 4, - 5 - ), - HostsOverviewSingleStatPanel( - 'percentunit', - 'AVG RAM Utilization', - 'Average Memory Usage across all hosts in the cluster (excludes buffer/cache usage)', - 'current', - 'avg (((node_memory_MemTotal{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or node_memory_MemTotal_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"})- (\n (node_memory_MemFree{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or node_memory_MemFree_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}) + \n (node_memory_Cached{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or node_memory_Cached_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}) + \n (node_memory_Buffers{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or node_memory_Buffers_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"}) +\n (node_memory_Slab{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or node_memory_Slab_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"})\n )) /\n (node_memory_MemTotal{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*"} or node_memory_MemTotal_bytes{instance=~"($osd_hosts|$rgw_hosts|$mon_hosts|$mds_hosts).*"} ))', - true, - 8, - 0, - 4, - 5 - ), - HostsOverviewSingleStatPanel( - 'none', - 'Physical IOPS', - 'IOPS Load at the device as reported by the OS on all OSD hosts', - 'current', - 'sum ((irate(node_disk_reads_completed{instance=~"($osd_hosts).*"}[5m]) or irate(node_disk_reads_completed_total{instance=~"($osd_hosts).*"}[5m]) ) + 
\n(irate(node_disk_writes_completed{instance=~"($osd_hosts).*"}[5m]) or irate(node_disk_writes_completed_total{instance=~"($osd_hosts).*"}[5m])))', - true, - 12, - 0, - 4, - 5 - ), - HostsOverviewSingleStatPanel( - 'percent', - 'AVG Disk Utilization', - 'Average Disk utilization for all OSD data devices (i.e. excludes journal/WAL)', - 'current', - 'avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), "instance", "$1", "instance", "([^.:]*).*"\n ) *\n on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human{instance=~"($osd_hosts).*"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^.:]*).*")\n)', - true, - 16, - 0, - 4, - 5 - ), - HostsOverviewSingleStatPanel( - 'bytes', - 'Network Load', - 'Total send/receive network load across all hosts in the ceph cluster', - 'current', - ||| - sum ( - ( - irate(node_network_receive_bytes{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[1m]) or - irate(node_network_receive_bytes_total{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[1m]) - ) unless on (device, instance) - label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)") - ) + - sum ( - ( - irate(node_network_transmit_bytes{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[1m]) or - irate(node_network_transmit_bytes_total{instance=~"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*",device!="lo"}[1m]) - ) unless on (device, instance) - label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)") - ) - ||| - , - true, - 20, - 0, - 4, - 5 - ), - HostsOverviewGraphPanel( - 'CPU Busy - Top 10 Hosts', - 'Show the top 10 busiest hosts by cpu', - 'percent', - 'topk(10,100 * ( 1 - (\n avg by(instance) \n (irate(node_cpu_seconds_total{mode=\'idle\',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]) or\n 
irate(node_cpu{mode=\'idle\',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]))\n )\n )\n)', - '{{instance}}', - 0, - 5, - 12, - 9 - ), - HostsOverviewGraphPanel( - 'Network Load - Top 10 Hosts', 'Top 10 hosts by network load', 'Bps', ||| - topk(10, (sum by(instance) ( - ( - irate(node_network_receive_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[1m]) or - irate(node_network_receive_bytes_total{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[1m]) - ) + - ( - irate(node_network_transmit_bytes{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[1m]) or - irate(node_network_transmit_bytes_total{instance=~"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*",device!="lo"}[1m]) - ) unless on (device, instance) - label_replace((bonding_slaves > 0), "device", "$1", "master", "(.+)")) - )) - ||| - , '{{instance}}', 12, 5, 12, 9 - ), - ]), - 'host-details.json': - local HostDetailsSingleStatPanel(format, - title, - description, - valueName, - expr, - x, - y, - w, - h) = - u.addSingleStatSchema(['#299c46', 'rgba(237, 129, 40, 0.89)', '#d44a3a'], - '$datasource', - format, - title, - description, - valueName, - false, - 100, - false, - false, - '') - .addTarget(u.addTargetSchema(expr)) + { gridPos: { x: x, y: y, w: w, h: h } }; - - local HostDetailsGraphPanel(alias, - title, - description, - nullPointMode, - formatY1, - labelY1, - expr, - legendFormat, - x, - y, - w, - h) = - u.graphPanelSchema(alias, - title, - description, - nullPointMode, - false, - formatY1, - 'short', - labelY1, - null, - null, - 1, - '$datasource') - .addTargets( - [u.addTargetSchema(expr, legendFormat)] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; - - u.dashboardSchema( - 'Host Details', - '', - 'rtOg0AiWz', - 'now-1h', - '10s', - 16, - ['overview'], - '', + .addSeriesOverride( { - refresh_intervals: ['5s', '10s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], - time_options: ['5m', '15m', '1h', '6h', 
'12h', '24h', '2d', '7d', '30d'], + alias: '/.*tx/', + transform: 'negative-Y', } + ), + $.simpleSingleStatPanel( + 'bytes', + 'Raw Capacity', + 'Each OSD consists of a Journal/WAL partition and a data partition. The RAW Capacity shown is the sum of the data partitions across all OSDs on the selected OSD hosts.', + 'current', + ||| + sum( + ceph_osd_stat_bytes{%(matchers)s} and + on (ceph_daemon) ceph_disk_occupation{%(matchers)s, instance=~"($ceph_hosts)([\\\\.:].*)?"} + ) + ||| % $.matchers(), + null, + 'time_series', + 0, + 6, + 3, + 5 + ), + $.simpleGraphPanel( + {}, + 'Network error rate', + '', + 'pps', + 'Send (-) / Receive (+)', + null, + ||| + rate(node_network_receive_errs{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) or + rate(node_network_receive_errs_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) + |||, + '{{device}}.rx', + 21, + 6, + 3, + 5 ) - .addRequired( - type='grafana', id='grafana', name='Grafana', version='5.3.2' - ) - .addRequired( - type='panel', id='graph', name='Graph', version='5.0.0' + .addTargets( + [$.addTargetSchema( + ||| + rate(node_network_transmit_errs{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) or + rate(node_network_transmit_errs_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[$__rate_interval]) + |||, + '{{device}}.tx' + )] ) - .addRequired( - type='panel', id='singlestat', name='Singlestat', version='5.0.0' + .addSeriesOverride( + { + alias: '/.*tx/', + transform: 'negative-Y', + } + ), + $.addRowSchema(false, + true, + 'OSD Disk Performance Statistics') + { gridPos: { x: 0, y: 11, w: 24, h: 1 } }, + $.simpleGraphPanel( + {}, + '$ceph_hosts Disk IOPS', + "For any OSD devices on the host, this chart shows the iops per physical device. 
Each device is shown by it's name and corresponding OSD id value", + 'ops', + 'Read (-) / Write (+)', + null, + ||| + label_replace( + ( + rate(node_disk_writes_completed{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) or + rate(node_disk_writes_completed_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) + ), "instance", "$1", "instance", "([^:.]*).*" + ) * on(instance, device) group_left(ceph_daemon) label_replace( + label_replace( + ceph_disk_occupation_human{%(matchers)s}, "device", "$1", "device", "/dev/(.*)" + ), "instance", "$1", "instance", "([^:.]*).*" + ) + ||| % $.matchers(), + '{{device}}({{ceph_daemon}}) writes', + 0, + 12, + 11, + 9 ) - .addAnnotation( - u.addAnnotationSchema( - 1, '-- Grafana --', true, true, 'rgba(0, 211, 255, 1)', 'Annotations & Alerts', 'dashboard' - ) + .addTargets( + [ + $.addTargetSchema( + ||| + label_replace( + ( + rate(node_disk_reads_completed{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) or + rate(node_disk_reads_completed_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) + ), "instance", "$1", "instance", "([^:.]*).*" + ) * on(instance, device) group_left(ceph_daemon) label_replace( + label_replace( + ceph_disk_occupation_human{%(matchers)s},"device", "$1", "device", "/dev/(.*)" + ), "instance", "$1", "instance", "([^:.]*).*" + ) + ||| % $.matchers(), + '{{device}}({{ceph_daemon}}) reads' + ), + ] ) - .addTemplate( - g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + .addSeriesOverride( + { alias: '/.*reads/', transform: 'negative-Y' } + ), + $.simpleGraphPanel( + {}, + '$ceph_hosts Throughput by Disk', + 'For OSD hosts, this chart shows the disk bandwidth (read bytes/sec + write bytes/sec) of the physical OSD device. 
Each device is shown by device name, and corresponding OSD id', + 'Bps', + 'Read (-) / Write (+)', + null, + ||| + label_replace( + ( + rate(node_disk_bytes_written{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) or + rate(node_disk_written_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) + ), "instance", "$1", "instance", "([^:.]*).*") * on(instance, device) + group_left(ceph_daemon) label_replace( + label_replace(ceph_disk_occupation_human{%(matchers)s}, "device", "$1", "device", "/dev/(.*)"), + "instance", "$1", "instance", "([^:.]*).*" + ) + ||| % $.matchers(), + '{{device}}({{ceph_daemon}}) write', + 12, + 12, + 11, + 9 ) - .addTemplate( - u.addTemplateSchema('ceph_hosts', '$datasource', 'label_values(node_scrape_collector_success, instance) ', 1, false, 3, 'Hostname', '([^.:]*).*') + .addTargets( + [$.addTargetSchema( + ||| + label_replace( + ( + rate(node_disk_bytes_read{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) or + rate(node_disk_read_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) + ), + "instance", "$1", "instance", "([^:.]*).*") * on(instance, device) + group_left(ceph_daemon) label_replace( + label_replace(ceph_disk_occupation_human{%(matchers)s}, "device", "$1", "device", "/dev/(.*)"), + "instance", "$1", "instance", "([^:.]*).*" + ) + ||| % $.matchers(), + '{{device}}({{ceph_daemon}}) read' + )] ) - .addPanels([ - u.addRowSchema(false, true, '$ceph_hosts System Overview') + { gridPos: { x: 0, y: 0, w: 24, h: 1 } }, - HostDetailsSingleStatPanel( - 'none', - 'OSDs', - '', - 'current', - "count(sum by (ceph_daemon) (ceph_osd_metadata{hostname='$ceph_hosts'}))", - 0, - 1, - 3, - 5 - ), - HostDetailsGraphPanel( - { - interrupt: '#447EBC', - steal: '#6D1F62', - system: '#890F02', - user: '#3F6833', - wait: '#C15C17', - }, 'CPU Utilization', "Shows the CPU breakdown. 
When multiple servers are selected, only the first host's cpu data is shown", 'null', 'percent', '% Utilization', 'sum by (mode) (\n irate(node_cpu{instance=~"($ceph_hosts)([\\\\.:].*)?", mode=~"(irq|nice|softirq|steal|system|user|iowait)"}[1m]) or\n irate(node_cpu_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?", mode=~"(irq|nice|softirq|steal|system|user|iowait)"}[1m])\n) / scalar(\n sum(irate(node_cpu{instance=~"($ceph_hosts)([\\\\.:].*)?"}[1m]) or\n irate(node_cpu_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[1m]))\n) * 100', '{{mode}}', 3, 1, 6, 10 - ), - HostDetailsGraphPanel( - { - Available: '#508642', - Free: '#508642', - Total: '#bf1b00', - Used: '#bf1b00', - total: '#bf1b00', - used: '#0a50a1', - }, - 'RAM Usage', - '', - 'null', - 'bytes', - 'RAM used', - 'node_memory_MemFree{instance=~"$ceph_hosts([\\\\.:].*)?"} or node_memory_MemFree_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} ', - 'Free', - 9, - 1, - 6, - 10 - ) - .addTargets( - [ - u.addTargetSchema('node_memory_MemTotal{instance=~"$ceph_hosts([\\\\.:].*)?"} or node_memory_MemTotal_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"} ', 'total'), - u.addTargetSchema('(node_memory_Cached{instance=~"$ceph_hosts([\\\\.:].*)?"} or node_memory_Cached_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}) + \n(node_memory_Buffers{instance=~"$ceph_hosts([\\\\.:].*)?"} or node_memory_Buffers_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}) +\n(node_memory_Slab{instance=~"$ceph_hosts([\\\\.:].*)?"} or node_memory_Slab_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}) \n', 'buffers/cache'), - u.addTargetSchema('(node_memory_MemTotal{instance=~"$ceph_hosts([\\\\.:].*)?"} or node_memory_MemTotal_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"})- (\n (node_memory_MemFree{instance=~"$ceph_hosts([\\\\.:].*)?"} or node_memory_MemFree_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}) + \n (node_memory_Cached{instance=~"$ceph_hosts([\\\\.:].*)?"} or node_memory_Cached_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}) + \n 
(node_memory_Buffers{instance=~"$ceph_hosts([\\\\.:].*)?"} or node_memory_Buffers_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"}) +\n (node_memory_Slab{instance=~"$ceph_hosts([\\\\.:].*)?"} or node_memory_Slab_bytes{instance=~"$ceph_hosts([\\\\.:].*)?"})\n )\n \n', 'used'), - ] - ) - .addSeriesOverride( - { - alias: 'total', - color: '#bf1b00', - fill: 0, - linewidth: 2, - stack: false, - } - ), - HostDetailsGraphPanel( - {}, - 'Network Load', - "Show the network load (rx,tx) across all interfaces (excluding loopback 'lo')", - 'null', - 'decbytes', - 'Send (-) / Receive (+)', - 'sum by (device) (\n irate(node_network_receive_bytes{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[1m]) or \n irate(node_network_receive_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[1m])\n)', - '{{device}}.rx', - 15, - 1, - 6, - 10 - ) - .addTargets( - [ - u.addTargetSchema('sum by (device) (\n irate(node_network_transmit_bytes{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?",device!="lo"}[1m])\n)', '{{device}}.tx'), - ] - ) - .addSeriesOverride( - { alias: '/.*tx/', transform: 'negative-Y' } - ), - HostDetailsGraphPanel( - {}, - 'Network drop rate', - '', - 'null', - 'pps', - 'Send (-) / Receive (+)', - 'irate(node_network_receive_drop{instance=~"$ceph_hosts([\\\\.:].*)?"}[1m]) or irate(node_network_receive_drop_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[1m])', - '{{device}}.rx', - 21, - 1, - 3, - 5 - ) - .addTargets( - [ - u.addTargetSchema( - 'irate(node_network_transmit_drop{instance=~"$ceph_hosts([\\\\.:].*)?"}[1m]) or irate(node_network_transmit_drop_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[1m])', '{{device}}.tx' - ), - ] - ) - .addSeriesOverride( - { - alias: '/.*tx/', - transform: 'negative-Y', - } - ), - HostDetailsSingleStatPanel( - 'bytes', - 'Raw Capacity', - 'Each OSD consists of a Journal/WAL partition and a data partition. 
The RAW Capacity shown is the sum of the data partitions across all OSDs on the selected OSD hosts.', - 'current', - 'sum(ceph_osd_stat_bytes and on (ceph_daemon) ceph_disk_occupation{instance=~"($ceph_hosts)([\\\\.:].*)?"})', - 0, - 6, - 3, - 5 - ), - HostDetailsGraphPanel( - {}, - 'Network error rate', - '', - 'null', - 'pps', - 'Send (-) / Receive (+)', - 'irate(node_network_receive_errs{instance=~"$ceph_hosts([\\\\.:].*)?"}[1m]) or irate(node_network_receive_errs_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[1m])', - '{{device}}.rx', - 21, - 6, - 3, - 5 - ) - .addTargets( - [u.addTargetSchema( - 'irate(node_network_transmit_errs{instance=~"$ceph_hosts([\\\\.:].*)?"}[1m]) or irate(node_network_transmit_errs_total{instance=~"$ceph_hosts([\\\\.:].*)?"}[1m])', '{{device}}.tx' - )] - ) - .addSeriesOverride( - { - alias: '/.*tx/', - transform: 'negative-Y', - } - ), - u.addRowSchema(false, - true, - 'OSD Disk Performance Statistics') + { gridPos: { x: 0, y: 11, w: 24, h: 1 } }, - HostDetailsGraphPanel( - {}, - '$ceph_hosts Disk IOPS', - "For any OSD devices on the host, this chart shows the iops per physical device. 
Each device is shown by it's name and corresponding OSD id value", - 'connected', - 'ops', - 'Read (-) / Write (+)', - 'label_replace(\n (\n irate(node_disk_writes_completed{instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m]) or\n irate(node_disk_writes_completed_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m])\n ),\n "instance",\n "$1",\n "instance",\n "([^:.]*).*"\n)\n* on(instance, device) group_left(ceph_daemon)\n label_replace(\n label_replace(\n ceph_disk_occupation_human,\n "device",\n "$1",\n "device",\n "/dev/(.*)"\n ),\n "instance",\n "$1",\n "instance",\n "([^:.]*).*"\n )', - '{{device}}({{ceph_daemon}}) writes', - 0, - 12, - 11, - 9 - ) - .addTargets( - [ - u.addTargetSchema( - 'label_replace(\n (irate(node_disk_reads_completed{instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m]) or irate(node_disk_reads_completed_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m])),\n "instance",\n "$1",\n "instance",\n "([^:.]*).*"\n)\n* on(instance, device) group_left(ceph_daemon)\n label_replace(\n label_replace(\n ceph_disk_occupation_human,\n "device",\n "$1",\n "device",\n "/dev/(.*)"\n ),\n "instance",\n "$1",\n "instance",\n "([^:.]*).*"\n )', - '{{device}}({{ceph_daemon}}) reads' - ), - ] - ) - .addSeriesOverride( - { alias: '/.*reads/', transform: 'negative-Y' } - ), - HostDetailsGraphPanel( - {}, - '$ceph_hosts Throughput by Disk', - 'For OSD hosts, this chart shows the disk bandwidth (read bytes/sec + write bytes/sec) of the physical OSD device. 
Each device is shown by device name, and corresponding OSD id', - 'connected', - 'Bps', - 'Read (-) / Write (+)', - 'label_replace((irate(node_disk_bytes_written{instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m]) or irate(node_disk_written_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m])), "instance", "$1", "instance", "([^:.]*).*") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*")', - '{{device}}({{ceph_daemon}}) write', - 12, - 12, - 11, - 9 - ) - .addTargets( - [u.addTargetSchema( - 'label_replace((irate(node_disk_bytes_read{instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m]) or irate(node_disk_read_bytes_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m])), "instance", "$1", "instance", "([^:.]*).*") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*")', - '{{device}}({{ceph_daemon}}) read' - )] - ) - .addSeriesOverride( - { alias: '/.*read/', transform: 'negative-Y' } - ), - HostDetailsGraphPanel( - {}, - '$ceph_hosts Disk Latency', - "For OSD hosts, this chart shows the latency at the physical drive. 
Each drive is shown by device name, with it's corresponding OSD id", - 'null as zero', - 's', - '', - 'max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m]), 0.001), "instance", "$1", "instance", "([^:.]*).*")) * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human{instance=~"($ceph_hosts)([\\\\.:].*)?"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*")', - '{{device}}({{ceph_daemon}})', - 0, - 21, - 11, - 9 - ), - HostDetailsGraphPanel( - {}, - '$ceph_hosts Disk utilization', - 'Show disk utilization % (util) of any OSD devices on the host by the physical device name and associated OSD id.', - 'connected', - 'percent', - '%Util', - 'label_replace(((irate(node_disk_io_time_ms{instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[5m]) * 100), "instance", "$1", "instance", "([^:.]*).*") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human{instance=~"($ceph_hosts)([\\\\.:].*)?"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*")', - '{{device}}({{ceph_daemon}})', - 12, - 21, - 11, - 9 - ), - ]), - }, + .addSeriesOverride( + { alias: '/.*read/', transform: 'negative-Y' } + ), + $.simpleGraphPanel( + {}, + '$ceph_hosts Disk Latency', + "For OSD hosts, this chart shows the latency at the physical drive. 
Each drive is shown by device name, with it's corresponding OSD id", + 's', + '', + null, + ||| + max by(instance, device) (label_replace( + (rate(node_disk_write_time_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval])) / + clamp_min(rate(node_disk_writes_completed_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]), 0.001) or + (rate(node_disk_read_time_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval])) / + clamp_min(rate(node_disk_reads_completed_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]), 0.001), + "instance", "$1", "instance", "([^:.]*).*" + )) * on(instance, device) group_left(ceph_daemon) label_replace( + label_replace( + ceph_disk_occupation_human{instance=~"($ceph_hosts)([\\\\.:].*)?"}, + "device", "$1", "device", "/dev/(.*)" + ), "instance", "$1", "instance", "([^:.]*).*" + ) + ||| % $.matchers(), + '{{device}}({{ceph_daemon}})', + 0, + 21, + 11, + 9 + ), + $.simpleGraphPanel( + {}, + '$ceph_hosts Disk utilization', + 'Show disk utilization % (util) of any OSD devices on the host by the physical device name and associated OSD id.', + 'percent', + '%Util', + null, + ||| + label_replace( + ( + (rate(node_disk_io_time_ms{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) / 10) or + rate(node_disk_io_time_seconds_total{instance=~"($ceph_hosts)([\\\\.:].*)?"}[$__rate_interval]) * 100 + ), "instance", "$1", "instance", "([^:.]*).*" + ) * on(instance, device) group_left(ceph_daemon) label_replace( + label_replace(ceph_disk_occupation_human{%(matchers)s, instance=~"($ceph_hosts)([\\\\.:].*)?"}, + "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*" + ) + ||| % $.matchers(), + '{{device}}({{ceph_daemon}})', + 12, + 21, + 11, + 9 + ), + ]), } diff --git a/ceph/monitoring/ceph-mixin/dashboards/osd.libsonnet b/ceph/monitoring/ceph-mixin/dashboards/osd.libsonnet index c5e614675..129b74ba6 100644 --- a/ceph/monitoring/ceph-mixin/dashboards/osd.libsonnet 
+++ b/ceph/monitoring/ceph-mixin/dashboards/osd.libsonnet @@ -1,524 +1,593 @@ local g = import 'grafonnet/grafana.libsonnet'; -local u = import 'utils.libsonnet'; -{ - grafanaDashboards+:: { - 'osds-overview.json': - local OsdOverviewStyle(alias, pattern, type, unit) = - u.addStyle(alias, null, [ - 'rgba(245, 54, 54, 0.9)', - 'rgba(237, 129, 40, 0.89)', - 'rgba(50, 172, 45, 0.97)', - ], 'YYYY-MM-DD HH:mm:ss', 2, 1, pattern, [], type, unit, []); - local OsdOverviewGraphPanel(alias, - title, - description, - formatY1, - labelY1, - min, - expr, - legendFormat1, - x, - y, - w, - h) = - u.graphPanelSchema(alias, - title, - description, - 'null', - false, - formatY1, - 'short', - labelY1, - null, - min, - 1, - '$datasource') - .addTargets( - [u.addTargetSchema(expr, legendFormat1)] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; - local OsdOverviewPieChartPanel(alias, description, title) = - u.addPieChartSchema(alias, - '$datasource', - description, - 'Under graph', - 'pie', - title, - 'current'); - local OsdOverviewSingleStatPanel(colors, - format, - title, - description, - valueName, - colorValue, - gaugeMaxValue, - gaugeShow, - sparkLineShow, - thresholds, - expr, - x, - y, - w, - h) = - u.addSingleStatSchema( - colors, - '$datasource', - format, - title, - description, - valueName, - colorValue, - gaugeMaxValue, - gaugeShow, - sparkLineShow, - thresholds - ) - .addTarget( - u.addTargetSchema(expr) - ) + { gridPos: { x: x, y: y, w: w, h: h } }; - - u.dashboardSchema( - 'OSD Overview', - '', - 'lo02I1Aiz', - 'now-1h', - '10s', - 16, - [], - '', - { - refresh_intervals: ['5s', '10s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], - time_options: ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d'], - } - ) - .addAnnotation( - u.addAnnotationSchema( - 1, - '-- Grafana --', - true, - true, - 'rgba(0, 211, 255, 1)', - 'Annotations & Alerts', - 'dashboard' - ) +(import 'utils.libsonnet') { + 'osds-overview.json': + $.dashboardSchema( + 'OSD Overview', + '', + 
'lo02I1Aiz', + 'now-1h', + '30s', + 16, + $._config.dashboardTags, + '' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, + '-- Grafana --', + true, + true, + 'rgba(0, 211, 255, 1)', + 'Annotations & Alerts', + 'dashboard' ) - .addRequired( - type='grafana', id='grafana', name='Grafana', version='5.0.0' - ) - .addRequired( - type='panel', id='grafana-piechart-panel', name='Pie Chart', version='1.3.3' - ) - .addRequired( - type='panel', id='graph', name='Graph', version='5.0.0' - ) - .addRequired( - type='panel', id='table', name='Table', version='5.0.0' + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.0.0' + ) + .addRequired( + type='panel', id='grafana-piechart-panel', name='Pie Chart', version='1.3.3' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addRequired( + type='panel', id='table', name='Table', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addPanels([ + $.simpleGraphPanel( + { '@95%ile': '#e0752d' }, + 'OSD Read Latencies', + '', + 'ms', + null, + '0', + ||| + avg ( + rate(ceph_osd_op_r_latency_sum{%(matchers)s}[$__rate_interval]) / + on (ceph_daemon) rate(ceph_osd_op_r_latency_count{%(matchers)s}[$__rate_interval]) * 1000 + ) + ||| % $.matchers(), + 'AVG read', + 0, + 0, + 8, + 8 ) - .addTemplate( - g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + .addTargets( + [ + $.addTargetSchema( + ||| + max( + rate(ceph_osd_op_r_latency_sum{%(matchers)s}[$__rate_interval]) / + on (ceph_daemon) rate(ceph_osd_op_r_latency_count{%(matchers)s}[$__rate_interval]) * 1000 + ) + ||| % $.matchers(), + 'MAX read' + ), + $.addTargetSchema( + ||| + quantile(0.95, + ( + rate(ceph_osd_op_r_latency_sum{%(matchers)s}[$__rate_interval]) / + on (ceph_daemon) 
rate(ceph_osd_op_r_latency_count{%(matchers)s}[$__rate_interval]) + * 1000 + ) + ) + ||| % $.matchers(), + '@95%ile' + ), + ], + ), + $.addTableSchema( + '$datasource', + "This table shows the osd's that are delivering the 10 highest read latencies within the cluster", + { col: 2, desc: true }, + [ + $.overviewStyle('OSD ID', 'ceph_daemon', 'string', 'short'), + $.overviewStyle('Latency (ms)', 'Value', 'number', 'none'), + $.overviewStyle('', '/.*/', 'hidden', 'short'), + ], + 'Highest READ Latencies', + 'table' ) - .addPanels([ - OsdOverviewGraphPanel( - { '@95%ile': '#e0752d' }, - 'OSD Read Latencies', + .addTarget( + $.addTargetSchema( + ||| + topk(10, + (sort( + ( + rate(ceph_osd_op_r_latency_sum{%(matchers)s}[$__rate_interval]) / + on (ceph_daemon) rate(ceph_osd_op_r_latency_count{%(matchers)s}[$__rate_interval]) * + 1000 + ) + )) + ) + ||| % $.matchers(), '', - 'ms', - null, - '0', - 'avg (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)', - 'AVG read', - 0, - 0, - 8, - 8 - ) - .addTargets( - [ - u.addTargetSchema( - 'max (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)', - 'MAX read' - ), - u.addTargetSchema( - 'quantile(0.95,\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n)', '@95%ile' - ), - ], - ), - u.addTableSchema( - '$datasource', - "This table shows the osd's that are delivering the 10 highest read latencies within the cluster", - { col: 2, desc: true }, - [ - OsdOverviewStyle('OSD ID', 'ceph_daemon', 'string', 'short'), - OsdOverviewStyle('Latency (ms)', 'Value', 'number', 'none'), - OsdOverviewStyle('', '/.*/', 'hidden', 'short'), - ], - 'Highest READ Latencies', - 'table' + 'table', + 1, + true ) - .addTarget( - u.addTargetSchema( - 'topk(10,\n (sort(\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n ))\n)\n\n', '', 'table', 
1, true + ) + { gridPos: { x: 8, y: 0, w: 4, h: 8 } }, + $.simpleGraphPanel( + { + '@95%ile write': '#e0752d', + }, + 'OSD Write Latencies', + '', + 'ms', + null, + '0', + ||| + avg( + rate(ceph_osd_op_w_latency_sum{%(matchers)s}[$__rate_interval]) / + on (ceph_daemon) rate(ceph_osd_op_w_latency_count{%(matchers)s}[$__rate_interval]) + * 1000 ) - ) + { gridPos: { x: 8, y: 0, w: 4, h: 8 } }, - OsdOverviewGraphPanel( - { - '@95%ile write': '#e0752d', - }, - 'OSD Write Latencies', + ||| % $.matchers(), + 'AVG write', + 12, + 0, + 8, + 8 + ) + .addTargets( + [ + $.addTargetSchema( + ||| + max( + rate(ceph_osd_op_w_latency_sum{%(matchers)s}[$__rate_interval]) / + on (ceph_daemon) rate(ceph_osd_op_w_latency_count{%(matchers)s}[$__rate_interval]) * + 1000 + ) + ||| % $.matchers(), 'MAX write' + ), + $.addTargetSchema( + ||| + quantile(0.95, ( + rate(ceph_osd_op_w_latency_sum{%(matchers)s}[$__rate_interval]) / + on (ceph_daemon) rate(ceph_osd_op_w_latency_count{%(matchers)s}[$__rate_interval]) * + 1000 + )) + ||| % $.matchers(), '@95%ile write' + ), + ], + ), + $.addTableSchema( + '$datasource', + "This table shows the osd's that are delivering the 10 highest write latencies within the cluster", + { col: 2, desc: true }, + [ + $.overviewStyle( + 'OSD ID', 'ceph_daemon', 'string', 'short' + ), + $.overviewStyle('Latency (ms)', 'Value', 'number', 'none'), + $.overviewStyle('', '/.*/', 'hidden', 'short'), + ], + 'Highest WRITE Latencies', + 'table' + ) + .addTarget( + $.addTargetSchema( + ||| + topk(10, + (sort( + (rate(ceph_osd_op_w_latency_sum{%(matchers)s}[$__rate_interval]) / + on (ceph_daemon) rate(ceph_osd_op_w_latency_count{%(matchers)s}[$__rate_interval]) * + 1000) + )) + ) + ||| % $.matchers(), '', - 'ms', - null, - '0', - 'avg (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)', - 'AVG write', - 12, - 0, - 8, - 8 - ) - .addTargets( - [ - u.addTargetSchema( - 'max (irate(ceph_osd_op_w_latency_sum[1m]) / on 
(ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)', - 'MAX write' - ), - u.addTargetSchema( - 'quantile(0.95,\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n)', '@95%ile write' - ), - ], - ), - u.addTableSchema( - '$datasource', - "This table shows the osd's that are delivering the 10 highest write latencies within the cluster", - { col: 2, desc: true }, - [ - OsdOverviewStyle( - 'OSD ID', 'ceph_daemon', 'string', 'short' - ), - OsdOverviewStyle('Latency (ms)', 'Value', 'number', 'none'), - OsdOverviewStyle('', '/.*/', 'hidden', 'short'), - ], - 'Highest WRITE Latencies', - 'table' - ) - .addTarget( - u.addTargetSchema( - 'topk(10,\n (sort(\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n ))\n)\n\n', - '', - 'table', - 1, - true - ) - ) + { gridPos: { x: 20, y: 0, w: 4, h: 8 } }, - OsdOverviewPieChartPanel( - {}, '', 'OSD Types Summary' + 'table', + 1, + true ) - .addTarget( - u.addTargetSchema('count by (device_class) (ceph_osd_metadata)', '{{device_class}}') - ) + { gridPos: { x: 0, y: 8, w: 4, h: 8 } }, - OsdOverviewPieChartPanel( - { 'Non-Encrypted': '#E5AC0E' }, '', 'OSD Objectstore Types' + ) + { gridPos: { x: 20, y: 0, w: 4, h: 8 } }, + $.simplePieChart( + {}, '', 'OSD Types Summary' + ) + .addTarget( + $.addTargetSchema('count by (device_class) (ceph_osd_metadata{%(matchers)s})' % $.matchers(), '{{device_class}}') + ) + { gridPos: { x: 0, y: 8, w: 4, h: 8 } }, + $.simplePieChart( + { 'Non-Encrypted': '#E5AC0E' }, '', 'OSD Objectstore Types' + ) + .addTarget( + $.addTargetSchema( + 'count(ceph_bluefs_wal_total_bytes{%(matchers)s})' % $.matchers(), 'bluestore', 'time_series', 2 ) - .addTarget( - u.addTargetSchema( - 'count(ceph_bluefs_wal_total_bytes)', 'bluestore', 'time_series', 2 - ) + ) + .addTarget( + $.addTargetSchema( + 'absent(ceph_bluefs_wal_total_bytes{%(matchers)s}) * count(ceph_osd_metadata{%(matchers)s})' % 
$.matchers(), 'filestore', 'time_series', 2 ) - .addTarget( - u.addTargetSchema( - 'absent(ceph_bluefs_wal_total_bytes)*count(ceph_osd_metadata)', 'filestore', 'time_series', 2 + ) + { gridPos: { x: 4, y: 8, w: 4, h: 8 } }, + $.simplePieChart( + {}, 'The pie chart shows the various OSD sizes used within the cluster', 'OSD Size Summary' + ) + .addTarget($.addTargetSchema( + 'count(ceph_osd_stat_bytes{%(matchers)s} < 1099511627776)' % $.matchers(), '<1TB', 'time_series', 2 + )) + .addTarget($.addTargetSchema( + 'count(ceph_osd_stat_bytes{%(matchers)s} >= 1099511627776 < 2199023255552)' % $.matchers(), '<2TB', 'time_series', 2 + )) + .addTarget($.addTargetSchema( + 'count(ceph_osd_stat_bytes{%(matchers)s} >= 2199023255552 < 3298534883328)' % $.matchers(), '<3TB', 'time_series', 2 + )) + .addTarget($.addTargetSchema( + 'count(ceph_osd_stat_bytes{%(matchers)s} >= 3298534883328 < 4398046511104)' % $.matchers(), '<4TB', 'time_series', 2 + )) + .addTarget($.addTargetSchema( + 'count(ceph_osd_stat_bytes{%(matchers)s} >= 4398046511104 < 6597069766656)' % $.matchers(), '<6TB', 'time_series', 2 + )) + .addTarget($.addTargetSchema( + 'count(ceph_osd_stat_bytes{%(matchers)s} >= 6597069766656 < 8796093022208)' % $.matchers(), '<8TB', 'time_series', 2 + )) + .addTarget($.addTargetSchema( + 'count(ceph_osd_stat_bytes{%(matchers)s} >= 8796093022208 < 10995116277760)' % $.matchers(), '<10TB', 'time_series', 2 + )) + .addTarget($.addTargetSchema( + 'count(ceph_osd_stat_bytes{%(matchers)s} >= 10995116277760 < 13194139533312)' % $.matchers(), '<12TB', 'time_series', 2 + )) + .addTarget($.addTargetSchema( + 'count(ceph_osd_stat_bytes{%(matchers)s} >= 13194139533312)' % $.matchers(), '<12TB+', 'time_series', 2 + )) + { gridPos: { x: 8, y: 8, w: 4, h: 8 } }, + g.graphPanel.new(bars=true, + datasource='$datasource', + title='Distribution of PGs per OSD', + x_axis_buckets=20, + x_axis_mode='histogram', + x_axis_values=['total'], + formatY1='short', + formatY2='short', + labelY1='# of OSDs', 
+ min='0', + nullPointMode='null') + .addTarget($.addTargetSchema( + 'ceph_osd_numpg{%(matchers)s}' % $.matchers(), 'PGs per OSD', 'time_series', 1, true + )) + { gridPos: { x: 12, y: 8, w: 8, h: 8 } }, + $.gaugeSingleStatPanel( + 'percentunit', + 'OSD onode Hits Ratio', + 'This gauge panel shows onode Hits ratio to help determine if increasing RAM per OSD could help improve the performance of the cluster', + 'current', + true, + 1, + true, + false, + '.75', + ||| + sum(ceph_bluestore_onode_hits{%(matchers)s}) / ( + sum(ceph_bluestore_onode_hits{%(matchers)s}) + + sum(ceph_bluestore_onode_misses{%(matchers)s}) ) - ) + { gridPos: { x: 4, y: 8, w: 4, h: 8 } }, - OsdOverviewPieChartPanel( - {}, 'The pie chart shows the various OSD sizes used within the cluster', 'OSD Size Summary' - ) - .addTarget(u.addTargetSchema( - 'count(ceph_osd_stat_bytes < 1099511627776)', '<1TB', 'time_series', 2 - )) - .addTarget(u.addTargetSchema( - 'count(ceph_osd_stat_bytes >= 1099511627776 < 2199023255552)', '<2TB', 'time_series', 2 - )) - .addTarget(u.addTargetSchema( - 'count(ceph_osd_stat_bytes >= 2199023255552 < 3298534883328)', '<3TB', 'time_series', 2 - )) - .addTarget(u.addTargetSchema( - 'count(ceph_osd_stat_bytes >= 3298534883328 < 4398046511104)', '<4TB', 'time_series', 2 - )) - .addTarget(u.addTargetSchema( - 'count(ceph_osd_stat_bytes >= 4398046511104 < 6597069766656)', '<6TB', 'time_series', 2 - )) - .addTarget(u.addTargetSchema( - 'count(ceph_osd_stat_bytes >= 6597069766656 < 8796093022208)', '<8TB', 'time_series', 2 - )) - .addTarget(u.addTargetSchema( - 'count(ceph_osd_stat_bytes >= 8796093022208 < 10995116277760)', '<10TB', 'time_series', 2 - )) - .addTarget(u.addTargetSchema( - 'count(ceph_osd_stat_bytes >= 10995116277760 < 13194139533312)', '<12TB', 'time_series', 2 - )) - .addTarget(u.addTargetSchema( - 'count(ceph_osd_stat_bytes >= 13194139533312)', '<12TB+', 'time_series', 2 - )) + { gridPos: { x: 8, y: 8, w: 4, h: 8 } }, - g.graphPanel.new(bars=true, - 
datasource='$datasource', - title='Distribution of PGs per OSD', - x_axis_buckets=20, - x_axis_mode='histogram', - x_axis_values=['total'], - formatY1='short', - formatY2='short', - labelY1='# of OSDs', - min='0', - nullPointMode='null') - .addTarget(u.addTargetSchema( - 'ceph_osd_numpg\n', 'PGs per OSD', 'time_series', 1, true - )) + { gridPos: { x: 12, y: 8, w: 8, h: 8 } }, - OsdOverviewSingleStatPanel( - ['#d44a3a', '#299c46'], - 'percentunit', - 'OSD onode Hits Ratio', - 'This gauge panel shows onode Hits ratio to help determine if increasing RAM per OSD could help improve the performance of the cluster', - 'current', - true, - 1, - true, - false, - '.75', - 'sum(ceph_bluestore_onode_hits)/(sum(ceph_bluestore_onode_hits) + sum(ceph_bluestore_onode_misses))', - 20, - 8, - 4, - 8 - ), - u.addRowSchema(false, - true, - 'R/W Profile') + { gridPos: { x: 0, y: 16, w: 24, h: 1 } }, - OsdOverviewGraphPanel( - {}, - 'Read/Write Profile', - 'Show the read/write workload profile overtime', - 'short', - null, - null, - 'round(sum(irate(ceph_pool_rd[30s])))', - 'Reads', - 0, - 17, - 24, - 8 - ) - .addTargets([u.addTargetSchema( - 'round(sum(irate(ceph_pool_wr[30s])))', 'Writes' - )]), - ]), - 'osd-device-details.json': - local OsdDeviceDetailsPanel(title, - description, - formatY1, - labelY1, - expr1, - expr2, - legendFormat1, - legendFormat2, - x, - y, - w, - h) = - u.graphPanelSchema({}, - title, - description, - 'null', - false, - formatY1, - 'short', - labelY1, - null, - null, - 1, - '$datasource') - .addTargets( - [ - u.addTargetSchema(expr1, - legendFormat1), - u.addTargetSchema(expr2, legendFormat2), - ] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; + ||| % $.matchers(), + 'time_series', + 20, + 8, + 4, + 8 + ), + $.addRowSchema(false, + true, + 'R/W Profile') + { gridPos: { x: 0, y: 16, w: 24, h: 1 } }, + $.simpleGraphPanel( + {}, + 'Read/Write Profile', + 'Show the read/write workload profile overtime', + 'short', + null, + null, + 
'round(sum(rate(ceph_pool_rd{%(matchers)s}[$__rate_interval])))' % $.matchers(), + 'Reads', + 0, + 17, + 24, + 8 + ) + .addTargets([$.addTargetSchema( + 'round(sum(rate(ceph_pool_wr{%(matchers)s}[$__rate_interval])))' % $.matchers(), 'Writes' + )]), + ]), + 'osd-device-details.json': + local OsdDeviceDetailsPanel(title, + description, + formatY1, + labelY1, + expr1, + expr2, + legendFormat1, + legendFormat2, + x, + y, + w, + h) = + $.graphPanelSchema({}, + title, + description, + 'null', + false, + formatY1, + 'short', + labelY1, + null, + null, + 1, + '$datasource') + .addTargets( + [ + $.addTargetSchema(expr1, + legendFormat1), + $.addTargetSchema(expr2, legendFormat2), + ] + ) + { gridPos: { x: x, y: y, w: w, h: h } }; - u.dashboardSchema( - 'OSD device details', - '', - 'CrAHE0iZz', - 'now-3h', - '', - 16, - [], + $.dashboardSchema( + 'OSD device details', + '', + 'CrAHE0iZz', + 'now-3h', + '30s', + 16, + $._config.dashboardTags, + '' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, + '-- Grafana --', + true, + true, + 'rgba(0, 211, 255, 1)', + 'Annotations & Alerts', + 'dashboard' + ) + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.3.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', + 'prometheus', + 'default', + label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addTemplate( + $.addTemplateSchema('osd', + '$datasource', + 'label_values(ceph_osd_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(), + 1, + false, + 1, + 'OSD', + '(.*)') + ) + .addPanels([ + $.addRowSchema( + false, true, 'OSD Performance' + ) + { gridPos: { x: 0, y: 0, w: 24, h: 1 } }, + OsdDeviceDetailsPanel( + '$osd Latency', '', + 's', + 'Read (-) / Write (+)', + ||| + rate(ceph_osd_op_r_latency_sum{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval]) / + on (ceph_daemon) 
rate(ceph_osd_op_r_latency_count{%(matchers)s}[$__rate_interval]) + ||| % $.matchers(), + ||| + rate(ceph_osd_op_w_latency_sum{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval]) / + on (ceph_daemon) rate(ceph_osd_op_w_latency_count{%(matchers)s}[$__rate_interval]) + ||| % $.matchers(), + 'read', + 'write', + 0, + 1, + 6, + 9 + ) + .addSeriesOverride( { - refresh_intervals: ['5s', '10s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], - time_options: ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d'], + alias: 'read', + transform: 'negative-Y', } + ), + OsdDeviceDetailsPanel( + '$osd R/W IOPS', + '', + 'short', + 'Read (-) / Write (+)', + 'rate(ceph_osd_op_r{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval])' % $.matchers(), + 'rate(ceph_osd_op_w{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval])' % $.matchers(), + 'Reads', + 'Writes', + 6, + 1, + 6, + 9 ) - .addAnnotation( - u.addAnnotationSchema( - 1, - '-- Grafana --', - true, - true, - 'rgba(0, 211, 255, 1)', - 'Annotations & Alerts', - 'dashboard' - ) + .addSeriesOverride( + { alias: 'Reads', transform: 'negative-Y' } + ), + OsdDeviceDetailsPanel( + '$osd R/W Bytes', + '', + 'bytes', + 'Read (-) / Write (+)', + 'rate(ceph_osd_op_r_out_bytes{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval])' % $.matchers(), + 'rate(ceph_osd_op_w_in_bytes{%(matchers)s, ceph_daemon=~"$osd"}[$__rate_interval])' % $.matchers(), + 'Read Bytes', + 'Write Bytes', + 12, + 1, + 6, + 9 ) - .addRequired( - type='grafana', id='grafana', name='Grafana', version='5.3.2' + .addSeriesOverride({ alias: 'Read Bytes', transform: 'negative-Y' }), + $.addRowSchema( + false, true, 'Physical Device Performance' + ) + { gridPos: { x: 0, y: 10, w: 24, h: 1 } }, + OsdDeviceDetailsPanel( + 'Physical Device Latency for $osd', + '', + 's', + 'Read (-) / Write (+)', + ||| + ( + label_replace( + rate(node_disk_read_time_seconds_total{%(clusterMatcher)s}[$__rate_interval]) / + 
rate(node_disk_reads_completed_total{%(clusterMatcher)s}[$__rate_interval]), + "instance", "$1", "instance", "([^:.]*).*" + ) and on (instance, device) label_replace( + label_replace( + ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"}, + "device", "$1", "device", "/dev/(.*)" + ), "instance", "$1", "instance", "([^:.]*).*" + ) + ) + ||| % $.matchers(), + ||| + ( + label_replace( + rate(node_disk_write_time_seconds_total{%(clusterMatcher)s}[$__rate_interval]) / + rate(node_disk_writes_completed_total{%(clusterMatcher)s}[$__rate_interval]), + "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) + label_replace( + label_replace( + ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"}, "device", "$1", "device", "/dev/(.*)" + ), "instance", "$1", "instance", "([^:.]*).*" + ) + ) + ||| % $.matchers(), + '{{instance}}/{{device}} Reads', + '{{instance}}/{{device}} Writes', + 0, + 11, + 6, + 9 ) - .addRequired( - type='panel', id='graph', name='Graph', version='5.0.0' + .addSeriesOverride( + { alias: '/.*Reads/', transform: 'negative-Y' } + ), + OsdDeviceDetailsPanel( + 'Physical Device R/W IOPS for $osd', + '', + 'short', + 'Read (-) / Write (+)', + ||| + label_replace( + rate(node_disk_writes_completed_total{%(clusterMatcher)s}[$__rate_interval]), + "instance", "$1", "instance", "([^:.]*).*" + ) and on (instance, device) label_replace( + label_replace( + ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"}, + "device", "$1", "device", "/dev/(.*)" + ), "instance", "$1", "instance", "([^:.]*).*" + ) + ||| % $.matchers(), + ||| + label_replace( + rate(node_disk_reads_completed_total{%(clusterMatcher)s}[$__rate_interval]), + "instance", "$1", "instance", "([^:.]*).*" + ) and on (instance, device) label_replace( + label_replace( + ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"}, + "device", "$1", "device", "/dev/(.*)" + ), "instance", "$1", "instance", "([^:.]*).*" + ) + ||| % $.matchers(), + '{{device}} on 
{{instance}} Writes', + '{{device}} on {{instance}} Reads', + 6, + 11, + 6, + 9 ) - .addTemplate( - g.template.datasource('datasource', - 'prometheus', - 'default', - label='Data Source') + .addSeriesOverride( + { alias: '/.*Reads/', transform: 'negative-Y' } + ), + OsdDeviceDetailsPanel( + 'Physical Device R/W Bytes for $osd', + '', + 'Bps', + 'Read (-) / Write (+)', + ||| + label_replace( + rate(node_disk_read_bytes_total{%(clusterMatcher)s}[$__rate_interval]), "instance", "$1", "instance", "([^:.]*).*" + ) and on (instance, device) label_replace( + label_replace( + ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"}, + "device", "$1", "device", "/dev/(.*)" + ), "instance", "$1", "instance", "([^:.]*).*" + ) + ||| % $.matchers(), + ||| + label_replace( + rate(node_disk_written_bytes_total{%(clusterMatcher)s}[$__rate_interval]), "instance", "$1", "instance", "([^:.]*).*" + ) and on (instance, device) label_replace( + label_replace( + ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"}, + "device", "$1", "device", "/dev/(.*)" + ), "instance", "$1", "instance", "([^:.]*).*" + ) + ||| % $.matchers(), + '{{instance}} {{device}} Reads', + '{{instance}} {{device}} Writes', + 12, + 11, + 6, + 9 ) - .addTemplate( - u.addTemplateSchema('osd', - '$datasource', - 'label_values(ceph_osd_metadata,ceph_daemon)', - 1, - false, - 1, - 'OSD', - '(.*)') + .addSeriesOverride( + { alias: '/.*Reads/', transform: 'negative-Y' } + ), + $.graphPanelSchema( + {}, + 'Physical Device Util% for $osd', + '', + 'null', + false, + 'percentunit', + 'short', + null, + null, + null, + 1, + '$datasource' ) - .addPanels([ - u.addRowSchema( - false, true, 'OSD Performance' - ) + { gridPos: { x: 0, y: 0, w: 24, h: 1 } }, - OsdDeviceDetailsPanel( - '$osd Latency', - '', - 's', - 'Read (-) / Write (+)', - 'irate(ceph_osd_op_r_latency_sum{ceph_daemon=~"$osd"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m])', - 'irate(ceph_osd_op_w_latency_sum{ceph_daemon=~"$osd"}[1m]) / 
on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m])', - 'read', - 'write', - 0, - 1, - 6, - 9 - ) - .addSeriesOverride( - { - alias: 'read', - transform: 'negative-Y', - } - ), - OsdDeviceDetailsPanel( - '$osd R/W IOPS', - '', - 'short', - 'Read (-) / Write (+)', - 'irate(ceph_osd_op_r{ceph_daemon=~"$osd"}[1m])', - 'irate(ceph_osd_op_w{ceph_daemon=~"$osd"}[1m])', - 'Reads', - 'Writes', - 6, - 1, - 6, - 9 - ) - .addSeriesOverride( - { alias: 'Reads', transform: 'negative-Y' } - ), - OsdDeviceDetailsPanel( - '$osd R/W Bytes', - '', - 'bytes', - 'Read (-) / Write (+)', - 'irate(ceph_osd_op_r_out_bytes{ceph_daemon=~"$osd"}[1m])', - 'irate(ceph_osd_op_w_in_bytes{ceph_daemon=~"$osd"}[1m])', - 'Read Bytes', - 'Write Bytes', - 12, - 1, - 6, - 9 - ) - .addSeriesOverride({ alias: 'Read Bytes', transform: 'negative-Y' }), - u.addRowSchema( - false, true, 'Physical Device Performance' - ) + { gridPos: { x: 0, y: 10, w: 24, h: 1 } }, - OsdDeviceDetailsPanel( - 'Physical Device Latency for $osd', - '', - 's', - 'Read (-) / Write (+)', - '(label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"$osd"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*"))', - '(label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"$osd"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*"))', - '{{instance}}/{{device}} Reads', - '{{instance}}/{{device}} Writes', - 0, - 11, - 6, - 9 - ) - .addSeriesOverride( - { alias: '/.*Reads/', transform: 'negative-Y' } - ), - OsdDeviceDetailsPanel( - 'Physical Device R/W IOPS for $osd', - '', - 'short', - 'Read (-) / 
Write (+)', - 'label_replace(irate(node_disk_writes_completed_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"$osd"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*")', - 'label_replace(irate(node_disk_reads_completed_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"$osd"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*")', - '{{device}} on {{instance}} Writes', - '{{device}} on {{instance}} Reads', - 6, - 11, - 6, - 9 - ) - .addSeriesOverride( - { alias: '/.*Reads/', transform: 'negative-Y' } - ), - OsdDeviceDetailsPanel( - 'Physical Device R/W Bytes for $osd', - '', - 'Bps', - 'Read (-) / Write (+)', - 'label_replace(irate(node_disk_read_bytes_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"$osd"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*")', - 'label_replace(irate(node_disk_written_bytes_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"$osd"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*")', - '{{instance}} {{device}} Reads', - '{{instance}} {{device}} Writes', - 12, - 11, - 6, - 9 - ) - .addSeriesOverride( - { alias: '/.*Reads/', transform: 'negative-Y' } - ), - u.graphPanelSchema( - {}, - 'Physical Device Util% for $osd', - '', - 'null', - false, - 'percentunit', - 'short', - null, - null, - null, - 1, - '$datasource' - ) - .addTarget(u.addTargetSchema( - 'label_replace(irate(node_disk_io_time_seconds_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) 
label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"$osd"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*")', - '{{device}} on {{instance}}' - )) + { gridPos: { x: 18, y: 11, w: 6, h: 9 } }, - ]), - }, + .addTarget($.addTargetSchema( + ||| + label_replace( + rate(node_disk_io_time_seconds_total{%(clusterMatcher)s}[$__rate_interval]), + "instance", "$1", "instance", "([^:.]*).*" + ) and on (instance, device) label_replace( + label_replace( + ceph_disk_occupation_human{%(matchers)s, ceph_daemon=~"$osd"}, "device", "$1", "device", "/dev/(.*)" + ), "instance", "$1", "instance", "([^:.]*).*" + ) + ||| % $.matchers(), + '{{device}} on {{instance}}' + )) + { gridPos: { x: 18, y: 11, w: 6, h: 9 } }, + ]), } diff --git a/ceph/monitoring/ceph-mixin/dashboards/pool.libsonnet b/ceph/monitoring/ceph-mixin/dashboards/pool.libsonnet index 8fb4f815c..6444335d9 100644 --- a/ceph/monitoring/ceph-mixin/dashboards/pool.libsonnet +++ b/ceph/monitoring/ceph-mixin/dashboards/pool.libsonnet @@ -1,573 +1,552 @@ local g = import 'grafonnet/grafana.libsonnet'; -local u = import 'utils.libsonnet'; -{ - grafanaDashboards+:: { - 'pool-overview.json': - local PoolOverviewSingleStatPanel(format, - title, - description, - valueName, - expr, - instant, - targetFormat, - x, - y, - w, - h) = - u.addSingleStatSchema(['#299c46', 'rgba(237, 129, 40, 0.89)', '#d44a3a'], - '$datasource', - format, - title, - description, - valueName, - false, - 100, - false, - false, - '') - .addTarget(u.addTargetSchema(expr, '', targetFormat, 1, instant)) + { gridPos: { x: x, y: y, w: w, h: h } }; - - local PoolOverviewStyle(alias, - pattern, - type, - unit, - colorMode, - thresholds, - valueMaps) = - u.addStyle(alias, - colorMode, - [ - 'rgba(245, 54, 54, 0.9)', - 'rgba(237, 129, 40, 0.89)', - 'rgba(50, 172, 45, 0.97)', - ], - 'YYYY-MM-DD HH:mm:ss', - 2, - 1, - pattern, - thresholds, - type, - unit, - valueMaps); - - local PoolOverviewGraphPanel(title, - description, 
- formatY1, - labelY1, - expr, - legendFormat, - x, - y, - w, - h) = - u.graphPanelSchema({}, - title, - description, - 'null as zero', - false, - formatY1, - 'short', - labelY1, - null, - 0, - 1, - '$datasource') - .addTargets( - [u.addTargetSchema(expr, - legendFormat)] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; - - u.dashboardSchema( - 'Ceph Pools Overview', +(import 'utils.libsonnet') { + 'pool-overview.json': + $.dashboardSchema( + 'Ceph Pools Overview', + '', + 'z99hzWtmk', + 'now-1h', + '30s', + 22, + $._config.dashboardTags, + '' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, + '-- Grafana --', + true, + true, + 'rgba(0, 211, 255, 1)', + 'Annotations & Alerts', + 'dashboard' + ) + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addTemplate( + g.template.custom(label='TopK', + name='topk', + current='15', + query='15') + ) + .addPanels([ + $.simpleSingleStatPanel( + 'none', + 'Pools', '', - 'z99hzWtmk', - 'now-1h', - '15s', - 22, - [], + 'avg', + 'count(ceph_pool_metadata{%(matchers)s})' % $.matchers(), + true, + 'table', + 0, + 0, + 3, + 3 + ), + $.simpleSingleStatPanel( + 'none', + 'Pools with Compression', + 'Count of the pools that have compression enabled', + 'current', + 'count(ceph_pool_metadata{%(matchers)s, compression_mode!="none"})' % $.matchers(), + null, '', - { refresh_intervals: ['5s', '10s', '15s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], time_options: ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d'] } - ) - .addAnnotation( - u.addAnnotationSchema( - 1, - '-- Grafana --', - true, - true, - 'rgba(0, 211, 255, 1)', - 'Annotations & Alerts', - 'dashboard' - ) - ) - .addTemplate( - g.template.datasource('datasource', - 'prometheus', - 'Dashboard1', - label='Data Source') + 3, + 0, + 3, + 3 + ), + $.simpleSingleStatPanel( + 'bytes', + 'Total Raw Capacity', + 'Total raw 
capacity available to the cluster', + 'current', + 'sum(ceph_osd_stat_bytes{%(matchers)s})' % $.matchers(), + null, + '', + 6, + 0, + 3, + 3 + ), + $.simpleSingleStatPanel( + 'bytes', + 'Raw Capacity Consumed', + 'Total raw capacity consumed by user data and associated overheads (metadata + redundancy)', + 'current', + 'sum(ceph_pool_bytes_used{%(matchers)s})' % $.matchers(), + true, + '', + 9, + 0, + 3, + 3 + ), + $.simpleSingleStatPanel( + 'bytes', + 'Logical Stored ', + 'Total of client data stored in the cluster', + 'current', + 'sum(ceph_pool_stored{%(matchers)s})' % $.matchers(), + true, + '', + 12, + 0, + 3, + 3 + ), + $.simpleSingleStatPanel( + 'bytes', + 'Compression Savings', + 'A compression saving is determined as the data eligible to be compressed minus the capacity used to store the data after compression', + 'current', + ||| + sum( + ceph_pool_compress_under_bytes{%(matchers)s} - + ceph_pool_compress_bytes_used{%(matchers)s} + ) + ||| % $.matchers(), + null, + '', + 15, + 0, + 3, + 3 + ), + $.simpleSingleStatPanel( + 'percent', + 'Compression Eligibility', + 'Indicates how suitable the data is within the pools that are/have been enabled for compression - averaged across all pools holding compressed data', + 'current', + ||| + ( + sum(ceph_pool_compress_under_bytes{%(matchers)s} > 0) / + sum(ceph_pool_stored_raw{%(matchers)s} and ceph_pool_compress_under_bytes{%(matchers)s} > 0) + ) * 100 + ||| % $.matchers(), + null, + 'table', + 18, + 0, + 3, + 3 + ), + $.simpleSingleStatPanel( + 'none', + 'Compression Factor', + 'This factor describes the average ratio of data eligible to be compressed divided by the data actually stored. 
It does not account for data written that was ineligible for compression (too small, or compression yield too low)', + 'current', + ||| + sum( + ceph_pool_compress_under_bytes{%(matchers)s} > 0) + / sum(ceph_pool_compress_bytes_used{%(matchers)s} > 0 + ) + ||| % $.matchers(), + null, + '', + 21, + 0, + 3, + 3 + ), + $.addTableSchema( + '$datasource', + '', + { col: 5, desc: true }, + [ + $.overviewStyle('', 'Time', 'hidden', 'short'), + $.overviewStyle('', 'instance', 'hidden', 'short'), + $.overviewStyle('', 'job', 'hidden', 'short'), + $.overviewStyle('Pool Name', 'name', 'string', 'short'), + $.overviewStyle('Pool ID', 'pool_id', 'hidden', 'none'), + $.overviewStyle('Compression Factor', 'Value #A', 'number', 'none'), + $.overviewStyle('% Used', 'Value #D', 'number', 'percentunit', 'value', ['70', '85']), + $.overviewStyle('Usable Free', 'Value #B', 'number', 'bytes'), + $.overviewStyle('Compression Eligibility', 'Value #C', 'number', 'percent'), + $.overviewStyle('Compression Savings', 'Value #E', 'number', 'bytes'), + $.overviewStyle('Growth (5d)', 'Value #F', 'number', 'bytes', 'value', ['0', '0']), + $.overviewStyle('IOPS', 'Value #G', 'number', 'none'), + $.overviewStyle('Bandwidth', 'Value #H', 'number', 'Bps'), + $.overviewStyle('', '__name__', 'hidden', 'short'), + $.overviewStyle('', 'type', 'hidden', 'short'), + $.overviewStyle('', 'compression_mode', 'hidden', 'short'), + $.overviewStyle('Type', 'description', 'string', 'short'), + $.overviewStyle('Stored', 'Value #J', 'number', 'bytes'), + $.overviewStyle('', 'Value #I', 'hidden', 'short'), + $.overviewStyle('Compression', 'Value #K', 'string', 'short', null, [], [{ text: 'ON', value: '1' }]), + ], + 'Pool Overview', + 'table' ) - .addTemplate( - g.template.custom(label='TopK', - name='topk', - current='15', - query='15') + .addTargets( + [ + $.addTargetSchema( + ||| + ( + ceph_pool_compress_under_bytes{%(matchers)s} / + ceph_pool_compress_bytes_used{%(matchers)s} > 0 + ) and on(pool_id) ( + ( + 
(ceph_pool_compress_under_bytes{%(matchers)s} > 0) / + ceph_pool_stored_raw{%(matchers)s} + ) * 100 > 0.5 + ) + ||| % $.matchers(), + 'A', + 'table', + 1, + true + ), + $.addTargetSchema( + ||| + ceph_pool_max_avail{%(matchers)s} * + on(pool_id) group_left(name) ceph_pool_metadata{%(matchers)s} + ||| % $.matchers(), + 'B', + 'table', + 1, + true + ), + $.addTargetSchema( + ||| + ( + (ceph_pool_compress_under_bytes{%(matchers)s} > 0) / + ceph_pool_stored_raw{%(matchers)s} + ) * 100 + ||| % $.matchers(), + 'C', + 'table', + 1, + true + ), + $.addTargetSchema( + ||| + ceph_pool_percent_used{%(matchers)s} * + on(pool_id) group_left(name) ceph_pool_metadata{%(matchers)s} + ||| % $.matchers(), + 'D', + 'table', + 1, + true + ), + $.addTargetSchema( + ||| + ceph_pool_compress_under_bytes{%(matchers)s} - + ceph_pool_compress_bytes_used{%(matchers)s} > 0 + ||| % $.matchers(), + 'E', + 'table', + 1, + true + ), + $.addTargetSchema( + 'delta(ceph_pool_stored{%(matchers)s}[5d])' % $.matchers(), 'F', 'table', 1, true + ), + $.addTargetSchema( + ||| + rate(ceph_pool_rd{%(matchers)s}[$__rate_interval]) + + rate(ceph_pool_wr{%(matchers)s}[$__rate_interval]) + ||| % $.matchers(), + 'G', + 'table', + 1, + true + ), + $.addTargetSchema( + ||| + rate(ceph_pool_rd_bytes{%(matchers)s}[$__rate_interval]) + + rate(ceph_pool_wr_bytes{%(matchers)s}[$__rate_interval]) + ||| % $.matchers(), + 'H', + 'table', + 1, + true + ), + $.addTargetSchema( + 'ceph_pool_metadata{%(matchers)s}' % $.matchers(), 'I', 'table', 1, true + ), + $.addTargetSchema( + 'ceph_pool_stored{%(matchers)s} * on(pool_id) group_left ceph_pool_metadata{%(matchers)s}' % $.matchers(), + 'J', + 'table', + 1, + true + ), + $.addTargetSchema( + 'ceph_pool_metadata{%(matchers)s, compression_mode!="none"}' % $.matchers(), 'K', 'table', 1, true + ), + $.addTargetSchema('', 'L', '', '', null), + ] + ) + { gridPos: { x: 0, y: 3, w: 24, h: 6 } }, + $.simpleGraphPanel( + {}, + 'Top $topk Client IOPS by Pool', + 'This chart shows the 
sum of read and write IOPS from all clients by pool', + 'short', + 'IOPS', + 0, + ||| + topk($topk, + round( + ( + rate(ceph_pool_rd{%(matchers)s}[$__rate_interval]) + + rate(ceph_pool_wr{%(matchers)s}[$__rate_interval]) + ), 1 + ) * on(pool_id) group_left(instance,name) ceph_pool_metadata{%(matchers)s}) + ||| % $.matchers(), + '{{name}} ', + 0, + 9, + 12, + 8 ) - .addPanels([ - PoolOverviewSingleStatPanel( - 'none', - 'Pools', - '', - 'avg', - 'count(ceph_pool_metadata)', - true, - 'table', - 0, - 0, - 3, - 3 - ), - PoolOverviewSingleStatPanel( - 'none', - 'Pools with Compression', - 'Count of the pools that have compression enabled', - 'current', - 'count(ceph_pool_metadata{compression_mode!="none"})', - null, - '', - 3, - 0, - 3, - 3 - ), - PoolOverviewSingleStatPanel( - 'bytes', - 'Total Raw Capacity', - 'Total raw capacity available to the cluster', - 'current', - 'sum(ceph_osd_stat_bytes)', - null, - '', - 6, - 0, - 3, - 3 - ), - PoolOverviewSingleStatPanel( - 'bytes', - 'Raw Capacity Consumed', - 'Total raw capacity consumed by user data and associated overheads (metadata + redundancy)', - 'current', - 'sum(ceph_pool_bytes_used)', - true, - '', - 9, - 0, - 3, - 3 - ), - PoolOverviewSingleStatPanel( - 'bytes', - 'Logical Stored ', - 'Total of client data stored in the cluster', - 'current', - 'sum(ceph_pool_stored)', - true, - '', - 12, - 0, - 3, - 3 - ), - PoolOverviewSingleStatPanel( - 'bytes', - 'Compression Savings', - 'A compression saving is determined as the data eligible to be compressed minus the capacity used to store the data after compression', - 'current', - 'sum(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used)', - null, - '', - 15, - 0, - 3, - 3 - ), - PoolOverviewSingleStatPanel( - 'percent', - 'Compression Eligibility', - 'Indicates how suitable the data is within the pools that are/have been enabled for compression - averaged across all pools holding compressed data\n', - 'current', - '(sum(ceph_pool_compress_under_bytes > 0) / 
sum(ceph_pool_stored_raw and ceph_pool_compress_under_bytes > 0)) * 100', - null, - 'table', - 18, - 0, - 3, - 3 - ), - PoolOverviewSingleStatPanel( - 'none', - 'Compression Factor', - 'This factor describes the average ratio of data eligible to be compressed divided by the data actually stored. It does not account for data written that was ineligible for compression (too small, or compression yield too low)', - 'current', - 'sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_compress_bytes_used > 0)', - null, - '', - 21, - 0, - 3, - 3 - ), - u.addTableSchema( - '$datasource', - '', - { col: 5, desc: true }, - [ - PoolOverviewStyle('', 'Time', 'hidden', 'short', null, [], []), - PoolOverviewStyle('', 'instance', 'hidden', 'short', null, [], []), - PoolOverviewStyle('', 'job', 'hidden', 'short', null, [], []), - PoolOverviewStyle('Pool Name', 'name', 'string', 'short', null, [], []), - PoolOverviewStyle('Pool ID', 'pool_id', 'hidden', 'none', null, [], []), - PoolOverviewStyle('Compression Factor', 'Value #A', 'number', 'none', null, [], []), - PoolOverviewStyle('% Used', 'Value #D', 'number', 'percentunit', 'value', ['70', '85'], []), - PoolOverviewStyle('Usable Free', 'Value #B', 'number', 'bytes', null, [], []), - PoolOverviewStyle('Compression Eligibility', 'Value #C', 'number', 'percent', null, [], []), - PoolOverviewStyle('Compression Savings', 'Value #E', 'number', 'bytes', null, [], []), - PoolOverviewStyle('Growth (5d)', 'Value #F', 'number', 'bytes', 'value', ['0', '0'], []), - PoolOverviewStyle('IOPS', 'Value #G', 'number', 'none', null, [], []), - PoolOverviewStyle('Bandwidth', 'Value #H', 'number', 'Bps', null, [], []), - PoolOverviewStyle('', '__name__', 'hidden', 'short', null, [], []), - PoolOverviewStyle('', 'type', 'hidden', 'short', null, [], []), - PoolOverviewStyle('', 'compression_mode', 'hidden', 'short', null, [], []), - PoolOverviewStyle('Type', 'description', 'string', 'short', null, [], []), - PoolOverviewStyle('Stored', 'Value #J', 
'number', 'bytes', null, [], []), - PoolOverviewStyle('', 'Value #I', 'hidden', 'short', null, [], []), - PoolOverviewStyle('Compression', 'Value #K', 'string', 'short', null, [], [{ text: 'ON', value: '1' }]), - ], - 'Pool Overview', - 'table' + .addTarget( + $.addTargetSchema( + ||| + topk($topk, + rate(ceph_pool_wr{%(matchers)s}[$__rate_interval]) + + on(pool_id) group_left(instance,name) ceph_pool_metadata{%(matchers)s} + ) + ||| % $.matchers(), + '{{name}} - write' ) - .addTargets( - [ - u.addTargetSchema( - '(ceph_pool_compress_under_bytes / ceph_pool_compress_bytes_used > 0) and on(pool_id) (((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100 > 0.5)', - 'A', - 'table', - 1, - true - ), - u.addTargetSchema( - 'ceph_pool_max_avail * on(pool_id) group_left(name) ceph_pool_metadata', - 'B', - 'table', - 1, - true - ), - u.addTargetSchema( - '((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100', - 'C', - 'table', - 1, - true - ), - u.addTargetSchema( - '(ceph_pool_percent_used * on(pool_id) group_left(name) ceph_pool_metadata)', - 'D', - 'table', - 1, - true - ), - u.addTargetSchema( - '(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used > 0)', - 'E', - 'table', - 1, - true - ), - u.addTargetSchema( - 'delta(ceph_pool_stored[5d])', 'F', 'table', 1, true - ), - u.addTargetSchema( - 'rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])', - 'G', - 'table', - 1, - true - ), - u.addTargetSchema( - 'rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])', - 'H', - 'table', - 1, - true - ), - u.addTargetSchema( - 'ceph_pool_metadata', 'I', 'table', 1, true - ), - u.addTargetSchema( - 'ceph_pool_stored * on(pool_id) group_left ceph_pool_metadata', - 'J', - 'table', - 1, - true - ), - u.addTargetSchema( - 'ceph_pool_metadata{compression_mode!="none"}', 'K', 'table', 1, true - ), - u.addTargetSchema('', 'L', '', '', null), - ] - ) + { gridPos: { x: 0, y: 3, w: 24, h: 6 } }, - PoolOverviewGraphPanel( - 'Top $topk Client IOPS 
by Pool', - 'This chart shows the sum of read and write IOPS from all clients by pool', - 'short', - 'IOPS', - 'topk($topk,round((rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])),1) * on(pool_id) group_left(instance,name) ceph_pool_metadata) ', - '{{name}} ', - 0, - 9, - 12, - 8 - ) - .addTarget( - u.addTargetSchema( - 'topk($topk,rate(ceph_pool_wr[30s]) + on(pool_id) group_left(instance,name) ceph_pool_metadata) ', - '{{name}} - write' + ), + $.simpleGraphPanel( + {}, + 'Top $topk Client Bandwidth by Pool', + 'The chart shows the sum of read and write bytes from all clients, by pool', + 'Bps', + 'Throughput', + 0, + ||| + topk($topk, + ( + rate(ceph_pool_rd_bytes{%(matchers)s}[$__rate_interval]) + + rate(ceph_pool_wr_bytes{%(matchers)s}[$__rate_interval]) + ) * on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s} ) - ), - PoolOverviewGraphPanel( - 'Top $topk Client Bandwidth by Pool', - 'The chart shows the sum of read and write bytes from all clients, by pool', - 'Bps', - 'Throughput', - 'topk($topk,(rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])) * on(pool_id) group_left(instance,name) ceph_pool_metadata)', - '{{name}}', - 12, - 9, - 12, - 8 - ), - PoolOverviewGraphPanel( - 'Pool Capacity Usage (RAW)', - 'Historical view of capacity usage, to help identify growth and trends in pool consumption', - 'bytes', - 'Capacity Used', - 'ceph_pool_bytes_used * on(pool_id) group_right ceph_pool_metadata', - '{{name}}', - 0, - 17, - 24, - 7 - ), - ]), - 'pool-detail.json': - local PoolDetailSingleStatPanel(format, - title, - description, - valueName, - colorValue, - gaugeMaxValue, - gaugeShow, - sparkLineShow, - thresholds, - expr, - targetFormat, - x, - y, - w, - h) = - u.addSingleStatSchema(['#299c46', 'rgba(237, 129, 40, 0.89)', '#d44a3a'], - '$datasource', - format, - title, - description, - valueName, - colorValue, - gaugeMaxValue, - gaugeShow, - sparkLineShow, - thresholds) - .addTarget(u.addTargetSchema(expr, '', targetFormat)) + { 
gridPos: { x: x, y: y, w: w, h: h } }; - - local PoolDetailGraphPanel(alias, - title, - description, - formatY1, - labelY1, - expr, - legendFormat, - x, - y, - w, - h) = - u.graphPanelSchema(alias, - title, - description, - 'null as zero', - false, - formatY1, - 'short', - labelY1, - null, - null, - 1, - '$datasource') - .addTargets( - [u.addTargetSchema(expr, legendFormat)] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; - - u.dashboardSchema( - 'Ceph Pool Details', + ||| % $.matchers(), + '{{name}}', + 12, + 9, + 12, + 8 + ), + $.simpleGraphPanel( + {}, + 'Pool Capacity Usage (RAW)', + 'Historical view of capacity usage, to help identify growth and trends in pool consumption', + 'bytes', + 'Capacity Used', + 0, + 'ceph_pool_bytes_used{%(matchers)s} * on(pool_id) group_right ceph_pool_metadata{%(matchers)s}' % $.matchers(), + '{{name}}', + 0, + 17, + 24, + 7 + ), + ]), + 'pool-detail.json': + $.dashboardSchema( + 'Ceph Pool Details', + '', + '-xyV8KCiz', + 'now-1h', + '30s', + 22, + $._config.dashboardTags, + '' + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.3.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addRequired( + type='panel', id='singlestat', name='Singlestat', version='5.0.0' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, + '-- Grafana --', + true, + true, + 'rgba(0, 211, 255, 1)', + 'Annotations & Alerts', + 'dashboard' + ) + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addTemplate( + $.addTemplateSchema('pool_name', + '$datasource', + 'label_values(ceph_pool_metadata{%(matchers)s}, name)' % $.matchers(), + 1, + false, + 1, + 'Pool Name', + '') + ) + .addPanels([ + $.gaugeSingleStatPanel( + 'percentunit', + 'Capacity used', '', - '-xyV8KCiz', - 'now-1h', - '15s', - 22, - [], + 'current', + true, + 1, + true, + true, + '.7,.8', + 
||| + (ceph_pool_stored{%(matchers)s} / (ceph_pool_stored{%(matchers)s} + ceph_pool_max_avail{%(matchers)s})) * + on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"} + ||| % $.matchers(), + 'time_series', + 0, + 0, + 7, + 7 + ), + $.gaugeSingleStatPanel( + 's', + 'Time till full', + 'Time till pool is full assuming the average fill rate of the last 6 hours', + false, + 100, + false, + false, '', + 'current', + ||| + (ceph_pool_max_avail{%(matchers)s} / deriv(ceph_pool_stored{%(matchers)s}[6h])) * + on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"} > 0 + ||| % $.matchers(), + 'time_series', + 7, + 0, + 5, + 7 + ), + $.simpleGraphPanel( { - refresh_intervals: ['5s', '10s', '15s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], - time_options: ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d'], - } - ) - .addRequired( - type='grafana', id='grafana', name='Grafana', version='5.3.2' - ) - .addRequired( - type='panel', id='graph', name='Graph', version='5.0.0' - ) - .addRequired( - type='panel', id='singlestat', name='Singlestat', version='5.0.0' + read_op_per_sec: + '#3F6833', + write_op_per_sec: '#E5AC0E', + }, + '$pool_name Object Ingress/Egress', + '', + 'ops', + 'Objects out(-) / in(+) ', + null, + ||| + deriv(ceph_pool_objects{%(matchers)s}[1m]) * + on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"} + ||| % $.matchers(), + 'Objects per second', + 12, + 0, + 12, + 7 + ), + $.simpleGraphPanel( + { + read_op_per_sec: '#3F6833', + write_op_per_sec: '#E5AC0E', + }, + '$pool_name Client IOPS', + '', + 'iops', + 'Read (-) / Write (+)', + null, + ||| + rate(ceph_pool_rd{%(matchers)s}[$__rate_interval]) * + on(pool_id) group_left(instance,name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"} + ||| % $.matchers(), + 'reads', + 0, + 7, + 12, + 7 ) - .addAnnotation( - u.addAnnotationSchema( - 1, - '-- Grafana --', - true, - true, - 
'rgba(0, 211, 255, 1)', - 'Annotations & Alerts', - 'dashboard' + .addSeriesOverride({ alias: 'reads', transform: 'negative-Y' }) + .addTarget( + $.addTargetSchema( + ||| + rate(ceph_pool_wr{%(matchers)s}[$__rate_interval]) * + on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"} + ||| % $.matchers(), + 'writes' ) + ), + $.simpleGraphPanel( + { + read_op_per_sec: '#3F6833', + write_op_per_sec: '#E5AC0E', + }, + '$pool_name Client Throughput', + '', + 'Bps', + 'Read (-) / Write (+)', + null, + ||| + rate(ceph_pool_rd_bytes{%(matchers)s}[$__rate_interval]) + + on(pool_id) group_left(instance, name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"} + ||| % $.matchers(), + 'reads', + 12, + 7, + 12, + 7 ) - .addTemplate( - g.template.datasource('datasource', - 'prometheus', - 'Prometheus admin.virt1.home.fajerski.name:9090', - label='Data Source') - ) - .addTemplate( - u.addTemplateSchema('pool_name', - '$datasource', - 'label_values(ceph_pool_metadata,name)', - 1, - false, - 1, - 'Pool Name', - '') - ) - .addPanels([ - PoolDetailSingleStatPanel( - 'percentunit', - 'Capacity used', - '', - 'current', - true, - 1, - true, - true, - '.7,.8', - '(ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail)) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~"$pool_name"}', - 'time_series', - 0, - 0, - 7, - 7 - ), - PoolDetailSingleStatPanel( - 's', - 'Time till full', - 'Time till pool is full assuming the average fill rate of the last 6 hours', - false, - 100, - false, - false, - '', - 'current', - '(ceph_pool_max_avail / deriv(ceph_pool_stored[6h])) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~"$pool_name"} > 0', - 'time_series', - 7, - 0, - 5, - 7 - ), - PoolDetailGraphPanel( - { - read_op_per_sec: - '#3F6833', - write_op_per_sec: '#E5AC0E', - }, - '$pool_name Object Ingress/Egress', - '', - 'ops', - 'Objects out(-) / in(+) ', - 'deriv(ceph_pool_objects[1m]) * on(pool_id) 
group_left(instance,name) ceph_pool_metadata{name=~"$pool_name"}', - 'Objects per second', - 12, - 0, - 12, - 7 - ), - PoolDetailGraphPanel( - { - read_op_per_sec: '#3F6833', - write_op_per_sec: '#E5AC0E', - }, '$pool_name Client IOPS', '', 'iops', 'Read (-) / Write (+)', 'irate(ceph_pool_rd[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~"$pool_name"}', 'reads', 0, 7, 12, 7 - ) - .addSeriesOverride({ alias: 'reads', transform: 'negative-Y' }) - .addTarget( - u.addTargetSchema( - 'irate(ceph_pool_wr[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~"$pool_name"}', 'writes' - ) - ), - PoolDetailGraphPanel( - { - read_op_per_sec: '#3F6833', - write_op_per_sec: '#E5AC0E', - }, - '$pool_name Client Throughput', - '', - 'Bps', - 'Read (-) / Write (+)', - 'irate(ceph_pool_rd_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~"$pool_name"}', - 'reads', - 12, - 7, - 12, - 7 + .addSeriesOverride({ alias: 'reads', transform: 'negative-Y' }) + .addTarget( + $.addTargetSchema( + ||| + rate(ceph_pool_wr_bytes{%(matchers)s}[$__rate_interval]) + + on(pool_id) group_left(instance,name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"} + ||| % $.matchers(), + 'writes' ) - .addSeriesOverride({ alias: 'reads', transform: 'negative-Y' }) - .addTarget( - u.addTargetSchema( - 'irate(ceph_pool_wr_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~"$pool_name"}', - 'writes' - ) - ), - PoolDetailGraphPanel( - { - read_op_per_sec: '#3F6833', - write_op_per_sec: '#E5AC0E', - }, - '$pool_name Objects', - '', - 'short', - 'Objects', - 'ceph_pool_objects * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~"$pool_name"}', - 'Number of Objects', - 0, - 14, - 12, - 7 - ), - ]), - }, + ), + $.simpleGraphPanel( + { + read_op_per_sec: '#3F6833', + write_op_per_sec: '#E5AC0E', + }, + '$pool_name Objects', + '', + 'short', + 'Objects', + null, + ||| + ceph_pool_objects{%(matchers)s} * + 
on(pool_id) group_left(instance,name) ceph_pool_metadata{%(matchers)s, name=~"$pool_name"} + ||| % $.matchers(), + 'Number of Objects', + 0, + 14, + 12, + 7 + ), + ]), } diff --git a/ceph/monitoring/ceph-mixin/dashboards/rbd.libsonnet b/ceph/monitoring/ceph-mixin/dashboards/rbd.libsonnet index a4ca6982d..0eca5a877 100644 --- a/ceph/monitoring/ceph-mixin/dashboards/rbd.libsonnet +++ b/ceph/monitoring/ceph-mixin/dashboards/rbd.libsonnet @@ -1,306 +1,337 @@ local g = import 'grafonnet/grafana.libsonnet'; local u = import 'utils.libsonnet'; -{ - grafanaDashboards+:: { - 'rbd-details.json': - local RbdDetailsPanel(title, formatY1, expr1, expr2, x, y, w, h) = - u.graphPanelSchema({}, - title, - '', - 'null as zero', - false, - formatY1, - formatY1, - null, - null, - 0, - 1, - '$Datasource') - .addTargets( - [ - u.addTargetSchema(expr1, - '{{pool}} Write'), - u.addTargetSchema(expr2, '{{pool}} Read'), - ] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; +(import 'utils.libsonnet') { + 'rbd-details.json': + local RbdDetailsPanel(title, formatY1, expr1, expr2, x, y, w, h) = + $.graphPanelSchema({}, + title, + '', + 'null as zero', + false, + formatY1, + formatY1, + null, + null, + 0, + 1, + '$datasource') + .addTargets( + [ + $.addTargetSchema(expr1, + '{{pool}} Write'), + $.addTargetSchema(expr2, '{{pool}} Read'), + ] + ) + { gridPos: { x: x, y: y, w: w, h: h } }; - u.dashboardSchema( - 'RBD Details', - 'Detailed Performance of RBD Images (IOPS/Throughput/Latency)', - 'YhCYGcuZz', - 'now-1h', - false, - 16, - [], - '', - { - refresh_intervals: ['5s', '10s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], - time_options: ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d'], - } - ) - .addAnnotation( - u.addAnnotationSchema( - 1, - '-- Grafana --', - true, - true, - 'rgba(0, 211, 255, 1)', - 'Annotations & Alerts', - 'dashboard' - ) - ) - .addRequired( - type='grafana', id='grafana', name='Grafana', version='5.3.3' - ) - .addRequired( - type='panel', id='graph', 
name='Graph', version='5.0.0' + $.dashboardSchema( + 'RBD Details', + 'Detailed Performance of RBD Images (IOPS/Throughput/Latency)', + 'YhCYGcuZz', + 'now-1h', + '30s', + 16, + $._config.dashboardTags, + '' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, + '-- Grafana --', + true, + true, + 'rgba(0, 211, 255, 1)', + 'Annotations & Alerts', + 'dashboard' ) - .addTemplate( - g.template.datasource('Datasource', 'prometheus', 'default', label=null) - ) - .addTemplate( - u.addTemplateSchema('Pool', - '$Datasource', - 'label_values(pool)', - 1, - false, - 0, - '', - '') - ) - .addTemplate( - u.addTemplateSchema('Image', - '$Datasource', - 'label_values(image)', - 1, - false, - 0, - '', - '') - ) - .addPanels([ - RbdDetailsPanel( - 'IOPS', - 'iops', - 'irate(ceph_rbd_write_ops{pool="$Pool", image="$Image"}[30s])', - 'irate(ceph_rbd_read_ops{pool="$Pool", image="$Image"}[30s])', - 0, - 0, - 8, - 9 - ), - RbdDetailsPanel( - 'Throughput', - 'Bps', - 'irate(ceph_rbd_write_bytes{pool="$Pool", image="$Image"}[30s])', - 'irate(ceph_rbd_read_bytes{pool="$Pool", image="$Image"}[30s])', - 8, - 0, - 8, - 9 - ), - RbdDetailsPanel( - 'Average Latency', - 'ns', - 'irate(ceph_rbd_write_latency_sum{pool="$Pool", image="$Image"}[30s]) / irate(ceph_rbd_write_latency_count{pool="$Pool", image="$Image"}[30s])', - 'irate(ceph_rbd_read_latency_sum{pool="$Pool", image="$Image"}[30s]) / irate(ceph_rbd_read_latency_count{pool="$Pool", image="$Image"}[30s])', - 16, - 0, - 8, - 9 - ), - ]), - 'rbd-overview.json': - local RgwOverviewStyle(alias, pattern, type, unit) = - u.addStyle(alias, - null, - ['rgba(245, 54, 54, 0.9)', 'rgba(237, 129, 40, 0.89)', 'rgba(50, 172, 45, 0.97)'], - 'YYYY-MM-DD HH:mm:ss', - 2, - 1, - pattern, - [], - type, - unit, - []); - local RbdOverviewPanel(title, - formatY1, - expr1, - expr2, - legendFormat1, - legendFormat2, - x, - y, - w, - h) = - u.graphPanelSchema({}, - title, - '', - 'null', - false, + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', 
version='5.3.3' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addTemplate( + $.addTemplateSchema('pool', + '$datasource', + 'label_values(pool)', + 1, + false, + 0, + '', + '') + ) + .addTemplate( + $.addTemplateSchema('image', + '$datasource', + 'label_values(image)', + 1, + false, + 0, + '', + '') + ) + .addPanels([ + RbdDetailsPanel( + 'IOPS', + 'iops', + 'rate(ceph_rbd_write_ops{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval])' % $.matchers() + , + 'rate(ceph_rbd_read_ops{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval])' % $.matchers(), + 0, + 0, + 8, + 9 + ), + RbdDetailsPanel( + 'Throughput', + 'Bps', + 'rate(ceph_rbd_write_bytes{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval])' % $.matchers(), + 'rate(ceph_rbd_read_bytes{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval])' % $.matchers(), + 8, + 0, + 8, + 9 + ), + RbdDetailsPanel( + 'Average Latency', + 'ns', + ||| + rate(ceph_rbd_write_latency_sum{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval]) / + rate(ceph_rbd_write_latency_count{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval]) + ||| % $.matchers(), + ||| + rate(ceph_rbd_read_latency_sum{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval]) / + rate(ceph_rbd_read_latency_count{%(matchers)s, pool="$pool", image="$image"}[$__rate_interval]) + ||| % $.matchers(), + 16, + 0, + 8, + 9 + ), + ]), + 'rbd-overview.json': + local RbdOverviewPanel(title, formatY1, - 'short', - null, - null, - 0, - 1, - '$datasource') - .addTargets( - [ - u.addTargetSchema(expr1, - legendFormat1), - u.addTargetSchema(expr2, - legendFormat2), - ] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; + expr1, + expr2, + legendFormat1, + legendFormat2, + x, + y, + w, + h) = + 
$.graphPanelSchema({}, + title, + '', + 'null', + false, + formatY1, + 'short', + null, + null, + 0, + 1, + '$datasource') + .addTargets( + [ + $.addTargetSchema(expr1, + legendFormat1), + $.addTargetSchema(expr2, + legendFormat2), + ] + ) + { gridPos: { x: x, y: y, w: w, h: h } }; - u.dashboardSchema( - 'RBD Overview', - '', - '41FrpeUiz', - 'now-1h', - '30s', + $.dashboardSchema( + 'RBD Overview', + '', + '41FrpeUiz', + 'now-1h', + '30s', + 16, + $._config.dashboardTags + ['overview'], + '' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, + '-- Grafana --', + true, + true, + 'rgba(0, 211, 255, 1)', + 'Annotations & Alerts', + 'dashboard' + ) + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.4.2' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addRequired( + type='datasource', id='prometheus', name='Prometheus', version='5.0.0' + ) + .addRequired( + type='panel', id='table', name='Table', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addPanels([ + RbdOverviewPanel( + 'IOPS', + 'short', + 'round(sum(rate(ceph_rbd_write_ops{%(matchers)s}[$__rate_interval])))' % $.matchers(), + 'round(sum(rate(ceph_rbd_read_ops{%(matchers)s}[$__rate_interval])))' % $.matchers(), + 'Writes', + 'Reads', + 0, + 0, + 8, + 7 + ), + RbdOverviewPanel( + 'Throughput', + 'Bps', + 'round(sum(rate(ceph_rbd_write_bytes{%(matchers)s}[$__rate_interval])))' % $.matchers(), + 'round(sum(rate(ceph_rbd_read_bytes{%(matchers)s}[$__rate_interval])))' % $.matchers(), + 'Write', + 'Read', + 8, + 0, + 8, + 7 + ), + RbdOverviewPanel( + 'Average Latency', + 'ns', + ||| + round( + sum(rate(ceph_rbd_write_latency_sum{%(matchers)s}[$__rate_interval])) / + sum(rate(ceph_rbd_write_latency_count{%(matchers)s}[$__rate_interval])) + ) + ||| % $.matchers(), + ||| + round( + 
sum(rate(ceph_rbd_read_latency_sum{%(matchers)s}[$__rate_interval])) / + sum(rate(ceph_rbd_read_latency_count{%(matchers)s}[$__rate_interval])) + ) + ||| % $.matchers(), + 'Write', + 'Read', 16, - ['overview'], + 0, + 8, + 7 + ), + $.addTableSchema( + '$datasource', '', - { - refresh_intervals: ['5s', '10s', '15s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], - time_options: ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d'], - } + { col: 3, desc: true }, + [ + $.overviewStyle('Pool', 'pool', 'string', 'short'), + $.overviewStyle('Image', 'image', 'string', 'short'), + $.overviewStyle('IOPS', 'Value', 'number', 'iops'), + $.overviewStyle('', '/.*/', 'hidden', 'short'), + ], + 'Highest IOPS', + 'table' ) - .addAnnotation( - u.addAnnotationSchema( + .addTarget( + $.addTargetSchema( + ||| + topk(10, + ( + sort(( + rate(ceph_rbd_write_ops{%(matchers)s}[$__rate_interval]) + + on (image, pool, namespace) rate(ceph_rbd_read_ops{%(matchers)s}[$__rate_interval]) + )) + ) + ) + ||| % $.matchers(), + '', + 'table', 1, - '-- Grafana --', - true, - true, - 'rgba(0, 211, 255, 1)', - 'Annotations & Alerts', - 'dashboard' + true ) + ) + { gridPos: { x: 0, y: 7, w: 8, h: 7 } }, + $.addTableSchema( + '$datasource', + '', + { col: 3, desc: true }, + [ + $.overviewStyle('Pool', 'pool', 'string', 'short'), + $.overviewStyle('Image', 'image', 'string', 'short'), + $.overviewStyle('Throughput', 'Value', 'number', 'Bps'), + $.overviewStyle('', '/.*/', 'hidden', 'short'), + ], + 'Highest Throughput', + 'table' ) - .addRequired( - type='grafana', id='grafana', name='Grafana', version='5.4.2' - ) - .addRequired( - type='panel', id='graph', name='Graph', version='5.0.0' - ) - .addRequired( - type='datasource', id='prometheus', name='Prometheus', version='5.0.0' - ) - .addRequired( - type='panel', id='table', name='Table', version='5.0.0' - ) - .addTemplate( - g.template.datasource('datasource', - 'prometheus', - 'default', - label='Data Source') - ) - .addPanels([ - 
RbdOverviewPanel( - 'IOPS', - 'short', - 'round(sum(irate(ceph_rbd_write_ops[30s])))', - 'round(sum(irate(ceph_rbd_read_ops[30s])))', - 'Writes', - 'Reads', - 0, - 0, - 8, - 7 - ), - RbdOverviewPanel( - 'Throughput', - 'Bps', - 'round(sum(irate(ceph_rbd_write_bytes[30s])))', - 'round(sum(irate(ceph_rbd_read_bytes[30s])))', - 'Write', - 'Read', - 8, - 0, - 8, - 7 - ), - RbdOverviewPanel( - 'Average Latency', - 'ns', - 'round(sum(irate(ceph_rbd_write_latency_sum[30s])) / sum(irate(ceph_rbd_write_latency_count[30s])))', - 'round(sum(irate(ceph_rbd_read_latency_sum[30s])) / sum(irate(ceph_rbd_read_latency_count[30s])))', - 'Write', - 'Read', - 16, - 0, - 8, - 7 - ), - u.addTableSchema( - '$datasource', - '', - { col: 3, desc: true }, - [ - RgwOverviewStyle('Pool', 'pool', 'string', 'short'), - RgwOverviewStyle('Image', 'image', 'string', 'short'), - RgwOverviewStyle('IOPS', 'Value', 'number', 'iops'), - RgwOverviewStyle('', '/.*/', 'hidden', 'short'), - ], - 'Highest IOPS', - 'table' - ) - .addTarget( - u.addTargetSchema( - 'topk(10, (sort((irate(ceph_rbd_write_ops[30s]) + on (image, pool, namespace) irate(ceph_rbd_read_ops[30s])))))', - '', - 'table', - 1, - true - ) - ) + { gridPos: { x: 0, y: 7, w: 8, h: 7 } }, - u.addTableSchema( - '$datasource', + .addTarget( + $.addTargetSchema( + ||| + topk(10, + sort( + sum( + rate(ceph_rbd_read_bytes{%(matchers)s}[$__rate_interval]) + + rate(ceph_rbd_write_bytes{%(matchers)s}[$__rate_interval]) + ) by (pool, image, namespace) + ) + ) + ||| % $.matchers(), '', - { col: 3, desc: true }, - [ - RgwOverviewStyle('Pool', 'pool', 'string', 'short'), - RgwOverviewStyle('Image', 'image', 'string', 'short'), - RgwOverviewStyle('Throughput', 'Value', 'number', 'Bps'), - RgwOverviewStyle('', '/.*/', 'hidden', 'short'), - ], - 'Highest Throughput', - 'table' + 'table', + 1, + true ) - .addTarget( - u.addTargetSchema( - 'topk(10, sort(sum(irate(ceph_rbd_read_bytes[30s]) + irate(ceph_rbd_write_bytes[30s])) by (pool, image, namespace)))', - 
'', - 'table', - 1, - true - ) - ) + { gridPos: { x: 8, y: 7, w: 8, h: 7 } }, - u.addTableSchema( - '$datasource', + ) + { gridPos: { x: 8, y: 7, w: 8, h: 7 } }, + $.addTableSchema( + '$datasource', + '', + { col: 3, desc: true }, + [ + $.overviewStyle('Pool', 'pool', 'string', 'short'), + $.overviewStyle('Image', 'image', 'string', 'short'), + $.overviewStyle('Latency', 'Value', 'number', 'ns'), + $.overviewStyle('', '/.*/', 'hidden', 'short'), + ], + 'Highest Latency', + 'table' + ) + .addTarget( + $.addTargetSchema( + ||| + topk(10, + sum( + rate(ceph_rbd_write_latency_sum{%(matchers)s}[$__rate_interval]) / + clamp_min(rate(ceph_rbd_write_latency_count{%(matchers)s}[$__rate_interval]), 1) + + rate(ceph_rbd_read_latency_sum{%(matchers)s}[$__rate_interval]) / + clamp_min(rate(ceph_rbd_read_latency_count{%(matchers)s}[$__rate_interval]), 1) + ) by (pool, image, namespace) + ) + ||| % $.matchers(), '', - { col: 3, desc: true }, - [ - RgwOverviewStyle('Pool', 'pool', 'string', 'short'), - RgwOverviewStyle('Image', 'image', 'string', 'short'), - RgwOverviewStyle('Latency', 'Value', 'number', 'ns'), - RgwOverviewStyle('', '/.*/', 'hidden', 'short'), - ], - 'Highest Latency', - 'table' + 'table', + 1, + true ) - .addTarget( - u.addTargetSchema( - 'topk(10,\n sum(\n irate(ceph_rbd_write_latency_sum[30s]) / clamp_min(irate(ceph_rbd_write_latency_count[30s]), 1) +\n irate(ceph_rbd_read_latency_sum[30s]) / clamp_min(irate(ceph_rbd_read_latency_count[30s]), 1)\n ) by (pool, image, namespace)\n)', - '', - 'table', - 1, - true - ) - ) + { gridPos: { x: 16, y: 7, w: 8, h: 7 } }, - ]), - }, + ) + { gridPos: { x: 16, y: 7, w: 8, h: 7 } }, + ]), } diff --git a/ceph/monitoring/ceph-mixin/dashboards/rgw.libsonnet b/ceph/monitoring/ceph-mixin/dashboards/rgw.libsonnet index f7f76187f..437eb783f 100644 --- a/ceph/monitoring/ceph-mixin/dashboards/rgw.libsonnet +++ b/ceph/monitoring/ceph-mixin/dashboards/rgw.libsonnet @@ -1,625 +1,866 @@ local g = import 'grafonnet/grafana.libsonnet'; 
local u = import 'utils.libsonnet'; -{ - grafanaDashboards+:: { - 'radosgw-sync-overview.json': - local RgwSyncOverviewPanel(title, formatY1, labelY1, rgwMetric, x, y, w, h) = - u.graphPanelSchema({}, - title, - '', - 'null as zero', - true, - formatY1, - 'short', - labelY1, - null, - 0, - 1, - '$datasource') - .addTargets( - [u.addTargetSchema('sum by (source_zone) (rate(%s[30s]))' % rgwMetric, - '{{source_zone}}')] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; +(import 'utils.libsonnet') { + 'radosgw-sync-overview.json': + local RgwSyncOverviewPanel(title, formatY1, labelY1, rgwMetric, x, y, w, h) = + $.graphPanelSchema({}, + title, + '', + 'null as zero', + true, + formatY1, + 'short', + labelY1, + null, + 0, + 1, + '$datasource') + .addTargets( + [ + $.addTargetSchema( + 'sum by (source_zone) (rate(%(rgwMetric)s{%(matchers)s}[$__rate_interval]))' + % ($.matchers() + { rgwMetric: rgwMetric }), + '{{source_zone}}' + ), + ] + ) + { gridPos: { x: x, y: y, w: w, h: h } }; - u.dashboardSchema( - 'RGW Sync Overview', - '', - 'rgw-sync-overview', - 'now-1h', - '15s', - 16, - ['overview'], - '', - { - refresh_intervals: ['5s', '10s', '15s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], - time_options: ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d'], - } - ) - .addAnnotation( - u.addAnnotationSchema( - 1, - '-- Grafana --', - true, - true, - 'rgba(0, 211, 255, 1)', - 'Annotations & Alerts', - 'dashboard' - ) - ) - .addRequired( - type='grafana', id='grafana', name='Grafana', version='5.0.0' + $.dashboardSchema( + 'RGW Sync Overview', + '', + 'rgw-sync-overview', + 'now-1h', + '30s', + 16, + $._config.dashboardTags + ['overview'], + '' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, + '-- Grafana --', + true, + true, + 'rgba(0, 211, 255, 1)', + 'Annotations & Alerts', + 'dashboard' ) - .addRequired( - type='panel', id='graph', name='Graph', version='5.0.0' - ) - .addTemplate( - u.addTemplateSchema('rgw_servers', '$datasource', 'prometehus', 1, true, 
1, '', '') - ) - .addTemplate( - g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.0.0' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', 'prometheus', 'default', label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addTemplate( + $.addTemplateSchema( + 'rgw_servers', + '$datasource', + 'label_values(ceph_rgw_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(), + 1, + true, + 1, + '', + 'RGW Server' ) - .addPanels([ - RgwSyncOverviewPanel( - 'Replication (throughput) from Source Zone', - 'Bps', - null, - 'ceph_data_sync_from_zone_fetch_bytes_sum', - 0, - 0, - 8, - 7 - ), - RgwSyncOverviewPanel( - 'Replication (objects) from Source Zone', - 'short', - 'Objects/s', - 'ceph_data_sync_from_zone_fetch_bytes_count', - 8, - 0, - 8, - 7 - ), - RgwSyncOverviewPanel( - 'Polling Request Latency from Source Zone', - 'ms', - null, - 'ceph_data_sync_from_zone_poll_latency_sum', - 16, - 0, - 8, - 7 - ), - RgwSyncOverviewPanel( - 'Unsuccessful Object Replications from Source Zone', - 'short', - 'Count/s', - 'ceph_data_sync_from_zone_fetch_errors', - 0, - 7, - 8, - 7 - ), - ]), - 'radosgw-overview.json': - local RgwOverviewPanel( + ) + .addPanels([ + RgwSyncOverviewPanel( + 'Replication (throughput) from Source Zone', + 'Bps', + null, + 'ceph_data_sync_from_zone_fetch_bytes_sum', + 0, + 0, + 8, + 7 + ), + RgwSyncOverviewPanel( + 'Replication (objects) from Source Zone', + 'short', + 'Objects/s', + 'ceph_data_sync_from_zone_fetch_bytes_count', + 8, + 0, + 8, + 7 + ), + RgwSyncOverviewPanel( + 'Polling Request Latency from Source Zone', + 'ms', + null, + 'ceph_data_sync_from_zone_poll_latency_sum', + 16, + 0, + 8, + 7 + ), + RgwSyncOverviewPanel( + 'Unsuccessful Object Replications from Source Zone', + 'short', + 'Count/s', + 
'ceph_data_sync_from_zone_fetch_errors', + 0, + 7, + 8, + 7 + ), + ]), + 'radosgw-overview.json': + local RgwOverviewPanel( + title, + description, + formatY1, + formatY2, + expr1, + legendFormat1, + x, + y, + w, + h, + datasource='$datasource', + legend_alignAsTable=false, + legend_avg=false, + legend_min=false, + legend_max=false, + legend_current=false, + legend_values=false + ) = + $.graphPanelSchema( + {}, title, description, + 'null', + false, formatY1, formatY2, - expr1, - legendFormat1, - x, - y, - w, - h, - datasource='$datasource', - legend_alignAsTable=false, - legend_avg=false, - legend_min=false, - legend_max=false, - legend_current=false, - legend_values=false - ) = - u.graphPanelSchema( - {}, - title, - description, - 'null', - false, - formatY1, - formatY2, - null, - null, - 0, - 1, - datasource, - legend_alignAsTable, - legend_avg, - legend_min, - legend_max, - legend_current, - legend_values - ) - .addTargets( - [u.addTargetSchema(expr1, legendFormat1)] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; + null, + null, + 0, + 1, + datasource, + legend_alignAsTable, + legend_avg, + legend_min, + legend_max, + legend_current, + legend_values + ) + .addTargets( + [$.addTargetSchema(expr1, legendFormat1)] + ) + { gridPos: { x: x, y: y, w: w, h: h } }; - u.dashboardSchema( - 'RGW Overview', - '', - 'WAkugZpiz', - 'now-1h', - '15s', - 16, - ['overview'], - '', - { - refresh_intervals: ['5s', '10s', '15s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], - time_options: ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d'], - } + $.dashboardSchema( + 'RGW Overview', + '', + 'WAkugZpiz', + 'now-1h', + '30s', + 16, + $._config.dashboardTags + ['overview'], + '' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, + '-- Grafana --', + true, + true, + 'rgba(0, 211, 255, 1)', + 'Annotations & Alerts', + 'dashboard' ) - .addAnnotation( - u.addAnnotationSchema( - 1, - '-- Grafana --', - true, - true, - 'rgba(0, 211, 255, 1)', - 'Annotations & Alerts', - 
'dashboard' - ) + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.0.0' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', + 'prometheus', + 'default', + label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addTemplate( + $.addTemplateSchema( + 'rgw_servers', + '$datasource', + 'label_values(ceph_rgw_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(), + 1, + true, + 1, + '', + 'RGW Server' ) - .addRequired( - type='grafana', id='grafana', name='Grafana', version='5.0.0' + ) + .addTemplate( + $.addTemplateSchema( + 'code', + '$datasource', + 'label_values(haproxy_server_http_responses_total{job=~"$job_haproxy", instance=~"$ingress_service"}, code)', + 1, + true, + 1, + 'HTTP Code', + '' ) - .addRequired( - type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + $.addTemplateSchema( + 'job_haproxy', + '$datasource', + 'label_values(haproxy_server_status, job)', + 1, + true, + 1, + 'job haproxy', + '(.*)', + multi=true, + allValues='.+', + ), + ) + .addTemplate( + $.addTemplateSchema( + 'ingress_service', + '$datasource', + 'label_values(haproxy_server_status{job=~"$job_haproxy"}, instance)', + 1, + true, + 1, + 'Ingress Service', + '' ) - .addTemplate( - u.addTemplateSchema( - 'rgw_servers', - '$datasource', - 'label_values(ceph_rgw_metadata, ceph_daemon)', - 1, - true, - 1, - '', - '' - ) + ) + .addPanels([ + $.addRowSchema(false, + true, + 'RGW Overview - All Gateways') + + { + gridPos: { x: 0, y: 0, w: 24, h: 1 }, + }, + RgwOverviewPanel( + 'Average GET/PUT Latencies', + '', + 's', + 'short', + ||| + rate(ceph_rgw_get_initial_lat_sum{%(matchers)s}[$__rate_interval]) / + rate(ceph_rgw_get_initial_lat_count{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s} + ||| % $.matchers(), + 'GET AVG', + 0, + 1, + 8, + 7 + 
).addTargets( + [ + $.addTargetSchema( + ||| + rate(ceph_rgw_put_initial_lat_sum{%(matchers)s}[$__rate_interval]) / + rate(ceph_rgw_put_initial_lat_count{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s} + ||| % $.matchers(), + 'PUT AVG' + ), + ] + ), + RgwOverviewPanel( + 'Total Requests/sec by RGW Instance', + '', + 'none', + 'short', + ||| + sum by (rgw_host) ( + label_replace( + rate(ceph_rgw_req{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s}, + "rgw_host", "$1", "ceph_daemon", "rgw.(.*)" + ) + ) + ||| % $.matchers(), + '{{rgw_host}}', + 8, + 1, + 7, + 7 + ), + RgwOverviewPanel( + 'GET Latencies by RGW Instance', + 'Latencies are shown stacked, without a yaxis to provide a visual indication of GET latency imbalance across RGW hosts', + 's', + 'short', + ||| + label_replace( + rate(ceph_rgw_get_initial_lat_sum{%(matchers)s}[$__rate_interval]) / + rate(ceph_rgw_get_initial_lat_count{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s}, + "rgw_host", "$1", "ceph_daemon", "rgw.(.*)" + ) + ||| % $.matchers(), + '{{rgw_host}}', + 15, + 1, + 6, + 7 + ), + RgwOverviewPanel( + 'Bandwidth Consumed by Type', + 'Total bytes transferred in/out of all radosgw instances within the cluster', + 'bytes', + 'short', + 'sum(rate(ceph_rgw_get_b{%(matchers)s}[$__rate_interval]))' % $.matchers(), + 'GETs', + 0, + 8, + 8, + 6 + ).addTargets( + [$.addTargetSchema('sum(rate(ceph_rgw_put_b{%(matchers)s}[$__rate_interval]))' % $.matchers(), + 'PUTs')] + ), + RgwOverviewPanel( + 'Bandwidth by RGW Instance', + 'Total bytes transferred in/out through get/put operations, by radosgw instance', + 'bytes', + 'short', + ||| + label_replace(sum by (instance_id) ( + rate(ceph_rgw_get_b{%(matchers)s}[$__rate_interval]) + + rate(ceph_rgw_put_b{%(matchers)s}[$__rate_interval])) * + on (instance_id) group_left 
(ceph_daemon) ceph_rgw_metadata{%(matchers)s}, + "rgw_host", "$1", "ceph_daemon", "rgw.(.*)" + ) + ||| % $.matchers(), + '{{rgw_host}}', + 8, + 8, + 7, + 6 + ), + RgwOverviewPanel( + 'PUT Latencies by RGW Instance', + 'Latencies are shown stacked, without a yaxis to provide a visual indication of PUT latency imbalance across RGW hosts', + 's', + 'short', + ||| + label_replace( + rate(ceph_rgw_put_initial_lat_sum{%(matchers)s}[$__rate_interval]) / + rate(ceph_rgw_put_initial_lat_count{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s}, + "rgw_host", "$1", "ceph_daemon", "rgw.(.*)" + ) + ||| % $.matchers(), + '{{rgw_host}}', + 15, + 8, + 6, + 6 + ), + $.addRowSchema( + false, true, 'RGW Overview - HAProxy Metrics' + ) + { gridPos: { x: 0, y: 12, w: 9, h: 12 } }, + RgwOverviewPanel( + 'Total responses by HTTP code', + '', + 'short', + 'short', + ||| + sum( + rate( + haproxy_frontend_http_responses_total{code=~"$code", job=~"$job_haproxy", instance=~"$ingress_service", proxy=~"frontend"}[$__rate_interval] + ) + ) by (code) + |||, + 'Frontend {{ code }}', + 0, + 12, + 5, + 12, + '$datasource', + true, + true, + true, + true, + true, + true ) - .addTemplate( - u.addTemplateSchema( - 'code', - '$datasource', - 'label_values(haproxy_server_http_responses_total{instance=~"$ingress_service"}, code)', - 1, - true, - 1, - 'HTTP Code', - '' - ) + .addTargets( + [ + $.addTargetSchema( + ||| + sum( + rate( + haproxy_backend_http_responses_total{code=~"$code", job=~"$job_haproxy", instance=~"$ingress_service", proxy=~"backend"}[$__rate_interval] + ) + ) by (code) + |||, 'Backend {{ code }}' + ), + ] ) - .addTemplate( - u.addTemplateSchema( - 'ingress_service', - '$datasource', - 'label_values(haproxy_server_status, instance)', - 1, - true, - 1, - 'Ingress Service', - '' - ) + .addSeriesOverride([ + { + alias: '/.*Back.*/', + transform: 'negative-Y', + }, + { alias: '/.*1.*/' }, + { alias: '/.*2.*/' }, + { alias: '/.*3.*/' 
}, + { alias: '/.*4.*/' }, + { alias: '/.*5.*/' }, + { alias: '/.*other.*/' }, + ]), + RgwOverviewPanel( + 'Total requests / responses', + '', + 'short', + 'short', + ||| + sum( + rate( + haproxy_frontend_http_requests_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) + ) by (instance) + |||, + 'Requests', + 5, + 12, + 5, + 12, + '$datasource', + true, + true, + true, + true, + true, + true ) - .addTemplate( - g.template.datasource('datasource', - 'prometheus', - 'default', - label='Data Source') + .addTargets( + [ + $.addTargetSchema( + ||| + sum( + rate( + haproxy_backend_response_errors_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) + ) by (instance) + |||, 'Response errors', 'time_series', 2 + ), + $.addTargetSchema( + ||| + sum( + rate( + haproxy_frontend_request_errors_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) + ) by (instance) + |||, 'Requests errors' + ), + $.addTargetSchema( + ||| + sum( + rate( + haproxy_backend_redispatch_warnings_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) + ) by (instance) + |||, 'Backend redispatch', 'time_series', 2 + ), + $.addTargetSchema( + ||| + sum( + rate( + haproxy_backend_retry_warnings_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) + ) by (instance) + |||, 'Backend retry', 'time_series', 2 + ), + $.addTargetSchema( + ||| + sum( + rate( + haproxy_frontend_requests_denied_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) + ) by (instance) + |||, 'Request denied', 'time_series', 2 + ), + $.addTargetSchema( + ||| + sum( + haproxy_backend_current_queue{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"} + ) by (instance) + |||, 'Backend Queued', 'time_series', 2 + ), + ] ) - .addPanels([ - u.addRowSchema(false, - 
true, - 'RGW Overview - All Gateways') + + .addSeriesOverride([ + { + alias: '/.*Response.*/', + transform: 'negative-Y', + }, { - gridPos: { x: 0, y: 0, w: 24, h: 1 }, + alias: '/.*Backend.*/', + transform: 'negative-Y', }, - RgwOverviewPanel( - 'Average GET/PUT Latencies', - '', - 's', - 'short', - 'rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata', - 'GET AVG', - 0, - 1, - 8, - 7 - ).addTargets( - [ - u.addTargetSchema( - 'rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata', - 'PUT AVG' - ), - ] - ), - RgwOverviewPanel( - 'Total Requests/sec by RGW Instance', - '', - 'none', - 'short', - 'sum by (rgw_host) (label_replace(rate(ceph_rgw_req[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata, "rgw_host", "$1", "ceph_daemon", "rgw.(.*)"))', - '{{rgw_host}}', - 8, - 1, - 7, - 7 - ), - RgwOverviewPanel( - 'GET Latencies by RGW Instance', - 'Latencies are shown stacked, without a yaxis to provide a visual indication of GET latency imbalance across RGW hosts', - 's', - 'short', - 'label_replace(\n rate(ceph_rgw_get_initial_lat_sum[30s]) /\n rate(ceph_rgw_get_initial_lat_count[30s]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata,\n"rgw_host", "$1", "ceph_daemon", "rgw.(.*)")', - '{{rgw_host}}', - 15, - 1, - 6, - 7 - ), - RgwOverviewPanel( - 'Bandwidth Consumed by Type', - 'Total bytes transferred in/out of all radosgw instances within the cluster', - 'bytes', - 'short', - 'sum(rate(ceph_rgw_get_b[30s]))', - 'GETs', - 0, - 8, - 8, - 6 - ).addTargets( - [u.addTargetSchema('sum(rate(ceph_rgw_put_b[30s]))', - 'PUTs')] - ), - RgwOverviewPanel( - 'Bandwidth by RGW Instance', - 'Total bytes transferred in/out through get/put operations, by radosgw instance', - 'bytes', - 'short', - 'label_replace(sum by (instance_id) (\n rate(ceph_rgw_get_b[30s]) + \n 
rate(ceph_rgw_put_b[30s])\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata, "rgw_host", "$1", "ceph_daemon", "rgw.(.*)")', - '{{rgw_host}}', - 8, - 8, - 7, - 6 - ), - RgwOverviewPanel( - 'PUT Latencies by RGW Instance', - 'Latencies are shown stacked, without a yaxis to provide a visual indication of PUT latency imbalance across RGW hosts', - 's', - 'short', - 'label_replace(\n rate(ceph_rgw_put_initial_lat_sum[30s]) /\n rate(ceph_rgw_put_initial_lat_count[30s]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata,\n"rgw_host", "$1", "ceph_daemon", "rgw.(.*)")', - '{{rgw_host}}', - 15, - 8, - 6, - 6 - ), - u.addRowSchema( - false, true, 'RGW Overview - HAProxy Metrics' - ) + { gridPos: { x: 0, y: 12, w: 9, h: 12 } }, - RgwOverviewPanel( - 'Total responses by HTTP code', - '', - 'short', - 'short', - 'sum(irate(haproxy_frontend_http_responses_total{code=~"$code",instance=~"$ingress_service",proxy=~"frontend"}[5m])) by (code)', - 'Frontend {{ code }}', - 0, - 12, - 5, - 12, - '$datasource', - true, - true, - true, - true, - true, - true - ) - .addTargets( - [u.addTargetSchema('sum(irate(haproxy_backend_http_responses_total{code=~"$code",instance=~"$ingress_service",proxy=~"backend"}[5m])) by (code)', 'Backend {{ code }}')] - ) - .addSeriesOverride([ - { - alias: '/.*Back.*/', - transform: 'negative-Y', - }, - { alias: '/.*1.*/' }, - { alias: '/.*2.*/' }, - { alias: '/.*3.*/' }, - { alias: '/.*4.*/' }, - { alias: '/.*5.*/' }, - { alias: '/.*other.*/' }, - ]), - RgwOverviewPanel( - 'Total requests / responses', - '', - 'short', - 'short', - 'sum(irate(haproxy_frontend_http_requests_total{proxy=~"frontend",instance=~"$ingress_service"}[5m])) by (instance)', - 'Requests', - 5, - 12, - 5, - 12, - '$datasource', - true, - true, - true, - true, - true, - true - ) - .addTargets( - [ - u.addTargetSchema('sum(irate(haproxy_backend_response_errors_total{proxy=~"backend",instance=~"$ingress_service"}[5m])) by (instance)', 'Response errors', 
'time_series', 2), - u.addTargetSchema('sum(irate(haproxy_frontend_request_errors_total{proxy=~"frontend",instance=~"$ingress_service"}[5m])) by (instance)', 'Requests errors'), - u.addTargetSchema('sum(irate(haproxy_backend_redispatch_warnings_total{proxy=~"backend",instance=~"$ingress_service"}[5m])) by (instance)', 'Backend redispatch', 'time_series', 2), - u.addTargetSchema('sum(irate(haproxy_backend_retry_warnings_total{proxy=~"backend",instance=~"$ingress_service"}[5m])) by (instance)', 'Backend retry', 'time_series', 2), - u.addTargetSchema('sum(irate(haproxy_frontend_requests_denied_total{proxy=~"frontend",instance=~"$ingress_service"}[5m])) by (instance)', 'Request denied', 'time_series', 2), - u.addTargetSchema('sum(haproxy_backend_current_queue{proxy=~"backend",instance=~"$ingress_service"}) by (instance)', 'Backend Queued', 'time_series', 2), - ] - ) - .addSeriesOverride([ - { - alias: '/.*Response.*/', - transform: 'negative-Y', - }, - { - alias: '/.*Backend.*/', - transform: 'negative-Y', - }, - ]), - RgwOverviewPanel( - 'Total number of connections', - '', - 'short', - 'short', - 'sum(irate(haproxy_frontend_connections_total{proxy=~"frontend",instance=~"$ingress_service"}[5m])) by (instance)', - 'Front', - 10, - 12, - 5, - 12, - '$datasource', - true, - true, - true, - true, - true, - true - ) - .addTargets( - [ - u.addTargetSchema('sum(irate(haproxy_backend_connection_attempts_total{proxy=~"backend",instance=~"$ingress_service"}[5m])) by (instance)', 'Back'), - u.addTargetSchema('sum(irate(haproxy_backend_connection_errors_total{proxy=~"backend",instance=~"$ingress_service"}[5m])) by (instance)', 'Back errors'), - ] - ) - .addSeriesOverride([ - { - alias: '/.*Back.*/', - transform: 'negative-Y', - }, - ]), - RgwOverviewPanel( - 'Current total of incoming / outgoing bytes', - '', - 'short', - 'short', - 'sum(irate(haproxy_frontend_bytes_in_total{proxy=~"frontend",instance=~"$ingress_service"}[5m])*8) by (instance)', - 'IN Front', - 15, - 12, - 6, - 
12, - '$datasource', - true, - true, - true, - true, - true, - true - ) - .addTargets( - [ - u.addTargetSchema('sum(irate(haproxy_frontend_bytes_out_total{proxy=~"frontend",instance=~"$ingress_service"}[5m])*8) by (instance)', 'OUT Front', 'time_series', 2), - u.addTargetSchema('sum(irate(haproxy_backend_bytes_in_total{proxy=~"backend",instance=~"$ingress_service"}[5m])*8) by (instance)', 'IN Back', 'time_series', 2), - u.addTargetSchema('sum(irate(haproxy_backend_bytes_out_total{proxy=~"backend",instance=~"$ingress_service"}[5m])*8) by (instance)', 'OUT Back', 'time_series', 2), - ] - ) - .addSeriesOverride([ - { - alias: '/.*OUT.*/', - transform: 'negative-Y', - }, - ]), ]), - 'radosgw-detail.json': - local RgwDetailsPanel(aliasColors, - title, - description, - formatY1, - formatY2, - expr1, - expr2, - legendFormat1, - legendFormat2, - x, - y, - w, - h) = - u.graphPanelSchema(aliasColors, - title, - description, - 'null', - false, - formatY1, - formatY2, - null, - null, - 0, - 1, - '$datasource') - .addTargets( - [u.addTargetSchema(expr1, legendFormat1), u.addTargetSchema(expr2, legendFormat2)] - ) + { gridPos: { x: x, y: y, w: w, h: h } }; - - u.dashboardSchema( - 'RGW Instance Detail', - '', - 'x5ARzZtmk', - 'now-1h', - '15s', - 16, - ['overview'], + RgwOverviewPanel( + 'Total number of connections', '', - { - refresh_intervals: ['5s', '10s', '15s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'], - time_options: ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d'], - } + 'short', + 'short', + ||| + sum( + rate( + haproxy_frontend_connections_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) + ) by (instance) + |||, + 'Front', + 10, + 12, + 5, + 12, + '$datasource', + true, + true, + true, + true, + true, + true ) - .addAnnotation( - u.addAnnotationSchema( - 1, - '-- Grafana --', - true, - true, - 'rgba(0, 211, 255, 1)', - 'Annotations & Alerts', - 'dashboard' - ) + .addTargets( + [ + $.addTargetSchema( + 
||| + sum( + rate( + haproxy_backend_connection_attempts_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) + ) by (instance) + |||, 'Back' + ), + $.addTargetSchema( + ||| + sum( + rate( + haproxy_backend_connection_errors_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) + ) by (instance) + |||, 'Back errors' + ), + ] ) - .addRequired( - type='grafana', id='grafana', name='Grafana', version='5.0.0' + .addSeriesOverride([ + { + alias: '/.*Back.*/', + transform: 'negative-Y', + }, + ]), + RgwOverviewPanel( + 'Current total of incoming / outgoing bytes', + '', + 'short', + 'short', + ||| + sum( + rate( + haproxy_frontend_bytes_in_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) * 8 + ) by (instance) + |||, + 'IN Front', + 15, + 12, + 6, + 12, + '$datasource', + true, + true, + true, + true, + true, + true ) - .addRequired( - type='panel', - id='grafana-piechart-panel', - name='Pie Chart', - version='1.3.3' + .addTargets( + [ + $.addTargetSchema( + ||| + sum( + rate( + haproxy_frontend_bytes_out_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) * 8 + ) by (instance) + |||, 'OUT Front', 'time_series', 2 + ), + $.addTargetSchema( + ||| + sum( + rate( + haproxy_backend_bytes_in_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) * 8 + ) by (instance) + |||, 'IN Back', 'time_series', 2 + ), + $.addTargetSchema( + ||| + sum( + rate( + haproxy_backend_bytes_out_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval] + ) * 8 + ) by (instance) + |||, 'OUT Back', 'time_series', 2 + ), + ] ) - .addRequired( - type='panel', id='graph', name='Graph', version='5.0.0' + .addSeriesOverride([ + { + alias: '/.*OUT.*/', + transform: 'negative-Y', + }, + ]), + ]), + 'radosgw-detail.json': + local 
RgwDetailsPanel(aliasColors, + title, + description, + formatY1, + formatY2, + expr1, + expr2, + legendFormat1, + legendFormat2, + x, + y, + w, + h) = + $.graphPanelSchema(aliasColors, + title, + description, + 'null', + false, + formatY1, + formatY2, + null, + null, + 0, + 1, + '$datasource') + .addTargets( + [$.addTargetSchema(expr1, legendFormat1), $.addTargetSchema(expr2, legendFormat2)] + ) + { gridPos: { x: x, y: y, w: w, h: h } }; + + $.dashboardSchema( + 'RGW Instance Detail', + '', + 'x5ARzZtmk', + 'now-1h', + '30s', + 16, + $._config.dashboardTags + ['overview'], + '' + ) + .addAnnotation( + $.addAnnotationSchema( + 1, + '-- Grafana --', + true, + true, + 'rgba(0, 211, 255, 1)', + 'Annotations & Alerts', + 'dashboard' ) - .addTemplate( - g.template.datasource('datasource', - 'prometheus', - 'default', - label='Data Source') + ) + .addRequired( + type='grafana', id='grafana', name='Grafana', version='5.0.0' + ) + .addRequired( + type='panel', + id='grafana-piechart-panel', + name='Pie Chart', + version='1.3.3' + ) + .addRequired( + type='panel', id='graph', name='Graph', version='5.0.0' + ) + .addTemplate( + g.template.datasource('datasource', + 'prometheus', + 'default', + label='Data Source') + ) + .addTemplate( + $.addClusterTemplate() + ) + .addTemplate( + $.addJobTemplate() + ) + .addTemplate( + $.addTemplateSchema('rgw_servers', + '$datasource', + 'label_values(ceph_rgw_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(), + 1, + true, + 1, + '', + '') + ) + .addPanels([ + $.addRowSchema(false, true, 'RGW Host Detail : $rgw_servers') + { gridPos: { x: 0, y: 0, w: 24, h: 1 } }, + RgwDetailsPanel( + {}, + '$rgw_servers GET/PUT Latencies', + '', + 's', + 'short', + ||| + sum by (instance_id) ( + rate(ceph_rgw_get_initial_lat_sum{%(matchers)s}[$__rate_interval]) / + rate(ceph_rgw_get_initial_lat_count{%(matchers)s}[$__rate_interval]) + ) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"} + ||| % 
$.matchers(), + ||| + sum by (instance_id) ( + rate(ceph_rgw_put_initial_lat_sum{%(matchers)s}[$__rate_interval]) / + rate(ceph_rgw_put_initial_lat_count{%(matchers)s}[$__rate_interval]) + ) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"} + ||| % $.matchers(), + 'GET {{ceph_daemon}}', + 'PUT {{ceph_daemon}}', + 0, + 1, + 6, + 8 + ), + RgwDetailsPanel( + {}, + 'Bandwidth by HTTP Operation', + '', + 'bytes', + 'short', + ||| + rate(ceph_rgw_get_b{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"} + ||| % $.matchers(), + ||| + rate(ceph_rgw_put_b{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) + ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"} + ||| % $.matchers(), + 'GETs {{ceph_daemon}}', + 'PUTs {{ceph_daemon}}', + 6, + 1, + 7, + 8 + ), + RgwDetailsPanel( + { + GETs: '#7eb26d', + Other: '#447ebc', + PUTs: '#eab839', + Requests: '#3f2b5b', + 'Requests Failed': '#bf1b00', + }, + 'HTTP Request Breakdown', + '', + 'short', + 'short', + ||| + rate(ceph_rgw_failed_req{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s,ceph_daemon=~"$rgw_servers"} + ||| % $.matchers(), + ||| + rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"} + ||| % $.matchers(), + 'Requests Failed {{ceph_daemon}}', + 'GETs {{ceph_daemon}}', + 13, + 1, + 7, + 8 ) - .addTemplate( - u.addTemplateSchema('rgw_servers', - '$datasource', - 'label_values(ceph_rgw_metadata, ceph_daemon)', - 1, - true, - 1, - '', - '') + .addTargets( + [ + $.addTargetSchema( + ||| + rate(ceph_rgw_put{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"} + ||| % $.matchers(), + 'PUTs 
{{ceph_daemon}}' + ), + $.addTargetSchema( + ||| + ( + rate(ceph_rgw_req{%(matchers)s}[$__rate_interval]) - + ( + rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) + + rate(ceph_rgw_put{%(matchers)s}[$__rate_interval]) + ) + ) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"} + ||| % $.matchers(), + 'Other {{ceph_daemon}}' + ), + ] + ), + $.simplePieChart( + { + GETs: '#7eb26d', + 'Other (HEAD,POST,DELETE)': '#447ebc', + PUTs: '#eab839', + Requests: '#3f2b5b', + Failures: '#bf1b00', + }, '', 'Workload Breakdown' ) - .addPanels([ - u.addRowSchema(false, true, 'RGW Host Detail : $rgw_servers') + { gridPos: { x: 0, y: 0, w: 24, h: 1 } }, - RgwDetailsPanel( - {}, - '$rgw_servers GET/PUT Latencies', - '', - 's', - 'short', - 'sum by (instance_id) (rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s])) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 'sum by (instance_id) (rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s])) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 'GET {{ceph_daemon}}', - 'PUT {{ceph_daemon}}', - 0, - 1, - 6, - 8 - ), - RgwDetailsPanel( - {}, - 'Bandwidth by HTTP Operation', - '', - 'bytes', - 'short', - 'rate(ceph_rgw_get_b[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 'rate(ceph_rgw_put_b[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 'GETs {{ceph_daemon}}', - 'PUTs {{ceph_daemon}}', - 6, - 1, - 7, - 8 - ), - RgwDetailsPanel( - { - GETs: '#7eb26d', - Other: '#447ebc', - PUTs: '#eab839', - Requests: '#3f2b5b', - 'Requests Failed': '#bf1b00', - }, - 'HTTP Request Breakdown', - '', - 'short', - 'short', - 'rate(ceph_rgw_failed_req[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 
'rate(ceph_rgw_get[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 'Requests Failed {{ceph_daemon}}', - 'GETs {{ceph_daemon}}', - 13, - 1, - 7, - 8 - ) - .addTargets( - [ - u.addTargetSchema( - 'rate(ceph_rgw_put[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 'PUTs {{ceph_daemon}}' - ), - u.addTargetSchema( - '(\n rate(ceph_rgw_req[30s]) -\n (rate(ceph_rgw_get[30s]) + rate(ceph_rgw_put[30s]))\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 'Other {{ceph_daemon}}' - ), - ] - ), - u.addPieChartSchema( - { - GETs: '#7eb26d', - 'Other (HEAD,POST,DELETE)': '#447ebc', - PUTs: '#eab839', - Requests: '#3f2b5b', - Failures: '#bf1b00', - }, '$datasource', '', 'Under graph', 'pie', 'Workload Breakdown', 'current' - ) - .addTarget(u.addTargetSchema( - 'rate(ceph_rgw_failed_req[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 'Failures {{ceph_daemon}}' - )) - .addTarget(u.addTargetSchema( - 'rate(ceph_rgw_get[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 'GETs {{ceph_daemon}}' - )) - .addTarget(u.addTargetSchema( - 'rate(ceph_rgw_put[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 'PUTs {{ceph_daemon}}' - )) - .addTarget(u.addTargetSchema( - '(\n rate(ceph_rgw_req[30s]) -\n (rate(ceph_rgw_get[30s]) + rate(ceph_rgw_put[30s]))\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~"$rgw_servers"}', - 'Other (DELETE,LIST) {{ceph_daemon}}' - )) + { gridPos: { x: 20, y: 1, w: 4, h: 8 } }, - ]), - }, + .addTarget($.addTargetSchema( + ||| + rate(ceph_rgw_failed_req{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"} + ||| % $.matchers(), + 'Failures 
{{ceph_daemon}}' + )) + .addTarget($.addTargetSchema( + ||| + rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"} + ||| % $.matchers(), + 'GETs {{ceph_daemon}}' + )) + .addTarget($.addTargetSchema( + ||| + rate(ceph_rgw_put{%(matchers)s}[$__rate_interval]) * + on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"} + ||| % $.matchers(), + 'PUTs {{ceph_daemon}}' + )) + .addTarget($.addTargetSchema( + ||| + ( + rate(ceph_rgw_req{%(matchers)s}[$__rate_interval]) - + ( + rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) + + rate(ceph_rgw_put{%(matchers)s}[$__rate_interval]) + ) + ) * on (instance_id) group_left (ceph_daemon) + ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"} + ||| % $.matchers(), + 'Other (DELETE,LIST) {{ceph_daemon}}' + )) + { gridPos: { x: 20, y: 1, w: 4, h: 8 } }, + ]), } diff --git a/ceph/monitoring/ceph-mixin/dashboards/utils.libsonnet b/ceph/monitoring/ceph-mixin/dashboards/utils.libsonnet index 1f25d370c..a7774c7ce 100644 --- a/ceph/monitoring/ceph-mixin/dashboards/utils.libsonnet +++ b/ceph/monitoring/ceph-mixin/dashboards/utils.libsonnet @@ -1,6 +1,8 @@ local g = import 'grafonnet/grafana.libsonnet'; { + _config:: error 'must provide _config', + dashboardSchema(title, description, uid, @@ -8,8 +10,7 @@ local g = import 'grafonnet/grafana.libsonnet'; refresh, schemaVersion, tags, - timezone, - timepicker):: + timezone):: g.dashboard.new(title=title, description=description, uid=uid, @@ -17,8 +18,7 @@ local g = import 'grafonnet/grafana.libsonnet'; refresh=refresh, schemaVersion=schemaVersion, tags=tags, - timezone=timezone, - timepicker=timepicker), + timezone=timezone), graphPanelSchema(aliasColors, title, @@ -72,7 +72,10 @@ local g = import 'grafonnet/grafana.libsonnet'; includeAll, sort, label, - regex):: + regex, + hide='', + multi=false, + allValues=null):: 
g.template.new(name=name, datasource=datasource, query=query, @@ -80,7 +83,10 @@ local g = import 'grafonnet/grafana.libsonnet'; includeAll=includeAll, sort=sort, label=label, - regex=regex), + regex=regex, + hide=hide, + multi=multi, + allValues=allValues), addAnnotationSchema(builtIn, datasource, @@ -170,4 +176,158 @@ local g = import 'grafonnet/grafana.libsonnet'; unit: unit, valueMaps: valueMaps, }, + + matchers():: + local jobMatcher = 'job=~"$job"'; + local clusterMatcher = '%s=~"$cluster"' % $._config.clusterLabel; + { + // Common labels + jobMatcher: jobMatcher, + clusterMatcher: (if $._config.showMultiCluster then clusterMatcher else ''), + matchers: jobMatcher + + (if $._config.showMultiCluster then ', ' + clusterMatcher else ''), + }, + + addClusterTemplate():: + $.addTemplateSchema( + 'cluster', + '$datasource', + 'label_values(ceph_osd_metadata, %s)' % $._config.clusterLabel, + 1, + true, + 1, + 'cluster', + '(.*)', + if !$._config.showMultiCluster then 'variable' else '', + multi=true, + allValues='.+', + ), + + addJobTemplate():: + $.addTemplateSchema( + 'job', + '$datasource', + 'label_values(ceph_osd_metadata{%(clusterMatcher)s}, job)' % $.matchers(), + 1, + true, + 1, + 'job', + '(.*)', + multi=true, + allValues='.+', + ), + + overviewStyle(alias, + pattern, + type, + unit, + colorMode=null, + thresholds=[], + valueMaps=[]):: + $.addStyle(alias, + colorMode, + [ + 'rgba(245, 54, 54, 0.9)', + 'rgba(237, 129, 40, 0.89)', + 'rgba(50, 172, 45, 0.97)', + ], + 'YYYY-MM-DD HH:mm:ss', + 2, + 1, + pattern, + thresholds, + type, + unit, + valueMaps), + + simpleGraphPanel(alias, + title, + description, + formatY1, + labelY1, + min, + expr, + legendFormat, + x, + y, + w, + h):: + $.graphPanelSchema(alias, + title, + description, + 'null', + false, + formatY1, + 'short', + labelY1, + null, + min, + 1, + '$datasource') + .addTargets( + [$.addTargetSchema(expr, legendFormat)] + ) + { gridPos: { x: x, y: y, w: w, h: h } }, + + simpleSingleStatPanel(format, + 
title, + description, + valueName, + expr, + instant, + targetFormat, + x, + y, + w, + h):: + $.addSingleStatSchema(['#299c46', 'rgba(237, 129, 40, 0.89)', '#d44a3a'], + '$datasource', + format, + title, + description, + valueName, + false, + 100, + false, + false, + '') + .addTarget($.addTargetSchema(expr, '', targetFormat, 1, instant)) + { + gridPos: { x: x, y: y, w: w, h: h }, + }, + gaugeSingleStatPanel(format, + title, + description, + valueName, + colorValue, + gaugeMaxValue, + gaugeShow, + sparkLineShow, + thresholds, + expr, + targetFormat, + x, + y, + w, + h):: + $.addSingleStatSchema(['#299c46', 'rgba(237, 129, 40, 0.89)', '#d44a3a'], + '$datasource', + format, + title, + description, + valueName, + colorValue, + gaugeMaxValue, + gaugeShow, + sparkLineShow, + thresholds) + .addTarget($.addTargetSchema(expr, '', targetFormat)) + { gridPos: { x: + x, y: y, w: w, h: h } }, + + simplePieChart(alias, description, title):: + $.addPieChartSchema(alias, + '$datasource', + description, + 'Under graph', + 'pie', + title, + 'current'), } diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/.lint b/ceph/monitoring/ceph-mixin/dashboards_out/.lint new file mode 100644 index 000000000..6352e858f --- /dev/null +++ b/ceph/monitoring/ceph-mixin/dashboards_out/.lint @@ -0,0 +1,5 @@ +exclusions: + template-instance-rule: + reason: "Instance template not needed because of ceph-mgr leader election." + target-instance-rule: + reason: "Instance matcher not needed because of ceph-mgr leader election." 
diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/cephfs-overview.json b/ceph/monitoring/ceph-mixin/dashboards_out/cephfs-overview.json index 5c0c27329..3e7aeef45 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/cephfs-overview.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/cephfs-overview.json @@ -104,14 +104,14 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(ceph_objecter_op_r{ceph_daemon=~\"($mds_servers).*\"}[1m]))", + "expr": "sum(rate(ceph_objecter_op_r{job=~\"$job\", ceph_daemon=~\"($mds_servers).*\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Read Ops", "refId": "A" }, { - "expr": "sum(rate(ceph_objecter_op_w{ceph_daemon=~\"($mds_servers).*\"}[1m]))", + "expr": "sum(rate(ceph_objecter_op_w{job=~\"$job\", ceph_daemon=~\"($mds_servers).*\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Write Ops", @@ -197,7 +197,7 @@ "steppedLine": false, "targets": [ { - "expr": "ceph_mds_server_handle_client_request{ceph_daemon=~\"($mds_servers).*\"}", + "expr": "ceph_mds_server_handle_client_request{job=~\"$job\", ceph_daemon=~\"($mds_servers).*\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{ceph_daemon}}", @@ -241,11 +241,13 @@ ] } ], - "refresh": "15s", + "refresh": "30s", "rows": [ ], "schemaVersion": 16, "style": "dark", - "tags": [ ], + "tags": [ + "ceph-mixin" + ], "templating": { "list": [ { @@ -262,6 +264,46 @@ "regex": "", "type": "datasource" }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": 
true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, { "allValue": null, "current": { }, @@ -272,7 +314,7 @@ "multi": false, "name": "mds_servers", "options": [ ], - "query": "label_values(ceph_mds_inodes, ceph_daemon)", + "query": "label_values(ceph_mds_inodes{job=~\"$job\"}, ceph_daemon)", "refresh": 1, "regex": "", "sort": 1, @@ -292,7 +334,6 @@ "refresh_intervals": [ "5s", "10s", - "15s", "30s", "1m", "5m", diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/host-details.json b/ceph/monitoring/ceph-mixin/dashboards_out/host-details.json index 7b3c1df15..93c51f009 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/host-details.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/host-details.json @@ -123,7 +123,7 @@ "tableColumn": "", "targets": [ { - "expr": "count(sum by (ceph_daemon) (ceph_osd_metadata{hostname='$ceph_hosts'}))", + "expr": "count(sum by (ceph_daemon) (ceph_osd_metadata{job=~\"$job\", hostname='$ceph_hosts'}))", "format": "time_series", "intervalFactor": 1, "legendFormat": "", @@ -192,7 +192,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (mode) (\n irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[1m])\n) / scalar(\n sum(irate(node_cpu{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]) or\n irate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[1m]))\n) * 100", + "expr": "sum by (mode) (\n rate(node_cpu{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[$__rate_interval]) or\n 
rate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\", mode=~\"(irq|nice|softirq|steal|system|user|iowait)\"}[$__rate_interval])\n) / (\n scalar(\n sum(rate(node_cpu{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_cpu_seconds_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]))\n ) * 100\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{mode}}", @@ -293,28 +293,28 @@ "steppedLine": false, "targets": [ { - "expr": "node_memory_MemFree{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"} ", + "expr": "node_memory_MemFree{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_MemFree_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Free", "refId": "A" }, { - "expr": "node_memory_MemTotal{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"} ", + "expr": "node_memory_MemTotal{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_MemTotal_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "total", "refId": "B" }, { - "expr": "(node_memory_Cached{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) + \n(node_memory_Buffers{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) +\n(node_memory_Slab{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) \n", + "expr": "(\n node_memory_Cached{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Cached_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n) + (\n node_memory_Buffers{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Buffers_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n) + (\n 
node_memory_Slab{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Slab_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "buffers/cache", "refId": "C" }, { - "expr": "(node_memory_MemTotal{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemTotal_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"})- (\n (node_memory_MemFree{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_MemFree_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) + \n (node_memory_Cached{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Cached_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) + \n (node_memory_Buffers{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Buffers_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"}) +\n (node_memory_Slab{instance=~\"$ceph_hosts([\\\\.:].*)?\"} or node_memory_Slab_bytes{instance=~\"$ceph_hosts([\\\\.:].*)?\"})\n )\n \n", + "expr": "(\n node_memory_MemTotal{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_MemTotal_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n) - (\n (\n node_memory_MemFree{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_MemFree_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n ) + (\n node_memory_Cached{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Cached_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n ) + (\n node_memory_Buffers{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Buffers_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n ) +\n (\n node_memory_Slab{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"} or\n node_memory_Slab_bytes{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}\n )\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "used", @@ -405,14 +405,14 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (device) (\n irate(node_network_receive_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or \n 
irate(node_network_receive_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)", + "expr": "sum by (device) (\n rate(\n node_network_receive_bytes{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_receive_bytes_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\",device!=\"lo\"}[$__rate_interval]\n )\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.rx", "refId": "A" }, { - "expr": "sum by (device) (\n irate(node_network_transmit_bytes{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\",device!=\"lo\"}[1m])\n)", + "expr": "sum by (device) (\n rate(node_network_transmit_bytes{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_transmit_bytes_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\",device!=\"lo\"}[$__rate_interval])\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -503,14 +503,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(node_network_receive_drop{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_drop_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])", + "expr": "rate(node_network_receive_drop{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_network_receive_drop_total{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval])\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.rx", "refId": "A" }, { - "expr": "irate(node_network_transmit_drop{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_drop_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])", + "expr": "rate(node_network_transmit_drop{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n 
rate(node_network_transmit_drop_total{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval])\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -615,7 +615,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_osd_stat_bytes and on (ceph_daemon) ceph_disk_occupation{instance=~\"($ceph_hosts)([\\\\.:].*)?\"})", + "expr": "sum(\n ceph_osd_stat_bytes{job=~\"$job\"} and\n on (ceph_daemon) ceph_disk_occupation{job=~\"$job\", instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "", @@ -683,14 +683,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(node_network_receive_errs{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_receive_errs_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])", + "expr": "rate(node_network_receive_errs{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_network_receive_errs_total{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval])\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.rx", "refId": "A" }, { - "expr": "irate(node_network_transmit_errs{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m]) or irate(node_network_transmit_errs_total{instance=~\"$ceph_hosts([\\\\.:].*)?\"}[1m])", + "expr": "rate(node_network_transmit_errs{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_network_transmit_errs_total{instance=~\"$ceph_hosts([\\\\\\\\.:].*)?\"}[$__rate_interval])\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}.tx", @@ -783,7 +783,7 @@ "lines": true, "linewidth": 1, "links": [ ], - "nullPointMode": "connected", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -800,14 +800,14 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(\n (\n irate(node_disk_writes_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or\n 
irate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device) group_left(ceph_daemon)\n label_replace(\n label_replace(\n ceph_disk_occupation_human,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", + "expr": "label_replace(\n (\n rate(node_disk_writes_completed{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) * on(instance, device) group_left(ceph_daemon) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) writes", "refId": "A" }, { - "expr": "label_replace(\n (irate(node_disk_reads_completed{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n)\n* on(instance, device) group_left(ceph_daemon)\n label_replace(\n label_replace(\n ceph_disk_occupation_human,\n \"device\",\n \"$1\",\n \"device\",\n \"/dev/(.*)\"\n ),\n \"instance\",\n \"$1\",\n \"instance\",\n \"([^:.]*).*\"\n )", + "expr": "label_replace(\n (\n rate(node_disk_reads_completed{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) * on(instance, device) group_left(ceph_daemon) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\"},\"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), 
\"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) reads", @@ -881,7 +881,7 @@ "lines": true, "linewidth": 1, "links": [ ], - "nullPointMode": "connected", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -898,14 +898,14 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace((irate(node_disk_bytes_written{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_written_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(\n (\n rate(node_disk_bytes_written{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_disk_written_bytes_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device)\n group_left(ceph_daemon) label_replace(\n label_replace(ceph_disk_occupation_human{job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n )\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) write", "refId": "A" }, { - "expr": "label_replace((irate(node_disk_bytes_read{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) or irate(node_disk_read_bytes_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m])), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(\n (\n 
rate(node_disk_bytes_read{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) or\n rate(node_disk_read_bytes_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])\n ),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device)\n group_left(ceph_daemon) label_replace(\n label_replace(ceph_disk_occupation_human{job=~\"$job\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n )\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}}) read", @@ -979,7 +979,7 @@ "lines": true, "linewidth": 1, "links": [ ], - "nullPointMode": "null as zero", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -991,7 +991,7 @@ "steppedLine": false, "targets": [ { - "expr": "max by(instance,device) (label_replace((irate(node_disk_write_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_writes_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001) or (irate(node_disk_read_time_seconds_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) ) / clamp_min(irate(node_disk_reads_completed_total{ instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]), 0.001), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")) * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "max by(instance, device) (label_replace(\n (rate(node_disk_write_time_seconds_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])) /\n clamp_min(rate(node_disk_writes_completed_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]), 0.001) or\n (rate(node_disk_read_time_seconds_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval])) /\n 
clamp_min(rate(node_disk_reads_completed_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]), 0.001),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)) * on(instance, device) group_left(ceph_daemon) label_replace(\n label_replace(\n ceph_disk_occupation_human{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}})", @@ -1065,7 +1065,7 @@ "lines": true, "linewidth": 1, "links": [ ], - "nullPointMode": "connected", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -1077,7 +1077,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(((irate(node_disk_io_time_ms{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) / 10 ) or irate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}[5m]) * 100), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") * on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human{instance=~\"($ceph_hosts)([\\\\.:].*)?\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(\n (\n (rate(node_disk_io_time_ms{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) / 10) or\n rate(node_disk_io_time_seconds_total{instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"}[$__rate_interval]) * 100\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) * on(instance, device) group_left(ceph_daemon) label_replace(\n label_replace(ceph_disk_occupation_human{job=~\"$job\", instance=~\"($ceph_hosts)([\\\\\\\\.:].*)?\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}}({{ceph_daemon}})", @@ -1121,11 +1121,12 @@ ] } ], - "refresh": 
"10s", + "refresh": "30s", "rows": [ ], "schemaVersion": 16, "style": "dark", "tags": [ + "ceph-mixin", "overview" ], "templating": { @@ -1144,6 +1145,46 @@ "regex": "", "type": "datasource" }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, { "allValue": null, "current": { }, @@ -1154,7 +1195,7 @@ "multi": false, "name": "ceph_hosts", "options": [ ], - "query": "label_values(node_scrape_collector_success, instance) ", + "query": "label_values({}, instance)", "refresh": 1, "regex": "([^.:]*).*", "sort": 3, diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/hosts-overview.json b/ceph/monitoring/ceph-mixin/dashboards_out/hosts-overview.json index 3572d7ad4..f1cd4c499 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/hosts-overview.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/hosts-overview.json @@ -104,7 +104,7 @@ "tableColumn": "", "targets": [ { - "expr": "count(sum by (hostname) (ceph_osd_metadata))", + "expr": "count(sum by (hostname) (ceph_osd_metadata{job=~\"$job\"}))", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -187,7 +187,7 @@ "tableColumn": "", "targets": [ { - "expr": "avg(\n 1 - (\n avg by(instance) \n 
(irate(node_cpu_seconds_total{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]) or\n irate(node_cpu{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]))\n )\n )", + "expr": "avg(1 - (\n avg by(instance) (\n rate(node_cpu_seconds_total{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[$__rate_interval]) or\n rate(node_cpu{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[$__rate_interval])\n )\n))\n", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -270,7 +270,7 @@ "tableColumn": "", "targets": [ { - "expr": "avg (((node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})- (\n (node_memory_MemFree{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemFree_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Cached{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Cached_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) + \n (node_memory_Buffers{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Buffers_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (node_memory_Slab{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_Slab_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"})\n )) /\n (node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$rgw_hosts|$mon_hosts|$mds_hosts).*\"} ))", + "expr": "avg ((\n (\n node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}\n ) - ((\n 
node_memory_MemFree{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_MemFree_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}) +\n (\n node_memory_Cached{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_Cached_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}\n ) + (\n node_memory_Buffers{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_Buffers_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}\n ) + (\n node_memory_Slab{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_Slab_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}\n )\n )\n) / (\n node_memory_MemTotal{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"} or\n node_memory_MemTotal_bytes{instance=~\"($osd_hosts|$rgw_hosts|$mon_hosts|$mds_hosts).*\"}\n))\n", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -353,7 +353,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum ((irate(node_disk_reads_completed{instance=~\"($osd_hosts).*\"}[5m]) or irate(node_disk_reads_completed_total{instance=~\"($osd_hosts).*\"}[5m]) ) + \n(irate(node_disk_writes_completed{instance=~\"($osd_hosts).*\"}[5m]) or irate(node_disk_writes_completed_total{instance=~\"($osd_hosts).*\"}[5m])))", + "expr": "sum ((\n rate(node_disk_reads_completed{instance=~\"($osd_hosts).*\"}[$__rate_interval]) or\n rate(node_disk_reads_completed_total{instance=~\"($osd_hosts).*\"}[$__rate_interval])\n) + (\n rate(node_disk_writes_completed{instance=~\"($osd_hosts).*\"}[$__rate_interval]) or\n rate(node_disk_writes_completed_total{instance=~\"($osd_hosts).*\"}[$__rate_interval])\n))\n", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -436,7 +436,7 @@ "tableColumn": "", "targets": [ { - "expr": "avg (\n label_replace((irate(node_disk_io_time_ms[5m]) / 10 ) or\n (irate(node_disk_io_time_seconds_total[5m]) * 100), 
\"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) *\n on(instance, device) group_left(ceph_daemon) label_replace(label_replace(ceph_disk_occupation_human{instance=~\"($osd_hosts).*\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\")\n)", + "expr": "avg (\n label_replace(\n (rate(node_disk_io_time_ms[$__rate_interval]) / 10 ) or\n (rate(node_disk_io_time_seconds_total[$__rate_interval]) * 100),\n \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n ) * on(instance, device) group_left(ceph_daemon) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", instance=~\"($osd_hosts).*\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^.:]*).*\"\n )\n)\n", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -519,7 +519,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum (\n (\n irate(node_network_receive_bytes{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m]) or\n irate(node_network_receive_bytes_total{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m])\n ) unless on (device, instance)\n label_replace((bonding_slaves > 0), \"device\", \"$1\", \"master\", \"(.+)\")\n) +\nsum (\n (\n irate(node_network_transmit_bytes{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[1m])\n ) unless on (device, instance)\n label_replace((bonding_slaves > 0), \"device\", \"$1\", \"master\", \"(.+)\")\n )\n", + "expr": "sum (\n (\n rate(node_network_receive_bytes{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_receive_bytes_total{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[$__rate_interval])\n ) unless on (device, instance)\n label_replace((bonding_slaves 
> 0), \"device\", \"$1\", \"master\", \"(.+)\")\n) +\nsum (\n (\n rate(node_network_transmit_bytes{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_transmit_bytes_total{instance=~\"($osd_hosts|mon_hosts|mds_hosts|rgw_hosts).*\",device!=\"lo\"}[$__rate_interval])\n ) unless on (device, instance)\n label_replace((bonding_slaves > 0), \"device\", \"$1\", \"master\", \"(.+)\")\n)\n", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -583,7 +583,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10,100 * ( 1 - (\n avg by(instance) \n (irate(node_cpu_seconds_total{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]) or\n irate(node_cpu{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[1m]))\n )\n )\n)", + "expr": "topk(10,\n 100 * (\n 1 - (\n avg by(instance) (\n rate(node_cpu_seconds_total{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[$__rate_interval]) or\n rate(node_cpu{mode='idle',instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\"}[$__rate_interval])\n )\n )\n )\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{instance}}", @@ -669,7 +669,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk(10, (sum by(instance) (\n(\n irate(node_network_receive_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m]) or\n irate(node_network_receive_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n) +\n(\n irate(node_network_transmit_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m]) or\n irate(node_network_transmit_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[1m])\n) unless on (device, instance)\n label_replace((bonding_slaves > 0), \"device\", \"$1\", \"master\", \"(.+)\"))\n))\n", + "expr": "topk(10, (sum 
by(instance) (\n(\n rate(node_network_receive_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_receive_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval])\n) +\n(\n rate(node_network_transmit_bytes{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval]) or\n rate(node_network_transmit_bytes_total{instance=~\"($osd_hosts|$mon_hosts|$mds_hosts|$rgw_hosts).*\",device!=\"lo\"}[$__rate_interval])\n) unless on (device, instance)\n label_replace((bonding_slaves > 0), \"device\", \"$1\", \"master\", \"(.+)\"))\n))\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{instance}}", @@ -713,11 +713,13 @@ ] } ], - "refresh": "10s", + "refresh": "30s", "rows": [ ], "schemaVersion": 16, "style": "dark", - "tags": [ ], + "tags": [ + "ceph-mixin" + ], "templating": { "list": [ { @@ -734,6 +736,46 @@ "regex": "", "type": "datasource" }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, { "allValue": null, "current": { }, @@ -744,7 +786,7 @@ "multi": false, "name": "osd_hosts", "options": [ ], - "query": "label_values(ceph_disk_occupation, exported_instance)", + "query": 
"label_values(ceph_disk_occupation{job=~\"$job\"}, exported_instance)", "refresh": 1, "regex": "([^.]*).*", "sort": 1, @@ -764,7 +806,7 @@ "multi": false, "name": "mon_hosts", "options": [ ], - "query": "label_values(ceph_mon_metadata, ceph_daemon)", + "query": "label_values(ceph_mon_metadata{job=~\"$job\"}, ceph_daemon)", "refresh": 1, "regex": "mon.(.*)", "sort": 1, @@ -784,7 +826,7 @@ "multi": false, "name": "mds_hosts", "options": [ ], - "query": "label_values(ceph_mds_inodes, ceph_daemon)", + "query": "label_values(ceph_mds_inodes{job=~\"$job\"}, ceph_daemon)", "refresh": 1, "regex": "mds.(.*)", "sort": 1, @@ -804,7 +846,7 @@ "multi": false, "name": "rgw_hosts", "options": [ ], - "query": "label_values(ceph_rgw_metadata, ceph_daemon)", + "query": "label_values(ceph_rgw_metadata{job=~\"$job\"}, ceph_daemon)", "refresh": 1, "regex": "rgw.(.*)", "sort": 1, diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/osd-device-details.json b/ceph/monitoring/ceph-mixin/dashboards_out/osd-device-details.json index 3b45dc967..384516fb0 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/osd-device-details.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/osd-device-details.json @@ -104,14 +104,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_osd_op_r_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m])", + "expr": "rate(ceph_osd_op_r_latency_sum{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_r_latency_count{job=~\"$job\"}[$__rate_interval])\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "read", "refId": "A" }, { - "expr": "irate(ceph_osd_op_w_latency_sum{ceph_daemon=~\"$osd\"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m])", + "expr": "rate(ceph_osd_op_w_latency_sum{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_w_latency_count{job=~\"$job\"}[$__rate_interval])\n", "format": 
"time_series", "intervalFactor": 1, "legendFormat": "write", @@ -202,14 +202,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_osd_op_r{ceph_daemon=~\"$osd\"}[1m])", + "expr": "rate(ceph_osd_op_r{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Reads", "refId": "A" }, { - "expr": "irate(ceph_osd_op_w{ceph_daemon=~\"$osd\"}[1m])", + "expr": "rate(ceph_osd_op_w{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Writes", @@ -300,14 +300,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_osd_op_r_out_bytes{ceph_daemon=~\"$osd\"}[1m])", + "expr": "rate(ceph_osd_op_r_out_bytes{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Read Bytes", "refId": "A" }, { - "expr": "irate(ceph_osd_op_w_in_bytes{ceph_daemon=~\"$osd\"}[1m])", + "expr": "rate(ceph_osd_op_w_in_bytes{job=~\"$job\", ceph_daemon=~\"$osd\"}[$__rate_interval])", "format": "time_series", "intervalFactor": 1, "legendFormat": "Write Bytes", @@ -417,14 +417,14 @@ "steppedLine": false, "targets": [ { - "expr": "(label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", + "expr": "(\n label_replace(\n rate(node_disk_read_time_seconds_total{}[$__rate_interval]) /\n rate(node_disk_reads_completed_total{}[$__rate_interval]),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n ) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", 
\"instance\", \"([^:.]*).*\"\n )\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{instance}}/{{device}} Reads", "refId": "A" }, { - "expr": "(label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"))", + "expr": "(\n label_replace(\n rate(node_disk_write_time_seconds_total{}[$__rate_interval]) /\n rate(node_disk_writes_completed_total{}[$__rate_interval]),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device)\n label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n )\n )\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{instance}}/{{device}} Writes", @@ -515,14 +515,14 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(irate(node_disk_writes_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(\n rate(node_disk_writes_completed_total{}[$__rate_interval]),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}} on {{instance}} Writes", "refId": "A" }, { - "expr": 
"label_replace(irate(node_disk_reads_completed_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(\n rate(node_disk_reads_completed_total{}[$__rate_interval]),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}} on {{instance}} Reads", @@ -613,14 +613,14 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(irate(node_disk_read_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(\n rate(node_disk_read_bytes_total{}[$__rate_interval]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{instance}} {{device}} Reads", "refId": "A" }, { - "expr": "label_replace(irate(node_disk_written_bytes_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": 
"label_replace(\n rate(node_disk_written_bytes_total{}[$__rate_interval]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"},\n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{instance}} {{device}} Writes", @@ -706,7 +706,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(irate(node_disk_io_time_seconds_total[1m]), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\")", + "expr": "label_replace(\n rate(node_disk_io_time_seconds_total{}[$__rate_interval]),\n \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n) and on (instance, device) label_replace(\n label_replace(\n ceph_disk_occupation_human{job=~\"$job\", ceph_daemon=~\"$osd\"}, \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ), \"instance\", \"$1\", \"instance\", \"([^:.]*).*\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device}} on {{instance}}", @@ -750,11 +750,13 @@ ] } ], - "refresh": "", + "refresh": "30s", "rows": [ ], "schemaVersion": 16, "style": "dark", - "tags": [ ], + "tags": [ + "ceph-mixin" + ], "templating": { "list": [ { @@ -771,6 +773,46 @@ "regex": "", "type": "datasource" }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, 
+ "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, { "allValue": null, "current": { }, @@ -781,7 +823,7 @@ "multi": false, "name": "osd", "options": [ ], - "query": "label_values(ceph_osd_metadata,ceph_daemon)", + "query": "label_values(ceph_osd_metadata{job=~\"$job\"}, ceph_daemon)", "refresh": 1, "regex": "(.*)", "sort": 1, diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/osds-overview.json b/ceph/monitoring/ceph-mixin/dashboards_out/osds-overview.json index ffcf06015..5ea8955b2 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/osds-overview.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/osds-overview.json @@ -94,21 +94,21 @@ "steppedLine": false, "targets": [ { - "expr": "avg (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)", + "expr": "avg (\n rate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_r_latency_count{job=~\"$job\"}[$__rate_interval]) * 1000\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "AVG read", "refId": "A" }, { - "expr": "max (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)", + "expr": "max(\n rate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_r_latency_count{job=~\"$job\"}[$__rate_interval]) * 1000\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "MAX read", "refId": "B" }, { - "expr": "quantile(0.95,\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n)", + "expr": "quantile(0.95,\n (\n 
rate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_r_latency_count{job=~\"$job\"}[$__rate_interval])\n * 1000\n )\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "@95%ile", @@ -222,7 +222,7 @@ ], "targets": [ { - "expr": "topk(10,\n (sort(\n (irate(ceph_osd_op_r_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m]) * 1000)\n ))\n)\n\n", + "expr": "topk(10,\n (sort(\n (\n rate(ceph_osd_op_r_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_r_latency_count{job=~\"$job\"}[$__rate_interval]) *\n 1000\n )\n ))\n)\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -281,21 +281,21 @@ "steppedLine": false, "targets": [ { - "expr": "avg (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)", + "expr": "avg(\n rate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_w_latency_count{job=~\"$job\"}[$__rate_interval])\n * 1000\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "AVG write", "refId": "A" }, { - "expr": "max (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)", + "expr": "max(\n rate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_w_latency_count{job=~\"$job\"}[$__rate_interval]) *\n 1000\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "MAX write", "refId": "B" }, { - "expr": "quantile(0.95,\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n)", + "expr": "quantile(0.95, (\n rate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_w_latency_count{job=~\"$job\"}[$__rate_interval]) *\n 1000\n))\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "@95%ile write", @@ -409,7 +409,7 @@ 
], "targets": [ { - "expr": "topk(10,\n (sort(\n (irate(ceph_osd_op_w_latency_sum[1m]) / on (ceph_daemon) irate(ceph_osd_op_w_latency_count[1m]) * 1000)\n ))\n)\n\n", + "expr": "topk(10,\n (sort(\n (rate(ceph_osd_op_w_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n on (ceph_daemon) rate(ceph_osd_op_w_latency_count{job=~\"$job\"}[$__rate_interval]) *\n 1000)\n ))\n)\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -443,7 +443,7 @@ "pieType": "pie", "targets": [ { - "expr": "count by (device_class) (ceph_osd_metadata)", + "expr": "count by (device_class) (ceph_osd_metadata{job=~\"$job\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{device_class}}", @@ -476,14 +476,14 @@ "pieType": "pie", "targets": [ { - "expr": "count(ceph_bluefs_wal_total_bytes)", + "expr": "count(ceph_bluefs_wal_total_bytes{job=~\"$job\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "bluestore", "refId": "A" }, { - "expr": "absent(ceph_bluefs_wal_total_bytes)*count(ceph_osd_metadata)", + "expr": "absent(ceph_bluefs_wal_total_bytes{job=~\"$job\"}) * count(ceph_osd_metadata{job=~\"$job\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "filestore", @@ -514,63 +514,63 @@ "pieType": "pie", "targets": [ { - "expr": "count(ceph_osd_stat_bytes < 1099511627776)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} < 1099511627776)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<1TB", "refId": "A" }, { - "expr": "count(ceph_osd_stat_bytes >= 1099511627776 < 2199023255552)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 1099511627776 < 2199023255552)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<2TB", "refId": "B" }, { - "expr": "count(ceph_osd_stat_bytes >= 2199023255552 < 3298534883328)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 2199023255552 < 3298534883328)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<3TB", "refId": "C" }, { - "expr": 
"count(ceph_osd_stat_bytes >= 3298534883328 < 4398046511104)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 3298534883328 < 4398046511104)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<4TB", "refId": "D" }, { - "expr": "count(ceph_osd_stat_bytes >= 4398046511104 < 6597069766656)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 4398046511104 < 6597069766656)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<6TB", "refId": "E" }, { - "expr": "count(ceph_osd_stat_bytes >= 6597069766656 < 8796093022208)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 6597069766656 < 8796093022208)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<8TB", "refId": "F" }, { - "expr": "count(ceph_osd_stat_bytes >= 8796093022208 < 10995116277760)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 8796093022208 < 10995116277760)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<10TB", "refId": "G" }, { - "expr": "count(ceph_osd_stat_bytes >= 10995116277760 < 13194139533312)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 10995116277760 < 13194139533312)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<12TB", "refId": "H" }, { - "expr": "count(ceph_osd_stat_bytes >= 13194139533312)", + "expr": "count(ceph_osd_stat_bytes{job=~\"$job\"} >= 13194139533312)", "format": "time_series", "intervalFactor": 2, "legendFormat": "<12TB+", @@ -623,7 +623,7 @@ "steppedLine": false, "targets": [ { - "expr": "ceph_osd_numpg\n", + "expr": "ceph_osd_numpg{job=~\"$job\"}", "format": "time_series", "instant": true, "intervalFactor": 1, @@ -672,8 +672,9 @@ "colorBackground": false, "colorValue": true, "colors": [ - "#d44a3a", - "#299c46" + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" ], "datasource": "$datasource", "description": "This gauge panel shows onode Hits ratio to help determine if increasing RAM per OSD could help improve the performance of the cluster", @@ -728,7 +729,7 
@@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_bluestore_onode_hits)/(sum(ceph_bluestore_onode_hits) + sum(ceph_bluestore_onode_misses))", + "expr": "sum(ceph_bluestore_onode_hits{job=~\"$job\"}) / (\n sum(ceph_bluestore_onode_hits{job=~\"$job\"}) +\n sum(ceph_bluestore_onode_misses{job=~\"$job\"})\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "", @@ -810,14 +811,14 @@ "steppedLine": false, "targets": [ { - "expr": "round(sum(irate(ceph_pool_rd[30s])))", + "expr": "round(sum(rate(ceph_pool_rd{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Reads", "refId": "A" }, { - "expr": "round(sum(irate(ceph_pool_wr[30s])))", + "expr": "round(sum(rate(ceph_pool_wr{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Writes", @@ -861,11 +862,13 @@ ] } ], - "refresh": "10s", + "refresh": "30s", "rows": [ ], "schemaVersion": 16, "style": "dark", - "tags": [ ], + "tags": [ + "ceph-mixin" + ], "templating": { "list": [ { @@ -881,6 +884,46 @@ "refresh": 1, "regex": "", "type": "datasource" + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/pool-detail.json 
b/ceph/monitoring/ceph-mixin/dashboards_out/pool-detail.json index 9a8518e15..dc8b4152a 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/pool-detail.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/pool-detail.json @@ -104,7 +104,7 @@ "tableColumn": "", "targets": [ { - "expr": "(ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail)) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "(ceph_pool_stored{job=~\"$job\"} / (ceph_pool_stored{job=~\"$job\"} + ceph_pool_max_avail{job=~\"$job\"})) *\n on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "", @@ -186,7 +186,7 @@ "tableColumn": "", "targets": [ { - "expr": "(ceph_pool_max_avail / deriv(ceph_pool_stored[6h])) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"} > 0", + "expr": "(ceph_pool_max_avail{job=~\"$job\"} / deriv(ceph_pool_stored{job=~\"$job\"}[6h])) *\n on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"} > 0\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "", @@ -240,7 +240,7 @@ "lines": true, "linewidth": 1, "links": [ ], - "nullPointMode": "null as zero", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -252,7 +252,7 @@ "steppedLine": false, "targets": [ { - "expr": "deriv(ceph_pool_objects[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "deriv(ceph_pool_objects{job=~\"$job\"}[1m]) *\n on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Objects per second", @@ -329,7 +329,7 @@ "lines": true, "linewidth": 1, "links": [ ], - "nullPointMode": "null as zero", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -346,14 +346,14 
@@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_pool_rd[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "rate(ceph_pool_rd{job=~\"$job\"}[$__rate_interval]) *\n on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "reads", "refId": "A" }, { - "expr": "irate(ceph_pool_wr[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "rate(ceph_pool_wr{job=~\"$job\"}[$__rate_interval]) *\n on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "writes", @@ -430,7 +430,7 @@ "lines": true, "linewidth": 1, "links": [ ], - "nullPointMode": "null as zero", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -447,14 +447,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_pool_rd_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "rate(ceph_pool_rd_bytes{job=~\"$job\"}[$__rate_interval]) +\n on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "reads", "refId": "A" }, { - "expr": "irate(ceph_pool_wr_bytes[1m]) + on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "rate(ceph_pool_wr_bytes{job=~\"$job\"}[$__rate_interval]) +\n on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "writes", @@ -531,7 +531,7 @@ "lines": true, "linewidth": 1, "links": [ ], - "nullPointMode": "null as zero", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -543,7 +543,7 @@ "steppedLine": false, "targets": [ 
{ - "expr": "ceph_pool_objects * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~\"$pool_name\"}", + "expr": "ceph_pool_objects{job=~\"$job\"} *\n on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\", name=~\"$pool_name\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Number of Objects", @@ -587,17 +587,19 @@ ] } ], - "refresh": "15s", + "refresh": "30s", "rows": [ ], "schemaVersion": 22, "style": "dark", - "tags": [ ], + "tags": [ + "ceph-mixin" + ], "templating": { "list": [ { "current": { - "text": "Prometheus admin.virt1.home.fajerski.name:9090", - "value": "Prometheus admin.virt1.home.fajerski.name:9090" + "text": "default", + "value": "default" }, "hide": 0, "label": "Data Source", @@ -608,6 +610,46 @@ "regex": "", "type": "datasource" }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, { "allValue": null, "current": { }, @@ -618,7 +660,7 @@ "multi": false, "name": "pool_name", "options": [ ], - "query": "label_values(ceph_pool_metadata,name)", + "query": "label_values(ceph_pool_metadata{job=~\"$job\"}, name)", "refresh": 1, "regex": "", "sort": 1, @@ -638,7 +680,6 @@ "refresh_intervals": [ "5s", "10s", - "15s", "30s", "1m", "5m", diff --git 
a/ceph/monitoring/ceph-mixin/dashboards_out/pool-overview.json b/ceph/monitoring/ceph-mixin/dashboards_out/pool-overview.json index 5767d40eb..7f042aa5b 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/pool-overview.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/pool-overview.json @@ -85,7 +85,7 @@ "tableColumn": "", "targets": [ { - "expr": "count(ceph_pool_metadata)", + "expr": "count(ceph_pool_metadata{job=~\"$job\"})", "format": "table", "instant": true, "intervalFactor": 1, @@ -168,7 +168,7 @@ "tableColumn": "", "targets": [ { - "expr": "count(ceph_pool_metadata{compression_mode!=\"none\"})", + "expr": "count(ceph_pool_metadata{job=~\"$job\", compression_mode!=\"none\"})", "format": "", "intervalFactor": 1, "legendFormat": "", @@ -250,7 +250,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_osd_stat_bytes)", + "expr": "sum(ceph_osd_stat_bytes{job=~\"$job\"})", "format": "", "intervalFactor": 1, "legendFormat": "", @@ -332,7 +332,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_pool_bytes_used)", + "expr": "sum(ceph_pool_bytes_used{job=~\"$job\"})", "format": "", "instant": true, "intervalFactor": 1, @@ -415,7 +415,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_pool_stored)", + "expr": "sum(ceph_pool_stored{job=~\"$job\"})", "format": "", "instant": true, "intervalFactor": 1, @@ -498,7 +498,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used)", + "expr": "sum(\n ceph_pool_compress_under_bytes{job=~\"$job\"} -\n ceph_pool_compress_bytes_used{job=~\"$job\"}\n)\n", "format": "", "intervalFactor": 1, "legendFormat": "", @@ -528,7 +528,7 @@ "#d44a3a" ], "datasource": "$datasource", - "description": "Indicates how suitable the data is within the pools that are/have been enabled for compression - averaged across all pools holding compressed data\n", + "description": "Indicates how suitable the data is within the pools that are/have been enabled for compression - 
averaged across all pools holding compressed data", "format": "percent", "gauge": { "maxValue": 100, @@ -580,7 +580,7 @@ "tableColumn": "", "targets": [ { - "expr": "(sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_stored_raw and ceph_pool_compress_under_bytes > 0)) * 100", + "expr": "(\n sum(ceph_pool_compress_under_bytes{job=~\"$job\"} > 0) /\n sum(ceph_pool_stored_raw{job=~\"$job\"} and ceph_pool_compress_under_bytes{job=~\"$job\"} > 0)\n) * 100\n", "format": "table", "intervalFactor": 1, "legendFormat": "", @@ -662,7 +662,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(ceph_pool_compress_under_bytes > 0) / sum(ceph_pool_compress_bytes_used > 0)", + "expr": "sum(\n ceph_pool_compress_under_bytes{job=~\"$job\"} > 0)\n / sum(ceph_pool_compress_bytes_used{job=~\"$job\"} > 0\n)\n", "format": "", "intervalFactor": 1, "legendFormat": "", @@ -1053,7 +1053,7 @@ ], "targets": [ { - "expr": "(ceph_pool_compress_under_bytes / ceph_pool_compress_bytes_used > 0) and on(pool_id) (((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100 > 0.5)", + "expr": "(\n ceph_pool_compress_under_bytes{job=~\"$job\"} /\n ceph_pool_compress_bytes_used{job=~\"$job\"} > 0\n) and on(pool_id) (\n (\n (ceph_pool_compress_under_bytes{job=~\"$job\"} > 0) /\n ceph_pool_stored_raw{job=~\"$job\"}\n ) * 100 > 0.5\n)\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -1061,7 +1061,7 @@ "refId": "A" }, { - "expr": "ceph_pool_max_avail * on(pool_id) group_left(name) ceph_pool_metadata", + "expr": "ceph_pool_max_avail{job=~\"$job\"} *\n on(pool_id) group_left(name) ceph_pool_metadata{job=~\"$job\"}\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -1069,7 +1069,7 @@ "refId": "B" }, { - "expr": "((ceph_pool_compress_under_bytes > 0) / ceph_pool_stored_raw) * 100", + "expr": "(\n (ceph_pool_compress_under_bytes{job=~\"$job\"} > 0) /\n ceph_pool_stored_raw{job=~\"$job\"}\n) * 100\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -1077,7 
+1077,7 @@ "refId": "C" }, { - "expr": "(ceph_pool_percent_used * on(pool_id) group_left(name) ceph_pool_metadata)", + "expr": "ceph_pool_percent_used{job=~\"$job\"} *\n on(pool_id) group_left(name) ceph_pool_metadata{job=~\"$job\"}\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -1085,7 +1085,7 @@ "refId": "D" }, { - "expr": "(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used > 0)", + "expr": "ceph_pool_compress_under_bytes{job=~\"$job\"} -\n ceph_pool_compress_bytes_used{job=~\"$job\"} > 0\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -1093,7 +1093,7 @@ "refId": "E" }, { - "expr": "delta(ceph_pool_stored[5d])", + "expr": "delta(ceph_pool_stored{job=~\"$job\"}[5d])", "format": "table", "instant": true, "intervalFactor": 1, @@ -1101,7 +1101,7 @@ "refId": "F" }, { - "expr": "rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])", + "expr": "rate(ceph_pool_rd{job=~\"$job\"}[$__rate_interval])\n + rate(ceph_pool_wr{job=~\"$job\"}[$__rate_interval])\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -1109,7 +1109,7 @@ "refId": "G" }, { - "expr": "rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])", + "expr": "rate(ceph_pool_rd_bytes{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_pool_wr_bytes{job=~\"$job\"}[$__rate_interval])\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -1117,7 +1117,7 @@ "refId": "H" }, { - "expr": "ceph_pool_metadata", + "expr": "ceph_pool_metadata{job=~\"$job\"}", "format": "table", "instant": true, "intervalFactor": 1, @@ -1125,7 +1125,7 @@ "refId": "I" }, { - "expr": "ceph_pool_stored * on(pool_id) group_left ceph_pool_metadata", + "expr": "ceph_pool_stored{job=~\"$job\"} * on(pool_id) group_left ceph_pool_metadata{job=~\"$job\"}", "format": "table", "instant": true, "intervalFactor": 1, @@ -1133,7 +1133,7 @@ "refId": "J" }, { - "expr": "ceph_pool_metadata{compression_mode!=\"none\"}", + "expr": "ceph_pool_metadata{job=~\"$job\", compression_mode!=\"none\"}", 
"format": "table", "instant": true, "intervalFactor": 1, @@ -1185,7 +1185,7 @@ "lines": true, "linewidth": 1, "links": [ ], - "nullPointMode": "null as zero", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -1197,14 +1197,14 @@ "steppedLine": false, "targets": [ { - "expr": "topk($topk,round((rate(ceph_pool_rd[30s]) + rate(ceph_pool_wr[30s])),1) * on(pool_id) group_left(instance,name) ceph_pool_metadata) ", + "expr": "topk($topk,\n round(\n (\n rate(ceph_pool_rd{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_pool_wr{job=~\"$job\"}[$__rate_interval])\n ), 1\n ) * on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\"})\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{name}} ", "refId": "A" }, { - "expr": "topk($topk,rate(ceph_pool_wr[30s]) + on(pool_id) group_left(instance,name) ceph_pool_metadata) ", + "expr": "topk($topk,\n rate(ceph_pool_wr{job=~\"$job\"}[$__rate_interval]) +\n on(pool_id) group_left(instance,name) ceph_pool_metadata{job=~\"$job\"}\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{name}} - write", @@ -1278,7 +1278,7 @@ "lines": true, "linewidth": 1, "links": [ ], - "nullPointMode": "null as zero", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -1290,7 +1290,7 @@ "steppedLine": false, "targets": [ { - "expr": "topk($topk,(rate(ceph_pool_rd_bytes[30s]) + rate(ceph_pool_wr_bytes[30s])) * on(pool_id) group_left(instance,name) ceph_pool_metadata)", + "expr": "topk($topk,\n (\n rate(ceph_pool_rd_bytes{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_pool_wr_bytes{job=~\"$job\"}[$__rate_interval])\n ) * on(pool_id) group_left(instance, name) ceph_pool_metadata{job=~\"$job\"}\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{name}}", @@ -1364,7 +1364,7 @@ "lines": true, "linewidth": 1, "links": [ ], - "nullPointMode": "null as zero", + "nullPointMode": "null", "percentage": false, "pointradius": 5, 
"points": false, @@ -1376,7 +1376,7 @@ "steppedLine": false, "targets": [ { - "expr": "ceph_pool_bytes_used * on(pool_id) group_right ceph_pool_metadata", + "expr": "ceph_pool_bytes_used{job=~\"$job\"} * on(pool_id) group_right ceph_pool_metadata{job=~\"$job\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{name}}", @@ -1420,17 +1420,19 @@ ] } ], - "refresh": "15s", + "refresh": "30s", "rows": [ ], "schemaVersion": 22, "style": "dark", - "tags": [ ], + "tags": [ + "ceph-mixin" + ], "templating": { "list": [ { "current": { - "text": "Dashboard1", - "value": "Dashboard1" + "text": "default", + "value": "default" }, "hide": 0, "label": "Data Source", @@ -1441,6 +1443,46 @@ "regex": "", "type": "datasource" }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, { "allValue": null, "current": { @@ -1472,7 +1514,6 @@ "refresh_intervals": [ "5s", "10s", - "15s", "30s", "1m", "5m", diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-detail.json b/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-detail.json index 4d68906f2..a0f8f3537 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-detail.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-detail.json @@ -105,14 +105,14 @@ "steppedLine": false, "targets": [ { - 
"expr": "sum by (instance_id) (rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s])) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "sum by (instance_id) (\n rate(ceph_rgw_get_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_get_initial_lat_count{job=~\"$job\"}[$__rate_interval])\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "GET {{ceph_daemon}}", "refId": "A" }, { - "expr": "sum by (instance_id) (rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s])) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "sum by (instance_id) (\n rate(ceph_rgw_put_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_put_initial_lat_count{job=~\"$job\"}[$__rate_interval])\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "PUT {{ceph_daemon}}", @@ -198,14 +198,14 @@ "steppedLine": false, "targets": [ { - "expr": "rate(ceph_rgw_get_b[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "rate(ceph_rgw_get_b{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "GETs {{ceph_daemon}}", "refId": "A" }, { - "expr": "rate(ceph_rgw_put_b[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "rate(ceph_rgw_put_b{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon)\n ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", "format": 
"time_series", "intervalFactor": 1, "legendFormat": "PUTs {{ceph_daemon}}", @@ -297,28 +297,28 @@ "steppedLine": false, "targets": [ { - "expr": "rate(ceph_rgw_failed_req[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "rate(ceph_rgw_failed_req{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\",ceph_daemon=~\"$rgw_servers\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Requests Failed {{ceph_daemon}}", "refId": "A" }, { - "expr": "rate(ceph_rgw_get[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "rate(ceph_rgw_get{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "GETs {{ceph_daemon}}", "refId": "B" }, { - "expr": "rate(ceph_rgw_put[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "rate(ceph_rgw_put{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "PUTs {{ceph_daemon}}", "refId": "C" }, { - "expr": "(\n rate(ceph_rgw_req[30s]) -\n (rate(ceph_rgw_get[30s]) + rate(ceph_rgw_put[30s]))\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "(\n rate(ceph_rgw_req{job=~\"$job\"}[$__rate_interval]) -\n (\n rate(ceph_rgw_get{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_rgw_put{job=~\"$job\"}[$__rate_interval])\n )\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Other {{ceph_daemon}}", @@ -387,28 +387,28 @@ 
"pieType": "pie", "targets": [ { - "expr": "rate(ceph_rgw_failed_req[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "rate(ceph_rgw_failed_req{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Failures {{ceph_daemon}}", "refId": "A" }, { - "expr": "rate(ceph_rgw_get[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "rate(ceph_rgw_get{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "GETs {{ceph_daemon}}", "refId": "B" }, { - "expr": "rate(ceph_rgw_put[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "rate(ceph_rgw_put{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "PUTs {{ceph_daemon}}", "refId": "C" }, { - "expr": "(\n rate(ceph_rgw_req[30s]) -\n (rate(ceph_rgw_get[30s]) + rate(ceph_rgw_put[30s]))\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{ceph_daemon=~\"$rgw_servers\"}", + "expr": "(\n rate(ceph_rgw_req{job=~\"$job\"}[$__rate_interval]) -\n (\n rate(ceph_rgw_get{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_rgw_put{job=~\"$job\"}[$__rate_interval])\n )\n) * on (instance_id) group_left (ceph_daemon)\n ceph_rgw_metadata{job=~\"$job\", ceph_daemon=~\"$rgw_servers\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Other (DELETE,LIST) {{ceph_daemon}}", @@ -420,11 +420,12 @@ "valueName": "current" } ], - "refresh": "15s", + "refresh": "30s", "rows": [ ], 
"schemaVersion": 16, "style": "dark", "tags": [ + "ceph-mixin", "overview" ], "templating": { @@ -443,6 +444,46 @@ "regex": "", "type": "datasource" }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, { "allValue": null, "current": { }, @@ -453,7 +494,7 @@ "multi": false, "name": "rgw_servers", "options": [ ], - "query": "label_values(ceph_rgw_metadata, ceph_daemon)", + "query": "label_values(ceph_rgw_metadata{job=~\"$job\"}, ceph_daemon)", "refresh": 1, "regex": "", "sort": 1, @@ -473,7 +514,6 @@ "refresh_intervals": [ "5s", "10s", - "15s", "30s", "1m", "5m", diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-overview.json b/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-overview.json index 7f9375290..4332aac5e 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-overview.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-overview.json @@ -99,14 +99,14 @@ "steppedLine": false, "targets": [ { - "expr": "rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata", + "expr": "rate(ceph_rgw_get_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_get_initial_lat_count{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) 
group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "GET AVG", "refId": "A" }, { - "expr": "rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata", + "expr": "rate(ceph_rgw_put_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_put_initial_lat_count{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"}\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "PUT AVG", @@ -192,7 +192,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (rgw_host) (label_replace(rate(ceph_rgw_req[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata, \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"))", + "expr": "sum by (rgw_host) (\n label_replace(\n rate(ceph_rgw_req{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"},\n \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"\n )\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rgw_host}}", @@ -278,7 +278,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(\n rate(ceph_rgw_get_initial_lat_sum[30s]) /\n rate(ceph_rgw_get_initial_lat_count[30s]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata,\n\"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\")", + "expr": "label_replace(\n rate(ceph_rgw_get_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_get_initial_lat_count{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"},\n \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rgw_host}}", @@ -364,14 +364,14 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(ceph_rgw_get_b[30s]))", + "expr": 
"sum(rate(ceph_rgw_get_b{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "GETs", "refId": "A" }, { - "expr": "sum(rate(ceph_rgw_put_b[30s]))", + "expr": "sum(rate(ceph_rgw_put_b{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "PUTs", @@ -457,7 +457,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(sum by (instance_id) (\n rate(ceph_rgw_get_b[30s]) + \n rate(ceph_rgw_put_b[30s])\n) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata, \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\")", + "expr": "label_replace(sum by (instance_id) (\n rate(ceph_rgw_get_b{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_rgw_put_b{job=~\"$job\"}[$__rate_interval])) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"},\n \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rgw_host}}", @@ -543,7 +543,7 @@ "steppedLine": false, "targets": [ { - "expr": "label_replace(\n rate(ceph_rgw_put_initial_lat_sum[30s]) /\n rate(ceph_rgw_put_initial_lat_count[30s]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata,\n\"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\")", + "expr": "label_replace(\n rate(ceph_rgw_put_initial_lat_sum{job=~\"$job\"}[$__rate_interval]) /\n rate(ceph_rgw_put_initial_lat_count{job=~\"$job\"}[$__rate_interval]) *\n on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{job=~\"$job\"},\n \"rgw_host\", \"$1\", \"ceph_daemon\", \"rgw.(.*)\"\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rgw_host}}", @@ -673,14 +673,14 @@ "steppedLine": false, "targets": [ { - "expr": "sum(irate(haproxy_frontend_http_responses_total{code=~\"$code\",instance=~\"$ingress_service\",proxy=~\"frontend\"}[5m])) by (code)", + "expr": "sum(\n rate(\n haproxy_frontend_http_responses_total{code=~\"$code\", job=~\"$job_haproxy\", 
instance=~\"$ingress_service\", proxy=~\"frontend\"}[$__rate_interval]\n )\n) by (code)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Frontend {{ code }}", "refId": "A" }, { - "expr": "sum(irate(haproxy_backend_http_responses_total{code=~\"$code\",instance=~\"$ingress_service\",proxy=~\"backend\"}[5m])) by (code)", + "expr": "sum(\n rate(\n haproxy_backend_http_responses_total{code=~\"$code\", job=~\"$job_haproxy\", instance=~\"$ingress_service\", proxy=~\"backend\"}[$__rate_interval]\n )\n) by (code)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Backend {{ code }}", @@ -777,49 +777,49 @@ "steppedLine": false, "targets": [ { - "expr": "sum(irate(haproxy_frontend_http_requests_total{proxy=~\"frontend\",instance=~\"$ingress_service\"}[5m])) by (instance)", + "expr": "sum(\n rate(\n haproxy_frontend_http_requests_total{proxy=~\"frontend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n )\n) by (instance)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Requests", "refId": "A" }, { - "expr": "sum(irate(haproxy_backend_response_errors_total{proxy=~\"backend\",instance=~\"$ingress_service\"}[5m])) by (instance)", + "expr": "sum(\n rate(\n haproxy_backend_response_errors_total{proxy=~\"backend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n )\n) by (instance)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "Response errors", "refId": "B" }, { - "expr": "sum(irate(haproxy_frontend_request_errors_total{proxy=~\"frontend\",instance=~\"$ingress_service\"}[5m])) by (instance)", + "expr": "sum(\n rate(\n haproxy_frontend_request_errors_total{proxy=~\"frontend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n )\n) by (instance)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Requests errors", "refId": "C" }, { - "expr": 
"sum(irate(haproxy_backend_redispatch_warnings_total{proxy=~\"backend\",instance=~\"$ingress_service\"}[5m])) by (instance)", + "expr": "sum(\n rate(\n haproxy_backend_redispatch_warnings_total{proxy=~\"backend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n )\n) by (instance)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "Backend redispatch", "refId": "D" }, { - "expr": "sum(irate(haproxy_backend_retry_warnings_total{proxy=~\"backend\",instance=~\"$ingress_service\"}[5m])) by (instance)", + "expr": "sum(\n rate(\n haproxy_backend_retry_warnings_total{proxy=~\"backend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n )\n) by (instance)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "Backend retry", "refId": "E" }, { - "expr": "sum(irate(haproxy_frontend_requests_denied_total{proxy=~\"frontend\",instance=~\"$ingress_service\"}[5m])) by (instance)", + "expr": "sum(\n rate(\n haproxy_frontend_requests_denied_total{proxy=~\"frontend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n )\n) by (instance)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "Request denied", "refId": "F" }, { - "expr": "sum(haproxy_backend_current_queue{proxy=~\"backend\",instance=~\"$ingress_service\"}) by (instance)", + "expr": "sum(\n haproxy_backend_current_queue{proxy=~\"backend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}\n) by (instance)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "Backend Queued", @@ -912,21 +912,21 @@ "steppedLine": false, "targets": [ { - "expr": "sum(irate(haproxy_frontend_connections_total{proxy=~\"frontend\",instance=~\"$ingress_service\"}[5m])) by (instance)", + "expr": "sum(\n rate(\n haproxy_frontend_connections_total{proxy=~\"frontend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n )\n) by (instance)\n", "format": "time_series", "intervalFactor": 1, 
"legendFormat": "Front", "refId": "A" }, { - "expr": "sum(irate(haproxy_backend_connection_attempts_total{proxy=~\"backend\",instance=~\"$ingress_service\"}[5m])) by (instance)", + "expr": "sum(\n rate(\n haproxy_backend_connection_attempts_total{proxy=~\"backend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n )\n) by (instance)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Back", "refId": "B" }, { - "expr": "sum(irate(haproxy_backend_connection_errors_total{proxy=~\"backend\",instance=~\"$ingress_service\"}[5m])) by (instance)", + "expr": "sum(\n rate(\n haproxy_backend_connection_errors_total{proxy=~\"backend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n )\n) by (instance)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Back errors", @@ -1019,28 +1019,28 @@ "steppedLine": false, "targets": [ { - "expr": "sum(irate(haproxy_frontend_bytes_in_total{proxy=~\"frontend\",instance=~\"$ingress_service\"}[5m])*8) by (instance)", + "expr": "sum(\n rate(\n haproxy_frontend_bytes_in_total{proxy=~\"frontend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n ) * 8\n) by (instance)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "IN Front", "refId": "A" }, { - "expr": "sum(irate(haproxy_frontend_bytes_out_total{proxy=~\"frontend\",instance=~\"$ingress_service\"}[5m])*8) by (instance)", + "expr": "sum(\n rate(\n haproxy_frontend_bytes_out_total{proxy=~\"frontend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n ) * 8\n) by (instance)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "OUT Front", "refId": "B" }, { - "expr": "sum(irate(haproxy_backend_bytes_in_total{proxy=~\"backend\",instance=~\"$ingress_service\"}[5m])*8) by (instance)", + "expr": "sum(\n rate(\n haproxy_backend_bytes_in_total{proxy=~\"backend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n ) * 
8\n) by (instance)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "IN Back", "refId": "C" }, { - "expr": "sum(irate(haproxy_backend_bytes_out_total{proxy=~\"backend\",instance=~\"$ingress_service\"}[5m])*8) by (instance)", + "expr": "sum(\n rate(\n haproxy_backend_bytes_out_total{proxy=~\"backend\", job=~\"$job_haproxy\", instance=~\"$ingress_service\"}[$__rate_interval]\n ) * 8\n) by (instance)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "OUT Back", @@ -1084,15 +1084,70 @@ ] } ], - "refresh": "15s", + "refresh": "30s", "rows": [ ], "schemaVersion": 16, "style": "dark", "tags": [ + "ceph-mixin", "overview" ], "templating": { "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, { "allValue": null, "current": { }, @@ -1103,9 +1158,9 @@ "multi": false, "name": "rgw_servers", "options": [ ], - "query": "label_values(ceph_rgw_metadata, ceph_daemon)", + "query": "label_values(ceph_rgw_metadata{job=~\"$job\"}, ceph_daemon)", "refresh": 1, - "regex": "", + "regex": "RGW Server", "sort": 1, 
"tagValuesQuery": "", "tags": [ ], @@ -1123,7 +1178,7 @@ "multi": false, "name": "code", "options": [ ], - "query": "label_values(haproxy_server_http_responses_total{instance=~\"$ingress_service\"}, code)", + "query": "label_values(haproxy_server_http_responses_total{job=~\"$job_haproxy\", instance=~\"$ingress_service\"}, code)", "refresh": 1, "regex": "", "sort": 1, @@ -1134,18 +1189,18 @@ "useTags": false }, { - "allValue": null, + "allValue": ".+", "current": { }, "datasource": "$datasource", "hide": 0, "includeAll": true, - "label": "Ingress Service", - "multi": false, - "name": "ingress_service", + "label": "job haproxy", + "multi": true, + "name": "job_haproxy", "options": [ ], - "query": "label_values(haproxy_server_status, instance)", + "query": "label_values(haproxy_server_status, job)", "refresh": 1, - "regex": "", + "regex": "(.*)", "sort": 1, "tagValuesQuery": "", "tags": [ ], @@ -1154,18 +1209,24 @@ "useTags": false }, { - "current": { - "text": "default", - "value": "default" - }, + "allValue": null, + "current": { }, + "datasource": "$datasource", "hide": 0, - "label": "Data Source", - "name": "datasource", + "includeAll": true, + "label": "Ingress Service", + "multi": false, + "name": "ingress_service", "options": [ ], - "query": "prometheus", + "query": "label_values(haproxy_server_status{job=~\"$job_haproxy\"}, instance)", "refresh": 1, "regex": "", - "type": "datasource" + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, @@ -1177,7 +1238,6 @@ "refresh_intervals": [ "5s", "10s", - "15s", "30s", "1m", "5m", diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-sync-overview.json b/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-sync-overview.json index 232242acc..e0c3037d5 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-sync-overview.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/radosgw-sync-overview.json @@ -80,7 +80,7 @@ "steppedLine": false, 
"targets": [ { - "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_sum[30s]))", + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_sum{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{source_zone}}", @@ -166,7 +166,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_count[30s]))", + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_bytes_count{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{source_zone}}", @@ -252,7 +252,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_poll_latency_sum[30s]))", + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_poll_latency_sum{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{source_zone}}", @@ -338,7 +338,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_errors[30s]))", + "expr": "sum by (source_zone) (rate(ceph_data_sync_from_zone_fetch_errors{job=~\"$job\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{source_zone}}", @@ -382,28 +382,63 @@ ] } ], - "refresh": "15s", + "refresh": "30s", "rows": [ ], "schemaVersion": 16, "style": "dark", "tags": [ + "ceph-mixin", "overview" ], "templating": { "list": [ { - "allValue": null, + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data Source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, 
cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", "current": { }, "datasource": "$datasource", "hide": 0, "includeAll": true, - "label": "", - "multi": false, - "name": "rgw_servers", + "label": "job", + "multi": true, + "name": "job", "options": [ ], - "query": "prometehus", + "query": "label_values(ceph_osd_metadata{}, job)", "refresh": 1, - "regex": "", + "regex": "(.*)", "sort": 1, "tagValuesQuery": "", "tags": [ ], @@ -412,18 +447,24 @@ "useTags": false }, { - "current": { - "text": "default", - "value": "default" - }, + "allValue": null, + "current": { }, + "datasource": "$datasource", "hide": 0, - "label": "Data Source", - "name": "datasource", + "includeAll": true, + "label": "", + "multi": false, + "name": "rgw_servers", "options": [ ], - "query": "prometheus", + "query": "label_values(ceph_rgw_metadata{job=~\"$job\"}, ceph_daemon)", "refresh": 1, - "regex": "", - "type": "datasource" + "regex": "RGW Server", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, @@ -435,7 +476,6 @@ "refresh_intervals": [ "5s", "10s", - "15s", "30s", "1m", "5m", diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/rbd-details.json b/ceph/monitoring/ceph-mixin/dashboards_out/rbd-details.json index 7a9e1b56b..f64de312a 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/rbd-details.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/rbd-details.json @@ -42,7 +42,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$Datasource", + "datasource": "$datasource", "description": "", "fill": 1, "fillGradient": 0, @@ -80,14 +80,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_rbd_write_ops{pool=\"$Pool\", image=\"$Image\"}[30s])", + "expr": "rate(ceph_rbd_write_ops{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])", "format": 
"time_series", "intervalFactor": 1, "legendFormat": "{{pool}} Write", "refId": "A" }, { - "expr": "irate(ceph_rbd_read_ops{pool=\"$Pool\", image=\"$Image\"}[30s])", + "expr": "rate(ceph_rbd_read_ops{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{pool}} Read", @@ -135,7 +135,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$Datasource", + "datasource": "$datasource", "description": "", "fill": 1, "fillGradient": 0, @@ -173,14 +173,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_rbd_write_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])", + "expr": "rate(ceph_rbd_write_bytes{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{pool}} Write", "refId": "A" }, { - "expr": "irate(ceph_rbd_read_bytes{pool=\"$Pool\", image=\"$Image\"}[30s])", + "expr": "rate(ceph_rbd_read_bytes{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{pool}} Read", @@ -228,7 +228,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$Datasource", + "datasource": "$datasource", "description": "", "fill": 1, "fillGradient": 0, @@ -266,14 +266,14 @@ "steppedLine": false, "targets": [ { - "expr": "irate(ceph_rbd_write_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_write_latency_count{pool=\"$Pool\", image=\"$Image\"}[30s])", + "expr": "rate(ceph_rbd_write_latency_sum{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval]) /\n rate(ceph_rbd_write_latency_count{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{pool}} Write", "refId": "A" }, { - "expr": "irate(ceph_rbd_read_latency_sum{pool=\"$Pool\", image=\"$Image\"}[30s]) / irate(ceph_rbd_read_latency_count{pool=\"$Pool\", 
image=\"$Image\"}[30s])", + "expr": "rate(ceph_rbd_read_latency_sum{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval]) /\n rate(ceph_rbd_read_latency_count{job=~\"$job\", pool=\"$pool\", image=\"$image\"}[$__rate_interval])\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{pool}} Read", @@ -317,11 +317,13 @@ ] } ], - "refresh": false, + "refresh": "30s", "rows": [ ], "schemaVersion": 16, "style": "dark", - "tags": [ ], + "tags": [ + "ceph-mixin" + ], "templating": { "list": [ { @@ -330,23 +332,63 @@ "value": "default" }, "hide": 0, - "label": null, - "name": "Datasource", + "label": "Data Source", + "name": "datasource", "options": [ ], "query": "prometheus", "refresh": 1, "regex": "", "type": "datasource" }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + "includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, { "allValue": null, "current": { }, - "datasource": "$Datasource", + "datasource": "$datasource", "hide": 0, "includeAll": false, "label": "", "multi": false, - "name": "Pool", + "name": "pool", "options": [ ], "query": "label_values(pool)", "refresh": 1, @@ -361,12 +403,12 @@ { "allValue": null, "current": { }, - "datasource": "$Datasource", + "datasource": "$datasource", "hide": 0, "includeAll": false, "label": "", "multi": false, - "name": "Image", + "name": "image", 
"options": [ ], "query": "label_values(image)", "refresh": 1, diff --git a/ceph/monitoring/ceph-mixin/dashboards_out/rbd-overview.json b/ceph/monitoring/ceph-mixin/dashboards_out/rbd-overview.json index 29b82afa5..e017280e0 100644 --- a/ceph/monitoring/ceph-mixin/dashboards_out/rbd-overview.json +++ b/ceph/monitoring/ceph-mixin/dashboards_out/rbd-overview.json @@ -92,14 +92,14 @@ "steppedLine": false, "targets": [ { - "expr": "round(sum(irate(ceph_rbd_write_ops[30s])))", + "expr": "round(sum(rate(ceph_rbd_write_ops{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Writes", "refId": "A" }, { - "expr": "round(sum(irate(ceph_rbd_read_ops[30s])))", + "expr": "round(sum(rate(ceph_rbd_read_ops{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Reads", @@ -185,14 +185,14 @@ "steppedLine": false, "targets": [ { - "expr": "round(sum(irate(ceph_rbd_write_bytes[30s])))", + "expr": "round(sum(rate(ceph_rbd_write_bytes{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Write", "refId": "A" }, { - "expr": "round(sum(irate(ceph_rbd_read_bytes[30s])))", + "expr": "round(sum(rate(ceph_rbd_read_bytes{job=~\"$job\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 1, "legendFormat": "Read", @@ -278,14 +278,14 @@ "steppedLine": false, "targets": [ { - "expr": "round(sum(irate(ceph_rbd_write_latency_sum[30s])) / sum(irate(ceph_rbd_write_latency_count[30s])))", + "expr": "round(\n sum(rate(ceph_rbd_write_latency_sum{job=~\"$job\"}[$__rate_interval])) /\n sum(rate(ceph_rbd_write_latency_count{job=~\"$job\"}[$__rate_interval]))\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Write", "refId": "A" }, { - "expr": "round(sum(irate(ceph_rbd_read_latency_sum[30s])) / sum(irate(ceph_rbd_read_latency_count[30s])))", + "expr": "round(\n sum(rate(ceph_rbd_read_latency_sum{job=~\"$job\"}[$__rate_interval])) /\n 
sum(rate(ceph_rbd_read_latency_count{job=~\"$job\"}[$__rate_interval]))\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Read", @@ -416,7 +416,7 @@ ], "targets": [ { - "expr": "topk(10, (sort((irate(ceph_rbd_write_ops[30s]) + on (image, pool, namespace) irate(ceph_rbd_read_ops[30s])))))", + "expr": "topk(10,\n (\n sort((\n rate(ceph_rbd_write_ops{job=~\"$job\"}[$__rate_interval]) +\n on (image, pool, namespace) rate(ceph_rbd_read_ops{job=~\"$job\"}[$__rate_interval])\n ))\n )\n)\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -518,7 +518,7 @@ ], "targets": [ { - "expr": "topk(10, sort(sum(irate(ceph_rbd_read_bytes[30s]) + irate(ceph_rbd_write_bytes[30s])) by (pool, image, namespace)))", + "expr": "topk(10,\n sort(\n sum(\n rate(ceph_rbd_read_bytes{job=~\"$job\"}[$__rate_interval]) +\n rate(ceph_rbd_write_bytes{job=~\"$job\"}[$__rate_interval])\n ) by (pool, image, namespace)\n )\n)\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -620,7 +620,7 @@ ], "targets": [ { - "expr": "topk(10,\n sum(\n irate(ceph_rbd_write_latency_sum[30s]) / clamp_min(irate(ceph_rbd_write_latency_count[30s]), 1) +\n irate(ceph_rbd_read_latency_sum[30s]) / clamp_min(irate(ceph_rbd_read_latency_count[30s]), 1)\n ) by (pool, image, namespace)\n)", + "expr": "topk(10,\n sum(\n rate(ceph_rbd_write_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n clamp_min(rate(ceph_rbd_write_latency_count{job=~\"$job\"}[$__rate_interval]), 1) +\n rate(ceph_rbd_read_latency_sum{job=~\"$job\"}[$__rate_interval]) /\n clamp_min(rate(ceph_rbd_read_latency_count{job=~\"$job\"}[$__rate_interval]), 1)\n ) by (pool, image, namespace)\n)\n", "format": "table", "instant": true, "intervalFactor": 1, @@ -640,6 +640,7 @@ "schemaVersion": 16, "style": "dark", "tags": [ + "ceph-mixin", "overview" ], "templating": { @@ -657,6 +658,46 @@ "refresh": 1, "regex": "", "type": "datasource" + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 2, + 
"includeAll": true, + "label": "cluster", + "multi": true, + "name": "cluster", + "options": [ ], + "query": "label_values(ceph_osd_metadata, cluster)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "current": { }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ ], + "query": "label_values(ceph_osd_metadata{}, job)", + "refresh": 1, + "regex": "(.*)", + "sort": 1, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, @@ -668,7 +709,6 @@ "refresh_intervals": [ "5s", "10s", - "15s", "30s", "1m", "5m", diff --git a/ceph/monitoring/ceph-mixin/jsonnet-build.sh b/ceph/monitoring/ceph-mixin/jsonnet-build.sh deleted file mode 100755 index 8e229f9a3..000000000 --- a/ceph/monitoring/ceph-mixin/jsonnet-build.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh -ex - -JSONNET_VERSION="v0.18.0" -OUTPUT_DIR=${1:-$(pwd)} - -git clone -b ${JSONNET_VERSION} --depth 1 https://github.com/google/go-jsonnet.git -cd go-jsonnet -go build ./cmd/jsonnet -go build ./cmd/jsonnetfmt -mv jsonnet jsonnetfmt ${OUTPUT_DIR} diff --git a/ceph/monitoring/ceph-mixin/mixin.libsonnet b/ceph/monitoring/ceph-mixin/mixin.libsonnet index c89b2a916..3c983a300 100644 --- a/ceph/monitoring/ceph-mixin/mixin.libsonnet +++ b/ceph/monitoring/ceph-mixin/mixin.libsonnet @@ -1,3 +1,3 @@ (import 'config.libsonnet') + -(import 'dashboards/dashboards.libsonnet') + +(import 'dashboards.libsonnet') + (import 'alerts.libsonnet') diff --git a/ceph/monitoring/ceph-mixin/prometheus_alerts.libsonnet b/ceph/monitoring/ceph-mixin/prometheus_alerts.libsonnet new file mode 100644 index 000000000..bed89a879 --- /dev/null +++ b/ceph/monitoring/ceph-mixin/prometheus_alerts.libsonnet @@ -0,0 +1,718 @@ +{ + _config:: error 'must provide _config', + + MultiClusterQuery():: + if 
$._config.showMultiCluster + then 'cluster,' + else '', + + MultiClusterSummary():: + if $._config.showMultiCluster + then ' on cluster {{ $labels.cluster }}' + else '', + + groups+: [ + { + name: 'cluster health', + rules: [ + { + alert: 'CephHealthError', + 'for': '5m', + expr: 'ceph_health_status == 2', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.2.1' }, + annotations: { + summary: 'Ceph is in the ERROR state%(cluster)s' % $.MultiClusterSummary(), + description: "The cluster state has been HEALTH_ERROR for more than 5 minutes%(cluster)s. Please check 'ceph health detail' for more information." % $.MultiClusterSummary(), + }, + }, + { + alert: 'CephHealthWarning', + 'for': '15m', + expr: 'ceph_health_status == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + summary: 'Ceph is in the WARNING state%(cluster)s' % $.MultiClusterSummary(), + description: "The cluster state has been HEALTH_WARN for more than 15 minutes%(cluster)s. Please check 'ceph health detail' for more information." % $.MultiClusterSummary(), + }, + }, + ], + }, + { + name: 'mon', + rules: [ + { + alert: 'CephMonDownQuorumAtRisk', + 'for': '30s', + expr: ||| + ( + (ceph_health_detail{name="MON_DOWN"} == 1) * on() ( + count(ceph_mon_quorum_status == 1) == bool (floor(count(ceph_mon_metadata) / 2) + 1) + ) + ) == 1 + |||, + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.3.1' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down', + summary: 'Monitor quorum is at risk%(cluster)s' % $.MultiClusterSummary(), + description: '{{ $min := query "floor(count(ceph_mon_metadata) / 2) + 1" | first | value }}Quorum requires a majority of monitors (x {{ $min }}) to be active. Without quorum the cluster will become inoperable, affecting all services and connected clients. 
The following monitors are down: {{- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}', + }, + }, + { + alert: 'CephMonDown', + 'for': '30s', + expr: ||| + count(ceph_mon_quorum_status == 0) <= (count(ceph_mon_metadata) - floor(count(ceph_mon_metadata) / 2) + 1) + |||, + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down', + summary: 'One or more monitors down%(cluster)s' % $.MultiClusterSummary(), + description: ||| + {{ $down := query "count(ceph_mon_quorum_status == 0)" | first | value }}{{ $s := "" }}{{ if gt $down 1.0 }}{{ $s = "s" }}{{ end }}You have {{ $down }} monitor{{ $s }} down. Quorum is still intact, but the loss of an additional monitor will make your cluster inoperable. The following monitors are down: {{- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }} + |||, + }, + }, + { + alert: 'CephMonDiskspaceCritical', + 'for': '1m', + expr: 'ceph_health_detail{name="MON_DISK_CRIT"} == 1', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.3.2' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-crit', + summary: 'Filesystem space on at least one monitor is critically low%(cluster)s' % $.MultiClusterSummary(), + description: "The free space available to a monitor's store is critically low. You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. 
Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; {{- range query \"ceph_mon_metadata\"}} - {{ .Labels.hostname }} {{- end }}", + }, + }, + { + alert: 'CephMonDiskspaceLow', + 'for': '5m', + expr: 'ceph_health_detail{name="MON_DISK_LOW"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-low', + summary: 'Drive space on at least one monitor is approaching full%(cluster)s' % $.MultiClusterSummary(), + description: "The space available to a monitor's store is approaching full (>70% is the default). You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; {{- range query \"ceph_mon_metadata\"}} - {{ .Labels.hostname }} {{- end }}", + }, + }, + { + alert: 'CephMonClockSkew', + 'for': '1m', + expr: 'ceph_health_detail{name="MON_CLOCK_SKEW"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-clock-skew', + summary: 'Clock skew detected among monitors%(cluster)s' % $.MultiClusterSummary(), + description: "Ceph monitors rely on closely synchronized time to maintain quorum and cluster consistency. This event indicates that the time on at least one mon has drifted too far from the lead mon. Review cluster status with ceph -s. This will show which monitors are affected. 
Check the time sync status on each monitor host with 'ceph time-sync-status' and the state and peers of your ntpd or chrony daemon.", + }, + }, + ], + }, + { + name: 'osd', + rules: [ + { + alert: 'CephOSDDownHigh', + expr: 'count(ceph_osd_up == 0) / count(ceph_osd_up) * 100 >= 10', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.1' }, + annotations: { + summary: 'More than 10%% of OSDs are down%(cluster)s' % $.MultiClusterSummary(), + description: '{{ $value | humanize }}% or {{ with query "count(ceph_osd_up == 0)" }}{{ . | first | value }}{{ end }} of {{ with query "count(ceph_osd_up)" }}{{ . | first | value }}{{ end }} OSDs are down (>= 10%). The following OSDs are down: {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}', + }, + }, + { + alert: 'CephOSDHostDown', + 'for': '5m', + expr: 'ceph_health_detail{name="OSD_HOST_DOWN"} == 1', + labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.8' }, + annotations: { + summary: 'An OSD host is offline%(cluster)s' % $.MultiClusterSummary(), + description: 'The following OSDs are down: {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0" }} - {{ .Labels.hostname }} : {{ .Labels.ceph_daemon }} {{- end }}', + }, + }, + { + alert: 'CephOSDDown', + 'for': '5m', + expr: 'ceph_health_detail{name="OSD_DOWN"} == 1', + labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.2' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-down', + summary: 'An OSD has been marked down%(cluster)s' % $.MultiClusterSummary(), + description: ||| + {{ $num := query "count(ceph_osd_up == 0)" | first | value }}{{ $s := "" }}{{ if gt $num 1.0 }}{{ $s = "s" }}{{ end }}{{ $num }} OSD{{ $s }} down for over 5mins. 
The following OSD{{ $s }} {{ if eq $s "" }}is{{ else }}are{{ end }} down: {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0"}} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }} + |||, + }, + }, + { + alert: 'CephOSDNearFull', + 'for': '5m', + expr: 'ceph_health_detail{name="OSD_NEARFULL"} == 1', + labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.3' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-nearfull', + summary: 'OSD(s) running low on free space (NEARFULL)%(cluster)s' % $.MultiClusterSummary(), + description: "One or more OSDs have reached the NEARFULL threshold. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.", + }, + }, + { + alert: 'CephOSDFull', + 'for': '1m', + expr: 'ceph_health_detail{name="OSD_FULL"} > 0', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.6' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-full', + summary: 'OSD full, writes blocked%(cluster)s' % $.MultiClusterSummary(), + description: "An OSD has reached the FULL threshold. Writes to pools that share the affected OSD will be blocked. Use 'ceph health detail' and 'ceph osd df' to identify the problem. 
To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.", + }, + }, + { + alert: 'CephOSDBackfillFull', + 'for': '1m', + expr: 'ceph_health_detail{name="OSD_BACKFILLFULL"} > 0', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-backfillfull', + summary: 'OSD(s) too full for backfill operations%(cluster)s' % $.MultiClusterSummary(), + description: "An OSD has reached the BACKFILL FULL threshold. This will prevent rebalance operations from completing. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.", + }, + }, + { + alert: 'CephOSDTooManyRepairs', + 'for': '30s', + expr: 'ceph_health_detail{name="OSD_TOO_MANY_REPAIRS"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-too-many-repairs', + summary: 'OSD reports a high number of read errors%(cluster)s' % $.MultiClusterSummary(), + description: 'Reads from an OSD have used a secondary PG to return data to the client, indicating a potential failing drive.', + }, + }, + { + alert: 'CephOSDTimeoutsPublicNetwork', + 'for': '1m', + expr: 'ceph_health_detail{name="OSD_SLOW_PING_TIME_FRONT"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + summary: 'Network issues delaying OSD heartbeats (public network)%(cluster)s' % $.MultiClusterSummary(), + description: "OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network for latency or loss issues. 
Use 'ceph health detail' to show the affected OSDs.", + }, + }, + { + alert: 'CephOSDTimeoutsClusterNetwork', + 'for': '1m', + expr: 'ceph_health_detail{name="OSD_SLOW_PING_TIME_BACK"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + summary: 'Network issues delaying OSD heartbeats (cluster network)%(cluster)s' % $.MultiClusterSummary(), + description: "OSD heartbeats on the cluster's 'cluster' network (backend) are slow. Investigate the network for latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs.", + }, + }, + { + alert: 'CephOSDInternalDiskSizeMismatch', + 'for': '1m', + expr: 'ceph_health_detail{name="BLUESTORE_DISK_SIZE_MISMATCH"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-disk-size-mismatch', + summary: 'OSD size inconsistency error%(cluster)s' % $.MultiClusterSummary(), + description: 'One or more OSDs have an internal inconsistency between metadata and the size of the device. This could lead to the OSD(s) crashing in future. You should redeploy the affected OSDs.', + }, + }, + { + alert: 'CephDeviceFailurePredicted', + 'for': '1m', + expr: 'ceph_health_detail{name="DEVICE_HEALTH"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#id2', + summary: 'Device(s) predicted to fail soon%(cluster)s' % $.MultiClusterSummary(), + description: "The device health module has determined that one or more devices will fail soon. To review device status use 'ceph device ls'. To show a specific device use 'ceph device info '. Mark the OSD out so that data may migrate to other OSDs. 
Once the OSD has drained, destroy the OSD, replace the device, and redeploy the OSD.", + }, + }, + { + alert: 'CephDeviceFailurePredictionTooHigh', + 'for': '1m', + expr: 'ceph_health_detail{name="DEVICE_HEALTH_TOOMANY"} == 1', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.7' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany', + summary: 'Too many devices are predicted to fail, unable to resolve%(cluster)s' % $.MultiClusterSummary(), + description: 'The device health module has determined that devices predicted to fail can not be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availabililty. Prevent data integrity issues by adding new OSDs so that data may be relocated.', + }, + }, + { + alert: 'CephDeviceFailureRelocationIncomplete', + 'for': '1m', + expr: 'ceph_health_detail{name="DEVICE_HEALTH_IN_USE"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-in-use', + summary: 'Device failure is predicted, but unable to relocate data%(cluster)s' % $.MultiClusterSummary(), + description: 'The device health module has determined that one or more devices will fail soon, but the normal process of relocating the data on the device to other OSDs in the cluster is blocked. \nEnsure that the cluster has available free space. 
It may be necessary to add capacity to the cluster to allow data from the failing device to successfully migrate, or to enable the balancer.', + }, + }, + { + alert: 'CephOSDFlapping', + expr: '(rate(ceph_osd_up[5m]) * on(%(cluster)sceph_daemon) group_left(hostname) ceph_osd_metadata) * 60 > 1' % $.MultiClusterQuery(), + labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.4' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds', + summary: 'Network issues are causing OSDs to flap (mark each other down)%(cluster)s' % $.MultiClusterSummary(), + description: 'OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked down and back up {{ $value | humanize }} times once a minute for 5 minutes. This may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster network, or the public network if no cluster network is deployed. Check the network stats on the listed host(s).', + }, + }, + { + alert: 'CephOSDReadErrors', + 'for': '30s', + expr: 'ceph_health_detail{name="BLUESTORE_SPURIOUS_READ_ERRORS"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-spurious-read-errors', + summary: 'Device read errors detected%(cluster)s' % $.MultiClusterSummary(), + description: 'An OSD has encountered read errors, but the OSD has recovered by retrying the reads. 
This may indicate an issue with hardware or the kernel.', + }, + }, + { + alert: 'CephPGImbalance', + 'for': '5m', + expr: ||| + abs( + ((ceph_osd_numpg > 0) - on (%(cluster)sjob) group_left avg(ceph_osd_numpg > 0) by (%(cluster)sjob)) / + on (job) group_left avg(ceph_osd_numpg > 0) by (job) + ) * on (%(cluster)sceph_daemon) group_left(hostname) ceph_osd_metadata > 0.30 + ||| % [$.MultiClusterQuery(), $.MultiClusterQuery(), $.MultiClusterQuery()], + labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.5' }, + annotations: { + summary: 'PGs are not balanced across OSDs%(cluster)s' % $.MultiClusterSummary(), + description: 'OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} deviates by more than 30% from average PG count.', + }, + }, + ], + }, + { + name: 'mds', + rules: [ + { + alert: 'CephFilesystemDamaged', + 'for': '1m', + expr: 'ceph_health_detail{name="MDS_DAMAGE"} > 0', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.5.1' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages', + summary: 'CephFS filesystem is damaged%(cluster)s.' % $.MultiClusterSummary(), + description: 'Filesystem metadata has been corrupted. Data may be inaccessible. Analyze metrics from the MDS daemon admin socket, or escalate to support.', + }, + }, + { + alert: 'CephFilesystemOffline', + 'for': '1m', + expr: 'ceph_health_detail{name="MDS_ALL_DOWN"} > 0', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.5.3' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-all-down', + summary: 'CephFS filesystem is offline%(cluster)s' % $.MultiClusterSummary(), + description: 'All MDS ranks are unavailable. 
The MDS daemons managing metadata are down, rendering the filesystem offline.', + }, + }, + { + alert: 'CephFilesystemDegraded', + 'for': '1m', + expr: 'ceph_health_detail{name="FS_DEGRADED"} > 0', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.5.4' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-degraded', + summary: 'CephFS filesystem is degraded%(cluster)s' % $.MultiClusterSummary(), + description: 'One or more metadata daemons (MDS ranks) are failed or in a damaged state. At best the filesystem is partially available, at worst the filesystem is completely unusable.', + }, + }, + { + alert: 'CephFilesystemMDSRanksLow', + 'for': '1m', + expr: 'ceph_health_detail{name="MDS_UP_LESS_THAN_MAX"} > 0', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-up-less-than-max', + summary: 'Ceph MDS daemon count is lower than configured%(cluster)s' % $.MultiClusterSummary(), + description: "The filesystem's 'max_mds' setting defines the number of MDS ranks in the filesystem. The current number of active MDS daemons is less than this value.", + }, + }, + { + alert: 'CephFilesystemInsufficientStandby', + 'for': '1m', + expr: 'ceph_health_detail{name="MDS_INSUFFICIENT_STANDBY"} > 0', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-insufficient-standby', + summary: 'Ceph filesystem standby daemons too few%(cluster)s' % $.MultiClusterSummary(), + description: 'The minimum number of standby daemons required by standby_count_wanted is less than the current number of standby daemons. 
Adjust the standby count or increase the number of MDS daemons.', + }, + }, + { + alert: 'CephFilesystemFailureNoStandby', + 'for': '1m', + expr: 'ceph_health_detail{name="FS_WITH_FAILED_MDS"} > 0', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.5.5' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-with-failed-mds', + summary: 'MDS daemon failed, no further standby available%(cluster)s' % $.MultiClusterSummary(), + description: 'An MDS daemon has failed, leaving only one active rank and no available standby. Investigate the cause of the failure or add a standby MDS.', + }, + }, + { + alert: 'CephFilesystemReadOnly', + 'for': '1m', + expr: 'ceph_health_detail{name="MDS_HEALTH_READ_ONLY"} > 0', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.5.2' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages', + summary: 'CephFS filesystem in read only mode due to write error(s)%(cluster)s' % $.MultiClusterSummary(), + description: 'The filesystem has switched to READ ONLY due to an unexpected error when writing to the metadata pool. Either analyze the output from the MDS daemon admin socket, or escalate to support.', + }, + }, + ], + }, + { + name: 'mgr', + rules: [ + { + alert: 'CephMgrModuleCrash', + 'for': '5m', + expr: 'ceph_health_detail{name="RECENT_MGR_MODULE_CRASH"} == 1', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.6.1' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#recent-mgr-module-crash', + summary: 'A manager module has recently crashed%(cluster)s' % $.MultiClusterSummary(), + description: "One or more mgr modules have crashed and have yet to be acknowledged by an administrator. A crashed module may impact functionality within the cluster. 
Use the 'ceph crash' command to determine which module has failed, and archive it to acknowledge the failure.", + }, + }, + { + alert: 'CephMgrPrometheusModuleInactive', + 'for': '1m', + expr: 'up{job="ceph"} == 0', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.6.2' }, + annotations: { + summary: 'The mgr/prometheus module is not available%(cluster)s' % $.MultiClusterSummary(), + description: "The mgr/prometheus module at {{ $labels.instance }} is unreachable. This could mean that the module has been disabled or the mgr daemon itself is down. Without the mgr/prometheus module metrics and alerts will no longer function. Open a shell to an admin node or toolbox pod and use 'ceph -s' to determine whether the mgr is active. If the mgr is not active, restart it, otherwise you can determine module status with 'ceph mgr module ls'. If it is not listed as enabled, enable it with 'ceph mgr module enable prometheus'.", + }, + }, + ], + }, + { + name: 'pgs', + rules: [ + { + alert: 'CephPGsInactive', + 'for': '5m', + expr: 'ceph_pool_metadata * on(%(cluster)spool_id,instance) group_left() (ceph_pg_total - ceph_pg_active) > 0' % $.MultiClusterQuery(), + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.1' }, + annotations: { + summary: 'One or more placement groups are inactive%(cluster)s' % $.MultiClusterSummary(), + description: '{{ $value }} PGs have been inactive for more than 5 minutes in pool {{ $labels.name }}. 
Inactive placement groups are not able to serve read/write requests.', + }, + }, + { + alert: 'CephPGsUnclean', + 'for': '15m', + expr: 'ceph_pool_metadata * on(%(cluster)spool_id,instance) group_left() (ceph_pg_total - ceph_pg_clean) > 0' % $.MultiClusterQuery(), + labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.2' }, + annotations: { + summary: 'One or more placement groups are marked unclean%(cluster)s' % $.MultiClusterSummary(), + description: '{{ $value }} PGs have been unclean for more than 15 minutes in pool {{ $labels.name }}. Unclean PGs have not recovered from a previous failure.', + }, + }, + { + alert: 'CephPGsDamaged', + 'for': '5m', + expr: 'ceph_health_detail{name=~"PG_DAMAGED|OSD_SCRUB_ERRORS"} == 1', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.4' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-damaged', + summary: 'Placement group damaged, manual intervention needed%(cluster)s' % $.MultiClusterSummary(), + description: "During data consistency checks (scrub), at least one PG has been flagged as being damaged or inconsistent. Check to see which PG is affected, and attempt a manual repair if necessary. To list problematic placement groups, use 'rados list-inconsistent-pg '. To repair PGs use the 'ceph pg repair ' command.", + }, + }, + { + alert: 'CephPGRecoveryAtRisk', + 'for': '1m', + expr: 'ceph_health_detail{name="PG_RECOVERY_FULL"} == 1', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.5' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-recovery-full', + summary: 'OSDs are too full for recovery%(cluster)s' % $.MultiClusterSummary(), + description: "Data redundancy is at risk since one or more OSDs are at or above the 'full' threshold. 
Add more capacity to the cluster, restore down/out OSDs, or delete unwanted data.", + }, + }, + { + alert: 'CephPGUnavilableBlockingIO', + 'for': '1m', + expr: '((ceph_health_detail{name="PG_AVAILABILITY"} == 1) - scalar(ceph_health_detail{name="OSD_DOWN"})) == 1', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.3' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability', + summary: 'PG is unavailable%(cluster)s, blocking I/O' % $.MultiClusterSummary(), + description: "Data availability is reduced, impacting the cluster's ability to service I/O. One or more placement groups (PGs) are in a state that blocks I/O.", + }, + }, + { + alert: 'CephPGBackfillAtRisk', + 'for': '1m', + expr: 'ceph_health_detail{name="PG_BACKFILL_FULL"} == 1', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.6' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-backfill-full', + summary: 'Backfill operations are blocked due to lack of free space%(cluster)s' % $.MultiClusterSummary(), + description: "Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs have reached the 'backfillfull' threshold. Add more capacity, or delete unwanted data.", + }, + }, + { + alert: 'CephPGNotScrubbed', + 'for': '5m', + expr: 'ceph_health_detail{name="PG_NOT_SCRUBBED"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-scrubbed', + summary: 'Placement group(s) have not been scrubbed%(cluster)s' % $.MultiClusterSummary(), + description: "One or more PGs have not been scrubbed recently. Scrubs check metadata integrity, protecting against bit-rot. They check that metadata is consistent across data replicas. 
When PGs miss their scrub interval, it may indicate that the scrub window is too small, or PGs were not in a 'clean' state during the scrub window. You can manually initiate a scrub with: ceph pg scrub ", + }, + }, + { + alert: 'CephPGsHighPerOSD', + 'for': '1m', + expr: 'ceph_health_detail{name="TOO_MANY_PGS"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks/#too-many-pgs', + summary: 'Placement groups per OSD is too high%(cluster)s' % $.MultiClusterSummary(), + description: "The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting).\n Check that the pg_autoscaler has not been disabled for any pools with 'ceph osd pool autoscale-status', and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide the autoscaler based on the expected relative size of the pool ('ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') or set the pg_autoscaler mode to 'warn' and adjust pg_num appropriately for one or more pools.", + }, + }, + { + alert: 'CephPGNotDeepScrubbed', + 'for': '5m', + expr: 'ceph_health_detail{name="PG_NOT_DEEP_SCRUBBED"} == 1', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-deep-scrubbed', + summary: 'Placement group(s) have not been deep scrubbed%(cluster)s' % $.MultiClusterSummary(), + description: "One or more PGs have not been deep scrubbed recently. Deep scrubs protect against bit-rot. They compare data replicas to ensure consistency. 
When PGs miss their deep scrub interval, it may indicate that the window is too small or PGs were not in a 'clean' state during the deep-scrub window.", + }, + }, + ], + }, + { + name: 'nodes', + rules: [ + { + alert: 'CephNodeRootFilesystemFull', + 'for': '5m', + expr: 'node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"} * 100 < 5', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.8.1' }, + annotations: { + summary: 'Root filesystem is dangerously full%(cluster)s' % $.MultiClusterSummary(), + description: 'Root volume is dangerously full: {{ $value | humanize }}% free.', + }, + }, + { + alert: 'CephNodeNetworkPacketDrops', + expr: ||| + ( + rate(node_network_receive_drop_total{device!="lo"}[1m]) + + rate(node_network_transmit_drop_total{device!="lo"}[1m]) + ) / ( + rate(node_network_receive_packets_total{device!="lo"}[1m]) + + rate(node_network_transmit_packets_total{device!="lo"}[1m]) + ) >= %(CephNodeNetworkPacketDropsThreshold)s and ( + rate(node_network_receive_drop_total{device!="lo"}[1m]) + + rate(node_network_transmit_drop_total{device!="lo"}[1m]) + ) >= %(CephNodeNetworkPacketDropsPerSec)s + ||| % $._config, + labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.8.2' }, + annotations: { + summary: 'One or more NICs reports packet drops%(cluster)s' % $.MultiClusterSummary(), + description: 'Node {{ $labels.instance }} experiences packet drop > %(CephNodeNetworkPacketDropsThreshold)s%% or > %(CephNodeNetworkPacketDropsPerSec)s packets/s on interface {{ $labels.device }}.' 
% { CephNodeNetworkPacketDropsThreshold: $._config.CephNodeNetworkPacketDropsThreshold * 100, CephNodeNetworkPacketDropsPerSec: $._config.CephNodeNetworkPacketDropsPerSec }, + }, + }, + { + alert: 'CephNodeNetworkPacketErrors', + expr: ||| + ( + rate(node_network_receive_errs_total{device!="lo"}[1m]) + + rate(node_network_transmit_errs_total{device!="lo"}[1m]) + ) / ( + rate(node_network_receive_packets_total{device!="lo"}[1m]) + + rate(node_network_transmit_packets_total{device!="lo"}[1m]) + ) >= 0.0001 or ( + rate(node_network_receive_errs_total{device!="lo"}[1m]) + + rate(node_network_transmit_errs_total{device!="lo"}[1m]) + ) >= 10 + |||, + labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.8.3' }, + annotations: { + summary: 'One or more NICs reports packet errors%(cluster)s' % $.MultiClusterSummary(), + description: 'Node {{ $labels.instance }} experiences packet errors > 0.01% or > 10 packets/s on interface {{ $labels.device }}.', + }, + }, + { + alert: 'CephNodeDiskspaceWarning', + expr: 'predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5) *on(instance) group_left(nodename) node_uname_info < 0', + labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.8.4' }, + annotations: { + summary: 'Host filesystem free space is getting low%(cluster)s' % $.MultiClusterSummary(), + description: 'Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will be full in less than 5 days based on the 48 hour trailing fill rate.', + }, + }, + { + alert: 'CephNodeInconsistentMTU', + expr: 'node_network_mtu_bytes * (node_network_up{device!="lo"} > 0) == scalar( max by (device) (node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) != quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) )or node_network_mtu_bytes * (node_network_up{device!="lo"} > 0) == scalar( min by (device) (node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) != 
quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) )', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + summary: 'MTU settings across Ceph hosts are inconsistent%(cluster)s' % $.MultiClusterSummary(), + description: 'Node {{ $labels.instance }} has a different MTU size ({{ $value }}) than the median of devices named {{ $labels.device }}.', + }, + }, + ], + }, + { + name: 'pools', + rules: [ + { + alert: 'CephPoolGrowthWarning', + expr: '(predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(%(cluster)spool_id) group_right ceph_pool_metadata) >= 95' % $.MultiClusterQuery(), + labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.9.2' }, + annotations: { + summary: 'Pool growth rate may soon exceed capacity%(cluster)s' % $.MultiClusterSummary(), + description: "Pool '{{ $labels.name }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours.", + }, + }, + { + alert: 'CephPoolBackfillFull', + expr: 'ceph_health_detail{name="POOL_BACKFILLFULL"} > 0', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + summary: 'Free space in a pool is too low for recovery/backfill%(cluster)s' % $.MultiClusterSummary(), + description: 'A pool is approaching the near full threshold, which will prevent recovery/backfill operations from completing. Consider adding more capacity.', + }, + }, + { + alert: 'CephPoolFull', + 'for': '1m', + expr: 'ceph_health_detail{name="POOL_FULL"} > 0', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.9.1' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pool-full', + summary: 'Pool is full - writes are blocked%(cluster)s' % $.MultiClusterSummary(), + description: "A pool has reached its MAX quota, or OSDs supporting the pool have reached the FULL threshold. 
Until this is resolved, writes to the pool will be blocked. Pool Breakdown (top 5) {{- range query \"topk(5, sort_desc(ceph_pool_percent_used * on(pool_id) group_right ceph_pool_metadata))\" }} - {{ .Labels.name }} at {{ .Value }}% {{- end }} Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota max_bytes )", + }, + }, + { + alert: 'CephPoolNearFull', + 'for': '5m', + expr: 'ceph_health_detail{name="POOL_NEAR_FULL"} > 0', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + summary: 'One or more Ceph pools are nearly full%(cluster)s' % $.MultiClusterSummary(), + description: "A pool has exceeded the warning (percent full) threshold, or OSDs supporting the pool have reached the NEARFULL threshold. Writes may continue, but you are at risk of the pool going read-only if more capacity isn't made available. Determine the affected pool with 'ceph df detail', looking at QUOTA BYTES and STORED. Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota max_bytes ). 
Also ensure that the balancer is active.', + }, + }, + ], + }, + { + name: 'healthchecks', + rules: [ + { + alert: 'CephSlowOps', + 'for': '30s', + expr: 'ceph_healthcheck_slow_ops > 0', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops', + summary: 'OSD operations are slow to complete%(cluster)s' % $.MultiClusterSummary(), + description: '{{ $value }} OSD requests are taking too long to process (osd_op_complaint_time exceeded)', + }, + }, + ], + }, + { + name: 'cephadm', + rules: [ + { + alert: 'CephadmUpgradeFailed', + 'for': '30s', + expr: 'ceph_health_detail{name="UPGRADE_EXCEPTION"} > 0', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.11.2' }, + annotations: { + summary: 'Ceph version upgrade has failed%(cluster)s' % $.MultiClusterSummary(), + description: 'The cephadm cluster upgrade process has failed. The cluster remains in an undetermined state. Please review the cephadm logs, to understand the nature of the issue', + }, + }, + { + alert: 'CephadmDaemonFailed', + 'for': '30s', + expr: 'ceph_health_detail{name="CEPHADM_FAILED_DAEMON"} > 0', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.11.1' }, + annotations: { + summary: 'A ceph daemon managed by cephadm is down%(cluster)s' % $.MultiClusterSummary(), + description: "A daemon managed by cephadm is no longer active. Determine which daemon is down with 'ceph health detail'. 
you may start daemons with the 'ceph orch daemon start '", + }, + }, + { + alert: 'CephadmPaused', + 'for': '1m', + expr: 'ceph_health_detail{name="CEPHADM_PAUSED"} > 0', + labels: { severity: 'warning', type: 'ceph_default' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/cephadm/operations#cephadm-paused', + summary: 'Orchestration tasks via cephadm are PAUSED%(cluster)s' % $.MultiClusterSummary(), + description: "Cluster management has been paused manually. This will prevent the orchestrator from service management and reconciliation. If this is not intentional, resume cephadm operations with 'ceph orch resume'", + }, + }, + ], + }, + { + name: 'PrometheusServer', + rules: [ + { + alert: 'PrometheusJobMissing', + 'for': '30s', + expr: 'absent(up{job="ceph"})', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.12.1' }, + annotations: { + summary: 'The scrape job for Ceph is missing from Prometheus%(cluster)s' % $.MultiClusterSummary(), + description: "The prometheus job that scrapes from Ceph is no longer defined, this will effectively mean you'll have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance.", + }, + }, + ], + }, + { + name: 'rados', + rules: [ + { + alert: 'CephObjectMissing', + 'for': '30s', + expr: '(ceph_health_detail{name="OBJECT_UNFOUND"} == 1) * on() (count(ceph_osd_up == 1) == bool count(ceph_osd_metadata)) == 1', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.10.1' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#object-unfound', + summary: 'Object(s) marked UNFOUND%(cluster)s' % $.MultiClusterSummary(), + description: 'The latest version of a RADOS object can not be found, even though all OSDs are up. I/O requests for this object from clients will block (hang). 
Resolving this issue may require the object to be rolled back to a prior version manually, and manually verified.', + }, + }, + ], + }, + { + name: 'generic', + rules: [ + { + alert: 'CephDaemonCrash', + 'for': '1m', + expr: 'ceph_health_detail{name="RECENT_CRASH"} == 1', + labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.1.2' }, + annotations: { + documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks/#recent-crash', + summary: 'One or more Ceph daemons have crashed, and are pending acknowledgement%(cluster)s' % $.MultiClusterSummary(), + description: "One or more daemons have crashed recently, and need to be acknowledged. This notification ensures that software crashes do not go unseen. To acknowledge a crash, use the 'ceph crash archive ' command.", + }, + }, + ], + }, + ], +} diff --git a/ceph/monitoring/ceph-mixin/prometheus_alerts.yml b/ceph/monitoring/ceph-mixin/prometheus_alerts.yml index f56b58778..a544d41eb 100644 --- a/ceph/monitoring/ceph-mixin/prometheus_alerts.yml +++ b/ceph/monitoring/ceph-mixin/prometheus_alerts.yml @@ -1,901 +1,635 @@ groups: - - name: cluster health + - name: "cluster health" rules: - - alert: CephHealthError - expr: ceph_health_status == 2 - for: 5m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.2.1 - annotations: - summary: Cluster is in an ERROR state - description: > - Ceph in HEALTH_ERROR state for more than 5 minutes. - Please check "ceph health detail" for more information. - - - alert: CephHealthWarning - expr: ceph_health_status == 1 - for: 15m - labels: - severity: warning - type: ceph_default - annotations: - summary: Cluster is in a WARNING state - description: > - Ceph has been in HEALTH_WARN for more than 15 minutes. - Please check "ceph health detail" for more information. - - - name: mon + - alert: "CephHealthError" + annotations: + description: "The cluster state has been HEALTH_ERROR for more than 5 minutes. 
Please check 'ceph health detail' for more information." + summary: "Ceph is in the ERROR state" + expr: "ceph_health_status == 2" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.2.1" + severity: "critical" + type: "ceph_default" + - alert: "CephHealthWarning" + annotations: + description: "The cluster state has been HEALTH_WARN for more than 15 minutes. Please check 'ceph health detail' for more information." + summary: "Ceph is in the WARNING state" + expr: "ceph_health_status == 1" + for: "15m" + labels: + severity: "warning" + type: "ceph_default" + - name: "mon" rules: - - alert: CephMonDownQuorumAtRisk - expr: ((ceph_health_detail{name="MON_DOWN"} == 1) * on() (count(ceph_mon_quorum_status == 1) == bool (floor(count(ceph_mon_metadata) / 2) + 1))) == 1 - for: 30s - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.3.1 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down - summary: Monitor quorum is at risk - description: | - {{ $min := query "floor(count(ceph_mon_metadata) / 2) +1" | first | value }}Quorum requires a majority of monitors (x {{ $min }}) to be active - Without quorum the cluster will become inoperable, affecting all connected clients and services. 
- - The following monitors are down: - {{- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)" }} - - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} - {{- end }} - - alert: CephMonDown - expr: (count(ceph_mon_quorum_status == 0) <= (count(ceph_mon_metadata) - floor(count(ceph_mon_metadata) / 2) + 1)) - for: 30s - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down - summary: One of more ceph monitors are down - description: | - {{ $down := query "count(ceph_mon_quorum_status == 0)" | first | value }}{{ $s := "" }}{{ if gt $down 1.0 }}{{ $s = "s" }}{{ end }}You have {{ $down }} monitor{{ $s }} down. - Quorum is still intact, but the loss of further monitors will make your cluster inoperable. - - The following monitors are down: - {{- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)" }} - - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} - {{- end }} - - alert: CephMonDiskspaceCritical - expr: ceph_health_detail{name="MON_DISK_CRIT"} == 1 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.3.2 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-crit - summary: Disk space on at least one monitor is critically low - description: | - The free space available to a monitor's store is critically low (<5% by default). - You should increase the space available to the monitor(s). The - default location for the store sits under /var/lib/ceph. 
Your monitor hosts are; - {{- range query "ceph_mon_metadata"}} - - {{ .Labels.hostname }} - {{- end }} - - - alert: CephMonDiskspaceLow - expr: ceph_health_detail{name="MON_DISK_LOW"} == 1 - for: 5m - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-low - summary: Disk space on at least one monitor is approaching full - description: | - The space available to a monitor's store is approaching full (>70% is the default). - You should increase the space available to the monitor store. The - default location for the store sits under /var/lib/ceph. Your monitor hosts are; - {{- range query "ceph_mon_metadata"}} - - {{ .Labels.hostname }} - {{- end }} - - - alert: CephMonClockSkew - expr: ceph_health_detail{name="MON_CLOCK_SKEW"} == 1 - for: 1m - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-clock-skew - summary: Clock skew across the Monitor hosts detected - description: | - The ceph monitors rely on a consistent time reference to maintain - quorum and cluster consistency. This event indicates that at least - one of your mons is not sync'd correctly. - - Review the cluster status with ceph -s. This will show which monitors - are affected. Check the time sync status on each monitor host. - - - name: osd - rules: - - alert: CephOSDDownHigh - expr: count(ceph_osd_up == 0) / count(ceph_osd_up) * 100 >= 10 - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.4.1 + - alert: "CephMonDownQuorumAtRisk" annotations: - summary: More than 10% of OSDs are down - description: | - {{ $value | humanize }}% or {{ with query "count(ceph_osd_up == 0)" }}{{ . | first | value }}{{ end }} of {{ with query "count(ceph_osd_up)" }}{{ . | first | value }}{{ end }} OSDs are down (>= 10%). 
- - The following OSDs are down: - {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0" }} - - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} - {{- end }} - - alert: CephOSDHostDown - expr: ceph_health_detail{name="OSD_HOST_DOWN"} == 1 - for: 5m - labels: - severity: warning - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.4.8 - annotations: - summary: An OSD host is offline - description: | - The following OSDs are down: - {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0" }} - - {{ .Labels.hostname }} : {{ .Labels.ceph_daemon }} - {{- end }} - - alert: CephOSDDown - expr: ceph_health_detail{name="OSD_DOWN"} == 1 - for: 5m - labels: - severity: warning - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.4.2 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-down - summary: An OSD has been marked down/unavailable - description: | - {{ $num := query "count(ceph_osd_up == 0)" | first | value }}{{ $s := "" }}{{ if gt $num 1.0 }}{{ $s = "s" }}{{ end }}{{ $num }} OSD{{ $s }} down for over 5mins. - - The following OSD{{ $s }} {{ if eq $s "" }}is{{ else }}are{{ end }} down: - {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0"}} - - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} - {{- end }} - - - alert: CephOSDNearFull - expr: ceph_health_detail{name="OSD_NEARFULL"} == 1 - for: 5m - labels: - severity: warning - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.4.3 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-nearfull - summary: OSD(s) running low on free space (NEARFULL) - description: | - One or more OSDs have reached their NEARFULL threshold - - Use 'ceph health detail' to identify which OSDs have reached this threshold. 
- To resolve, either add capacity to the cluster, or delete unwanted data - - alert: CephOSDFull - expr: ceph_health_detail{name="OSD_FULL"} > 0 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.4.6 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-full - summary: OSD(s) is full, writes blocked - description: | - An OSD has reached it's full threshold. Writes from all pools that share the - affected OSD will be blocked. - - To resolve, either add capacity to the cluster, or delete unwanted data - - alert: CephOSDBackfillFull - expr: ceph_health_detail{name="OSD_BACKFILLFULL"} > 0 - for: 1m - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-backfillfull - summary: OSD(s) too full for backfill operations - description: | - An OSD has reached it's BACKFILL FULL threshold. This will prevent rebalance operations - completing for some pools. Check the current capacity utilisation with 'ceph df' - - To resolve, either add capacity to the cluster, or delete unwanted data - - alert: CephOSDTooManyRepairs - expr: ceph_health_detail{name="OSD_TOO_MANY_REPAIRS"} == 1 - for: 30s - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-too-many-repairs - summary: OSD has hit a high number of read errors - description: | - Reads from an OSD have used a secondary PG to return data to the client, indicating - a potential failing disk. - - alert: CephOSDTimeoutsPublicNetwork - expr: ceph_health_detail{name="OSD_SLOW_PING_TIME_FRONT"} == 1 - for: 1m + description: "{{ $min := query \"floor(count(ceph_mon_metadata) / 2) + 1\" | first | value }}Quorum requires a majority of monitors (x {{ $min }}) to be active. Without quorum the cluster will become inoperable, affecting all services and connected clients. 
The following monitors are down: {{- range query \"(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)\" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}" + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down" + summary: "Monitor quorum is at risk" + expr: | + ( + (ceph_health_detail{name="MON_DOWN"} == 1) * on() ( + count(ceph_mon_quorum_status == 1) == bool (floor(count(ceph_mon_metadata) / 2) + 1) + ) + ) == 1 + for: "30s" labels: - severity: warning - type: ceph_default + oid: "1.3.6.1.4.1.50495.1.2.1.3.1" + severity: "critical" + type: "ceph_default" + - alert: "CephMonDown" annotations: - summary: Network issues delaying OSD heartbeats (public network) description: | - OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network - for any latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs. - - alert: CephOSDTimeoutsClusterNetwork - expr: ceph_health_detail{name="OSD_SLOW_PING_TIME_BACK"} == 1 - for: 1m - labels: - severity: warning - type: ceph_default + {{ $down := query "count(ceph_mon_quorum_status == 0)" | first | value }}{{ $s := "" }}{{ if gt $down 1.0 }}{{ $s = "s" }}{{ end }}You have {{ $down }} monitor{{ $s }} down. Quorum is still intact, but the loss of an additional monitor will make your cluster inoperable. 
The following monitors are down: {{- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }} + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down" + summary: "One or more monitors down" + expr: | + count(ceph_mon_quorum_status == 0) <= (count(ceph_mon_metadata) - floor(count(ceph_mon_metadata) / 2) + 1) + for: "30s" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephMonDiskspaceCritical" + annotations: + description: "The free space available to a monitor's store is critically low. You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; {{- range query \"ceph_mon_metadata\"}} - {{ .Labels.hostname }} {{- end }}" + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-crit" + summary: "Filesystem space on at least one monitor is critically low" + expr: "ceph_health_detail{name=\"MON_DISK_CRIT\"} == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.3.2" + severity: "critical" + type: "ceph_default" + - alert: "CephMonDiskspaceLow" + annotations: + description: "The space available to a monitor's store is approaching full (>70% is the default). You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. 
Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; {{- range query \"ceph_mon_metadata\"}} - {{ .Labels.hostname }} {{- end }}" + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-low" + summary: "Drive space on at least one monitor is approaching full" + expr: "ceph_health_detail{name=\"MON_DISK_LOW\"} == 1" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephMonClockSkew" + annotations: + description: "Ceph monitors rely on closely synchronized time to maintain quorum and cluster consistency. This event indicates that the time on at least one mon has drifted too far from the lead mon. Review cluster status with ceph -s. This will show which monitors are affected. Check the time sync status on each monitor host with 'ceph time-sync-status' and the state and peers of your ntpd or chrony daemon." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-clock-skew" + summary: "Clock skew detected among monitors" + expr: "ceph_health_detail{name=\"MON_CLOCK_SKEW\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - name: "osd" + rules: + - alert: "CephOSDDownHigh" annotations: - summary: Network issues delaying OSD heartbeats (cluster network) - description: | - OSD heartbeats on the cluster's 'cluster' network (backend) are running slow. Investigate the network - for any latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs. - - alert: CephOSDInternalDiskSizeMismatch - expr: ceph_health_detail{name="BLUESTORE_DISK_SIZE_MISMATCH"} == 1 - for: 1m + description: "{{ $value | humanize }}% or {{ with query \"count(ceph_osd_up == 0)\" }}{{ . | first | value }}{{ end }} of {{ with query \"count(ceph_osd_up)\" }}{{ . | first | value }}{{ end }} OSDs are down (>= 10%). 
The following OSDs are down: {{- range query \"(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0\" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}" + summary: "More than 10% of OSDs are down" + expr: "count(ceph_osd_up == 0) / count(ceph_osd_up) * 100 >= 10" labels: - severity: warning - type: ceph_default + oid: "1.3.6.1.4.1.50495.1.2.1.4.1" + severity: "critical" + type: "ceph_default" + - alert: "CephOSDHostDown" annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-disk-size-mismatch - summary: OSD size inconsistency error - description: | - One or more OSDs have an internal inconsistency between the size of the physical device and it's metadata. - This could lead to the OSD(s) crashing in future. You should redeploy the effected OSDs. - - alert: CephDeviceFailurePredicted - expr: ceph_health_detail{name="DEVICE_HEALTH"} == 1 - for: 1m + description: "The following OSDs are down: {{- range query \"(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0\" }} - {{ .Labels.hostname }} : {{ .Labels.ceph_daemon }} {{- end }}" + summary: "An OSD host is offline" + expr: "ceph_health_detail{name=\"OSD_HOST_DOWN\"} == 1" + for: "5m" labels: - severity: warning - type: ceph_default + oid: "1.3.6.1.4.1.50495.1.2.1.4.8" + severity: "warning" + type: "ceph_default" + - alert: "CephOSDDown" annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#id2 - summary: Device(s) have been predicted to fail soon - description: | - The device health module has determined that one or more devices will fail - soon. To review the device states use 'ceph device ls'. To show a specific - device use 'ceph device info '. - - Mark the OSD as out (so data may migrate to other OSDs in the cluster). Once - the osd is empty remove and replace the OSD. 
- - alert: CephDeviceFailurePredictionTooHigh - expr: ceph_health_detail{name="DEVICE_HEALTH_TOOMANY"} == 1 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.4.7 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany - summary: Too many devices have been predicted to fail, unable to resolve description: | - The device health module has determined that the number of devices predicted to - fail can not be remediated automatically, since it would take too many osd's out of - the cluster, impacting performance and potentially availabililty. You should add new - OSDs to the cluster to allow data to be relocated to avoid the data integrity issues. - - alert: CephDeviceFailureRelocationIncomplete - expr: ceph_health_detail{name="DEVICE_HEALTH_IN_USE"} == 1 - for: 1m - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-in-use - summary: A device failure is predicted, but unable to relocate data - description: | - The device health module has determined that one or more devices will fail - soon, but the normal process of relocating the data on the device to other - OSDs in the cluster is blocked. - - Check the the cluster has available freespace. It may be necessary to add - more disks to the cluster to allow the data from the failing device to - successfully migrate. 
- - - alert: CephOSDFlapping - expr: | - ( - rate(ceph_osd_up[5m]) - * on(ceph_daemon) group_left(hostname) ceph_osd_metadata - ) * 60 > 1 - labels: - severity: warning - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.4.4 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds - summary: Network issues are causing OSD's to flap (mark each other out) - description: > - OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was - marked down and back up at {{ $value | humanize }} times once a - minute for 5 minutes. This could indicate a network issue (latency, - packet drop, disruption) on the clusters "cluster network". Check the - network environment on the listed host(s). - - - alert: CephOSDReadErrors - expr: ceph_health_detail{name="BLUESTORE_SPURIOUS_READ_ERRORS"} == 1 - for: 30s - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-spurious-read-errors - summary: Device read errors detected - description: > - An OSD has encountered read errors, but the OSD has recovered by retrying - the reads. This may indicate an issue with the Hardware or Kernel. - # alert on high deviation from average PG count - - alert: CephPGImbalance + {{ $num := query "count(ceph_osd_up == 0)" | first | value }}{{ $s := "" }}{{ if gt $num 1.0 }}{{ $s = "s" }}{{ end }}{{ $num }} OSD{{ $s }} down for over 5mins. 
The following OSD{{ $s }} {{ if eq $s "" }}is{{ else }}are{{ end }} down: {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0"}} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }} + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-down" + summary: "An OSD has been marked down" + expr: "ceph_health_detail{name=\"OSD_DOWN\"} == 1" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.2" + severity: "warning" + type: "ceph_default" + - alert: "CephOSDNearFull" + annotations: + description: "One or more OSDs have reached the NEARFULL threshold. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-nearfull" + summary: "OSD(s) running low on free space (NEARFULL)" + expr: "ceph_health_detail{name=\"OSD_NEARFULL\"} == 1" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.3" + severity: "warning" + type: "ceph_default" + - alert: "CephOSDFull" + annotations: + description: "An OSD has reached the FULL threshold. Writes to pools that share the affected OSD will be blocked. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-full" + summary: "OSD full, writes blocked" + expr: "ceph_health_detail{name=\"OSD_FULL\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.6" + severity: "critical" + type: "ceph_default" + - alert: "CephOSDBackfillFull" + annotations: + description: "An OSD has reached the BACKFILL FULL threshold. This will prevent rebalance operations from completing. Use 'ceph health detail' and 'ceph osd df' to identify the problem. 
To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-backfillfull" + summary: "OSD(s) too full for backfill operations" + expr: "ceph_health_detail{name=\"OSD_BACKFILLFULL\"} > 0" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephOSDTooManyRepairs" + annotations: + description: "Reads from an OSD have used a secondary PG to return data to the client, indicating a potential failing drive." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-too-many-repairs" + summary: "OSD reports a high number of read errors" + expr: "ceph_health_detail{name=\"OSD_TOO_MANY_REPAIRS\"} == 1" + for: "30s" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephOSDTimeoutsPublicNetwork" + annotations: + description: "OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network for latency or loss issues. Use 'ceph health detail' to show the affected OSDs." + summary: "Network issues delaying OSD heartbeats (public network)" + expr: "ceph_health_detail{name=\"OSD_SLOW_PING_TIME_FRONT\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephOSDTimeoutsClusterNetwork" + annotations: + description: "OSD heartbeats on the cluster's 'cluster' network (backend) are slow. Investigate the network for latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs." + summary: "Network issues delaying OSD heartbeats (cluster network)" + expr: "ceph_health_detail{name=\"OSD_SLOW_PING_TIME_BACK\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephOSDInternalDiskSizeMismatch" + annotations: + description: "One or more OSDs have an internal inconsistency between metadata and the size of the device. This could lead to the OSD(s) crashing in future. 
You should redeploy the affected OSDs." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-disk-size-mismatch" + summary: "OSD size inconsistency error" + expr: "ceph_health_detail{name=\"BLUESTORE_DISK_SIZE_MISMATCH\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephDeviceFailurePredicted" + annotations: + description: "The device health module has determined that one or more devices will fail soon. To review device status use 'ceph device ls'. To show a specific device use 'ceph device info '. Mark the OSD out so that data may migrate to other OSDs. Once the OSD has drained, destroy the OSD, replace the device, and redeploy the OSD." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#id2" + summary: "Device(s) predicted to fail soon" + expr: "ceph_health_detail{name=\"DEVICE_HEALTH\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephDeviceFailurePredictionTooHigh" + annotations: + description: "The device health module has determined that devices predicted to fail cannot be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availability. Prevent data integrity issues by adding new OSDs so that data may be relocated." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany" + summary: "Too many devices are predicted to fail, unable to resolve" + expr: "ceph_health_detail{name=\"DEVICE_HEALTH_TOOMANY\"} == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.7" + severity: "critical" + type: "ceph_default" + - alert: "CephDeviceFailureRelocationIncomplete" + annotations: + description: "The device health module has determined that one or more devices will fail soon, but the normal process of relocating the data on the device to other OSDs in the cluster is blocked. 
\nEnsure that the cluster has available free space. It may be necessary to add capacity to the cluster to allow data from the failing device to successfully migrate, or to enable the balancer." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-in-use" + summary: "Device failure is predicted, but unable to relocate data" + expr: "ceph_health_detail{name=\"DEVICE_HEALTH_IN_USE\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephOSDFlapping" + annotations: + description: "OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked down and back up {{ $value | humanize }} times once a minute for 5 minutes. This may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster network, or the public network if no cluster network is deployed. Check the network stats on the listed host(s)." + documentation: "https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds" + summary: "Network issues are causing OSDs to flap (mark each other down)" + expr: "(rate(ceph_osd_up[5m]) * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) * 60 > 1" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.4" + severity: "warning" + type: "ceph_default" + - alert: "CephOSDReadErrors" + annotations: + description: "An OSD has encountered read errors, but the OSD has recovered by retrying the reads. This may indicate an issue with hardware or the kernel." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-spurious-read-errors" + summary: "Device read errors detected" + expr: "ceph_health_detail{name=\"BLUESTORE_SPURIOUS_READ_ERRORS\"} == 1" + for: "30s" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephPGImbalance" + annotations: + description: "OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} deviates by more than 30% from average PG count." 
+ summary: "PGs are not balanced across OSDs" expr: | abs( - ( - (ceph_osd_numpg > 0) - on (job) group_left avg(ceph_osd_numpg > 0) by (job) - ) / on (job) group_left avg(ceph_osd_numpg > 0) by (job) - ) * on(ceph_daemon) group_left(hostname) ceph_osd_metadata > 0.30 - for: 5m - labels: - severity: warning - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.4.5 - annotations: - summary: PG allocations are not balanced across devices - description: > - OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} deviates - by more than 30% from average PG count. - # alert on high commit latency...but how high is too high - - - name: mds + ((ceph_osd_numpg > 0) - on (job) group_left avg(ceph_osd_numpg > 0) by (job)) / + on (job) group_left avg(ceph_osd_numpg > 0) by (job) + ) * on (ceph_daemon) group_left(hostname) ceph_osd_metadata > 0.30 + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.4.5" + severity: "warning" + type: "ceph_default" + - name: "mds" rules: - - alert: CephFilesystemDamaged - expr: ceph_health_detail{name="MDS_DAMAGE"} > 0 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.5.1 - annotations: - documentation: https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages - summary: Ceph filesystem is damaged. - description: > - The filesystems metadata has been corrupted. Data access - may be blocked. - - Either analyse the output from the mds daemon admin socket, or - escalate to support - - alert: CephFilesystemOffline - expr: ceph_health_detail{name="MDS_ALL_DOWN"} > 0 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.5.3 - annotations: - documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-all-down - summary: Ceph filesystem is offline - description: > - All MDS ranks are unavailable. The ceph daemons providing the metadata - for the Ceph filesystem are all down, rendering the filesystem offline. 
- - alert: CephFilesystemDegraded - expr: ceph_health_detail{name="FS_DEGRADED"} > 0 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.5.4 - annotations: - documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-degraded - summary: Ceph filesystem is degraded - description: > - One or more metadata daemons (MDS ranks) are failed or in a - damaged state. At best the filesystem is partially available, - worst case is the filesystem is completely unusable. - - alert: CephFilesystemMDSRanksLow - expr: ceph_health_detail{name="MDS_UP_LESS_THAN_MAX"} > 0 - for: 1m - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-up-less-than-max - summary: Ceph MDS daemon count is lower than configured - description: > - The filesystem's "max_mds" setting defined the number of MDS ranks in - the filesystem. The current number of active MDS daemons is less than - this setting. - - alert: CephFilesystemInsufficientStandby - expr: ceph_health_detail{name="MDS_INSUFFICIENT_STANDBY"} > 0 - for: 1m - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-insufficient-standby - summary: Ceph filesystem standby daemons too low - description: > - The minimum number of standby daemons determined by standby_count_wanted - is less than the actual number of standby daemons. Adjust the standby count - or increase the number of mds daemons within the filesystem. 
- - alert: CephFilesystemFailureNoStandby - expr: ceph_health_detail{name="FS_WITH_FAILED_MDS"} > 0 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.5.5 - annotations: - documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-with-failed-mds - summary: Ceph MDS daemon failed, no further standby available - description: > - An MDS daemon has failed, leaving only one active rank without - further standby. Investigate the cause of the failure or add a - standby daemon - - alert: CephFilesystemReadOnly - expr: ceph_health_detail{name="MDS_HEALTH_READ_ONLY"} > 0 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.5.2 - annotations: - documentation: https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages - summary: Ceph filesystem in read only mode, due to write error(s) - description: > - The filesystem has switched to READ ONLY due to an unexpected - write error, when writing to the metadata pool - - Either analyse the output from the mds daemon admin socket, or - escalate to support - - - name: mgr + - alert: "CephFilesystemDamaged" + annotations: + description: "Filesystem metadata has been corrupted. Data may be inaccessible. Analyze metrics from the MDS daemon admin socket, or escalate to support." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages" + summary: "CephFS filesystem is damaged." + expr: "ceph_health_detail{name=\"MDS_DAMAGE\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.5.1" + severity: "critical" + type: "ceph_default" + - alert: "CephFilesystemOffline" + annotations: + description: "All MDS ranks are unavailable. The MDS daemons managing metadata are down, rendering the filesystem offline." 
+ documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-all-down" + summary: "CephFS filesystem is offline" + expr: "ceph_health_detail{name=\"MDS_ALL_DOWN\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.5.3" + severity: "critical" + type: "ceph_default" + - alert: "CephFilesystemDegraded" + annotations: + description: "One or more metadata daemons (MDS ranks) are failed or in a damaged state. At best the filesystem is partially available, at worst the filesystem is completely unusable." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-degraded" + summary: "CephFS filesystem is degraded" + expr: "ceph_health_detail{name=\"FS_DEGRADED\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.5.4" + severity: "critical" + type: "ceph_default" + - alert: "CephFilesystemMDSRanksLow" + annotations: + description: "The filesystem's 'max_mds' setting defines the number of MDS ranks in the filesystem. The current number of active MDS daemons is less than this value." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-up-less-than-max" + summary: "Ceph MDS daemon count is lower than configured" + expr: "ceph_health_detail{name=\"MDS_UP_LESS_THAN_MAX\"} > 0" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephFilesystemInsufficientStandby" + annotations: + description: "The minimum number of standby daemons required by standby_count_wanted is less than the current number of standby daemons. Adjust the standby count or increase the number of MDS daemons." 
+ documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-insufficient-standby" + summary: "Ceph filesystem standby daemons too few" + expr: "ceph_health_detail{name=\"MDS_INSUFFICIENT_STANDBY\"} > 0" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephFilesystemFailureNoStandby" + annotations: + description: "An MDS daemon has failed, leaving only one active rank and no available standby. Investigate the cause of the failure or add a standby MDS." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-with-failed-mds" + summary: "MDS daemon failed, no further standby available" + expr: "ceph_health_detail{name=\"FS_WITH_FAILED_MDS\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.5.5" + severity: "critical" + type: "ceph_default" + - alert: "CephFilesystemReadOnly" + annotations: + description: "The filesystem has switched to READ ONLY due to an unexpected error when writing to the metadata pool. Either analyze the output from the MDS daemon admin socket, or escalate to support." + documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages" + summary: "CephFS filesystem in read only mode due to write error(s)" + expr: "ceph_health_detail{name=\"MDS_HEALTH_READ_ONLY\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.5.2" + severity: "critical" + type: "ceph_default" + - name: "mgr" rules: - - alert: CephMgrModuleCrash - expr: ceph_health_detail{name="RECENT_MGR_MODULE_CRASH"} == 1 - for: 5m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.6.1 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#recent-mgr-module-crash - summary: A mgr module has recently crashed - description: > - One or more mgr modules have crashed and are yet to be acknowledged by the administrator. A - crashed module may impact functionality within the cluster. 
Use the 'ceph crash' commands to - investigate which module has failed, and archive it to acknowledge the failure. - - alert: CephMgrPrometheusModuleInactive - expr: up{job="ceph"} == 0 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.6.2 - annotations: - summary: Ceph's mgr/prometheus module is not available - description: > - The mgr/prometheus module at {{ $labels.instance }} is unreachable. This - could mean that the module has been disabled or the mgr itself is down. - - Without the mgr/prometheus module metrics and alerts will no longer - function. Open a shell to ceph and use 'ceph -s' to to determine whether the - mgr is active. If the mgr is not active, restart it, otherwise you can check - the mgr/prometheus module is loaded with 'ceph mgr module ls' and if it's - not listed as enabled, enable it with 'ceph mgr module enable prometheus' - - - name: pgs + - alert: "CephMgrModuleCrash" + annotations: + description: "One or more mgr modules have crashed and have yet to be acknowledged by an administrator. A crashed module may impact functionality within the cluster. Use the 'ceph crash' command to determine which module has failed, and archive it to acknowledge the failure." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#recent-mgr-module-crash" + summary: "A manager module has recently crashed" + expr: "ceph_health_detail{name=\"RECENT_MGR_MODULE_CRASH\"} == 1" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.6.1" + severity: "critical" + type: "ceph_default" + - alert: "CephMgrPrometheusModuleInactive" + annotations: + description: "The mgr/prometheus module at {{ $labels.instance }} is unreachable. This could mean that the module has been disabled or the mgr daemon itself is down. Without the mgr/prometheus module metrics and alerts will no longer function. Open a shell to an admin node or toolbox pod and use 'ceph -s' to determine whether the mgr is active. 
If the mgr is not active, restart it, otherwise you can determine module status with 'ceph mgr module ls'. If it is not listed as enabled, enable it with 'ceph mgr module enable prometheus'." + summary: "The mgr/prometheus module is not available" + expr: "up{job=\"ceph\"} == 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.6.2" + severity: "critical" + type: "ceph_default" + - name: "pgs" rules: - - alert: CephPGsInactive - expr: ceph_pool_metadata * on(pool_id,instance) group_left() (ceph_pg_total - ceph_pg_active) > 0 - for: 5m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.7.1 - annotations: - summary: One or more Placement Groups are inactive - description: > - {{ $value }} PGs have been inactive for more than 5 minutes in pool {{ $labels.name }}. - Inactive placement groups aren't able to serve read/write - requests. - - alert: CephPGsUnclean - expr: ceph_pool_metadata * on(pool_id,instance) group_left() (ceph_pg_total - ceph_pg_clean) > 0 - for: 15m - labels: - severity: warning - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.7.2 - annotations: - summary: One or more platcment groups are marked unclean - description: > - {{ $value }} PGs haven't been clean for more than 15 minutes in pool {{ $labels.name }}. - Unclean PGs haven't been able to completely recover from a previous failure. - - alert: CephPGsDamaged - expr: ceph_health_detail{name=~"PG_DAMAGED|OSD_SCRUB_ERRORS"} == 1 - for: 5m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.7.4 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-damaged - summary: Placement group damaged, manual intervention needed - description: > - During data consistency checks (scrub), at least one PG has been flagged as being - damaged or inconsistent. - - Check to see which PG is affected, and attempt a manual repair if necessary. 
To list - problematic placement groups, use 'rados list-inconsistent-pg '. To repair PGs use - the 'ceph pg repair ' command. - - alert: CephPGRecoveryAtRisk - expr: ceph_health_detail{name="PG_RECOVERY_FULL"} == 1 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.7.5 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-recovery-full - summary: OSDs are too full for automatic recovery - description: > - Data redundancy may be reduced, or is at risk, since one or more OSDs are at or above their - 'full' threshold. Add more capacity to the cluster, or delete unwanted data. - - alert: CephPGUnavilableBlockingIO - # PG_AVAILABILITY, but an OSD is not in a DOWN state - expr: ((ceph_health_detail{name="PG_AVAILABILITY"} == 1) - scalar(ceph_health_detail{name="OSD_DOWN"})) == 1 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.7.3 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability - summary: Placement group is unavailable, blocking some I/O - description: > - Data availability is reduced impacting the clusters ability to service I/O to some data. One or - more placement groups (PGs) are in a state that blocks IO. - - alert: CephPGBackfillAtRisk - expr: ceph_health_detail{name="PG_BACKFILL_FULL"} == 1 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.7.6 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-backfill-full - summary: Backfill operations are blocked, due to lack of freespace - description: > - Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs - have breached their 'backfillfull' threshold. Add more capacity, or delete unwanted data. 
- - alert: CephPGNotScrubbed - expr: ceph_health_detail{name="PG_NOT_SCRUBBED"} == 1 - for: 5m - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-scrubbed - summary: Placement group(s) have not been scrubbed - description: | - One or more PGs have not been scrubbed recently. The scrub process is a data integrity - feature, protectng against bit-rot. It checks that objects and their metadata (size and - attributes) match across object replicas. When PGs miss their scrub window, it may - indicate the scrub window is too small, or PGs were not in a 'clean' state during the - scrub window. - - You can manually initiate a scrub with: ceph pg scrub - - alert: CephPGsHighPerOSD - expr: ceph_health_detail{name="TOO_MANY_PGS"} == 1 - for: 1m - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks/#too-many-pgs - summary: Placement groups per OSD is too high - description: | - The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting). - - Check that the pg_autoscaler hasn't been disabled for any of the pools, with 'ceph osd pool autoscale-status' - and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide - the autoscaler based on the expected relative size of the pool - (i.e. 'ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') - - alert: CephPGNotDeepScrubbed - expr: ceph_health_detail{name="PG_NOT_DEEP_SCRUBBED"} == 1 - for: 5m - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-deep-scrubbed - summary: Placement group(s) have not been deep scrubbed - description: | - One or more PGs have not been deep scrubbed recently. Deep scrub is a data integrity - feature, protectng against bit-rot. 
It compares the contents of objects and their - replicas for inconsistency. When PGs miss their deep scrub window, it may indicate - that the window is too small or PGs were not in a 'clean' state during the deep-scrub - window. - - You can manually initiate a deep scrub with: ceph pg deep-scrub - - - name: nodes + - alert: "CephPGsInactive" + annotations: + description: "{{ $value }} PGs have been inactive for more than 5 minutes in pool {{ $labels.name }}. Inactive placement groups are not able to serve read/write requests." + summary: "One or more placement groups are inactive" + expr: "ceph_pool_metadata * on(pool_id,instance) group_left() (ceph_pg_total - ceph_pg_active) > 0" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.1" + severity: "critical" + type: "ceph_default" + - alert: "CephPGsUnclean" + annotations: + description: "{{ $value }} PGs have been unclean for more than 15 minutes in pool {{ $labels.name }}. Unclean PGs have not recovered from a previous failure." + summary: "One or more placement groups are marked unclean" + expr: "ceph_pool_metadata * on(pool_id,instance) group_left() (ceph_pg_total - ceph_pg_clean) > 0" + for: "15m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.2" + severity: "warning" + type: "ceph_default" + - alert: "CephPGsDamaged" + annotations: + description: "During data consistency checks (scrub), at least one PG has been flagged as being damaged or inconsistent. Check to see which PG is affected, and attempt a manual repair if necessary. To list problematic placement groups, use 'rados list-inconsistent-pg '. To repair PGs use the 'ceph pg repair ' command." 
+ documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-damaged" + summary: "Placement group damaged, manual intervention needed" + expr: "ceph_health_detail{name=~\"PG_DAMAGED|OSD_SCRUB_ERRORS\"} == 1" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.4" + severity: "critical" + type: "ceph_default" + - alert: "CephPGRecoveryAtRisk" + annotations: + description: "Data redundancy is at risk since one or more OSDs are at or above the 'full' threshold. Add more capacity to the cluster, restore down/out OSDs, or delete unwanted data." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-recovery-full" + summary: "OSDs are too full for recovery" + expr: "ceph_health_detail{name=\"PG_RECOVERY_FULL\"} == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.5" + severity: "critical" + type: "ceph_default" + - alert: "CephPGUnavilableBlockingIO" + annotations: + description: "Data availability is reduced, impacting the cluster's ability to service I/O. One or more placement groups (PGs) are in a state that blocks I/O." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability" + summary: "PG is unavailable, blocking I/O" + expr: "((ceph_health_detail{name=\"PG_AVAILABILITY\"} == 1) - scalar(ceph_health_detail{name=\"OSD_DOWN\"})) == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.3" + severity: "critical" + type: "ceph_default" + - alert: "CephPGBackfillAtRisk" + annotations: + description: "Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs have reached the 'backfillfull' threshold. Add more capacity, or delete unwanted data." 
+ documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-backfill-full" + summary: "Backfill operations are blocked due to lack of free space" + expr: "ceph_health_detail{name=\"PG_BACKFILL_FULL\"} == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.7.6" + severity: "critical" + type: "ceph_default" + - alert: "CephPGNotScrubbed" + annotations: + description: "One or more PGs have not been scrubbed recently. Scrubs check metadata integrity, protecting against bit-rot. They check that metadata is consistent across data replicas. When PGs miss their scrub interval, it may indicate that the scrub window is too small, or PGs were not in a 'clean' state during the scrub window. You can manually initiate a scrub with: ceph pg scrub " + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-scrubbed" + summary: "Placement group(s) have not been scrubbed" + expr: "ceph_health_detail{name=\"PG_NOT_SCRUBBED\"} == 1" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephPGsHighPerOSD" + annotations: + description: "The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting).\n Check that the pg_autoscaler has not been disabled for any pools with 'ceph osd pool autoscale-status', and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide the autoscaler based on the expected relative size of the pool ('ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') or set the pg_autoscaler mode to 'warn' and adjust pg_num appropriately for one or more pools." 
+ documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks/#too-many-pgs" + summary: "Placement groups per OSD is too high" + expr: "ceph_health_detail{name=\"TOO_MANY_PGS\"} == 1" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephPGNotDeepScrubbed" + annotations: + description: "One or more PGs have not been deep scrubbed recently. Deep scrubs protect against bit-rot. They compare data replicas to ensure consistency. When PGs miss their deep scrub interval, it may indicate that the window is too small or PGs were not in a 'clean' state during the deep-scrub window." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-deep-scrubbed" + summary: "Placement group(s) have not been deep scrubbed" + expr: "ceph_health_detail{name=\"PG_NOT_DEEP_SCRUBBED\"} == 1" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - name: "nodes" rules: - - alert: CephNodeRootFilesystemFull - expr: node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"} * 100 < 5 - for: 5m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.8.1 - annotations: - summary: Root filesystem is dangerously full - description: > - Root volume (OSD and MON store) is dangerously full: {{ $value | humanize }}% free. - - # alert on nic packet errors and drops rates > 1% packets/s - - alert: CephNodeNetworkPacketDrops + - alert: "CephNodeRootFilesystemFull" + annotations: + description: "Root volume is dangerously full: {{ $value | humanize }}% free." 
+ summary: "Root filesystem is dangerously full" + expr: "node_filesystem_avail_bytes{mountpoint=\"/\"} / node_filesystem_size_bytes{mountpoint=\"/\"} * 100 < 5" + for: "5m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.8.1" + severity: "critical" + type: "ceph_default" + - alert: "CephNodeNetworkPacketDrops" + annotations: + description: "Node {{ $labels.instance }} experiences packet drop > 0.5% or > 10 packets/s on interface {{ $labels.device }}." + summary: "One or more NICs reports packet drops" expr: | ( - increase(node_network_receive_drop_total{device!="lo"}[1m]) + - increase(node_network_transmit_drop_total{device!="lo"}[1m]) + rate(node_network_receive_drop_total{device!="lo"}[1m]) + + rate(node_network_transmit_drop_total{device!="lo"}[1m]) ) / ( - increase(node_network_receive_packets_total{device!="lo"}[1m]) + - increase(node_network_transmit_packets_total{device!="lo"}[1m]) - ) >= 0.0001 or ( - increase(node_network_receive_drop_total{device!="lo"}[1m]) + - increase(node_network_transmit_drop_total{device!="lo"}[1m]) + rate(node_network_receive_packets_total{device!="lo"}[1m]) + + rate(node_network_transmit_packets_total{device!="lo"}[1m]) + ) >= 0.0050000000000000001 and ( + rate(node_network_receive_drop_total{device!="lo"}[1m]) + + rate(node_network_transmit_drop_total{device!="lo"}[1m]) ) >= 10 labels: - severity: warning - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.8.2 + oid: "1.3.6.1.4.1.50495.1.2.1.8.2" + severity: "warning" + type: "ceph_default" + - alert: "CephNodeNetworkPacketErrors" annotations: - summary: One or more Nics is seeing packet drops - description: > - Node {{ $labels.instance }} experiences packet drop > 0.01% or > - 10 packets/s on interface {{ $labels.device }}. - - - alert: CephNodeNetworkPacketErrors + description: "Node {{ $labels.instance }} experiences packet errors > 0.01% or > 10 packets/s on interface {{ $labels.device }}." 
+ summary: "One or more NICs reports packet errors" expr: | ( - increase(node_network_receive_errs_total{device!="lo"}[1m]) + - increase(node_network_transmit_errs_total{device!="lo"}[1m]) + rate(node_network_receive_errs_total{device!="lo"}[1m]) + + rate(node_network_transmit_errs_total{device!="lo"}[1m]) ) / ( - increase(node_network_receive_packets_total{device!="lo"}[1m]) + - increase(node_network_transmit_packets_total{device!="lo"}[1m]) + rate(node_network_receive_packets_total{device!="lo"}[1m]) + + rate(node_network_transmit_packets_total{device!="lo"}[1m]) ) >= 0.0001 or ( - increase(node_network_receive_errs_total{device!="lo"}[1m]) + - increase(node_network_transmit_errs_total{device!="lo"}[1m]) + rate(node_network_receive_errs_total{device!="lo"}[1m]) + + rate(node_network_transmit_errs_total{device!="lo"}[1m]) ) >= 10 labels: - severity: warning - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.8.3 - annotations: - summary: One or more Nics is seeing packet errors - description: > - Node {{ $labels.instance }} experiences packet errors > 0.01% or - > 10 packets/s on interface {{ $labels.device }}. - - # Restrict to device names beginning with '/' to skip false alarms from - # tmpfs, overlay type filesystems - - alert: CephNodeDiskspaceWarning - expr: | - predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5) * - on(instance) group_left(nodename) node_uname_info < 0 - labels: - severity: warning - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.8.4 - annotations: - summary: Host filesystem freespace is getting low - description: > - Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} - will be full in less than 5 days assuming the average fill-up - rate of the past 48 hours. 
- - - alert: CephNodeInconsistentMTU - expr: | - node_network_mtu_bytes * (node_network_up{device!="lo"} > 0) == - scalar( - max by (device) (node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) != - quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) - ) - or - node_network_mtu_bytes * (node_network_up{device!="lo"} > 0) == - scalar( - min by (device) (node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) != - quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) - ) + oid: "1.3.6.1.4.1.50495.1.2.1.8.3" + severity: "warning" + type: "ceph_default" + - alert: "CephNodeDiskspaceWarning" + annotations: + description: "Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will be full in less than 5 days based on the 48 hour trailing fill rate." + summary: "Host filesystem free space is getting low" + expr: "predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5) *on(instance) group_left(nodename) node_uname_info < 0" labels: - severity: warning - type: ceph_default + oid: "1.3.6.1.4.1.50495.1.2.1.8.4" + severity: "warning" + type: "ceph_default" + - alert: "CephNodeInconsistentMTU" annotations: - summary: MTU settings across Ceph hosts are inconsistent - description: > - Node {{ $labels.instance }} has a different MTU size ({{ $value }}) - than the median of devices named {{ $labels.device }}. - - - name: pools + description: "Node {{ $labels.instance }} has a different MTU size ({{ $value }}) than the median of devices named {{ $labels.device }}." 
+ summary: "MTU settings across Ceph hosts are inconsistent" + expr: "node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0) == scalar( max by (device) (node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) != quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) )or node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0) == scalar( min by (device) (node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) != quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) )" + labels: + severity: "warning" + type: "ceph_default" + - name: "pools" rules: - - alert: CephPoolGrowthWarning - expr: | - (predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id) - group_right ceph_pool_metadata) >= 95 - labels: - severity: warning - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.9.2 - annotations: - summary: Pool growth rate may soon exceed it's capacity - description: > - Pool '{{ $labels.name }}' will be full in less than 5 days - assuming the average fill-up rate of the past 48 hours. - - alert: CephPoolBackfillFull - expr: ceph_health_detail{name="POOL_BACKFILLFULL"} > 0 - labels: - severity: warning - type: ceph_default - annotations: - summary: Freespace in a pool is too low for recovery/rebalance - description: > - A pool is approaching it's near full threshold, which will - prevent rebalance operations from completing. You should - consider adding more capacity to the pool. - - - alert: CephPoolFull - expr: ceph_health_detail{name="POOL_FULL"} > 0 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.9.1 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pool-full - summary: Pool is full - writes are blocked - description: | - A pool has reached it's MAX quota, or the OSDs supporting the pool - have reached their FULL threshold. 
Until this is resolved, writes to - the pool will be blocked. - Pool Breakdown (top 5) - {{- range query "topk(5, sort_desc(ceph_pool_percent_used * on(pool_id) group_right ceph_pool_metadata))" }} - - {{ .Labels.name }} at {{ .Value }}% - {{- end }} - Either increase the pools quota, or add capacity to the cluster first - then increase it's quota (e.g. ceph osd pool set quota max_bytes ) - - alert: CephPoolNearFull - expr: ceph_health_detail{name="POOL_NEAR_FULL"} > 0 - for: 5m - labels: - severity: warning - type: ceph_default - annotations: - summary: One or more Ceph pools are getting full - description: | - A pool has exceeeded it warning (percent full) threshold, or the OSDs - supporting the pool have reached their NEARFULL thresholds. Writes may - continue, but you are at risk of the pool going read only if more capacity - isn't made available. - - Determine the affected pool with 'ceph df detail', for example looking - at QUOTA BYTES and STORED. Either increase the pools quota, or add - capacity to the cluster first then increase it's quota - (e.g. ceph osd pool set quota max_bytes ) - - name: healthchecks + - alert: "CephPoolGrowthWarning" + annotations: + description: "Pool '{{ $labels.name }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours." + summary: "Pool growth rate may soon exceed capacity" + expr: "(predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id) group_right ceph_pool_metadata) >= 95" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.9.2" + severity: "warning" + type: "ceph_default" + - alert: "CephPoolBackfillFull" + annotations: + description: "A pool is approaching the near full threshold, which will prevent recovery/backfill operations from completing. Consider adding more capacity." 
+ summary: "Free space in a pool is too low for recovery/backfill" + expr: "ceph_health_detail{name=\"POOL_BACKFILLFULL\"} > 0" + labels: + severity: "warning" + type: "ceph_default" + - alert: "CephPoolFull" + annotations: + description: "A pool has reached its MAX quota, or OSDs supporting the pool have reached the FULL threshold. Until this is resolved, writes to the pool will be blocked. Pool Breakdown (top 5) {{- range query \"topk(5, sort_desc(ceph_pool_percent_used * on(pool_id) group_right ceph_pool_metadata))\" }} - {{ .Labels.name }} at {{ .Value }}% {{- end }} Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota max_bytes )" + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pool-full" + summary: "Pool is full - writes are blocked" + expr: "ceph_health_detail{name=\"POOL_FULL\"} > 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.9.1" + severity: "critical" + type: "ceph_default" + - alert: "CephPoolNearFull" + annotations: + description: "A pool has exceeded the warning (percent full) threshold, or OSDs supporting the pool have reached the NEARFULL threshold. Writes may continue, but you are at risk of the pool going read-only if more capacity isn't made available. Determine the affected pool with 'ceph df detail', looking at QUOTA BYTES and STORED. Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota max_bytes ). Also ensure that the balancer is active." 
+ summary: "One or more Ceph pools are nearly full" + expr: "ceph_health_detail{name=\"POOL_NEAR_FULL\"} > 0" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - name: "healthchecks" rules: - - alert: CephSlowOps - expr: ceph_healthcheck_slow_ops > 0 - for: 30s - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops - summary: MON/OSD operations are slow to complete - description: > - {{ $value }} OSD requests are taking too long to process (osd_op_complaint_time exceeded) -# cephadm alerts - - name: cephadm + - alert: "CephSlowOps" + annotations: + description: "{{ $value }} OSD requests are taking too long to process (osd_op_complaint_time exceeded)" + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops" + summary: "OSD operations are slow to complete" + expr: "ceph_healthcheck_slow_ops > 0" + for: "30s" + labels: + severity: "warning" + type: "ceph_default" + - name: "cephadm" rules: - - alert: CephadmUpgradeFailed - expr: ceph_health_detail{name="UPGRADE_EXCEPTION"} > 0 - for: 30s - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.11.2 - annotations: - summary: Ceph version upgrade has failed - description: > - The cephadm cluster upgrade process has failed. The cluster remains in - an undetermined state. - - Please review the cephadm logs, to understand the nature of the issue - - alert: CephadmDaemonFailed - expr: ceph_health_detail{name="CEPHADM_FAILED_DAEMON"} > 0 - for: 30s - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.11.1 - annotations: - summary: A ceph daemon manged by cephadm is down - description: > - A daemon managed by cephadm is no longer active. Determine, which - daemon is down with 'ceph health detail'. 
you may start daemons with - the 'ceph orch daemon start ' - - alert: CephadmPaused - expr: ceph_health_detail{name="CEPHADM_PAUSED"} > 0 - for: 1m - labels: - severity: warning - type: ceph_default - annotations: - documentation: https://docs.ceph.com/en/latest/cephadm/operations#cephadm-paused - summary: Orchestration tasks via cephadm are PAUSED - description: > - Cluster management has been paused manually. This will prevent the - orchestrator from service management and reconciliation. If this is - not intentional, resume cephadm operations with 'ceph orch resume' - -# prometheus alerts - - name: PrometheusServer + - alert: "CephadmUpgradeFailed" + annotations: + description: "The cephadm cluster upgrade process has failed. The cluster remains in an undetermined state. Please review the cephadm logs, to understand the nature of the issue" + summary: "Ceph version upgrade has failed" + expr: "ceph_health_detail{name=\"UPGRADE_EXCEPTION\"} > 0" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.11.2" + severity: "critical" + type: "ceph_default" + - alert: "CephadmDaemonFailed" + annotations: + description: "A daemon managed by cephadm is no longer active. Determine, which daemon is down with 'ceph health detail'. you may start daemons with the 'ceph orch daemon start '" + summary: "A ceph daemon manged by cephadm is down" + expr: "ceph_health_detail{name=\"CEPHADM_FAILED_DAEMON\"} > 0" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.11.1" + severity: "critical" + type: "ceph_default" + - alert: "CephadmPaused" + annotations: + description: "Cluster management has been paused manually. This will prevent the orchestrator from service management and reconciliation. 
If this is not intentional, resume cephadm operations with 'ceph orch resume'" + documentation: "https://docs.ceph.com/en/latest/cephadm/operations#cephadm-paused" + summary: "Orchestration tasks via cephadm are PAUSED" + expr: "ceph_health_detail{name=\"CEPHADM_PAUSED\"} > 0" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - name: "PrometheusServer" rules: - - alert: PrometheusJobMissing - expr: absent(up{job="ceph"}) - for: 30s - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.12.1 + - alert: "PrometheusJobMissing" annotations: - summary: The scrape job for Ceph is missing from Prometheus - description: | - The prometheus job that scrapes from Ceph is no longer defined, this - will effectively mean you'll have no metrics or alerts for the cluster. - - Please review the job definitions in the prometheus.yml file of the prometheus - instance. -# Object related events - - name: rados - rules: - - alert: CephObjectMissing - expr: (ceph_health_detail{name="OBJECT_UNFOUND"} == 1) * on() (count(ceph_osd_up == 1) == bool count(ceph_osd_metadata)) == 1 - for: 30s + description: "The prometheus job that scrapes from Ceph is no longer defined, this will effectively mean you'll have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance." + summary: "The scrape job for Ceph is missing from Prometheus" + expr: "absent(up{job=\"ceph\"})" + for: "30s" labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.10.1 - annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#object-unfound - summary: Object(s) has been marked UNFOUND - description: | - A version of a RADOS object can not be found, even though all OSDs are up. I/O - requests for this object from clients will block (hang). Resolving this issue may - require the object to be rolled back to a prior version manually, and manually verified. 
-# Generic - - name: generic + oid: "1.3.6.1.4.1.50495.1.2.1.12.1" + severity: "critical" + type: "ceph_default" + - name: "rados" rules: - - alert: CephDaemonCrash - expr: ceph_health_detail{name="RECENT_CRASH"} == 1 - for: 1m - labels: - severity: critical - type: ceph_default - oid: 1.3.6.1.4.1.50495.1.2.1.1.2 + - alert: "CephObjectMissing" + annotations: + description: "The latest version of a RADOS object can not be found, even though all OSDs are up. I/O requests for this object from clients will block (hang). Resolving this issue may require the object to be rolled back to a prior version manually, and manually verified." + documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#object-unfound" + summary: "Object(s) marked UNFOUND" + expr: "(ceph_health_detail{name=\"OBJECT_UNFOUND\"} == 1) * on() (count(ceph_osd_up == 1) == bool count(ceph_osd_metadata)) == 1" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.10.1" + severity: "critical" + type: "ceph_default" + - name: "generic" + rules: + - alert: "CephDaemonCrash" annotations: - documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks/#recent-crash - summary: One or more Ceph daemons have crashed, and are pending acknowledgement - description: | - One or more daemons have crashed recently, and need to be acknowledged. This notification - ensures that software crashes don't go unseen. To acknowledge a crash, use the - 'ceph crash archive ' command. + description: "One or more daemons have crashed recently, and need to be acknowledged. This notification ensures that software crashes do not go unseen. To acknowledge a crash, use the 'ceph crash archive ' command." 
+ documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks/#recent-crash" + summary: "One or more Ceph daemons have crashed, and are pending acknowledgement" + expr: "ceph_health_detail{name=\"RECENT_CRASH\"} == 1" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.1.2" + severity: "critical" + type: "ceph_default" diff --git a/ceph/monitoring/ceph-mixin/test-jsonnet.sh b/ceph/monitoring/ceph-mixin/test-jsonnet.sh index fef0443a9..87c533892 100755 --- a/ceph/monitoring/ceph-mixin/test-jsonnet.sh +++ b/ceph/monitoring/ceph-mixin/test-jsonnet.sh @@ -19,6 +19,10 @@ do done done +jsonnet -J vendor -S alerts.jsonnet -o ${TEMPDIR}/prometheus_alerts.yml +jsondiff --indent 2 "prometheus_alerts.yml" "${TEMPDIR}/prometheus_alerts.yml" \ + | tee -a ${TEMPDIR}/json_difference.log + err=0 if [ $(wc -l < ${TEMPDIR}/json_difference.log) -eq 0 ] then diff --git a/ceph/monitoring/ceph-mixin/tests_alerts/test_alerts.yml b/ceph/monitoring/ceph-mixin/tests_alerts/test_alerts.yml index 680082d89..7b7e7db73 100644 --- a/ceph/monitoring/ceph-mixin/tests_alerts/test_alerts.yml +++ b/ceph/monitoring/ceph-mixin/tests_alerts/test_alerts.yml @@ -26,10 +26,8 @@ tests: type: ceph_default severity: critical exp_annotations: - summary: Cluster is in an ERROR state - description: > - Ceph in HEALTH_ERROR state for more than 5 minutes. - Please check "ceph health detail" for more information. + summary: Ceph is in the ERROR state + description: The cluster state has been HEALTH_ERROR for more than 5 minutes. Please check 'ceph health detail' for more information. # health warning - interval: 5m @@ -54,10 +52,8 @@ tests: type: ceph_default severity: warning exp_annotations: - summary: Cluster is in a WARNING state - description: > - Ceph has been in HEALTH_WARN for more than 15 minutes. - Please check "ceph health detail" for more information. + summary: Ceph is in the WARNING state + description: The cluster state has been HEALTH_WARN for more than 15 minutes. 
Please check 'ceph health detail' for more information. # 10% OSDs down - interval: 1m @@ -105,11 +101,7 @@ tests: severity: critical exp_annotations: summary: More than 10% of OSDs are down - description: | - 33.33% or 1 of 3 OSDs are down (>= 10%). - - The following OSDs are down: - - osd.1 on ceph + description: "33.33% or 1 of 3 OSDs are down (>= 10%). The following OSDs are down: - osd.1 on ceph" # flapping OSD - interval: 1s @@ -166,13 +158,8 @@ tests: type: ceph_default exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds - summary: Network issues are causing OSD's to flap (mark each other out) - description: > - OSD osd.0 on ceph was - marked down and back up at 20.1 times once a minute for 5 minutes. - This could indicate a network issue (latency, packet drop, disruption) - on the clusters "cluster network". Check the network environment on the - listed host(s). + summary: Network issues are causing OSDs to flap (mark each other down) + description: "OSD osd.0 on ceph was marked down and back up 20.1 times once a minute for 5 minutes. This may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster network, or the public network if no cluster network is deployed. Check the network stats on the listed host(s)." # high pg count deviation - interval: 1m @@ -244,10 +231,8 @@ tests: severity: warning type: ceph_default exp_annotations: - summary: PG allocations are not balanced across devices - description: > - OSD osd.1 on ceph deviates - by more than 30% from average PG count. + summary: PGs are not balanced across OSDs + description: "OSD osd.1 on ceph deviates by more than 30% from average PG count." # pgs inactive - interval: 1m @@ -295,12 +280,8 @@ tests: severity: critical type: ceph_default exp_annotations: - summary: One or more Placement Groups are inactive - description: > - 1 PGs have been inactive for more than 5 minutes in pool - device_health_metrics. 
- Inactive placement groups aren't able to serve read/write - requests. + summary: One or more placement groups are inactive + description: "1 PGs have been inactive for more than 5 minutes in pool device_health_metrics. Inactive placement groups are not able to serve read/write requests." #pgs unclean - interval: 1m @@ -351,12 +332,8 @@ tests: severity: warning type: ceph_default exp_annotations: - summary: One or more platcment groups are marked unclean - description: > - 1 PGs haven't been clean for more than 15 minutes in pool - device_health_metrics. - Unclean PGs haven't been able to completely recover from a - previous failure. + summary: One or more placement groups are marked unclean + description: "1 PGs have been unclean for more than 15 minutes in pool device_health_metrics. Unclean PGs have not recovered from a previous failure." # root volume full - interval: 1m @@ -395,36 +372,41 @@ tests: type: ceph_default exp_annotations: summary: Root filesystem is dangerously full - description: > - Root volume (OSD and MON store) is dangerously full: 4.811% free. + description: "Root volume is dangerously full: 4.811% free." 
# network packets dropped - - interval: 1s + - interval: 1m input_series: - series: 'node_network_receive_drop_total{device="eth0", instance="node-exporter",job="node-exporter"}' - values: '1+1x500' + values: '0+600x10' - series: 'node_network_transmit_drop_total{device="eth0", instance="node-exporter",job="node-exporter"}' - values: '1+1x500' + values: '0+600x10' + - series: 'node_network_receive_packets_total{device="eth0", + instance="node-exporter",job="node-exporter"}' + values: '0+750x10' + - series: 'node_network_transmit_packets_total{device="eth0", + instance="node-exporter",job="node-exporter"}' + values: '0+750x10' promql_expr_test: - expr: | ( - increase(node_network_receive_drop_total{device!="lo"}[1m]) + - increase(node_network_transmit_drop_total{device!="lo"}[1m]) + rate(node_network_receive_drop_total{device!="lo"}[1m]) + + rate(node_network_transmit_drop_total{device!="lo"}[1m]) ) / ( - increase(node_network_receive_packets_total{device!="lo"}[1m]) + - increase(node_network_transmit_packets_total{device!="lo"}[1m]) - ) >= 0.0001 or ( - increase(node_network_receive_drop_total{device!="lo"}[1m]) + - increase(node_network_transmit_drop_total{device!="lo"}[1m]) + rate(node_network_receive_packets_total{device!="lo"}[1m]) + + rate(node_network_transmit_packets_total{device!="lo"}[1m]) + ) >= 0.0050000000000000001 and ( + rate(node_network_receive_drop_total{device!="lo"}[1m]) + + rate(node_network_transmit_drop_total{device!="lo"}[1m]) ) >= 10 eval_time: 5m exp_samples: - labels: '{device="eth0", instance="node-exporter", job="node-exporter"}' - value: 1.2E+02 + value: 8E-1 alert_rule_test: - eval_time: 5m alertname: CephNodeNetworkPacketDrops @@ -437,38 +419,42 @@ tests: severity: warning type: ceph_default exp_annotations: - summary: One or more Nics is seeing packet drops - description: > - Node node-exporter experiences packet drop > 0.01% or > - 10 packets/s on interface eth0. 
+ summary: One or more NICs reports packet drops + description: "Node node-exporter experiences packet drop > 0.5% or > 10 packets/s on interface eth0." # network packets errors - - interval: 1s + - interval: 1m input_series: - series: 'node_network_receive_errs_total{device="eth0", instance="node-exporter",job="node-exporter"}' - values: '1+1x500' + values: '0+600x10' - series: 'node_network_transmit_errs_total{device="eth0", instance="node-exporter",job="node-exporter"}' - values: '1+1x500' + values: '0+600x10' + - series: 'node_network_transmit_packets_total{device="eth0", + instance="node-exporter",job="node-exporter"}' + values: '0+750x10' + - series: 'node_network_receive_packets_total{device="eth0", + instance="node-exporter",job="node-exporter"}' + values: '0+750x10' promql_expr_test: - expr: | ( - increase(node_network_receive_errs_total{device!="lo"}[1m]) + - increase(node_network_transmit_errs_total{device!="lo"}[1m]) + rate(node_network_receive_errs_total{device!="lo"}[1m]) + + rate(node_network_transmit_errs_total{device!="lo"}[1m]) ) / ( - increase(node_network_receive_packets_total{device!="lo"}[1m]) + - increase(node_network_transmit_packets_total{device!="lo"}[1m]) + rate(node_network_receive_packets_total{device!="lo"}[1m]) + + rate(node_network_transmit_packets_total{device!="lo"}[1m]) ) >= 0.0001 or ( - increase(node_network_receive_errs_total{device!="lo"}[1m]) + - increase(node_network_transmit_errs_total{device!="lo"}[1m]) + rate(node_network_receive_errs_total{device!="lo"}[1m]) + + rate(node_network_transmit_errs_total{device!="lo"}[1m]) ) >= 10 eval_time: 5m exp_samples: - labels: '{device="eth0", instance="node-exporter", job="node-exporter"}' - value: 1.2E+02 + value: 8E-01 alert_rule_test: - eval_time: 5m alertname: CephNodeNetworkPacketErrors @@ -480,11 +466,9 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.8.3 severity: warning type: ceph_default - exp_annotations: - summary: One or more Nics is seeing packet errors - description: > - Node 
node-exporter experiences packet errors > 0.01% or > 10 - packets/s on interface eth0. + exp_annotations: + summary: One or more NICs reports packet errors + description: "Node node-exporter experiences packet errors > 0.01% or > 10 packets/s on interface eth0." # Node Storage disk space filling up - interval: 1m @@ -523,11 +507,8 @@ tests: mountpoint: /rootfs nodename: node-1.unittests.com exp_annotations: - summary: Host filesystem freespace is getting low - description: > - Mountpoint /rootfs on node-1.unittests.com - will be full in less than 5 days assuming the average fill-up - rate of the past 48 hours. + summary: Host filesystem free space is getting low + description: "Mountpoint /rootfs on node-1.unittests.com will be full in less than 5 days based on the 48 hour trailing fill rate." # MTU Mismatch - interval: 1m input_series: @@ -604,9 +585,7 @@ tests: type: ceph_default exp_annotations: summary: MTU settings across Ceph hosts are inconsistent - description: > - Node hostname1 has a different MTU size (2200) - than the median of devices named eth4. + description: "Node hostname1 has a different MTU size (2200) than the median of devices named eth4." - exp_labels: device: eth4 instance: node-exporter @@ -615,9 +594,7 @@ tests: type: ceph_default exp_annotations: summary: MTU settings across Ceph hosts are inconsistent - description: > - Node node-exporter has a different MTU size (9000) - than the median of devices named eth4. + description: "Node node-exporter has a different MTU size (9000) than the median of devices named eth4." # pool full, data series has 6 but using topk(5) so to ensure the # results are working as expected @@ -675,18 +652,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pool-full summary: Pool is full - writes are blocked - description: | - A pool has reached it's MAX quota, or the OSDs supporting the pool - have reached their FULL threshold. 
Until this is resolved, writes to - the pool will be blocked. - Pool Breakdown (top 5) - - rbd at 96% - - iscsi at 90% - - default.rgw.index at 72% - - cephfs_data at 32% - - default.rgw.log at 19% - Either increase the pools quota, or add capacity to the cluster first - then increase it's quota (e.g. ceph osd pool set quota max_bytes ) + description: "A pool has reached its MAX quota, or OSDs supporting the pool have reached the FULL threshold. Until this is resolved, writes to the pool will be blocked. Pool Breakdown (top 5) - rbd at 96% - iscsi at 90% - default.rgw.index at 72% - cephfs_data at 32% - default.rgw.log at 19% Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota max_bytes )" # slow OSD ops - interval : 1m input_series: @@ -710,10 +676,8 @@ tests: type: ceph_default exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops - summary: MON/OSD operations are slow to complete - description: > - 1 OSD requests are taking too long to process - (osd_op_complaint_time exceeded) + summary: OSD operations are slow to complete + description: "1 OSD requests are taking too long to process (osd_op_complaint_time exceeded)" # CEPHADM orchestrator alert triggers - interval: 30s @@ -739,11 +703,7 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.11.2 exp_annotations: summary: Ceph version upgrade has failed - description: > - The cephadm cluster upgrade process has failed. The cluster remains in - an undetermined state. - - Please review the cephadm logs, to understand the nature of the issue + description: "The cephadm cluster upgrade process has failed. The cluster remains in an undetermined state. 
Please review the cephadm logs, to understand the nature of the issue" - interval: 30s input_series: - series: 'ceph_health_detail{name="CEPHADM_FAILED_DAEMON"}' @@ -767,10 +727,7 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.11.1 exp_annotations: summary: A ceph daemon manged by cephadm is down - description: > - A daemon managed by cephadm is no longer active. Determine, which - daemon is down with 'ceph health detail'. you may start daemons with - the 'ceph orch daemon start ' + description: "A daemon managed by cephadm is no longer active. Determine, which daemon is down with 'ceph health detail'. you may start daemons with the 'ceph orch daemon start '" - interval: 1m input_series: - series: 'ceph_health_detail{name="CEPHADM_PAUSED"}' @@ -794,10 +751,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/cephadm/operations#cephadm-paused summary: Orchestration tasks via cephadm are PAUSED - description: > - Cluster management has been paused manually. This will prevent the - orchestrator from service management and reconciliation. If this is - not intentional, resume cephadm operations with 'ceph orch resume' + description: "Cluster management has been paused manually. This will prevent the orchestrator from service management and reconciliation. If this is not intentional, resume cephadm operations with 'ceph orch resume'" # MDS - interval: 1m input_series: @@ -822,13 +776,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.5.1 exp_annotations: documentation: https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages - summary: Ceph filesystem is damaged. - description: > - The filesystems metadata has been corrupted. Data access - may be blocked. - - Either analyse the output from the mds daemon admin socket, or - escalate to support + summary: CephFS filesystem is damaged. + description: "Filesystem metadata has been corrupted. Data may be inaccessible. Analyze metrics from the MDS daemon admin socket, or escalate to support." 
- interval: 1m input_series: - series: 'ceph_health_detail{name="MDS_HEALTH_READ_ONLY"}' @@ -852,13 +801,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.5.2 exp_annotations: documentation: https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages - summary: Ceph filesystem in read only mode, due to write error(s) - description: > - The filesystem has switched to READ ONLY due to an unexpected - write error, when writing to the metadata pool - - Either analyse the output from the mds daemon admin socket, or - escalate to support + summary: CephFS filesystem in read only mode due to write error(s) + description: "The filesystem has switched to READ ONLY due to an unexpected error when writing to the metadata pool. Either analyze the output from the MDS daemon admin socket, or escalate to support." - interval: 1m input_series: - series: 'ceph_health_detail{name="MDS_ALL_DOWN"}' @@ -882,10 +826,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.5.3 exp_annotations: documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-all-down - summary: Ceph filesystem is offline - description: > - All MDS ranks are unavailable. The ceph daemons providing the metadata - for the Ceph filesystem are all down, rendering the filesystem offline. + summary: CephFS filesystem is offline + description: "All MDS ranks are unavailable. The MDS daemons managing metadata are down, rendering the filesystem offline." - interval: 1m input_series: - series: 'ceph_health_detail{name="FS_DEGRADED"}' @@ -909,11 +851,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.5.4 exp_annotations: documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-degraded - summary: Ceph filesystem is degraded - description: > - One or more metadata daemons (MDS ranks) are failed or in a - damaged state. At best the filesystem is partially available, - worst case is the filesystem is completely unusable. 
+ summary: CephFS filesystem is degraded + description: "One or more metadata daemons (MDS ranks) are failed or in a damaged state. At best the filesystem is partially available, at worst the filesystem is completely unusable." - interval: 1m input_series: - series: 'ceph_health_detail{name="MDS_INSUFFICIENT_STANDBY"}' @@ -936,11 +875,8 @@ tests: type: ceph_default exp_annotations: documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-insufficient-standby - summary: Ceph filesystem standby daemons too low - description: > - The minimum number of standby daemons determined by standby_count_wanted - is less than the actual number of standby daemons. Adjust the standby count - or increase the number of mds daemons within the filesystem. + summary: Ceph filesystem standby daemons too few + description: "The minimum number of standby daemons required by standby_count_wanted is less than the current number of standby daemons. Adjust the standby count or increase the number of MDS daemons." - interval: 1m input_series: - series: 'ceph_health_detail{name="FS_WITH_FAILED_MDS"}' @@ -964,11 +900,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.5.5 exp_annotations: documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-with-failed-mds - summary: Ceph MDS daemon failed, no further standby available - description: > - An MDS daemon has failed, leaving only one active rank without - further standby. Investigate the cause of the failure or add a - standby daemon + summary: MDS daemon failed, no further standby available + description: "An MDS daemon has failed, leaving only one active rank and no available standby. Investigate the cause of the failure or add a standby MDS." 
- interval: 1m input_series: - series: 'ceph_health_detail{name="MDS_UP_LESS_THAN_MAX"}' @@ -992,10 +925,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-up-less-than-max summary: Ceph MDS daemon count is lower than configured - description: > - The filesystem's "max_mds" setting defined the number of MDS ranks in - the filesystem. The current number of active MDS daemons is less than - this setting. + description: "The filesystem's 'max_mds' setting defines the number of MDS ranks in the filesystem. The current number of active MDS daemons is less than this value." # MGR - interval: 1m input_series: @@ -1020,16 +950,8 @@ tests: type: ceph_default oid: 1.3.6.1.4.1.50495.1.2.1.6.2 exp_annotations: - summary: Ceph's mgr/prometheus module is not available - description: > - The mgr/prometheus module at ceph-mgr:9283 is unreachable. This - could mean that the module has been disabled or the mgr itself is down. - - Without the mgr/prometheus module metrics and alerts will no longer - function. Open a shell to ceph and use 'ceph -s' to to determine whether the - mgr is active. If the mgr is not active, restart it, otherwise you can check - the mgr/prometheus module is loaded with 'ceph mgr module ls' and if it's - not listed as enabled, enable it with 'ceph mgr module enable prometheus' + summary: The mgr/prometheus module is not available + description: "The mgr/prometheus module at ceph-mgr:9283 is unreachable. This could mean that the module has been disabled or the mgr daemon itself is down. Without the mgr/prometheus module metrics and alerts will no longer function. Open a shell to an admin node or toolbox pod and use 'ceph -s' to determine whether the mgr is active. If the mgr is not active, restart it, otherwise you can determine module status with 'ceph mgr module ls'. If it is not listed as enabled, enable it with 'ceph mgr module enable prometheus'."
- interval: 1m input_series: - series: 'ceph_health_detail{name="RECENT_MGR_MODULE_CRASH"}' @@ -1053,11 +975,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.6.1 exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#recent-mgr-module-crash - summary: A mgr module has recently crashed - description: > - One or more mgr modules have crashed and are yet to be acknowledged by the administrator. A - crashed module may impact functionality within the cluster. Use the 'ceph crash' commands to - investigate which module has failed, and archive it to acknowledge the failure. + summary: A manager module has recently crashed + description: "One or more mgr modules have crashed and have yet to be acknowledged by an administrator. A crashed module may impact functionality within the cluster. Use the 'ceph crash' command to determine which module has failed, and archive it to acknowledge the failure." # MON - interval: 1m input_series: @@ -1084,12 +1003,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.3.2 exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-crit - summary: Disk space on at least one monitor is critically low - description: | - The free space available to a monitor's store is critically low (<5% by default). - You should increase the space available to the monitor(s). The - default location for the store sits under /var/lib/ceph. Your monitor hosts are; - - ceph-mon-a + summary: Filesystem space on at least one monitor is critically low + description: "The free space available to a monitor's store is critically low. You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. 
Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; - ceph-mon-a" - interval: 1m input_series: - series: 'ceph_health_detail{name="MON_DISK_LOW"}' @@ -1114,12 +1029,8 @@ tests: type: ceph_default exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-low - summary: Disk space on at least one monitor is approaching full - description: | - The space available to a monitor's store is approaching full (>70% is the default). - You should increase the space available to the monitor store. The - default location for the store sits under /var/lib/ceph. Your monitor hosts are; - - ceph-mon-a + summary: Drive space on at least one monitor is approaching full + description: "The space available to a monitor's store is approaching full (>70% is the default). You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; - ceph-mon-a" - interval: 1m input_series: - series: 'ceph_health_detail{name="MON_CLOCK_SKEW"}' @@ -1142,14 +1053,8 @@ tests: type: ceph_default exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-clock-skew - summary: Clock skew across the Monitor hosts detected - description: | - The ceph monitors rely on a consistent time reference to maintain - quorum and cluster consistency. This event indicates that at least - one of your mons is not sync'd correctly. - - Review the cluster status with ceph -s. This will show which monitors - are affected. 
Check the time sync status on each monitor host. + summary: Clock skew detected among monitors + description: "Ceph monitors rely on closely synchronized time to maintain quorum and cluster consistency. This event indicates that the time on at least one mon has drifted too far from the lead mon. Review cluster status with ceph -s. This will show which monitors are affected. Check the time sync status on each monitor host with 'ceph time-sync-status' and the state and peers of your ntpd or chrony daemon." # Check 3 mons one down, quorum at risk - interval: 1m @@ -1188,12 +1093,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down summary: Monitor quorum is at risk - description: | - Quorum requires a majority of monitors (x 2) to be active - Without quorum the cluster will become inoperable, affecting all connected clients and services. - - The following monitors are down: - - mon.c on ceph-mon-3 + description: "Quorum requires a majority of monitors (x 2) to be active. Without quorum the cluster will become inoperable, affecting all services and connected clients. The following monitors are down: - mon.c on ceph-mon-3" # check 5 mons, 1 down - warning only - interval: 1m input_series: @@ -1234,13 +1134,8 @@ tests: type: ceph_default exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down - summary: One of more ceph monitors are down - description: | - You have 1 monitor down. - Quorum is still intact, but the loss of further monitors will make your cluster inoperable. - - The following monitors are down: - - mon.e on ceph-mon-5 + summary: One or more monitors down + description: "You have 1 monitor down. Quorum is still intact, but the loss of an additional monitor will make your cluster inoperable. 
The following monitors are down: - mon.e on ceph-mon-5\n" # Device Health - interval: 1m input_series: @@ -1264,14 +1159,8 @@ tests: type: ceph_default exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#id2 - summary: Device(s) have been predicted to fail soon - description: | - The device health module has determined that one or more devices will fail - soon. To review the device states use 'ceph device ls'. To show a specific - device use 'ceph device info '. - - Mark the OSD as out (so data may migrate to other OSDs in the cluster). Once - the osd is empty remove and replace the OSD. + summary: Device(s) predicted to fail soon + description: "The device health module has determined that one or more devices will fail soon. To review device status use 'ceph device ls'. To show a specific device use 'ceph device info '. Mark the OSD out so that data may migrate to other OSDs. Once the OSD has drained, destroy the OSD, replace the device, and redeploy the OSD." - interval: 1m input_series: - series: 'ceph_health_detail{name="DEVICE_HEALTH_TOOMANY"}' @@ -1295,12 +1184,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.4.7 exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany - summary: Too many devices have been predicted to fail, unable to resolve - description: | - The device health module has determined that the number of devices predicted to - fail can not be remediated automatically, since it would take too many osd's out of - the cluster, impacting performance and potentially availabililty. You should add new - OSDs to the cluster to allow data to be relocated to avoid the data integrity issues. 
+ summary: Too many devices are predicted to fail, unable to resolve + description: "The device health module has determined that devices predicted to fail can not be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availability. Prevent data integrity issues by adding new OSDs so that data may be relocated." - interval: 1m input_series: - series: 'ceph_health_detail{name="DEVICE_HEALTH_IN_USE"}' @@ -1323,15 +1208,8 @@ tests: type: ceph_default exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-in-use - summary: A device failure is predicted, but unable to relocate data - description: | - The device health module has determined that one or more devices will fail - soon, but the normal process of relocating the data on the device to other - OSDs in the cluster is blocked. - - Check the the cluster has available freespace. It may be necessary to add - more disks to the cluster to allow the data from the failing device to - successfully migrate. + summary: Device failure is predicted, but unable to relocate data + description: "The device health module has determined that one or more devices will fail soon, but the normal process of relocating the data on the device to other OSDs in the cluster is blocked. \nEnsure that the cluster has available free space. It may be necessary to add capacity to the cluster to allow data from the failing device to successfully migrate, or to enable the balancer."
# OSD - interval: 1m input_series: @@ -1360,9 +1238,7 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.4.8 exp_annotations: summary: An OSD host is offline - description: | - The following OSDs are down: - - ceph-osd-1 : osd.0 + description: "The following OSDs are down: - ceph-osd-1 : osd.0" - interval: 1m input_series: - series: 'ceph_health_detail{name="OSD_SLOW_PING_TIME_FRONT"}' @@ -1385,9 +1261,7 @@ tests: type: ceph_default exp_annotations: summary: Network issues delaying OSD heartbeats (public network) - description: | - OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network - for any latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs. + description: "OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network for latency or loss issues. Use 'ceph health detail' to show the affected OSDs." - interval: 1m input_series: - series: 'ceph_health_detail{name="OSD_SLOW_PING_TIME_BACK"}' @@ -1410,9 +1284,7 @@ tests: type: ceph_default exp_annotations: summary: Network issues delaying OSD heartbeats (cluster network) - description: | - OSD heartbeats on the cluster's 'cluster' network (backend) are running slow. Investigate the network - for any latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs. + description: "OSD heartbeats on the cluster's 'cluster' network (backend) are slow. Investigate the network for latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs." - interval: 1m input_series: - series: 'ceph_health_detail{name="BLUESTORE_DISK_SIZE_MISMATCH"}' @@ -1436,9 +1308,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-disk-size-mismatch summary: OSD size inconsistency error - description: | - One or more OSDs have an internal inconsistency between the size of the physical device and it's metadata. 
- This could lead to the OSD(s) crashing in future. You should redeploy the effected OSDs. + description: "One or more OSDs have an internal inconsistency between metadata and the size of the device. This could lead to the OSD(s) crashing in future. You should redeploy the affected OSDs." - interval: 30s input_series: - series: 'ceph_health_detail{name="BLUESTORE_SPURIOUS_READ_ERRORS"}' @@ -1462,9 +1332,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-spurious-read-errors summary: Device read errors detected - description: > - An OSD has encountered read errors, but the OSD has recovered by retrying - the reads. This may indicate an issue with the Hardware or Kernel. + description: "An OSD has encountered read errors, but the OSD has recovered by retrying the reads. This may indicate an issue with hardware or the kernel." - interval: 1m input_series: - series: 'ceph_health_detail{name="OSD_DOWN"}' @@ -1500,12 +1368,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.4.2 exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-down - summary: An OSD has been marked down/unavailable - description: | - 1 OSD down for over 5mins. - - The following OSD is down: - - osd.1 on ceph-osd-2 + summary: An OSD has been marked down + description: "1 OSD down for over 5mins. The following OSD is down: - osd.1 on ceph-osd-2\n" - interval: 1m input_series: - series: 'ceph_health_detail{name="OSD_NEARFULL"}' @@ -1530,11 +1394,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-nearfull summary: OSD(s) running low on free space (NEARFULL) - description: | - One or more OSDs have reached their NEARFULL threshold - - Use 'ceph health detail' to identify which OSDs have reached this threshold. 
- To resolve, either add capacity to the cluster, or delete unwanted data + description: One or more OSDs have reached the NEARFULL threshold. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data. - interval: 1m input_series: - series: 'ceph_health_detail{name="OSD_FULL"}' @@ -1558,12 +1418,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.4.6 exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-full - summary: OSD(s) is full, writes blocked - description: | - An OSD has reached it's full threshold. Writes from all pools that share the - affected OSD will be blocked. - - To resolve, either add capacity to the cluster, or delete unwanted data + summary: OSD full, writes blocked + description: An OSD has reached the FULL threshold. Writes to pools that share the affected OSD will be blocked. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data. - interval: 1m input_series: - series: 'ceph_health_detail{name="OSD_BACKFILLFULL"}' @@ -1587,11 +1443,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-backfillfull summary: OSD(s) too full for backfill operations - description: | - An OSD has reached it's BACKFILL FULL threshold. This will prevent rebalance operations - completing for some pools. Check the current capacity utilisation with 'ceph df' - - To resolve, either add capacity to the cluster, or delete unwanted data + description: "An OSD has reached the BACKFILL FULL threshold. This will prevent rebalance operations from completing. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data." 
- interval: 30s input_series: - series: 'ceph_health_detail{name="OSD_TOO_MANY_REPAIRS"}' @@ -1614,10 +1466,8 @@ tests: type: ceph_default exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-too-many-repairs - summary: OSD has hit a high number of read errors - description: | - Reads from an OSD have used a secondary PG to return data to the client, indicating - a potential failing disk. + summary: OSD reports a high number of read errors + description: Reads from an OSD have used a secondary PG to return data to the client, indicating a potential failing drive. # Pools # trigger percent full prediction on pools 1 and 2 only - interval: 12h @@ -1649,10 +1499,8 @@ tests: type: ceph_default oid: 1.3.6.1.4.1.50495.1.2.1.9.2 exp_annotations: - summary: Pool growth rate may soon exceed it's capacity - description: > - Pool 'rbd' will be full in less than 5 days - assuming the average fill-up rate of the past 48 hours. + summary: Pool growth rate may soon exceed capacity + description: Pool 'rbd' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours. - interval: 1m input_series: - series: 'ceph_health_detail{name="POOL_BACKFILLFULL"}' @@ -1674,11 +1522,8 @@ tests: severity: warning type: ceph_default exp_annotations: - summary: Freespace in a pool is too low for recovery/rebalance - description: > - A pool is approaching it's near full threshold, which will - prevent rebalance operations from completing. You should - consider adding more capacity to the pool. + summary: Free space in a pool is too low for recovery/backfill + description: A pool is approaching the near full threshold, which will prevent recovery/backfill operations from completing. Consider adding more capacity. 
- interval: 1m input_series: @@ -1701,17 +1546,8 @@ tests: severity: warning type: ceph_default exp_annotations: - summary: One or more Ceph pools are getting full - description: | - A pool has exceeeded it warning (percent full) threshold, or the OSDs - supporting the pool have reached their NEARFULL thresholds. Writes may - continue, but you are at risk of the pool going read only if more capacity - isn't made available. - - Determine the affected pool with 'ceph df detail', for example looking - at QUOTA BYTES and STORED. Either increase the pools quota, or add - capacity to the cluster first then increase it's quota - (e.g. ceph osd pool set quota max_bytes ) + summary: One or more Ceph pools are nearly full + description: "A pool has exceeded the warning (percent full) threshold, or OSDs supporting the pool have reached the NEARFULL threshold. Writes may continue, but you are at risk of the pool going read-only if more capacity isn't made available. Determine the affected pool with 'ceph df detail', looking at QUOTA BYTES and STORED. Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota max_bytes ). Also ensure that the balancer is active." # PGs - interval: 1m @@ -1737,14 +1573,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-scrubbed summary: Placement group(s) have not been scrubbed - description: | - One or more PGs have not been scrubbed recently. The scrub process is a data integrity - feature, protectng against bit-rot. It checks that objects and their metadata (size and - attributes) match across object replicas. When PGs miss their scrub window, it may - indicate the scrub window is too small, or PGs were not in a 'clean' state during the - scrub window. - - You can manually initiate a scrub with: ceph pg scrub + description: "One or more PGs have not been scrubbed recently. 
Scrubs check metadata integrity, protecting against bit-rot. They check that metadata is consistent across data replicas. When PGs miss their scrub interval, it may indicate that the scrub window is too small, or PGs were not in a 'clean' state during the scrub window. You can manually initiate a scrub with: ceph pg scrub " - interval: 1m input_series: - series: 'ceph_health_detail{name="PG_DAMAGED"}' @@ -1769,13 +1598,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-damaged summary: Placement group damaged, manual intervention needed - description: > - During data consistency checks (scrub), at least one PG has been flagged as being - damaged or inconsistent. - - Check to see which PG is affected, and attempt a manual repair if necessary. To list - problematic placement groups, use 'rados list-inconsistent-pg '. To repair PGs use - the 'ceph pg repair ' command. + description: During data consistency checks (scrub), at least one PG has been flagged as being damaged or inconsistent. Check to see which PG is affected, and attempt a manual repair if necessary. To list problematic placement groups, use 'rados list-inconsistent-pg '. To repair PGs use the 'ceph pg repair ' command. - interval: 1m input_series: - series: 'ceph_health_detail{name="TOO_MANY_PGS"}' @@ -1799,13 +1622,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks/#too-many-pgs summary: Placement groups per OSD is too high - description: | - The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting). - - Check that the pg_autoscaler hasn't been disabled for any of the pools, with 'ceph osd pool autoscale-status' - and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide - the autoscaler based on the expected relative size of the pool - (i.e. 
'ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') + description: "The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting).\n Check that the pg_autoscaler has not been disabled for any pools with 'ceph osd pool autoscale-status', and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide the autoscaler based on the expected relative size of the pool ('ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') or set the pg_autoscaler mode to 'warn' and adjust pg_num appropriately for one or more pools." - interval: 1m input_series: - series: 'ceph_health_detail{name="PG_RECOVERY_FULL"}' @@ -1829,10 +1646,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.7.5 exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-recovery-full - summary: OSDs are too full for automatic recovery - description: > - Data redundancy may be reduced, or is at risk, since one or more OSDs are at or above their - 'full' threshold. Add more capacity to the cluster, or delete unwanted data. + summary: OSDs are too full for recovery + description: Data redundancy is at risk since one or more OSDs are at or above the 'full' threshold. Add more capacity to the cluster, restore down/out OSDs, or delete unwanted data. - interval: 1m input_series: - series: 'ceph_health_detail{name="PG_BACKFILL_FULL"}' @@ -1856,10 +1671,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.7.6 exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-backfill-full - summary: Backfill operations are blocked, due to lack of freespace - description: > - Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs - have breached their 'backfillfull' threshold. Add more capacity, or delete unwanted data. 
+ summary: Backfill operations are blocked due to lack of free space + description: Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs have reached the 'backfillfull' threshold. Add more capacity, or delete unwanted data. - interval: 1m input_series: - series: 'ceph_health_detail{name="PG_AVAILABILITY"}' @@ -1891,10 +1704,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.7.3 exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability - summary: Placement group is unavailable, blocking some I/O - description: > - Data availability is reduced impacting the clusters ability to service I/O to some data. One or - more placement groups (PGs) are in a state that blocks IO. + summary: PG is unavailable, blocking I/O + description: Data availability is reduced, impacting the cluster's ability to service I/O. One or more placement groups (PGs) are in a state that blocks I/O. - interval: 1m input_series: - series: 'ceph_health_detail{name="PG_NOT_DEEP_SCRUBBED"}' @@ -1918,14 +1729,7 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-deep-scrubbed summary: Placement group(s) have not been deep scrubbed - description: | - One or more PGs have not been deep scrubbed recently. Deep scrub is a data integrity - feature, protectng against bit-rot. It compares the contents of objects and their - replicas for inconsistency. When PGs miss their deep scrub window, it may indicate - that the window is too small or PGs were not in a 'clean' state during the deep-scrub - window. - - You can manually initiate a deep scrub with: ceph pg deep-scrub + description: One or more PGs have not been deep scrubbed recently. Deep scrubs protect against bit-rot. They compare data replicas to ensure consistency. When PGs miss their deep scrub interval, it may indicate that the window is too small or PGs were not in a 'clean' state during the deep-scrub window. 
# Prometheus - interval: 1m @@ -1949,12 +1753,7 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.12.1 exp_annotations: summary: The scrape job for Ceph is missing from Prometheus - description: | - The prometheus job that scrapes from Ceph is no longer defined, this - will effectively mean you'll have no metrics or alerts for the cluster. - - Please review the job definitions in the prometheus.yml file of the prometheus - instance. + description: The prometheus job that scrapes from Ceph is no longer defined, this will effectively mean you'll have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance. # RADOS - interval: 1m input_series: @@ -1991,11 +1790,8 @@ tests: oid: 1.3.6.1.4.1.50495.1.2.1.10.1 exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#object-unfound - summary: Object(s) has been marked UNFOUND - description: | - A version of a RADOS object can not be found, even though all OSDs are up. I/O - requests for this object from clients will block (hang). Resolving this issue may - require the object to be rolled back to a prior version manually, and manually verified. + summary: Object(s) marked UNFOUND + description: The latest version of a RADOS object can not be found, even though all OSDs are up. I/O requests for this object from clients will block (hang). Resolving this issue may require the object to be rolled back to a prior version manually, and manually verified. # Generic Alerts - interval: 1m input_series: @@ -2022,7 +1818,4 @@ tests: exp_annotations: documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks/#recent-crash summary: One or more Ceph daemons have crashed, and are pending acknowledgement - description: | - One or more daemons have crashed recently, and need to be acknowledged. This notification - ensures that software crashes don't go unseen. To acknowledge a crash, use the - 'ceph crash archive ' command. 
+ description: One or more daemons have crashed recently, and need to be acknowledged. This notification ensures that software crashes do not go unseen. To acknowledge a crash, use the 'ceph crash archive ' command. diff --git a/ceph/monitoring/ceph-mixin/tests_dashboards/features/host-details.feature b/ceph/monitoring/ceph-mixin/tests_dashboards/features/host-details.feature index 5784ecbb2..51e3c5819 100644 --- a/ceph/monitoring/ceph-mixin/tests_dashboards/features/host-details.feature +++ b/ceph/monitoring/ceph-mixin/tests_dashboards/features/host-details.feature @@ -3,9 +3,9 @@ Feature: Host Details Dashboard Scenario: "Test OSD" Given the following series: | metrics | values | - | ceph_osd_metadata{back_iface="",ceph_daemon="osd.0",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 | - | ceph_osd_metadata{back_iface="",ceph_daemon="osd.1",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 | - | ceph_osd_metadata{back_iface="",ceph_daemon="osd.2",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 | + | ceph_osd_metadata{job="ceph",back_iface="",ceph_daemon="osd.0",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 | + | 
ceph_osd_metadata{job="ceph",back_iface="",ceph_daemon="osd.1",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 | + | ceph_osd_metadata{job="ceph",back_iface="",ceph_daemon="osd.2",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 | When variable `ceph_hosts` is `127.0.0.1` Then Grafana panel `OSDs` with legend `EMPTY` shows: | metrics | values | @@ -16,54 +16,54 @@ Scenario: "Test OSD" Scenario: "Test Disk IOPS - Writes - Several OSDs per device" Given the following series: | metrics | values | - | node_disk_writes_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 | - | node_disk_writes_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 | - | ceph_disk_occupation_human{ceph_daemon="osd.0 osd.1 osd.2",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.3 osd.4 osd.5",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | node_disk_writes_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 | + | node_disk_writes_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0 osd.1 osd.2",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.3 osd.4 osd.5",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `ceph_hosts` is `localhost` Then Grafana panel `$ceph_hosts Disk IOPS` with legend `{{device}}({{ceph_daemon}}) writes` shows: | metrics | values | - | {ceph_daemon="osd.0 osd.1 osd.2", device="sda", 
instance="localhost"} | 1 | - | {ceph_daemon="osd.3 osd.4 osd.5", device="sdb", instance="localhost"} | 1 | + | {job="ceph",ceph_daemon="osd.0 osd.1 osd.2", device="sda", instance="localhost"} | 1 | + | {job="ceph",ceph_daemon="osd.3 osd.4 osd.5", device="sdb", instance="localhost"} | 1 | Scenario: "Test Disk IOPS - Writes - Single OSD per device" Given the following series: | metrics | values | - | node_disk_writes_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 | - | node_disk_writes_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | node_disk_writes_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 | + | node_disk_writes_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `ceph_hosts` is `localhost` Then Grafana panel `$ceph_hosts Disk IOPS` with legend `{{device}}({{ceph_daemon}}) writes` shows: | metrics | values | - | {ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 | - | {ceph_daemon="osd.1", device="sdb", instance="localhost"} | 1 | + | {job="ceph",ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 | + | {job="ceph",ceph_daemon="osd.1", device="sdb", instance="localhost"} | 1 | Scenario: "Test Disk IOPS - Reads - Several OSDs per device" Given the following series: | metrics | values | - | node_disk_reads_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 | - | node_disk_reads_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 | - | 
ceph_disk_occupation_human{ceph_daemon="osd.0 osd.1 osd.2",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.3 osd.4 osd.5",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | node_disk_reads_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 | + | node_disk_reads_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0 osd.1 osd.2",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.3 osd.4 osd.5",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `ceph_hosts` is `localhost` Then Grafana panel `$ceph_hosts Disk IOPS` with legend `{{device}}({{ceph_daemon}}) reads` shows: | metrics | values | - | {ceph_daemon="osd.0 osd.1 osd.2", device="sda", instance="localhost"} | 1 | - | {ceph_daemon="osd.3 osd.4 osd.5", device="sdb", instance="localhost"} | 1 | + | {job="ceph",ceph_daemon="osd.0 osd.1 osd.2", device="sda", instance="localhost"} | 1 | + | {job="ceph",ceph_daemon="osd.3 osd.4 osd.5", device="sdb", instance="localhost"} | 1 | Scenario: "Test Disk IOPS - Reads - Single OSD per device" Given the following series: | metrics | values | - | node_disk_reads_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 | - | node_disk_reads_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | node_disk_reads_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 | + | node_disk_reads_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | 
ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `ceph_hosts` is `localhost` Then Grafana panel `$ceph_hosts Disk IOPS` with legend `{{device}}({{ceph_daemon}}) reads` shows: | metrics | values | - | {ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 | - | {ceph_daemon="osd.1", device="sdb", instance="localhost"} | 1 | + | {job="ceph",ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 | + | {job="ceph",ceph_daemon="osd.1", device="sdb", instance="localhost"} | 1 | # IOPS Panel - end @@ -72,44 +72,44 @@ Scenario: "Test Disk IOPS - Reads - Single OSD per device" Scenario: "Test disk throughput - read" Given the following series: | metrics | values | - | node_disk_read_bytes_total{device="sda",instance="localhost:9100"} | 10+60x1 | - | node_disk_read_bytes_total{device="sdb",instance="localhost:9100"} | 100+600x1 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | node_disk_read_bytes_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 | + | node_disk_read_bytes_total{job="ceph",device="sdb",instance="localhost:9100"} | 100+600x1 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `ceph_hosts` is `localhost` Then Grafana panel `$ceph_hosts Throughput by Disk` with legend `{{device}}({{ceph_daemon}}) read` shows: | metrics | values | - | {ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 | - | {ceph_daemon="osd.1", device="sdb", instance="localhost"} | 10 | + | {job="ceph",ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 | + | {job="ceph",ceph_daemon="osd.1", device="sdb", 
instance="localhost"} | 10 | Scenario: "Test disk throughput - write" Given the following series: | metrics | values | - | node_disk_written_bytes_total{device="sda",instance="localhost:9100"} | 10+60x1 | - | node_disk_written_bytes_total{device="sdb",instance="localhost:9100"} | 100+600x1 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | node_disk_written_bytes_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 | + | node_disk_written_bytes_total{job="ceph",device="sdb",instance="localhost:9100"} | 100+600x1 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `ceph_hosts` is `localhost` Then Grafana panel `$ceph_hosts Throughput by Disk` with legend `{{device}}({{ceph_daemon}}) write` shows: | metrics | values | - | {ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 | - | {ceph_daemon="osd.1", device="sdb", instance="localhost"} | 10 | + | {job="ceph",ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 | + | {job="ceph",ceph_daemon="osd.1", device="sdb", instance="localhost"} | 10 | # Node disk bytes written/read panel - end Scenario: "Test $ceph_hosts Disk Latency panel" Given the following series: | metrics | values | - | node_disk_write_time_seconds_total{device="sda",instance="localhost:9100"} | 10+60x1 | - | node_disk_write_time_seconds_total{device="sdb",instance="localhost:9100"} | 10+60x1 | - | node_disk_writes_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 | - | node_disk_writes_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 | - | node_disk_read_time_seconds_total{device="sda",instance="localhost:9100"} | 10+60x1 | - | 
node_disk_read_time_seconds_total{device="sdb",instance="localhost:9100"} | 10+60x1 | - | node_disk_reads_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 | - | node_disk_reads_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | node_disk_write_time_seconds_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 | + | node_disk_write_time_seconds_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 | + | node_disk_writes_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 | + | node_disk_writes_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 | + | node_disk_read_time_seconds_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 | + | node_disk_read_time_seconds_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 | + | node_disk_reads_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 | + | node_disk_reads_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `ceph_hosts` is `localhost` Then Grafana panel `$ceph_hosts Disk Latency` with legend `{{device}}({{ceph_daemon}})` shows: | metrics | values | @@ -119,13 +119,13 @@ Scenario: "Test $ceph_hosts Disk Latency panel" Scenario: "Test $ceph_hosts Disk utilization" Given the following series: | metrics | values | - | node_disk_io_time_seconds_total{device="sda",instance="localhost:9100"} | 10+60x1 | - | node_disk_io_time_seconds_total{device="sdb",instance="localhost:9100"} | 
10+60x1 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | node_disk_io_time_seconds_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 | + | node_disk_io_time_seconds_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `ceph_hosts` is `localhost` Then Grafana panel `$ceph_hosts Disk utilization` with legend `{{device}}({{ceph_daemon}})` shows: | metrics | values | - | {ceph_daemon="osd.0", device="sda", instance="localhost"} | 100 | - | {ceph_daemon="osd.1", device="sdb", instance="localhost"} | 100 | + | {job="ceph",ceph_daemon="osd.0", device="sda", instance="localhost"} | 100 | + | {job="ceph",ceph_daemon="osd.1", device="sdb", instance="localhost"} | 100 | diff --git a/ceph/monitoring/ceph-mixin/tests_dashboards/features/hosts_overview.feature b/ceph/monitoring/ceph-mixin/tests_dashboards/features/hosts_overview.feature index 595f23309..6c5eceaed 100644 --- a/ceph/monitoring/ceph-mixin/tests_dashboards/features/hosts_overview.feature +++ b/ceph/monitoring/ceph-mixin/tests_dashboards/features/hosts_overview.feature @@ -33,8 +33,8 @@ Scenario: "Test AVG Disk Utilization" | node_disk_io_time_seconds_total{device="sda",instance="localhost:9100"} | 10+60x1 | | node_disk_io_time_seconds_total{device="sdb",instance="localhost:9100"} | 10+60x1 | | node_disk_io_time_seconds_total{device="sdc",instance="localhost:9100"} | 10 2000 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | 
ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `osd_hosts` is `localhost` Then Grafana panel `AVG Disk Utilization` with legend `EMPTY` shows: | metrics | values | diff --git a/ceph/monitoring/ceph-mixin/tests_dashboards/features/osd-device-details.feature b/ceph/monitoring/ceph-mixin/tests_dashboards/features/osd-device-details.feature index 07af8692c..0d6ca8b17 100644 --- a/ceph/monitoring/ceph-mixin/tests_dashboards/features/osd-device-details.feature +++ b/ceph/monitoring/ceph-mixin/tests_dashboards/features/osd-device-details.feature @@ -7,8 +7,8 @@ Scenario: "Test Physical Device Latency for $osd - Reads" | node_disk_reads_completed_total{device="sdb",instance="localhost"} | 10 60 | | node_disk_read_time_seconds_total{device="sda",instance="localhost"} | 100 600 | | node_disk_read_time_seconds_total{device="sdb",instance="localhost"} | 100 600 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `osd` is `osd.0` Then Grafana panel `Physical Device Latency for $osd` with legend `{{instance}}/{{device}} Reads` shows: | metrics | values | @@ -21,8 +21,8 @@ Scenario: "Test Physical Device Latency for $osd - Writes" | node_disk_writes_completed_total{device="sdb",instance="localhost"} | 10 60 | | node_disk_write_time_seconds_total{device="sda",instance="localhost"} | 100 600 | | node_disk_write_time_seconds_total{device="sdb",instance="localhost"} | 100 600 | - | 
ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `osd` is `osd.0` Then Grafana panel `Physical Device Latency for $osd` with legend `{{instance}}/{{device}} Writes` shows: | metrics | values | @@ -33,8 +33,8 @@ Scenario: "Test Physical Device R/W IOPS for $osd - Writes" | metrics | values | | node_disk_writes_completed_total{device="sda",instance="localhost"} | 10 100 | | node_disk_writes_completed_total{device="sdb",instance="localhost"} | 10 100 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `osd` is `osd.0` Then Grafana panel `Physical Device R/W IOPS for $osd` with legend `{{device}} on {{instance}} Writes` shows: | metrics | values | @@ -45,8 +45,8 @@ Scenario: "Test Physical Device R/W IOPS for $osd - Reads" | metrics | values | | node_disk_reads_completed_total{device="sda",instance="localhost"} | 10 100 | | node_disk_reads_completed_total{device="sdb",instance="localhost"} | 10 100 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | 
ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `osd` is `osd.0` Then Grafana panel `Physical Device R/W IOPS for $osd` with legend `{{device}} on {{instance}} Reads` shows: | metrics | values | @@ -57,8 +57,8 @@ Scenario: "Test Physical Device R/W Bytes for $osd - Reads" | metrics | values | | node_disk_reads_completed_total{device="sda",instance="localhost"} | 10 100 | | node_disk_reads_completed_total{device="sdb",instance="localhost"} | 10 100 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `osd` is `osd.0` Then Grafana panel `Physical Device R/W IOPS for $osd` with legend `{{device}} on {{instance}} Reads` shows: | metrics | values | @@ -69,8 +69,8 @@ Scenario: "Test Physical Device R/W Bytes for $osd - Writes" | metrics | values | | node_disk_writes_completed_total{device="sda",instance="localhost"} | 10 100 | | node_disk_writes_completed_total{device="sdb",instance="localhost"} | 10 100 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `osd` is `osd.0` Then Grafana panel 
`Physical Device R/W IOPS for $osd` with legend `{{device}} on {{instance}} Writes` shows: | metrics | values | @@ -80,8 +80,8 @@ Scenario: "Test Physical Device Util% for $osd" Given the following series: | metrics | values | | node_disk_io_time_seconds_total{device="sda",instance="localhost:9100"} | 10 100 | - | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | - | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 | + | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 | When variable `osd` is `osd.0` Then Grafana panel `Physical Device Util% for $osd` with legend `{{device}} on {{instance}}` shows: | metrics | values | diff --git a/ceph/monitoring/ceph-mixin/tests_dashboards/features/radosgw-detail.feature b/ceph/monitoring/ceph-mixin/tests_dashboards/features/radosgw-detail.feature index bcc793a21..e0016c507 100644 --- a/ceph/monitoring/ceph-mixin/tests_dashboards/features/radosgw-detail.feature +++ b/ceph/monitoring/ceph-mixin/tests_dashboards/features/radosgw-detail.feature @@ -10,7 +10,7 @@ Scenario: "Test $rgw_servers GET/PUT Latencies - GET" And variable `rgw_servers` is `rgw.foo` Then Grafana panel `$rgw_servers GET/PUT Latencies` with legend `GET {{ceph_daemon}}` shows: | metrics | values | - | {ceph_daemon="rgw.foo", instance_id="58892247"} | 2.5000000000000004 | + | {ceph_daemon="rgw.foo", instance_id="58892247"} | 1.5 | Scenario: "Test $rgw_servers GET/PUT Latencies - PUT" Given the following series: @@ -33,7 +33,7 @@ Scenario: "Test Bandwidth by HTTP Operation - GET" And variable `rgw_servers` is `rgw.1` Then Grafana panel `Bandwidth by HTTP Operation` with legend `GETs {{ceph_daemon}}` shows: | metrics | values | - | {ceph_daemon="rgw.1", instance="127.0.0.1", 
instance_id="92806566", job="ceph"} | 1.6666666666666667 | + | {ceph_daemon="rgw.1", instance="127.0.0.1", instance_id="92806566", job="ceph"} | 1.5 | Scenario: "Test Bandwidth by HTTP Operation - PUT" Given the following series: @@ -44,7 +44,7 @@ Scenario: "Test Bandwidth by HTTP Operation - PUT" And variable `rgw_servers` is `rgw.1` Then Grafana panel `Bandwidth by HTTP Operation` with legend `PUTs {{ceph_daemon}}` shows: | metrics | values | - | {ceph_daemon="rgw.1", instance="127.0.0.1", instance_id="92806566", job="ceph"} | 1 | + | {ceph_daemon="rgw.1", instance="127.0.0.1", instance_id="92806566", job="ceph"} | 7.5E-01 | Scenario: "Test HTTP Request Breakdown - Requests Failed" Given the following series: @@ -55,7 +55,7 @@ Scenario: "Test HTTP Request Breakdown - Requests Failed" And variable `rgw_servers` is `rgw.foo` Then Grafana panel `HTTP Request Breakdown` with legend `Requests Failed {{ceph_daemon}}` shows: | metrics | values | - | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 6.666666666666667e-02 | + | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1E-01 | Scenario: "Test HTTP Request Breakdown - GET" Given the following series: @@ -66,7 +66,7 @@ Scenario: "Test HTTP Request Breakdown - GET" And variable `rgw_servers` is `rgw.foo` Then Grafana panel `HTTP Request Breakdown` with legend `GETs {{ceph_daemon}}` shows: | metrics | values | - | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | .6666666666666666 | + | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1.1666666666666667 | Scenario: "Test HTTP Request Breakdown - PUT" Given the following series: @@ -77,7 +77,7 @@ Scenario: "Test HTTP Request Breakdown - PUT" And variable `rgw_servers` is `rgw.foo` Then Grafana panel `HTTP Request Breakdown` with legend `PUTs {{ceph_daemon}}` shows: | metrics | values | - | {ceph_daemon="rgw.foo", instance="127.0.0.1", 
instance_id="58892247", job="ceph"} | 2.3333333333333335 | + | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1.5 | Scenario: "Test HTTP Request Breakdown - Other" Given the following series: @@ -101,7 +101,7 @@ Scenario: "Test Workload Breakdown - Failures" And variable `rgw_servers` is `rgw.foo` Then Grafana panel `Workload Breakdown` with legend `Failures {{ceph_daemon}}` shows: | metrics | values | - | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 6.666666666666667e-02 | + | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1E-01 | Scenario: "Test Workload Breakdown - GETs" Given the following series: @@ -112,7 +112,7 @@ Scenario: "Test Workload Breakdown - GETs" And variable `rgw_servers` is `rgw.foo` Then Grafana panel `Workload Breakdown` with legend `GETs {{ceph_daemon}}` shows: | metrics | values | - | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | .6666666666666666 | + | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1.1666666666666667 | Scenario: "Test Workload Breakdown - PUTs" Given the following series: @@ -123,7 +123,7 @@ Scenario: "Test Workload Breakdown - PUTs" And variable `rgw_servers` is `rgw.foo` Then Grafana panel `Workload Breakdown` with legend `PUTs {{ceph_daemon}}` shows: | metrics | values | - | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 2.3333333333333335 | + | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1.5 | Scenario: "Test Workload Breakdown - Other" Given the following series: diff --git a/ceph/monitoring/ceph-mixin/tests_dashboards/features/radosgw_overview.feature b/ceph/monitoring/ceph-mixin/tests_dashboards/features/radosgw_overview.feature index 69e46b1d5..b095392a2 100644 --- a/ceph/monitoring/ceph-mixin/tests_dashboards/features/radosgw_overview.feature +++ 
b/ceph/monitoring/ceph-mixin/tests_dashboards/features/radosgw_overview.feature @@ -9,7 +9,7 @@ Scenario: "Test Average GET Latencies" When interval is `30s` Then Grafana panel `Average GET/PUT Latencies` with legend `GET AVG` shows: | metrics | values | - | {ceph_daemon="rgw.foo",instance="127.0.0.1", instance_id="58892247", job="ceph"} | 2.5000000000000004 | + | {ceph_daemon="rgw.foo",instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1.5 | Scenario: "Test Average PUT Latencies" Given the following series: @@ -30,7 +30,7 @@ Scenario: "Test Total Requests/sec by RGW Instance" When interval is `30s` Then Grafana panel `Total Requests/sec by RGW Instance` with legend `{{rgw_host}}` shows: | metrics | values | - | {rgw_host="1"} | 1.6666666666666667 | + | {rgw_host="1"} | 1.5 | Scenario: "Test GET Latencies by RGW Instance" Given the following series: @@ -41,7 +41,7 @@ Scenario: "Test GET Latencies by RGW Instance" When interval is `30s` Then Grafana panel `GET Latencies by RGW Instance` with legend `{{rgw_host}}` shows: | metrics | values | - | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph", rgw_host="foo"} | 2.5000000000000004 | + | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph", rgw_host="foo"} | 1.5 | Scenario: "Test Bandwidth Consumed by Type- GET" Given the following series: @@ -51,7 +51,7 @@ Scenario: "Test Bandwidth Consumed by Type- GET" And interval is `30s` Then Grafana panel `Bandwidth Consumed by Type` with legend `GETs` shows: | metrics | values | - | {} | 1.6666666666666667 | + | {} | 1.5 | Scenario: "Test Bandwidth Consumed by Type- PUT" Given the following series: @@ -61,7 +61,7 @@ Scenario: "Test Bandwidth Consumed by Type- PUT" And interval is `30s` Then Grafana panel `Bandwidth Consumed by Type` with legend `PUTs` shows: | metrics | values | - | {} | 1 | + | {} | 7.5E-01 | Scenario: "Test Bandwidth by RGW Instance" Given the following series: @@ -73,7 +73,7 @@ Scenario: 
"Test Bandwidth by RGW Instance" And interval is `30s` Then Grafana panel `Bandwidth by RGW Instance` with legend `{{rgw_host}}` shows: | metrics | values | - | {ceph_daemon="rgw.1", instance_id="92806566", rgw_host="1"} | 2.666666666666667 | + | {ceph_daemon="rgw.1", instance_id="92806566", rgw_host="1"} | 2.25 | Scenario: "Test PUT Latencies by RGW Instance" Given the following series: @@ -90,8 +90,8 @@ Scenario: "Test PUT Latencies by RGW Instance" Scenario: "Test Total backend responses by HTTP code" Given the following series: | metrics | values | - | haproxy_backend_http_responses_total{code="200",instance="ingress.rgw.1",proxy="backend"} | 10 100 | - | haproxy_backend_http_responses_total{code="404",instance="ingress.rgw.1",proxy="backend"} | 20 200 | + | haproxy_backend_http_responses_total{job="haproxy",code="200",instance="ingress.rgw.1",proxy="backend"} | 10 100 | + | haproxy_backend_http_responses_total{job="haproxy",code="404",instance="ingress.rgw.1",proxy="backend"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` When variable `code` is `200` Then Grafana panel `Total responses by HTTP code` with legend `Backend {{ code }}` shows: @@ -101,8 +101,8 @@ Scenario: "Test Total backend responses by HTTP code" Scenario: "Test Total frontend responses by HTTP code" Given the following series: | metrics | values | - | haproxy_frontend_http_responses_total{code="200",instance="ingress.rgw.1",proxy="frontend"} | 10 100 | - | haproxy_frontend_http_responses_total{code="404",instance="ingress.rgw.1",proxy="frontend"} | 20 200 | + | haproxy_frontend_http_responses_total{job="haproxy",code="200",instance="ingress.rgw.1",proxy="frontend"} | 10 100 | + | haproxy_frontend_http_responses_total{job="haproxy",code="404",instance="ingress.rgw.1",proxy="frontend"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` When variable `code` is `200` Then Grafana panel `Total responses by HTTP code` with legend `Frontend {{ code }}` shows: @@ -112,8 
+112,8 @@ Scenario: "Test Total frontend responses by HTTP code" Scenario: "Test Total http frontend requests by instance" Given the following series: | metrics | values | - | haproxy_frontend_http_requests_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_frontend_http_requests_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_frontend_http_requests_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_frontend_http_requests_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Total requests / responses` with legend `Requests` shows: | metrics | values | @@ -122,8 +122,8 @@ Scenario: "Test Total http frontend requests by instance" Scenario: "Test Total backend response errors by instance" Given the following series: | metrics | values | - | haproxy_backend_response_errors_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_backend_response_errors_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_backend_response_errors_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_backend_response_errors_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Total requests / responses` with legend `Response errors` shows: | metrics | values | @@ -132,8 +132,8 @@ Scenario: "Test Total backend response errors by instance" Scenario: "Test Total frontend requests errors by instance" Given the following series: | metrics | values | - | haproxy_frontend_request_errors_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_frontend_request_errors_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_frontend_request_errors_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 | + | 
haproxy_frontend_request_errors_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Total requests / responses` with legend `Requests errors` shows: | metrics | values | @@ -142,8 +142,8 @@ Scenario: "Test Total frontend requests errors by instance" Scenario: "Test Total backend redispatch warnings by instance" Given the following series: | metrics | values | - | haproxy_backend_redispatch_warnings_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_backend_redispatch_warnings_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_backend_redispatch_warnings_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_backend_redispatch_warnings_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Total requests / responses` with legend `Backend redispatch` shows: | metrics | values | @@ -152,8 +152,8 @@ Scenario: "Test Total backend redispatch warnings by instance" Scenario: "Test Total backend retry warnings by instance" Given the following series: | metrics | values | - | haproxy_backend_retry_warnings_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_backend_retry_warnings_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_backend_retry_warnings_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_backend_retry_warnings_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Total requests / responses` with legend `Backend retry` shows: | metrics | values | @@ -162,8 +162,8 @@ Scenario: "Test Total backend retry warnings by instance" Scenario: "Test Total frontend requests denied by instance" Given the following series: | metrics | values | - | 
haproxy_frontend_requests_denied_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_frontend_requests_denied_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_frontend_requests_denied_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_frontend_requests_denied_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Total requests / responses` with legend `Request denied` shows: | metrics | values | @@ -172,8 +172,8 @@ Scenario: "Test Total frontend requests denied by instance" Scenario: "Test Total backend current queue by instance" Given the following series: | metrics | values | - | haproxy_backend_current_queue{proxy="backend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_backend_current_queue{proxy="backend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_backend_current_queue{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_backend_current_queue{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Total requests / responses` with legend `Backend Queued` shows: | metrics | values | @@ -182,8 +182,8 @@ Scenario: "Test Total backend current queue by instance" Scenario: "Test Total frontend connections by instance" Given the following series: | metrics | values | - | haproxy_frontend_connections_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_frontend_connections_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_frontend_connections_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_frontend_connections_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Total number of connections` with legend `Front` shows: | 
metrics | values | @@ -192,8 +192,8 @@ Scenario: "Test Total frontend connections by instance" Scenario: "Test Total backend connections attempts by instance" Given the following series: | metrics | values | - | haproxy_backend_connection_attempts_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_backend_connection_attempts_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_backend_connection_attempts_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_backend_connection_attempts_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Total number of connections` with legend `Back` shows: | metrics | values | @@ -202,8 +202,8 @@ Scenario: "Test Total backend connections attempts by instance" Scenario: "Test Total backend connections error by instance" Given the following series: | metrics | values | - | haproxy_backend_connection_errors_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_backend_connection_errors_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_backend_connection_errors_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_backend_connection_errors_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Total number of connections` with legend `Back errors` shows: | metrics | values | @@ -212,8 +212,8 @@ Scenario: "Test Total backend connections error by instance" Scenario: "Test Total frontend bytes incoming by instance" Given the following series: | metrics | values | - | haproxy_frontend_bytes_in_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_frontend_bytes_in_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 | + | 
haproxy_frontend_bytes_in_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_frontend_bytes_in_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Current total of incoming / outgoing bytes` with legend `IN Front` shows: | metrics | values | @@ -222,8 +222,8 @@ Scenario: "Test Total frontend bytes incoming by instance" Scenario: "Test Total frontend bytes outgoing by instance" Given the following series: | metrics | values | - | haproxy_frontend_bytes_out_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_frontend_bytes_out_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_frontend_bytes_out_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_frontend_bytes_out_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Current total of incoming / outgoing bytes` with legend `OUT Front` shows: | metrics | values | @@ -232,8 +232,8 @@ Scenario: "Test Total frontend bytes outgoing by instance" Scenario: "Test Total backend bytes incoming by instance" Given the following series: | metrics | values | - | haproxy_backend_bytes_in_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_backend_bytes_in_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_backend_bytes_in_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_backend_bytes_in_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Current total of incoming / outgoing bytes` with legend `IN Back` shows: | metrics | values | @@ -242,8 +242,8 @@ Scenario: "Test Total backend bytes incoming by instance" Scenario: "Test Total backend bytes outgoing by instance" Given the 
following series: | metrics | values | - | haproxy_backend_bytes_out_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 | - | haproxy_backend_bytes_out_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 | + | haproxy_backend_bytes_out_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 | + | haproxy_backend_bytes_out_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 | When variable `ingress_service` is `ingress.rgw.1` Then Grafana panel `Current total of incoming / outgoing bytes` with legend `OUT Back` shows: | metrics | values | diff --git a/ceph/monitoring/ceph-mixin/tests_dashboards/util.py b/ceph/monitoring/ceph-mixin/tests_dashboards/util.py index 4310eb207..1fce6559d 100644 --- a/ceph/monitoring/ceph-mixin/tests_dashboards/util.py +++ b/ceph/monitoring/ceph-mixin/tests_dashboards/util.py @@ -30,6 +30,7 @@ def get_dashboards_data() -> Dict[str, Any]: data['stats'][str(file)] = {'total': 0, 'tested': 0} add_dashboard_queries(data, dashboard_data, str(file)) add_dashboard_variables(data, dashboard_data) + add_default_dashboards_variables(data) return data @@ -76,6 +77,10 @@ def add_dashboard_variables(data: Dict[str, Any], dashboard_data: Dict[str, Any] if 'name' in variable: data['variables'][variable['name']] = 'UNSET VARIABLE' +def add_default_dashboards_variables(data: Dict[str, Any]) -> None: + data['variables']['job'] = 'ceph' + data['variables']['job_haproxy'] = 'haproxy' + data['variables']['__rate_interval'] = '1m' def replace_grafana_expr_variables(expr: str, variable: str, value: Any) -> str: """ Replace grafana variables in expression with a value diff --git a/ceph/monitoring/ceph-mixin/tox.ini b/ceph/monitoring/ceph-mixin/tox.ini index e6cae299d..821a5cd56 100644 --- a/ceph/monitoring/ceph-mixin/tox.ini +++ b/ceph/monitoring/ceph-mixin/tox.ini @@ -22,7 +22,7 @@ whitelist_externals = sh description = check: Ensure that auto-generated files matches the current version - fix: Update generated files 
from jsonnet filse with latest changes + fix: Update generated files from jsonnet file with latest changes lint: Test if jsonnet files are linted (without any update) deps = -rrequirements-grafonnet.txt @@ -56,7 +56,7 @@ whitelist_externals = commands = behave tests_dashboards/features -[testenv:alerts-{check,lint}] +[testenv:alerts-{fix,check,lint}] deps = -rrequirements-alerts.txt pytest @@ -64,6 +64,7 @@ depends = grafonnet-check whitelist_externals = promtool commands = + fix: jsonnet -J vendor -S alerts.jsonnet -o prometheus_alerts.yml lint: promtool check rules prometheus_alerts.yml test: pytest -rA tests_alerts/test_syntax.py tests_alerts/test_unittests.py python3 ./tests_alerts/validate_rules.py diff --git a/ceph/qa/standalone/misc/test-mclock-profile-switch.sh b/ceph/qa/standalone/misc/test-mclock-profile-switch.sh new file mode 100644 index 000000000..1bbd6ee81 --- /dev/null +++ b/ceph/qa/standalone/misc/test-mclock-profile-switch.sh @@ -0,0 +1,198 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2022 Red Hat +# +# Author: Sridhar Seshasayee +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. 
+# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7124" # git grep '\<7124\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + CEPH_ARGS+="--debug-bluestore 20 " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_profile_builtin_to_custom() { + local dir=$1 + local OSDS=3 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=$OSDS || return 1 + run_mgr $dir x || return 1 + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd --osd_op_queue=mclock_scheduler || return 1 + done + + # Verify that the default mclock profile is set on the OSDs + for id in $(seq 0 $(expr $OSDS - 1)) + do + local mclock_profile=$(ceph config get osd.$id osd_mclock_profile) + test "$mclock_profile" = "high_client_ops" || return 1 + done + + # Change the mclock profile to 'custom' + ceph config set osd osd_mclock_profile custom || return 1 + + # Verify that the mclock profile is set to 'custom' on the OSDs + for id in $(seq 0 $(expr $OSDS - 1)) + do + local mclock_profile=$(ceph config get osd.$id osd_mclock_profile) + test "$mclock_profile" = "custom" || return 1 + done + + # Change a mclock config param and confirm the change + local client_res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \ + osd.$id) config get osd_mclock_scheduler_client_res | \ + jq .osd_mclock_scheduler_client_res | bc) + echo "client_res = $client_res" + local client_res_new=$(expr $client_res + 10) + echo "client_res_new = $client_res_new" + ceph config set osd osd_mclock_scheduler_client_res \ + $client_res_new || return 1 + for id in $(seq 0 $(expr $OSDS - 1)) + do + # Check value in config monitor db + local res=$(ceph config get osd.$id \ + 
osd_mclock_scheduler_client_res) || return 1 + test $res -eq $client_res_new || return 1 + # Check value in the in-memory 'values' map + res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \ + osd.$id) config get osd_mclock_scheduler_client_res | \ + jq .osd_mclock_scheduler_client_res | bc) + test $res -eq $client_res_new || return 1 + done + + teardown $dir || return 1 +} + +function TEST_profile_custom_to_builtin() { + local dir=$1 + local OSDS=3 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=$OSDS || return 1 + run_mgr $dir x || return 1 + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd --osd_op_queue=mclock_scheduler || return 1 + done + + # Verify that the default mclock profile is set on the OSDs + for id in $(seq 0 $(expr $OSDS - 1)) + do + local mclock_profile=$(ceph config get osd.$id osd_mclock_profile) + test "$mclock_profile" = "high_client_ops" || return 1 + done + + # Change the mclock profile to 'custom' + ceph config set osd osd_mclock_profile custom || return 1 + + # Verify that the mclock profile is set to 'custom' on the OSDs + for id in $(seq 0 $(expr $OSDS - 1)) + do + local mclock_profile=$(ceph config get osd.$id osd_mclock_profile) + test "$mclock_profile" = "custom" || return 1 + done + + # Save the original client reservations allocated to the OSDs + local client_res=() + for id in $(seq 0 $(expr $OSDS - 1)) + do + client_res+=( $(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \ + osd.$id) config get osd_mclock_scheduler_client_res | \ + jq .osd_mclock_scheduler_client_res | bc) ) + echo "Original client_res for osd.$id = ${client_res[$id]}" + done + + # Change a mclock config param and confirm the change + local client_res_new=$(expr ${client_res[0]} + 10) + echo "client_res_new = $client_res_new" + ceph config set osd osd_mclock_scheduler_client_res \ + $client_res_new || return 1 + for id in $(seq 0 $(expr $OSDS - 1)) + do + # Check value in config monitor db + local res=$(ceph 
config get osd.$id \ + osd_mclock_scheduler_client_res) || return 1 + test $res -eq $client_res_new || return 1 + # Check value in the in-memory 'values' map + res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \ + osd.$id) config get osd_mclock_scheduler_client_res | \ + jq .osd_mclock_scheduler_client_res | bc) + test $res -eq $client_res_new || return 1 + done + + # Switch the mclock profile back to the original built-in profile. + # The config subsystem prevents the overwrite of the changed QoS config + # option above i.e. osd_mclock_scheduler_client_res. This fact is verified + # before proceeding to remove the entry from the config monitor db. After + # the config entry is removed, the original value for the config option is + # restored and is verified. + ceph config set osd osd_mclock_profile high_client_ops || return 1 + # Verify that the mclock profile is set to 'high_client_ops' on the OSDs + for id in $(seq 0 $(expr $OSDS - 1)) + do + local mclock_profile=$(ceph config get osd.$id osd_mclock_profile) + test "$mclock_profile" = "high_client_ops" || return 1 + done + + # Verify that the new value is still in effect + for id in $(seq 0 $(expr $OSDS - 1)) + do + # Check value in config monitor db + local res=$(ceph config get osd.$id \ + osd_mclock_scheduler_client_res) || return 1 + test $res -eq $client_res_new || return 1 + # Check value in the in-memory 'values' map + res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \ + osd.$id) config get osd_mclock_scheduler_client_res | \ + jq .osd_mclock_scheduler_client_res | bc) + test $res -eq $client_res_new || return 1 + done + + # Remove the changed QoS config option from monitor db + ceph config rm osd osd_mclock_scheduler_client_res || return 1 + + # Verify that the original values are now restored + for id in $(seq 0 $(expr $OSDS - 1)) + do + # Check value in the in-memory 'values' map + res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \ + osd.$id) config get 
osd_mclock_scheduler_client_res | \ + jq .osd_mclock_scheduler_client_res | bc) + test $res -eq ${client_res[$id]} || return 1 + done + + teardown $dir || return 1 +} + +main test-mclock-profile-switch "$@" + +# Local Variables: +# compile-command: "cd build ; make -j4 && \ +# ../qa/run-standalone.sh test-mclock-profile-switch.sh" +# End: diff --git a/ceph/qa/suites/fs/full/tasks/mgr-osd-full.yaml b/ceph/qa/suites/fs/full/tasks/mgr-osd-full.yaml index 88d6527bf..b4f673e39 100644 --- a/ceph/qa/suites/fs/full/tasks/mgr-osd-full.yaml +++ b/ceph/qa/suites/fs/full/tasks/mgr-osd-full.yaml @@ -15,7 +15,17 @@ overrides: bluestore block size: 1073741824 tasks: - workunit: - cleanup: false + cleanup: true clients: client.0: - fs/full/subvolume_rm.sh +- workunit: + cleanup: true + clients: + client.0: + - fs/full/subvolume_clone.sh +- workunit: + cleanup: true + clients: + client.0: + - fs/full/subvolume_snapshot_rm.sh diff --git a/ceph/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml b/ceph/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml new file mode 100644 index 000000000..7bbcf000f --- /dev/null +++ b/ceph/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml @@ -0,0 +1,30 @@ +overrides: + ceph: + conf: + mgr: + debug mgr: 20 + debug ms: 1 + debug finisher: 20 + debug client: 20 + log-whitelist: + - OSD full dropping all updates + - OSD near full + - pausewr flag + - failsafe engaged, dropping updates + - failsafe disengaged, no longer dropping + - is full \(reached quota + - POOL_FULL + - POOL_BACKFILLFULL + +overrides: + kclient: + snapdirname: .customsnapkernel + ceph: + conf: + client: + client snapdir: .customsnapfuse + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_snap_schedules.TestSnapSchedulesSnapdir diff --git a/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml b/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml index 6ff6195bf..ca026c45f 100644 --- 
a/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml +++ b/ceph/qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml @@ -9,3 +9,4 @@ tasks: all: - fs/misc/acl.sh - fs/misc/chmod.sh + - fs/misc/dac_override.sh diff --git a/ceph/qa/suites/orch/cephadm/workunits/0-distro b/ceph/qa/suites/orch/cephadm/workunits/0-distro new file mode 120000 index 000000000..4b341719d --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/workunits/0-distro @@ -0,0 +1 @@ +.qa/distros/container-hosts \ No newline at end of file diff --git a/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml b/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml index dc8671b7d..5d80e6bd3 100644 --- a/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml +++ b/ceph/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml @@ -11,3 +11,4 @@ op_workload: - rbd/qemu_dynamic_features.sh env: IMAGE_NAME: client.0.1-clone + timeout: 0 diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/1-base b/ceph/qa/suites/rbd/persistent-writeback-cache/1-base deleted file mode 120000 index fd10a859d..000000000 --- a/ceph/qa/suites/rbd/persistent-writeback-cache/1-base +++ /dev/null @@ -1 +0,0 @@ -../thrash/base \ No newline at end of file diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/big-cache.yaml b/ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/big-cache.yaml deleted file mode 100644 index 7ac3f31da..000000000 --- a/ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/big-cache.yaml +++ /dev/null @@ -1,15 +0,0 @@ -overrides: - ceph: - conf: - client: - rbd_persistent_cache_path: /home/ubuntu/cephtest/write_back_cache - rbd_persistent_cache_size: 8589934592 - rbd_plugins: pwl_cache - rbd_default_features: 61 -tasks: -- exec: - client.0: - - "mkdir -m 777 /home/ubuntu/cephtest/write_back_cache" -- exec_on_cleanup: - client.0: - - "rm -rf /home/ubuntu/cephtest/write_back_cache" diff --git 
a/ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/cache.yaml b/ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/cache.yaml deleted file mode 100644 index 9265e7dd8..000000000 --- a/ceph/qa/suites/rbd/persistent-writeback-cache/4-pool/cache.yaml +++ /dev/null @@ -1,15 +0,0 @@ -overrides: - ceph: - conf: - client: - rbd_persistent_cache_path: /home/ubuntu/cephtest/write_back_cache - rbd_persistent_cache_size: 1073741824 - rbd_plugins: pwl_cache - rbd_default_features: 61 -tasks: -- exec: - client.0: - - "mkdir -m 777 /home/ubuntu/cephtest/write_back_cache" -- exec_on_cleanup: - client.0: - - "rm -rf /home/ubuntu/cephtest/write_back_cache" diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/c_api_tests_with_defaults.yaml b/ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/c_api_tests_with_defaults.yaml deleted file mode 120000 index 949032725..000000000 --- a/ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/c_api_tests_with_defaults.yaml +++ /dev/null @@ -1 +0,0 @@ -../../librbd/workloads/c_api_tests_with_defaults.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/recovery.yaml b/ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/recovery.yaml deleted file mode 100644 index 63a0c9dcf..000000000 --- a/ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/recovery.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- rbd.create_image: - client.0: - image_name: testimage - image_size: 10 - image_format: 2 -- exec: - client.0: - - "timeout 10s rbd bench --io-pattern rand --io-type write testimage || true" - - "rbd bench --io-type write --io-pattern rand --io-total 32M testimage" diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/.qa b/ceph/qa/suites/rbd/pwl-cache/.qa similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/.qa rename to ceph/qa/suites/rbd/pwl-cache/.qa diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/% 
b/ceph/qa/suites/rbd/pwl-cache/home/% similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/% rename to ceph/qa/suites/rbd/pwl-cache/home/% diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/2-cluster/.qa b/ceph/qa/suites/rbd/pwl-cache/home/.qa similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/2-cluster/.qa rename to ceph/qa/suites/rbd/pwl-cache/home/.qa diff --git a/ceph/qa/suites/rbd/pwl-cache/home/1-base b/ceph/qa/suites/rbd/pwl-cache/home/1-base new file mode 120000 index 000000000..89c3c7e84 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/home/1-base @@ -0,0 +1 @@ +../../basic/base/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/2-cluster/+ b/ceph/qa/suites/rbd/pwl-cache/home/2-cluster/+ similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/2-cluster/+ rename to ceph/qa/suites/rbd/pwl-cache/home/2-cluster/+ diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/.qa b/ceph/qa/suites/rbd/pwl-cache/home/2-cluster/.qa similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/.qa rename to ceph/qa/suites/rbd/pwl-cache/home/2-cluster/.qa diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/2-cluster/fix-2.yaml b/ceph/qa/suites/rbd/pwl-cache/home/2-cluster/fix-2.yaml similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/2-cluster/fix-2.yaml rename to ceph/qa/suites/rbd/pwl-cache/home/2-cluster/fix-2.yaml diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/2-cluster/openstack.yaml b/ceph/qa/suites/rbd/pwl-cache/home/2-cluster/openstack.yaml similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/2-cluster/openstack.yaml rename to ceph/qa/suites/rbd/pwl-cache/home/2-cluster/openstack.yaml diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/3-supported-random-distro$ 
b/ceph/qa/suites/rbd/pwl-cache/home/3-supported-random-distro$ similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/3-supported-random-distro$ rename to ceph/qa/suites/rbd/pwl-cache/home/3-supported-random-distro$ diff --git a/ceph/qa/suites/rbd/pwl-cache/home/4-cache-path.yaml b/ceph/qa/suites/rbd/pwl-cache/home/4-cache-path.yaml new file mode 100644 index 000000000..be4641b01 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/home/4-cache-path.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + client: + rbd_persistent_cache_path: /home/ubuntu/cephtest/rbd-pwl-cache + rbd_plugins: pwl_cache +tasks: +- exec: + client.0: + - "mkdir -m 777 /home/ubuntu/cephtest/rbd-pwl-cache" +- exec_on_cleanup: + client.0: + - "rm -rf /home/ubuntu/cephtest/rbd-pwl-cache" diff --git a/ceph/qa/suites/rbd/pwl-cache/home/5-cache-mode/.qa b/ceph/qa/suites/rbd/pwl-cache/home/5-cache-mode/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/home/5-cache-mode/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/5-cache-mode/rwl.yaml b/ceph/qa/suites/rbd/pwl-cache/home/5-cache-mode/rwl.yaml similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/5-cache-mode/rwl.yaml rename to ceph/qa/suites/rbd/pwl-cache/home/5-cache-mode/rwl.yaml diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/5-cache-mode/ssd.yaml b/ceph/qa/suites/rbd/pwl-cache/home/5-cache-mode/ssd.yaml similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/5-cache-mode/ssd.yaml rename to ceph/qa/suites/rbd/pwl-cache/home/5-cache-mode/ssd.yaml diff --git a/ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/.qa b/ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
a/ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/1G.yaml b/ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/1G.yaml new file mode 100644 index 000000000..53fcddcdf --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/1G.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd_persistent_cache_size: 1073741824 diff --git a/ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/8G.yaml b/ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/8G.yaml new file mode 100644 index 000000000..b53d36852 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/home/6-cache-size/8G.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd_persistent_cache_size: 8589934592 diff --git a/ceph/qa/suites/rbd/pwl-cache/home/7-workloads/.qa b/ceph/qa/suites/rbd/pwl-cache/home/7-workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/home/7-workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/pwl-cache/home/7-workloads/c_api_tests_with_defaults.yaml b/ceph/qa/suites/rbd/pwl-cache/home/7-workloads/c_api_tests_with_defaults.yaml new file mode 120000 index 000000000..359001f8f --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/home/7-workloads/c_api_tests_with_defaults.yaml @@ -0,0 +1 @@ +../../../librbd/workloads/c_api_tests_with_defaults.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/fio.yaml b/ceph/qa/suites/rbd/pwl-cache/home/7-workloads/fio.yaml similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/fio.yaml rename to ceph/qa/suites/rbd/pwl-cache/home/7-workloads/fio.yaml diff --git a/ceph/qa/suites/rbd/pwl-cache/home/7-workloads/recovery.yaml b/ceph/qa/suites/rbd/pwl-cache/home/7-workloads/recovery.yaml new file mode 100644 index 000000000..3017beb22 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/home/7-workloads/recovery.yaml @@ -0,0 +1,9 @@ +tasks: +- rbd.create_image: + client.0: + 
image_name: testimage + image_size: 10240 + image_format: 2 +- rbd_pwl_cache_recovery: + client.0: + image_name: testimage diff --git a/ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/edit-site-name-modal/edit-site-name-modal.component.scss b/ceph/qa/suites/rbd/pwl-cache/tmpfs/% similarity index 100% rename from ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/edit-site-name-modal/edit-site-name-modal.component.scss rename to ceph/qa/suites/rbd/pwl-cache/tmpfs/% diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/.qa b/ceph/qa/suites/rbd/pwl-cache/tmpfs/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/1-base b/ceph/qa/suites/rbd/pwl-cache/tmpfs/1-base new file mode 120000 index 000000000..89c3c7e84 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/1-base @@ -0,0 +1 @@ +../../basic/base/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/+ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/+ new file mode 100644 index 000000000..e69de29bb diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/.qa b/ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/fix-2.yaml b/ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/fix-2.yaml new file mode 100644 index 000000000..dbccecbce --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/fix-2.yaml @@ -0,0 +1,3 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1] +- [mon.b, mgr.y, osd.2, osd.3, client.0] diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/openstack.yaml b/ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/openstack.yaml new file mode 100644 index 000000000..b113e4f2e 
--- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/3-supported-random-distro$ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/3-supported-random-distro$ new file mode 120000 index 000000000..0862b4457 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/3-supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/4-cache-path.yaml b/ceph/qa/suites/rbd/pwl-cache/tmpfs/4-cache-path.yaml new file mode 100644 index 000000000..b5578a0ae --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/4-cache-path.yaml @@ -0,0 +1,22 @@ +overrides: + ceph: + conf: + client: + rbd_persistent_cache_path: /home/ubuntu/cephtest/rbd-pwl-cache + rbd_plugins: pwl_cache +tasks: +- exec: + client.0: + - "mkdir /home/ubuntu/cephtest/tmpfs" + - "mkdir /home/ubuntu/cephtest/rbd-pwl-cache" + - "sudo mount -t tmpfs -o size=20G tmpfs /home/ubuntu/cephtest/tmpfs" + - "truncate -s 20G /home/ubuntu/cephtest/tmpfs/loopfile" + - "mkfs.ext4 /home/ubuntu/cephtest/tmpfs/loopfile" + - "sudo mount -o loop /home/ubuntu/cephtest/tmpfs/loopfile /home/ubuntu/cephtest/rbd-pwl-cache" + - "sudo chmod 777 /home/ubuntu/cephtest/rbd-pwl-cache" +- exec_on_cleanup: + client.0: + - "sudo umount /home/ubuntu/cephtest/rbd-pwl-cache" + - "sudo umount /home/ubuntu/cephtest/tmpfs" + - "rm -rf /home/ubuntu/cephtest/rbd-pwl-cache" + - "rm -rf /home/ubuntu/cephtest/tmpfs" diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/.qa b/ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/rwl.yaml 
b/ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/rwl.yaml new file mode 100644 index 000000000..5aeab26b3 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/rwl.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd_persistent_cache_mode: rwl diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/ssd.yaml b/ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/ssd.yaml new file mode 100644 index 000000000..082149147 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/ssd.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd_persistent_cache_mode: ssd diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/.qa b/ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/1G.yaml b/ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/1G.yaml new file mode 100644 index 000000000..53fcddcdf --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/1G.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd_persistent_cache_size: 1073741824 diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/5G.yaml b/ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/5G.yaml new file mode 100644 index 000000000..1c43b5de8 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/5G.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd_persistent_cache_size: 5368709120 diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/7-workloads/.qa b/ceph/qa/suites/rbd/pwl-cache/tmpfs/7-workloads/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/7-workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/qemu_xfstests.yaml 
b/ceph/qa/suites/rbd/pwl-cache/tmpfs/7-workloads/qemu_xfstests.yaml similarity index 100% rename from ceph/qa/suites/rbd/persistent-writeback-cache/6-workloads/qemu_xfstests.yaml rename to ceph/qa/suites/rbd/pwl-cache/tmpfs/7-workloads/qemu_xfstests.yaml diff --git a/ceph/qa/suites/rgw/crypt/2-kms/vault_transit.yaml b/ceph/qa/suites/rgw/crypt/2-kms/vault_transit.yaml index fe8c8409d..d20bb52bc 100644 --- a/ceph/qa/suites/rgw/crypt/2-kms/vault_transit.yaml +++ b/ceph/qa/suites/rgw/crypt/2-kms/vault_transit.yaml @@ -6,9 +6,15 @@ overrides: rgw crypt vault auth: token rgw crypt vault secret engine: transit rgw crypt vault prefix: /v1/transit/ + rgw crypt sse s3 backend: vault + rgw crypt sse s3 vault auth: token + rgw crypt sse s3 vault secret engine: transit + rgw crypt sse s3 vault prefix: /v1/transit/ rgw: client.0: use-vault-role: client.0 + s3tests: + with-sse-s3: true tasks: - vault: diff --git a/ceph/qa/tasks/ceph_manager.py b/ceph/qa/tasks/ceph_manager.py index 51d802cd7..652d7d9ae 100644 --- a/ceph/qa/tasks/ceph_manager.py +++ b/ceph/qa/tasks/ceph_manager.py @@ -14,6 +14,7 @@ import logging import threading import traceback import os +import re import shlex from io import BytesIO, StringIO @@ -70,6 +71,14 @@ def write_conf(ctx, conf_path=DEFAULT_CONF_PATH, cluster='ceph'): conf_fp = BytesIO() ctx.ceph[cluster].conf.write(conf_fp) conf_fp.seek(0) + lines = conf_fp.readlines() + m = None + for l in lines: + m = re.search("rgw.crypt.sse.s3.backend *= *(.*)", l.decode()) + if m: + break + ctx.ceph[cluster].rgw_crypt_sse_s3_backend = m.expand("\\1") if m else None + conf_fp.seek(0) writes = ctx.cluster.run( args=[ 'sudo', 'mkdir', '-p', '/etc/ceph', run.Raw('&&'), @@ -646,6 +655,7 @@ class OSDThrasher(Thrasher): options['max_change']) def primary_affinity(self, osd=None): + self.log("primary_affinity") if osd is None: osd = random.choice(self.in_osds) if random.random() >= .5: @@ -672,6 +682,7 @@ class OSDThrasher(Thrasher): """ Install or remove random pg_upmap 
entries in OSDMap """ + self.log("thrash_pg_upmap") from random import shuffle out = self.ceph_manager.raw_cluster_cmd('osd', 'dump', '-f', 'json-pretty') j = json.loads(out) @@ -680,12 +691,14 @@ class OSDThrasher(Thrasher): if random.random() >= .3: pgs = self.ceph_manager.get_pg_stats() if not pgs: + self.log('No pgs; doing nothing') return pg = random.choice(pgs) pgid = str(pg['pgid']) poolid = int(pgid.split('.')[0]) sizes = [x['size'] for x in j['pools'] if x['pool'] == poolid] if len(sizes) == 0: + self.log('No pools; doing nothing') return n = sizes[0] osds = self.in_osds + self.out_osds @@ -714,6 +727,7 @@ class OSDThrasher(Thrasher): """ Install or remove random pg_upmap_items entries in OSDMap """ + self.log("thrash_pg_upmap_items") from random import shuffle out = self.ceph_manager.raw_cluster_cmd('osd', 'dump', '-f', 'json-pretty') j = json.loads(out) @@ -722,12 +736,14 @@ class OSDThrasher(Thrasher): if random.random() >= .3: pgs = self.ceph_manager.get_pg_stats() if not pgs: + self.log('No pgs; doing nothing') return pg = random.choice(pgs) pgid = str(pg['pgid']) poolid = int(pgid.split('.')[0]) sizes = [x['size'] for x in j['pools'] if x['pool'] == poolid] if len(sizes) == 0: + self.log('No pools; doing nothing') return n = sizes[0] osds = self.in_osds + self.out_osds @@ -883,15 +899,15 @@ class OSDThrasher(Thrasher): """ self.log("test_pool_min_size") self.all_up() + time.sleep(60) # buffer time for recovery to start. 
self.ceph_manager.wait_for_recovery( timeout=self.config.get('timeout') ) - minout = int(self.config.get("min_out", 1)) minlive = int(self.config.get("min_live", 2)) mindead = int(self.config.get("min_dead", 1)) self.log("doing min_size thrashing") - self.ceph_manager.wait_for_clean(timeout=60) + self.ceph_manager.wait_for_clean(timeout=180) assert self.ceph_manager.is_clean(), \ 'not clean before minsize thrashing starts' while not self.stopping: @@ -965,7 +981,7 @@ class OSDThrasher(Thrasher): # try a few times since there might be a concurrent pool # creation or deletion with safe_while( - sleep=5, tries=5, + sleep=25, tries=5, action='check for active or peered') as proceed: while proceed(): if self.ceph_manager.all_active_or_peered(): @@ -991,7 +1007,7 @@ class OSDThrasher(Thrasher): Pause injection testing. Check for osd being down when finished. """ the_one = random.choice(self.live_osds) - self.log("inject_pause on {osd}".format(osd=the_one)) + self.log("inject_pause on osd.{osd}".format(osd=the_one)) self.log( "Testing {key} pause injection for duration {duration}".format( key=conf_key, @@ -1166,6 +1182,7 @@ class OSDThrasher(Thrasher): This sequence should cause the revived osd to have to handle a map gap since the mons would have trimmed """ + self.log("test_map_discontinuity") while len(self.in_osds) < (self.minin + 1): self.in_osd() self.log("Waiting for recovery") @@ -1207,8 +1224,9 @@ class OSDThrasher(Thrasher): mindead = int(self.config.get("min_dead", 0)) self.log('choose_action: min_in %d min_out ' - '%d min_live %d min_dead %d' % - (minin, minout, minlive, mindead)) + '%d min_live %d min_dead %d ' + 'chance_down %.2f' % + (minin, minout, minlive, mindead, chance_down)) actions = [] if len(self.in_osds) > minin: actions.append((self.out_osd, 1.0,)) @@ -2670,7 +2688,11 @@ class CephManager: True if all pgs are clean """ pgs = self.get_pg_stats() - return self._get_num_active_clean(pgs) == len(pgs) + if self._get_num_active_clean(pgs) == len(pgs): 
+ return True + else: + self.dump_pgs_not_active_clean() + return False def is_recovered(self): """ @@ -2716,6 +2738,12 @@ class CephManager: self.log('PG %s is not active' % pg['pgid']) self.log(pg) + def dump_pgs_not_active_peered(self, pgs): + for pg in pgs: + if (not pg['state'].count('active')) and (not pg['state'].count('peered')): + self.log('PG %s is not active or peered' % pg['pgid']) + self.log(pg) + def wait_for_clean(self, timeout=1200): """ Returns true when all pgs are clean. @@ -2901,7 +2929,11 @@ class CephManager: Wrapper to check if all PGs are active or peered """ pgs = self.get_pg_stats() - return self._get_num_active(pgs) + self._get_num_peered(pgs) == len(pgs) + if self._get_num_active(pgs) + self._get_num_peered(pgs) == len(pgs): + return True + else: + self.dump_pgs_not_active_peered(pgs) + return False def wait_till_active(self, timeout=None): """ diff --git a/ceph/qa/tasks/cephfs/filesystem.py b/ceph/qa/tasks/cephfs/filesystem.py index c8cdbedd1..8788070e7 100644 --- a/ceph/qa/tasks/cephfs/filesystem.py +++ b/ceph/qa/tasks/cephfs/filesystem.py @@ -713,6 +713,11 @@ class Filesystem(MDSCluster): def run_client_payload(self, cmd): # avoid circular dep by importing here: from tasks.cephfs.fuse_mount import FuseMount + + # Wait for at MDS daemons to be ready before mounting the + # ceph-fuse client in run_client_payload() + self.wait_for_daemons() + d = misc.get_testdir(self._ctx) m = FuseMount(self._ctx, d, "admin", self.client_remote, cephfs_name=self.name) m.mount_wait() diff --git a/ceph/qa/tasks/cephfs/kernel_mount.py b/ceph/qa/tasks/cephfs/kernel_mount.py index 9eef81963..f20fcc3cd 100644 --- a/ceph/qa/tasks/cephfs/kernel_mount.py +++ b/ceph/qa/tasks/cephfs/kernel_mount.py @@ -1,3 +1,4 @@ +import errno import json import logging import os @@ -32,6 +33,7 @@ class KernelMount(CephFSMount): self.client_config = client_config self.dynamic_debug = client_config.get('dynamic_debug', False) self.rbytes = client_config.get('rbytes', False) + 
self.snapdirname = client_config.get('snapdirname', '.snap') self.syntax_style = client_config.get('syntax', 'v2') self.inst = None self.addr = None @@ -107,6 +109,8 @@ class KernelMount(CephFSMount): opts += ",rbytes" else: opts += ",norbytes" + if self.snapdirname != '.snap': + opts += f',snapdirname={self.snapdirname}' mount_cmd = ['sudo'] + self._nsenter_args stx_opt = self._make_mount_cmd_old_or_new_style() @@ -212,12 +216,16 @@ class KernelMount(CephFSMount): stdout = StringIO() stderr = StringIO() try: - self.run_shell_payload(f"sudo dd if={path}", timeout=(5*60), - stdout=stdout, stderr=stderr) + self.run_shell_payload(f"sudo dd if={path}", timeout=(5 * 60), + stdout=stdout, stderr=stderr) return stdout.getvalue() except CommandFailedError: if 'no such file or directory' in stderr.getvalue().lower(): - return None + return errno.ENOENT + elif 'not a directory' in stderr.getvalue().lower(): + return errno.ENOTDIR + elif 'permission denied' in stderr.getvalue().lower(): + return errno.EACCES raise def _get_global_id(self): @@ -349,8 +357,23 @@ echo '{fdata}' | sudo tee /sys/kernel/debug/dynamic_debug/control return epoch, barrier def get_op_read_count(self): - buf = self.read_debug_file("metrics/size") - if buf is None: - return 0 - else: - return int(re.findall(r'read.*', buf)[0].split()[1]) + stdout = StringIO() + stderr = StringIO() + try: + path = os.path.join(self._get_debug_dir(), "metrics/size") + self.run_shell(f"sudo stat {path}", stdout=stdout, + stderr=stderr, cwd=None) + buf = self.read_debug_file("metrics/size") + except CommandFailedError: + if 'no such file or directory' in stderr.getvalue().lower() \ + or 'not a directory' in stderr.getvalue().lower(): + try: + path = os.path.join(self._get_debug_dir(), "metrics") + self.run_shell(f"sudo stat {path}", stdout=stdout, + stderr=stderr, cwd=None) + buf = self.read_debug_file("metrics") + except CommandFailedError: + return errno.ENOENT + else: + return 0 + return int(re.findall(r'read.*', 
buf)[0].split()[1]) diff --git a/ceph/qa/tasks/cephfs/mount.py b/ceph/qa/tasks/cephfs/mount.py index 41ebcba85..d3e3e4587 100644 --- a/ceph/qa/tasks/cephfs/mount.py +++ b/ceph/qa/tasks/cephfs/mount.py @@ -697,7 +697,7 @@ class CephFSMount(object): def run_shell(self, args, timeout=900, **kwargs): args = args.split() if isinstance(args, str) else args - kwargs.pop('omit_sudo', False) + omit_sudo = kwargs.pop('omit_sudo', False) sudo = kwargs.pop('sudo', False) cwd = kwargs.pop('cwd', self.mountpoint) stdout = kwargs.pop('stdout', StringIO()) @@ -706,7 +706,9 @@ class CephFSMount(object): if sudo: args.insert(0, 'sudo') - return self.client_remote.run(args=args, cwd=cwd, timeout=timeout, stdout=stdout, stderr=stderr, **kwargs) + return self.client_remote.run(args=args, cwd=cwd, timeout=timeout, + stdout=stdout, stderr=stderr, + omit_sudo=omit_sudo, **kwargs) def run_shell_payload(self, payload, **kwargs): return self.run_shell(["bash", "-c", Raw(f"'{payload}'")], **kwargs) @@ -837,6 +839,31 @@ class CephFSMount(object): return rproc + def open_dir_background(self, basename): + """ + Create and hold a capability to a directory. 
+ """ + assert(self.is_mounted()) + + path = os.path.join(self.hostfs_mntpt, basename) + + pyscript = dedent(""" + import time + import os + + os.mkdir("{path}") + fd = os.open("{path}", os.O_RDONLY) + while True: + time.sleep(1) + """).format(path=path) + + rproc = self._run_python(pyscript) + self.background_procs.append(rproc) + + self.wait_for_visible(basename) + + return rproc + def wait_for_dir_empty(self, dirname, timeout=30): dirpath = os.path.join(self.hostfs_mntpt, dirname) with safe_while(sleep=5, tries=(timeout//5)) as proceed: diff --git a/ceph/qa/tasks/cephfs/test_failover.py b/ceph/qa/tasks/cephfs/test_failover.py index 7147807bf..8fb9f2775 100644 --- a/ceph/qa/tasks/cephfs/test_failover.py +++ b/ceph/qa/tasks/cephfs/test_failover.py @@ -602,6 +602,25 @@ class TestStandbyReplay(CephFSTestCase): self.fs.mds_restart(mds_id=victim['name']) status = self._confirm_single_replay(status=status) + def test_standby_replay_prepare_beacon(self): + """ + That a MDSMonitor::prepare_beacon handles standby-replay daemons + correctly without removing the standby. (Note, usually a standby-replay + beacon will just be replied to by MDSMonitor::preprocess_beacon.) + """ + + status = self._confirm_no_replay() + self.fs.set_max_mds(1) + self.fs.set_allow_standby_replay(True) + status = self._confirm_single_replay() + replays = list(status.get_replays(self.fs.id)) + self.assertEqual(len(replays), 1) + self.config_set('mds.'+replays[0]['name'], 'mds_inject_health_dummy', True) + time.sleep(10) # for something not to happen... 
+ status = self._confirm_single_replay() + replays2 = list(status.get_replays(self.fs.id)) + self.assertEqual(replays[0]['gid'], replays2[0]['gid']) + def test_rank_stopped(self): """ That when a rank is STOPPED, standby replays for diff --git a/ceph/qa/tasks/cephfs/test_nfs.py b/ceph/qa/tasks/cephfs/test_nfs.py index 47b3e63a6..1f439cd31 100644 --- a/ceph/qa/tasks/cephfs/test_nfs.py +++ b/ceph/qa/tasks/cephfs/test_nfs.py @@ -83,11 +83,16 @@ class TestNFS(MgrTestCase): ''' event_occurred = False # Wait few seconds for NFS daemons' status to be updated - with contextutil.safe_while(sleep=10, tries=12, _raise=False) as proceed: + with contextutil.safe_while(sleep=10, tries=18, _raise=False) as proceed: while not event_occurred and proceed(): daemons_details = json.loads( self._fetch_nfs_daemons_details(enable_json=True)) log.info('daemons details %s', daemons_details) + # 'events' key may not exist in the daemon description + # after a mgr fail over and could take some time to appear + # (it's populated on first daemon event) + if 'events' not in daemons_details[0]: + continue for event in daemons_details[0]['events']: log.info('daemon event %s', event) if expected_event in event: diff --git a/ceph/qa/tasks/cephfs/test_readahead.py b/ceph/qa/tasks/cephfs/test_readahead.py index b1484ab07..7e6270f03 100644 --- a/ceph/qa/tasks/cephfs/test_readahead.py +++ b/ceph/qa/tasks/cephfs/test_readahead.py @@ -16,8 +16,7 @@ class TestReadahead(CephFSTestCase): initial_op_read = self.mount_a.get_op_read_count() self.mount_a.run_shell(["dd", "if=foo", "of=/dev/null", "bs=128k", "count=32"]) op_read = self.mount_a.get_op_read_count() - - assert op_read >= initial_op_read + self.assertGreaterEqual(op_read, initial_op_read) op_read -= initial_op_read log.info("read operations: {0}".format(op_read)) diff --git a/ceph/qa/tasks/cephfs/test_snap_schedules.py b/ceph/qa/tasks/cephfs/test_snap_schedules.py index 388e38135..4a9ce838e 100644 --- a/ceph/qa/tasks/cephfs/test_snap_schedules.py 
+++ b/ceph/qa/tasks/cephfs/test_snap_schedules.py @@ -19,7 +19,7 @@ def seconds_upto_next_schedule(time_from, timo): ts = int(time_from) return ((int(ts / 60) * 60) + timo) - ts -class TestSnapSchedules(CephFSTestCase): +class TestSnapSchedulesHelper(CephFSTestCase): CLIENTS_REQUIRED = 1 TEST_VOLUME_NAME = 'snap_vol' @@ -54,7 +54,7 @@ class TestSnapSchedules(CephFSTestCase): result = json.loads(self._fs_cmd("volume", "ls")) if len(result) == 0: self.vol_created = True - self.volname = TestSnapSchedules.TEST_VOLUME_NAME + self.volname = TestSnapSchedulesHelper.TEST_VOLUME_NAME self._fs_cmd("volume", "create", self.volname) else: self.volname = result[0]['name'] @@ -72,7 +72,7 @@ class TestSnapSchedules(CephFSTestCase): self.config_set('mgr', 'mgr/snap_schedule/dump_on_update', True) def setUp(self): - super(TestSnapSchedules, self).setUp() + super(TestSnapSchedulesHelper, self).setUp() self.volname = None self.vol_created = False self._create_or_reuse_test_volume() @@ -88,7 +88,7 @@ class TestSnapSchedules(CephFSTestCase): if self.vol_created: self._delete_test_volume() self._disable_snap_schedule() - super(TestSnapSchedules, self).tearDown() + super(TestSnapSchedulesHelper, self).tearDown() def _schedule_to_timeout(self, schedule): mult = schedule[-1] @@ -120,14 +120,14 @@ class TestSnapSchedules(CephFSTestCase): def verify(self, dir_path, max_trials): trials = 0 - snap_path = "{0}/.snap".format(dir_path) + snap_path = f'{dir_path}/.snap' while (len(self.create_cbks) or len(self.remove_cbks)) and trials < max_trials: snapshots = set(self.mount_a.ls(path=snap_path)) - log.info(f"snapshots: {snapshots}") + log.info(f'snapshots: {snapshots}') added = snapshots - self.snapshots - log.info(f"added: {added}") + log.info(f'added: {added}') removed = self.snapshots - snapshots - log.info(f"removed: {removed}") + log.info(f'removed: {removed}') if added: for cbk in list(self.create_cbks): res = cbk(list(added)) @@ -151,7 +151,7 @@ class TestSnapSchedules(CephFSTestCase): # 
expected "scheduled" snapshot name ts_name = (datetime.utcfromtimestamp(snap_sched_exec_epoch) - + timedelta(seconds=wait_timo)).strftime(TestSnapSchedules.SNAPSHOT_TS_FORMAT) + + timedelta(seconds=wait_timo)).strftime(TestSnapSchedulesHelper.SNAPSHOT_TS_FORMAT) return (wait_timo, ts_name) def verify_schedule(self, dir_path, schedules, retentions=[]): @@ -165,7 +165,8 @@ class TestSnapSchedules(CephFSTestCase): self.assertTrue(schedule in json_res['schedule']) for retention in retentions: self.assertTrue(retention in json_res['retention']) - + +class TestSnapSchedules(TestSnapSchedulesHelper): def remove_snapshots(self, dir_path): snap_path = f'{dir_path}/.snap' @@ -359,7 +360,7 @@ class TestSnapSchedules(CephFSTestCase): snap_path = f"{dir_path}/.snap"[1:] snapshots = self.mount_a.ls(path=snap_path) fs_count = len(snapshots) - log.debug(f'snapshots: {snapshots}'); + log.debug(f'snapshots: {snapshots}') result = self.fs_snap_schedule_cmd('status', path=dir_path, snap_schedule='1M', format='json') @@ -453,4 +454,50 @@ class TestSnapSchedules(CephFSTestCase): # cleanup self.fs_snap_schedule_cmd('remove', path=testdir, snap_schedule='1M') self.remove_snapshots(testdir[1:]) - self.mount_a.run_shell(['rmdir', testdir[1:]]) + self.mount_a.run_shell(['rmdir', testdir[1:]]) + +class TestSnapSchedulesSnapdir(TestSnapSchedulesHelper): + def remove_snapshots(self, dir_path, sdn): + snap_path = f'{dir_path}/{sdn}' + + snapshots = self.mount_a.ls(path=snap_path) + for snapshot in snapshots: + snapshot_path = os.path.join(snap_path, snapshot) + log.debug(f'removing snapshot: {snapshot_path}') + self.mount_a.run_shell(['rmdir', snapshot_path]) + + def get_snap_dir_name(self): + from tasks.cephfs.fuse_mount import FuseMount + from tasks.cephfs.kernel_mount import KernelMount + + if isinstance(self.mount_a, KernelMount): + sdn = self.mount_a.client_config.get('snapdirname', '.snap') + elif isinstance(self.mount_a, FuseMount): + sdn = self.mount_a.client_config.get('client_snapdir', 
'.snap') + self.fs.set_ceph_conf('client', 'client snapdir', sdn) + self.mount_a.remount() + return sdn + + def test_snap_dir_name(self): + """Test the correctness of snap directory name""" + self.mount_a.run_shell(['mkdir', '-p', TestSnapSchedulesSnapdir.TEST_DIRECTORY]) + + # set a schedule on the dir + self.fs_snap_schedule_cmd('add', path=TestSnapSchedulesSnapdir.TEST_DIRECTORY, snap_schedule='1M') + self.fs_snap_schedule_cmd('retention', 'add', path=TestSnapSchedulesSnapdir.TEST_DIRECTORY, retention_spec_or_period='1M') + exec_time = time.time() + + timo, snap_sfx = self.calc_wait_time_and_snap_name(exec_time, '1M') + sdn = self.get_snap_dir_name() + log.info(f'expecting snap {TestSnapSchedulesSnapdir.TEST_DIRECTORY}/{sdn}/scheduled-{snap_sfx} in ~{timo}s...') + + # verify snapshot schedule + self.verify_schedule(TestSnapSchedulesSnapdir.TEST_DIRECTORY, ['1M'], retentions=[{'M':1}]) + + # remove snapshot schedule + self.fs_snap_schedule_cmd('remove', path=TestSnapSchedulesSnapdir.TEST_DIRECTORY) + + # remove all scheduled snapshots + self.remove_snapshots(TestSnapSchedulesSnapdir.TEST_DIRECTORY, sdn) + + self.mount_a.run_shell(['rmdir', TestSnapSchedulesSnapdir.TEST_DIRECTORY]) diff --git a/ceph/qa/tasks/cephfs/test_strays.py b/ceph/qa/tasks/cephfs/test_strays.py index 582f1a81b..8bdc126e2 100644 --- a/ceph/qa/tasks/cephfs/test_strays.py +++ b/ceph/qa/tasks/cephfs/test_strays.py @@ -602,7 +602,6 @@ class TestStrays(CephFSTestCase): """ :param to_id: MDS id to move it to :param path: Filesystem path (string) to move - :param watch_ino: Inode number to look for at destination to confirm move :return: None """ self.mount_a.run_shell(["setfattr", "-n", "ceph.dir.pin", "-v", str(rank), path]) @@ -700,6 +699,46 @@ ln dir_1/original dir_2/linkto # See that the stray counter on rank 0 has incremented self.assertEqual(self.get_mdc_stat("strays_created", rank_0_id), 1) + def test_migrate_unlinked_dir(self): + """ + Reproduce https://tracker.ceph.com/issues/53597 + """ + 
rank_0_id, rank_1_id = self._setup_two_ranks() + + self.mount_a.run_shell_payload(""" +mkdir pin +touch pin/placeholder +""") + + self._force_migrate("pin") + + # Hold the dir open so it cannot be purged + p = self.mount_a.open_dir_background("pin/to-be-unlinked") + + # Unlink the dentry + self.mount_a.run_shell(["rmdir", "pin/to-be-unlinked"]) + + # Wait to see the stray count increment + self.wait_until_equal( + lambda: self.get_mdc_stat("num_strays", mds_id=rank_1_id), + expect_val=1, timeout=60, reject_fn=lambda x: x > 1) + # but not purged + self.assertEqual(self.get_mdc_stat("strays_created", mds_id=rank_1_id), 1) + self.assertEqual(self.get_mdc_stat("strays_enqueued", mds_id=rank_1_id), 0) + + # Test loading unlinked dir into cache + self.fs.mds_asok(['flush', 'journal'], rank_1_id) + self.fs.mds_asok(['cache', 'drop'], rank_1_id) + + # Shut down rank 1 + self.fs.set_max_mds(1) + self.fs.wait_for_daemons(timeout=120) + # Now the stray should be migrated to rank 0 + # self.assertEqual(self.get_mdc_stat("strays_created", mds_id=rank_0_id), 1) + # https://github.com/ceph/ceph/pull/44335#issuecomment-1125940158 + + self.mount_a.kill_background(p) + def assert_backtrace(self, ino, expected_path): """ Assert that the backtrace in the data pool for an inode matches diff --git a/ceph/qa/tasks/cephfs/test_volumes.py b/ceph/qa/tasks/cephfs/test_volumes.py index 9b5649171..330b6cb94 100644 --- a/ceph/qa/tasks/cephfs/test_volumes.py +++ b/ceph/qa/tasks/cephfs/test_volumes.py @@ -207,12 +207,24 @@ class TestVolumesHelper(CephFSTestCase): else: self.volname = result[0]['name'] + def _get_volume_info(self, vol_name): + args = ["volume", "info", vol_name] + args = tuple(args) + vol_md = self._fs_cmd(*args) + return vol_md + def _get_subvolume_group_path(self, vol_name, group_name): args = ("subvolumegroup", "getpath", vol_name, group_name) path = self._fs_cmd(*args) # remove the leading '/', and trailing whitespaces return path[1:].rstrip() + def 
_get_subvolume_group_info(self, vol_name, group_name): + args = ["subvolumegroup", "info", vol_name, group_name] + args = tuple(args) + group_md = self._fs_cmd(*args) + return group_md + def _get_subvolume_path(self, vol_name, subvol_name, group_name=None): args = ["subvolume", "getpath", vol_name, subvol_name] if group_name: @@ -296,12 +308,22 @@ class TestVolumesHelper(CephFSTestCase): # flip ownership to nobody. assumption: nobody's id is 65534 self.mount_a.run_shell(["chown", "-h", "65534:65534", sym_path2], sudo=True, omit_sudo=False) - def _wait_for_trash_empty(self, timeout=30): + def _wait_for_trash_empty(self, timeout=60): # XXX: construct the trash dir path (note that there is no mgr # [sub]volume interface for this). trashdir = os.path.join("./", "volumes", "_deleting") self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout) + def _wait_for_subvol_trash_empty(self, subvol, group="_nogroup", timeout=30): + trashdir = os.path.join("./", "volumes", group, subvol, ".trash") + try: + self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout) + except CommandFailedError as ce: + if ce.exitstatus != errno.ENOENT: + pass + else: + raise + def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False): if legacy: subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group) @@ -608,6 +630,38 @@ class TestVolumes(TestVolumesHelper): # data pool names unchanged self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values())) + def test_volume_info(self): + """ + Tests the 'fs volume info' command + """ + vol_fields = ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"] + group = self._generate_random_group_name() + # create subvolumegroup + self._fs_cmd("subvolumegroup", "create", self.volname, group) + # get volume metadata + vol_info = json.loads(self._get_volume_info(self.volname)) + for md in vol_fields: + self.assertIn(md, vol_info, + f"'{md}' key not 
present in metadata of volume") + self.assertEqual(vol_info["used_size"], 0, + "Size should be zero when volumes directory is empty") + + def test_volume_info_without_subvolumegroup(self): + """ + Tests the 'fs volume info' command without subvolume group + """ + vol_fields = ["pools", "mon_addrs"] + # get volume metadata + vol_info = json.loads(self._get_volume_info(self.volname)) + for md in vol_fields: + self.assertIn(md, vol_info, + f"'{md}' key not present in metadata of volume") + self.assertNotIn("used_size", vol_info, + "'used_size' should not be present in absence of subvolumegroup") + self.assertNotIn("pending_subvolume_deletions", vol_info, + "'pending_subvolume_deletions' should not be present in absence" + " of subvolumegroup") + class TestSubvolumeGroups(TestVolumesHelper): """Tests for FS subvolume group operations.""" @@ -760,6 +814,805 @@ class TestSubvolumeGroups(TestVolumesHelper): else: raise RuntimeError("expected the 'fs subvolumegroup create' command to fail") + def test_subvolume_group_create_with_size(self): + # create group with size -- should set quota + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000") + + # get group metadata + group_info = json.loads(self._get_subvolume_group_info(self.volname, group)) + self.assertEqual(group_info["bytes_quota"], 1000000000) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_info(self): + # tests the 'fs subvolumegroup info' command + + group_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime", + "data_pool", "gid", "mode", "mon_addrs", "mtime", "uid"] + + # create group + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # get group metadata + group_info = json.loads(self._get_subvolume_group_info(self.volname, group)) + for md in group_md: + self.assertIn(md, group_info, "'{0}' key not 
present in metadata of group".format(md)) + + self.assertEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set") + self.assertEqual(group_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set") + self.assertEqual(group_info["uid"], 0) + self.assertEqual(group_info["gid"], 0) + + nsize = self.DEFAULT_FILE_SIZE*1024*1024 + self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize)) + + # get group metadata after quota set + group_info = json.loads(self._get_subvolume_group_info(self.volname, group)) + for md in group_md: + self.assertIn(md, group_info, "'{0}' key not present in metadata of subvolume".format(md)) + + self.assertNotEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set") + self.assertEqual(group_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize)) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_create_idempotence(self): + # create group + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # try creating w/ same subvolume group name -- should be idempotent + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_create_idempotence_mode(self): + # create group + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # try creating w/ same subvolume group name with mode -- should set mode + self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=766") + + group_path = self._get_subvolume_group_path(self.volname, group) + + # check subvolumegroup's mode + mode = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip() + self.assertEqual(mode, "766") + + 
# remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_create_idempotence_uid_gid(self): + desired_uid = 1000 + desired_gid = 1000 + + # create group + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # try creating w/ same subvolume group name with uid/gid -- should set uid/gid + self._fs_cmd("subvolumegroup", "create", self.volname, group, "--uid", str(desired_uid), "--gid", str(desired_gid)) + + group_path = self._get_subvolume_group_path(self.volname, group) + + # verify the uid and gid + actual_uid = int(self.mount_a.run_shell(['stat', '-c' '%u', group_path]).stdout.getvalue().strip()) + actual_gid = int(self.mount_a.run_shell(['stat', '-c' '%g', group_path]).stdout.getvalue().strip()) + self.assertEqual(desired_uid, actual_uid) + self.assertEqual(desired_gid, actual_gid) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_create_idempotence_data_pool(self): + # create group + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + group_path = self._get_subvolume_group_path(self.volname, group) + + default_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool") + new_pool = "new_pool" + self.assertNotEqual(default_pool, new_pool) + + # add data pool + newid = self.fs.add_data_pool(new_pool) + + # try creating w/ same subvolume group name with new data pool -- should set pool + self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", new_pool) + desired_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool") + try: + self.assertEqual(desired_pool, new_pool) + except AssertionError: + self.assertEqual(int(desired_pool), newid) # old kernel returns id + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_create_idempotence_resize(self): + # create 
group + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # try creating w/ same subvolume name with size -- should set quota + self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000") + + # get group metadata + group_info = json.loads(self._get_subvolume_group_info(self.volname, group)) + self.assertEqual(group_info["bytes_quota"], 1000000000) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_quota_mds_path_restriction_to_group_path(self): + """ + Tests subvolumegroup quota enforcement with mds path restriction set to group. + For quota to be enforced, read permission needs to be provided to the parent + of the directory on which quota is set. Please see the tracker comment [1] + [1] https://tracker.ceph.com/issues/55090#note-8 + """ + osize = self.DEFAULT_FILE_SIZE*1024*1024*100 + # create group with 100MB quota + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, + "--size", str(osize), "--mode=777") + + # make sure it exists + grouppath = self._get_subvolume_group_path(self.volname, group) + self.assertNotEqual(grouppath, None) + + # create subvolume under the group + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, + "--group_name", group, "--mode=777") + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) + self.assertNotEqual(subvolpath, None) + + # Create auth_id + authid = "client.guest1" + user = json.loads(self.fs.mon_manager.raw_cluster_cmd( + "auth", "get-or-create", authid, + "mds", "allow rw path=/volumes", + "mgr", "allow rw", + "osd", "allow rw tag cephfs *=*", + "mon", "allow r", + "--format=json-pretty" + )) + + # Prepare guest_mount with new authid + guest_mount = self.mount_b + guest_mount.umount_wait() + + # configure credentials 
for guest client + self._configure_guest_auth(guest_mount, "guest1", user[0]["key"]) + + # mount the subvolume + mount_path = os.path.join("/", subvolpath) + guest_mount.mount_wait(cephfs_mntpt=mount_path) + + # create 99 files of 1MB + guest_mount.run_shell_payload("mkdir -p dir1") + for i in range(99): + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) + guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE) + try: + # write two files of 1MB file to exceed the quota + guest_mount.run_shell_payload("mkdir -p dir2") + for i in range(2): + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) + guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE) + # For quota to be enforced + time.sleep(60) + # create 400 files of 1MB to exceed quota + for i in range(400): + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) + guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE) + # Sometimes quota enforcement takes time. 
+ if i == 200: + time.sleep(60) + except CommandFailedError: + pass + else: + self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail") + + # clean up + guest_mount.umount_wait() + + # Delete the subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_quota_mds_path_restriction_to_subvolume_path(self): + """ + Tests subvolumegroup quota enforcement with mds path restriction set to subvolume path + The quota should not be enforced because of the fourth limitation mentioned at + https://docs.ceph.com/en/latest/cephfs/quota/#limitations + """ + osize = self.DEFAULT_FILE_SIZE*1024*1024*100 + # create group with 100MB quota + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, + "--size", str(osize), "--mode=777") + + # make sure it exists + grouppath = self._get_subvolume_group_path(self.volname, group) + self.assertNotEqual(grouppath, None) + + # create subvolume under the group + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, + "--group_name", group, "--mode=777") + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) + self.assertNotEqual(subvolpath, None) + + mount_path = os.path.join("/", subvolpath) + + # Create auth_id + authid = "client.guest1" + user = json.loads(self.fs.mon_manager.raw_cluster_cmd( + "auth", "get-or-create", authid, + "mds", f"allow rw path={mount_path}", + "mgr", "allow rw", + "osd", "allow rw tag cephfs *=*", + "mon", "allow r", + "--format=json-pretty" + )) + + # Prepare guest_mount with new authid + guest_mount = self.mount_b + guest_mount.umount_wait() + + # configure credentials for guest client + 
self._configure_guest_auth(guest_mount, "guest1", user[0]["key"]) + + # mount the subvolume + guest_mount.mount_wait(cephfs_mntpt=mount_path) + + # create 99 files of 1MB to exceed quota + guest_mount.run_shell_payload("mkdir -p dir1") + for i in range(99): + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) + guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE) + try: + # write two files of 1MB file to exceed the quota + guest_mount.run_shell_payload("mkdir -p dir2") + for i in range(2): + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) + guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE) + # For quota to be enforced + time.sleep(60) + # create 400 files of 1MB to exceed quota + for i in range(400): + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) + guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE) + # Sometimes quota enforcement takes time. + if i == 200: + time.sleep(60) + except CommandFailedError: + self.fail(f"Quota should not be enforced, expected filling subvolume {subvolname} with 400 files of size 1MB to succeed") + + # clean up + guest_mount.umount_wait() + + # Delete the subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_quota_exceeded_subvolume_removal(self): + """ + Tests subvolume removal if it's group quota is exceeded + """ + osize = self.DEFAULT_FILE_SIZE*1024*1024*100 + # create group with 100MB quota + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, + "--size", str(osize), "--mode=777") + + # make sure it exists + grouppath = self._get_subvolume_group_path(self.volname, group) + self.assertNotEqual(grouppath, None) + + # create subvolume under 
the group + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, + "--group_name", group, "--mode=777") + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) + self.assertNotEqual(subvolpath, None) + + # create 99 files of 1MB to exceed quota + self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99) + + try: + # write two files of 1MB file to exceed the quota + self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2) + # For quota to be enforced + time.sleep(20) + # create 400 files of 1MB to exceed quota + self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=400) + except CommandFailedError: + # Delete subvolume when group quota is exceeded + self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) + else: + self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail") + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_quota_exceeded_subvolume_removal_retained_snaps(self): + """ + Tests retained snapshot subvolume removal if it's group quota is exceeded + """ + group = self._generate_random_group_name() + subvolname = self._generate_random_subvolume_name() + snapshot1, snapshot2 = self._generate_random_snapshot_name(2) + + osize = self.DEFAULT_FILE_SIZE*1024*1024*100 + # create group with 100MB quota + self._fs_cmd("subvolumegroup", "create", self.volname, group, + "--size", str(osize), "--mode=777") + + # make sure it exists + grouppath = self._get_subvolume_group_path(self.volname, group) + self.assertNotEqual(grouppath, None) + + # create subvolume under the group + self._fs_cmd("subvolume", "create", self.volname, subvolname, + "--group_name", group, "--mode=777") + + # make sure 
it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) + self.assertNotEqual(subvolpath, None) + + # create 99 files of 1MB to exceed quota + self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot1, "--group_name", group) + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot2, "--group_name", group) + + try: + # write two files of 1MB file to exceed the quota + self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2) + # For quota to be enforced + time.sleep(20) + # create 400 files of 1MB to exceed quota + self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=400) + except CommandFailedError: + # remove with snapshot retention + self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group, "--retain-snapshots") + # remove snapshot1 + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot1, "--group_name", group) + # remove snapshot2 (should remove volume) + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot2, "--group_name", group) + # verify subvolume trash is clean + self._wait_for_subvol_trash_empty(subvolname, group=group) + else: + self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail") + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_quota_subvolume_removal(self): + """ + Tests subvolume removal if it's group quota is set. 
+ """ + # create group with size -- should set quota + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000") + + # create subvolume under the group + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group) + + # remove subvolume + try: + self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) + except CommandFailedError: + self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set") + + # remove subvolumegroup + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_quota_legacy_subvolume_removal(self): + """ + Tests legacy subvolume removal if it's group quota is set. + """ + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # emulate a old-fashioned subvolume -- in a custom group + createpath1 = os.path.join(".", "volumes", group, subvolume) + self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True) + + # this would auto-upgrade on access without anyone noticing + subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, "--group-name", group) + self.assertNotEqual(subvolpath1, None) + subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline + + # and... 
the subvolume path returned should be what we created behind the scene + self.assertEqual(createpath1[1:], subvolpath1) + + # Set subvolumegroup quota on idempotent subvolumegroup creation + self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000") + + # remove subvolume + try: + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + except CommandFailedError: + self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set") + + # remove subvolumegroup + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_quota_v1_subvolume_removal(self): + """ + Tests v1 subvolume removal if it's group quota is set. + """ + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # emulate a v1 subvolume -- in a custom group + self._create_v1_subvolume(subvolume, subvol_group=group, has_snapshot=False) + + # Set subvolumegroup quota on idempotent subvolumegroup creation + self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000") + + # remove subvolume + try: + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + except CommandFailedError: + self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set") + + # remove subvolumegroup + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_resize_fail_invalid_size(self): + """ + That a subvolume group cannot be resized to an invalid size and the quota did not change + """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024 + # create group with 1MB quota + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize)) + + # make sure it exists + grouppath = self._get_subvolume_group_path(self.volname, group) + 
self.assertNotEqual(grouppath, None) + + # try to resize the subvolume with an invalid size -10 + nsize = -10 + try: + self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize)) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, + "invalid error code on resize of subvolume group with invalid size") + else: + self.fail("expected the 'fs subvolumegroup resize' command to fail") + + # verify the quota did not change + size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")) + self.assertEqual(size, osize) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_resize_fail_zero_size(self): + """ + That a subvolume group cannot be resized to a zero size and the quota did not change + """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024 + # create group with 1MB quota + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize)) + + # make sure it exists + grouppath = self._get_subvolume_group_path(self.volname, group) + self.assertNotEqual(grouppath, None) + + # try to resize the subvolume group with size 0 + nsize = 0 + try: + self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize)) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, + "invalid error code on resize of subvolume group with invalid size") + else: + self.fail("expected the 'fs subvolumegroup resize' command to fail") + + # verify the quota did not change + size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")) + self.assertEqual(size, osize) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_resize_quota_lt_used_size(self): + """ + That a subvolume group can be resized to a size smaller than the current used size + and the resulting quota matches the expected size. 
+ """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024*20 + # create group with 20MB quota + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, + "--size", str(osize), "--mode=777") + + # make sure it exists + grouppath = self._get_subvolume_group_path(self.volname, group) + self.assertNotEqual(grouppath, None) + + # create subvolume under the group + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, + "--group_name", group, "--mode=777") + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) + self.assertNotEqual(subvolpath, None) + + # create one file of 10MB + file_size=self.DEFAULT_FILE_SIZE*10 + number_of_files=1 + log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, + number_of_files, + file_size)) + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1) + self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) + + usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes")) + + # shrink the subvolume group + nsize = usedsize // 2 + try: + self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize)) + except CommandFailedError: + self.fail("expected the 'fs subvolumegroup resize' command to succeed") + + # verify the quota + size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")) + self.assertEqual(size, nsize) + + # remove subvolume and group + self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_resize_fail_quota_lt_used_size_no_shrink(self): + """ + That a subvolume group cannot be resized to a size smaller than the current used size + when --no_shrink is given and the quota did not change. 
+ """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024*20 + # create group with 20MB quota + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, + "--size", str(osize), "--mode=777") + + # make sure it exists + grouppath = self._get_subvolume_group_path(self.volname, group) + self.assertNotEqual(grouppath, None) + + # create subvolume under the group + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, + "--group_name", group, "--mode=777") + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) + self.assertNotEqual(subvolpath, None) + + # create one file of 10MB + file_size=self.DEFAULT_FILE_SIZE*10 + number_of_files=1 + log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, + number_of_files, + file_size)) + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2) + self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) + + usedsize = int(self.mount_a.getfattr(grouppath, "ceph.dir.rbytes")) + + # shrink the subvolume group + nsize = usedsize // 2 + try: + self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize), "--no_shrink") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolumegroup with quota less than used") + else: + self.fail("expected the 'fs subvolumegroup resize' command to fail") + + # verify the quota did not change + size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")) + self.assertEqual(size, osize) + + # remove subvolume and group + self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_resize_expand_on_full_subvolume(self): + 
""" + That the subvolume group can be expanded after it is full and future write succeed + """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024*100 + # create group with 100MB quota + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, + "--size", str(osize), "--mode=777") + + # make sure it exists + grouppath = self._get_subvolume_group_path(self.volname, group) + self.assertNotEqual(grouppath, None) + + # create subvolume under the group + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, + "--group_name", group, "--mode=777") + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) + self.assertNotEqual(subvolpath, None) + + # create 99 files of 1MB + self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99) + + try: + # write two files of 1MB file to exceed the quota + self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2) + # For quota to be enforced + time.sleep(20) + # create 500 files of 1MB + self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500) + except CommandFailedError: + # Not able to write. 
So expand the subvolumegroup more and try writing the files again + nsize = osize*7 + self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize)) + try: + self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500) + except CommandFailedError: + self.fail("expected filling subvolume {0} with 500 files of size 1MB " + "to succeed".format(subvolname)) + else: + self.fail("expected filling subvolume {0} with 500 files of size 1MB " + "to fail".format(subvolname)) + + # remove subvolume and group + self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_resize_infinite_size(self): + """ + That a subvolume group can be resized to an infinite size by unsetting its quota. + """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024 + # create group + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, + "--size", str(osize)) + + # make sure it exists + grouppath = self._get_subvolume_group_path(self.volname, group) + self.assertNotEqual(grouppath, None) + + # resize inf + self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf") + + # verify that the quota is None + size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes") + self.assertEqual(size, None) + + # remove subvolume group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_resize_infinite_size_future_writes(self): + """ + That a subvolume group can be resized to an infinite size and the future writes succeed. 
+ """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024*5 + # create group with 5MB quota + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group, + "--size", str(osize), "--mode=777") + + # make sure it exists + grouppath = self._get_subvolume_group_path(self.volname, group) + self.assertNotEqual(grouppath, None) + + # create subvolume under the group + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, + "--group_name", group, "--mode=777") + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group) + self.assertNotEqual(subvolpath, None) + + # create 4 files of 1MB + self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=4) + + try: + # write two files of 1MB file to exceed the quota + self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2) + # For quota to be enforced + time.sleep(20) + # create 500 files of 1MB + self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500) + except CommandFailedError: + # Not able to write. 
So resize subvolumegroup to 'inf' and try writing the files again + # resize inf + self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf") + try: + self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500) + except CommandFailedError: + self.fail("expected filling subvolume {0} with 500 files of size 1MB " + "to succeed".format(subvolname)) + else: + self.fail("expected filling subvolume {0} with 500 files of size 1MB " + "to fail".format(subvolname)) + + + # verify that the quota is None + size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes") + self.assertEqual(size, None) + + # remove subvolume and group + self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + def test_subvolume_group_ls(self): # tests the 'fs subvolumegroup ls' command @@ -793,10 +1646,47 @@ class TestSubvolumeGroups(TestVolumesHelper): self._fs_cmd("subvolume", "create", self.volname, subvolume) self._fs_cmd("subvolume", "rm", self.volname, subvolume) - subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname)) - subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls] - if "_deleting" in subvolgroupnames: - self.fail("Listing subvolume groups listed '_deleting' directory") + subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname)) + subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls] + if "_deleting" in subvolgroupnames: + self.fail("Listing subvolume groups listed '_deleting' directory") + + def test_subvolume_group_ls_filter_internal_directories(self): + # tests the 'fs subvolumegroup ls' command filters internal directories + # eg: '_deleting', '_nogroup', '_index', "_legacy" + + subvolumegroups = self._generate_random_group_name(3) + subvolume = 
self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + #create subvolumegroups + for groupname in subvolumegroups: + self._fs_cmd("subvolumegroup", "create", self.volname, groupname) + + # create subvolume which will create '_nogroup' directory + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # create snapshot + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # clone snapshot which will create '_index' directory + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolume which will create '_deleting' directory + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # list subvolumegroups + ret = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname)) + self.assertEqual(len(ret), len(subvolumegroups)) + + ret_list = [subvolumegroup['name'] for subvolumegroup in ret] + self.assertEqual(len(ret_list), len(subvolumegroups)) + + self.assertEqual(all(elem in subvolumegroups for elem in ret_list), True) def test_subvolume_group_ls_for_nonexistent_volume(self): # tests the 'fs subvolumegroup ls' command when /volume doesn't exist @@ -835,6 +1725,66 @@ class TestSubvolumeGroups(TestVolumesHelper): except CommandFailedError: raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed") + def test_subvolume_group_exists_with_subvolumegroup_and_no_subvolume(self): + """Test the presence of any subvolumegroup when only subvolumegroup is present""" + + group = self._generate_random_group_name() + # create subvolumegroup + self._fs_cmd("subvolumegroup", "create", self.volname, group) + ret = self._fs_cmd("subvolumegroup", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "subvolumegroup exists") + # delete subvolumegroup + 
self._fs_cmd("subvolumegroup", "rm", self.volname, group) + ret = self._fs_cmd("subvolumegroup", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "no subvolumegroup exists") + + def test_subvolume_group_exists_with_no_subvolumegroup_and_subvolume(self): + """Test the presence of any subvolumegroup when no subvolumegroup is present""" + + ret = self._fs_cmd("subvolumegroup", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "no subvolumegroup exists") + + def test_subvolume_group_exists_with_subvolumegroup_and_subvolume(self): + """Test the presence of any subvolume when subvolumegroup + and subvolume both are present""" + + group = self._generate_random_group_name() + subvolume = self._generate_random_subvolume_name(2) + # create subvolumegroup + self._fs_cmd("subvolumegroup", "create", self.volname, group) + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume[0], "--group_name", group) + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume[1]) + ret = self._fs_cmd("subvolumegroup", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "subvolumegroup exists") + # delete subvolume in group + self._fs_cmd("subvolume", "rm", self.volname, subvolume[0], "--group_name", group) + ret = self._fs_cmd("subvolumegroup", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "subvolumegroup exists") + # delete subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume[1]) + ret = self._fs_cmd("subvolumegroup", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "subvolumegroup exists") + # delete subvolumegroup + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + ret = self._fs_cmd("subvolumegroup", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "no subvolumegroup exists") + + def test_subvolume_group_exists_without_subvolumegroup_and_with_subvolume(self): + """Test the presence of any subvolume when subvolume is present + but no subvolumegroup 
is present""" + + subvolume = self._generate_random_subvolume_name() + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + ret = self._fs_cmd("subvolumegroup", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "no subvolumegroup exists") + # delete subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + ret = self._fs_cmd("subvolumegroup", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "no subvolumegroup exists") + class TestSubvolumes(TestVolumesHelper): """Tests for FS subvolume operations, except snapshot and snapshot clone.""" @@ -1214,6 +2164,41 @@ class TestSubvolumes(TestVolumesHelper): # verify trash dir is clean self._wait_for_trash_empty() + def test_subvolume_create_and_ls_providing_group_as_nogroup(self): + """ + That a 'subvolume create' and 'subvolume ls' should throw + permission denied error if option --group=_nogroup is provided. + """ + + subvolname = self._generate_random_subvolume_name() + + # try to create subvolume providing --group_name=_nogroup option + try: + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", "_nogroup") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EPERM) + else: + self.fail("expected the 'fs subvolume create' command to fail") + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolname) + + # try to list subvolumes providing --group_name=_nogroup option + try: + self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EPERM) + else: + self.fail("expected the 'fs subvolume ls' command to fail") + + # list subvolumes + self._fs_cmd("subvolume", "ls", self.volname) + + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + + # verify trash dir is clean. 
+ self._wait_for_trash_empty() + def test_subvolume_expand(self): """ That a subvolume can be expanded in size and its quota matches the expected size. @@ -1318,6 +2303,44 @@ class TestSubvolumes(TestVolumesHelper): # verify trash dir is clean self._wait_for_trash_empty() + def test_subvolume_ls_with_groupname_as_internal_directory(self): + # tests the 'fs subvolume ls' command when the default groupname as internal directories + # Eg: '_nogroup', '_legacy', '_deleting', '_index'. + # Expecting 'fs subvolume ls' will be fail with errno EINVAL for '_legacy', '_deleting', '_index' + # Expecting 'fs subvolume ls' will be fail with errno EPERM for '_nogroup' + + # try to list subvolumes providing --group_name=_nogroup option + try: + self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EPERM) + else: + self.fail("expected the 'fs subvolume ls' command to fail with error 'EPERM' for _nogroup") + + # try to list subvolumes providing --group_name=_legacy option + try: + self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_legacy") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL) + else: + self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _legacy") + + # try to list subvolumes providing --group_name=_deleting option + try: + self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_deleting") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL) + else: + self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _deleting") + + # try to list subvolumes providing --group_name=_index option + try: + self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_index") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL) + else: + self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for 
_index") + def test_subvolume_ls_for_notexistent_default_group(self): # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist # prerequisite: we expect that the volume is created and the default group _nogroup is @@ -2421,6 +3444,57 @@ class TestSubvolumes(TestVolumesHelper): except CommandFailedError: self.fail("expected the 'fs subvolume rm --force' command to succeed") + def test_subvolume_exists_with_subvolumegroup_and_subvolume(self): + """Test the presence of any subvolume by specifying the name of subvolumegroup""" + + group = self._generate_random_group_name() + subvolume1 = self._generate_random_subvolume_name() + # create subvolumegroup + self._fs_cmd("subvolumegroup", "create", self.volname, group) + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group) + ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group) + self.assertEqual(ret.strip('\n'), "subvolume exists") + # delete subvolume in group + self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group) + ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group) + self.assertEqual(ret.strip('\n'), "no subvolume exists") + # delete subvolumegroup + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_exists_with_subvolumegroup_and_no_subvolume(self): + """Test the presence of any subvolume specifying the name + of subvolumegroup and no subvolumes""" + + group = self._generate_random_group_name() + # create subvolumegroup + self._fs_cmd("subvolumegroup", "create", self.volname, group) + ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group) + self.assertEqual(ret.strip('\n'), "no subvolume exists") + # delete subvolumegroup + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_exists_without_subvolumegroup_and_with_subvolume(self): + """Test the presence of any subvolume 
without specifying the name + of subvolumegroup""" + + subvolume1 = self._generate_random_subvolume_name() + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume1) + ret = self._fs_cmd("subvolume", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "subvolume exists") + # delete subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume1) + ret = self._fs_cmd("subvolume", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "no subvolume exists") + + def test_subvolume_exists_without_subvolumegroup_and_without_subvolume(self): + """Test the presence of any subvolume without any subvolumegroup + and without any subvolume""" + + ret = self._fs_cmd("subvolume", "exist", self.volname) + self.assertEqual(ret.strip('\n'), "no subvolume exists") + def test_subvolume_shrink(self): """ That a subvolume can be shrinked in size and its quota matches the expected size. @@ -3196,7 +4270,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper): tests the 'fs subvolume snapshot info' command """ - snap_md = ["created_at", "data_pool", "has_pending_clones", "size"] + snap_md = ["created_at", "data_pool", "has_pending_clones"] subvolume = self._generate_random_subvolume_name() snapshot, snap_missing = self._generate_random_snapshot_name(2) @@ -3523,7 +4597,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper): """ ensure a retained subvolume can be recreated and further snapshotted """ - snap_md = ["created_at", "data_pool", "has_pending_clones", "size"] + snap_md = ["created_at", "data_pool", "has_pending_clones"] subvolume = self._generate_random_subvolume_name() snapshot1, snapshot2 = self._generate_random_snapshot_name(2) @@ -3586,7 +4660,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper): ensure retain snapshots based delete of a subvolume with snapshots retains the subvolume also test allowed and dis-allowed operations on a retained subvolume """ - snap_md = ["created_at", "data_pool", "has_pending_clones", "size"] + snap_md = 
["created_at", "data_pool", "has_pending_clones"] subvolume = self._generate_random_subvolume_name() snapshot = self._generate_random_snapshot_name() @@ -4399,6 +5473,69 @@ class TestSubvolumeSnapshots(TestVolumesHelper): # verify trash dir is clean. self._wait_for_trash_empty() + def test_clean_stale_subvolume_snapshot_metadata(self): + """ + Validate cleaning of stale subvolume snapshot metadata. + """ + subvolname = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + snapshot = self._generate_random_snapshot_name() + + # create group. + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group. + self._fs_cmd("subvolume", "create", self.volname, subvolname, group) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group) + + # set metadata for snapshot. + key = "key" + value = "value" + try: + self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group) + except CommandFailedError: + self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed") + + # save the subvolume config file. + meta_path = os.path.join(".", "volumes", group, subvolname, ".meta") + tmp_meta_path = os.path.join(".", "volumes", group, subvolname, ".meta.stale_snap_section") + self.mount_a.run_shell(['sudo', 'cp', '-p', meta_path, tmp_meta_path], omit_sudo=False) + + # Delete snapshot, this would remove user snap metadata + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group) + + # Copy back saved subvolume config file. 
This would have stale snapshot metadata + self.mount_a.run_shell(['sudo', 'cp', '-p', tmp_meta_path, meta_path], omit_sudo=False) + + # Verify that it has stale snapshot metadata + section_name = "SNAP_METADATA_" + snapshot + try: + self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False) + except CommandFailedError: + self.fail("Expected grep cmd to succeed because stale snapshot metadata exist") + + # Do any subvolume operation to clean the stale snapshot metadata + _ = json.loads(self._get_subvolume_info(self.volname, subvolname, group)) + + # Verify that the stale snapshot metadata is cleaned + try: + self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False) + except CommandFailedError as e: + self.assertNotEqual(e.exitstatus, 0) + else: + self.fail("Expected non-zero exist status because stale snapshot metadata should not exist") + + self._fs_cmd("subvolume", "rm", self.volname, subvolname, group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean. + self._wait_for_trash_empty() + # Clean tmp config file + self.mount_a.run_shell(['sudo', 'rm', '-f', tmp_meta_path], omit_sudo=False) + + class TestSubvolumeSnapshotClones(TestVolumesHelper): """ Tests for FS subvolume snapshot clone operations.""" def test_clone_subvolume_info(self): @@ -4445,6 +5582,230 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper): # verify trash dir is clean self._wait_for_trash_empty() + def test_subvolume_snapshot_info_without_snapshot_clone(self): + """ + Verify subvolume snapshot info output without clonnnig snapshot. + If no clone is performed then path /volumes/_index/clone/{track_id} + will not exist. + """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + + # create subvolume. 
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # list snapshot info + result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot)) + + # verify snapshot info + self.assertEqual(result['has_pending_clones'], "no") + self.assertFalse('orphan_clones_count' in result) + self.assertFalse('pending_clones' in result) + + # remove snapshot, subvolume, clone + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_info_if_no_clone_pending(self): + """ + Verify subvolume snapshot info output if no clone is in pending state. + """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone_list = [f'clone_{i}' for i in range(3)] + + # create subvolume. 
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clones + for clone in clone_list: + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # check clones status + for clone in clone_list: + self._wait_for_clone_to_complete(clone) + + # list snapshot info + result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot)) + + # verify snapshot info + self.assertEqual(result['has_pending_clones'], "no") + self.assertFalse('orphan_clones_count' in result) + self.assertFalse('pending_clones' in result) + + # remove snapshot, subvolume, clone + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + for clone in clone_list: + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_info_if_clone_pending_for_no_group(self): + """ + Verify subvolume snapshot info output if clones are in pending state. + Clones are not specified for particular target_group. Hence target_group + should not be in the output as we don't show _nogroup (default group) + """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone_list = [f'clone_{i}' for i in range(3)] + + # create subvolume. 
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # insert delay at the beginning of snapshot clone + self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5) + + # schedule a clones + for clone in clone_list: + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # list snapshot info + result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot)) + + # verify snapshot info + expected_clone_list = [] + for clone in clone_list: + expected_clone_list.append({"name": clone}) + self.assertEqual(result['has_pending_clones'], "yes") + self.assertFalse('orphan_clones_count' in result) + self.assertListEqual(result['pending_clones'], expected_clone_list) + self.assertEqual(len(result['pending_clones']), 3) + + # check clones status + for clone in clone_list: + self._wait_for_clone_to_complete(clone) + + # remove snapshot, subvolume, clone + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + for clone in clone_list: + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_info_if_clone_pending_for_target_group(self): + """ + Verify subvolume snapshot info output if clones are in pending state. + Clones are not specified for target_group. 
+ """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + group = self._generate_random_group_name() + target_group = self._generate_random_group_name() + + # create groups + self._fs_cmd("subvolumegroup", "create", self.volname, group) + self._fs_cmd("subvolumegroup", "create", self.volname, target_group) + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777") + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) + + # insert delay at the beginning of snapshot clone + self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, + "--group_name", group, "--target_group_name", target_group) + + # list snapshot info + result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot, "--group_name", group)) + + # verify snapshot info + expected_clone_list = [{"name": clone, "target_group": target_group}] + self.assertEqual(result['has_pending_clones'], "yes") + self.assertFalse('orphan_clones_count' in result) + self.assertListEqual(result['pending_clones'], expected_clone_list) + self.assertEqual(len(result['pending_clones']), 1) + + # check clone status + self._wait_for_clone_to_complete(clone, clone_group=target_group) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) + self._fs_cmd("subvolume", "rm", self.volname, clone, target_group) + + # remove groups + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + self._fs_cmd("subvolumegroup", "rm", self.volname, target_group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def 
test_subvolume_snapshot_info_if_orphan_clone(self): + """ + Verify subvolume snapshot info output if orphan clones exists. + Orphan clones should not list under pending clones. + orphan_clones_count should display correct count of orphan clones' + """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone_list = [f'clone_{i}' for i in range(3)] + + # create subvolume. + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777") + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # insert delay at the beginning of snapshot clone + self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5) + + # schedule a clones + for clone in clone_list: + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # remove track file for third clone to make it orphan + meta_path = os.path.join(".", "volumes", "_nogroup", subvolume, ".meta") + pending_clones_result = self.mount_a.run_shell(f"sudo grep \"clone snaps\" -A3 {meta_path}", omit_sudo=False, stdout=StringIO(), stderr=StringIO()) + third_clone_track_id = pending_clones_result.stdout.getvalue().splitlines()[3].split(" = ")[0] + third_clone_track_path = os.path.join(".", "volumes", "_index", "clone", third_clone_track_id) + self.mount_a.run_shell(f"sudo rm -f {third_clone_track_path}", omit_sudo=False) + + # list snapshot info + result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot)) + + # verify snapshot info + expected_clone_list = [] + for i in range(len(clone_list)-1): + expected_clone_list.append({"name": clone_list[i]}) + self.assertEqual(result['has_pending_clones'], "yes") + self.assertEqual(result['orphan_clones_count'], 1) + self.assertListEqual(result['pending_clones'], expected_clone_list) + self.assertEqual(len(result['pending_clones']), 2) + + # check clones status + for i in 
range(len(clone_list)-1): + self._wait_for_clone_to_complete(clone_list[i]) + + # list snapshot info after cloning completion + res = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot)) + + # verify snapshot info (has_pending_clones should be no) + self.assertEqual(res['has_pending_clones'], "no") + def test_non_clone_status(self): subvolume = self._generate_random_subvolume_name() @@ -6137,7 +7498,7 @@ class TestMisc(TestVolumesHelper): subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime", "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace", "type", "uid", "features", "state"] - snap_md = ["created_at", "data_pool", "has_pending_clones", "size"] + snap_md = ["created_at", "data_pool", "has_pending_clones"] subvolume = self._generate_random_subvolume_name() snapshot = self._generate_random_snapshot_name() @@ -6338,11 +7699,10 @@ class TestMisc(TestVolumesHelper): poor man's upgrade test -- theme continues... """ subvol1, subvol2 = self._generate_random_subvolume_name(2) - group = self._generate_random_group_name() # emulate a old-fashioned subvolume in the default group createpath1 = os.path.join(".", "volumes", "_nogroup", subvol1) - self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True) + self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False) # add required xattrs to subvolume default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") @@ -6354,7 +7714,7 @@ class TestMisc(TestVolumesHelper): # Create malicious .meta file in legacy subvolume root. 
Copy v2 subvolume # .meta into legacy subvol1's root subvol2_metapath = os.path.join(".", "volumes", "_nogroup", subvol2, ".meta") - self.mount_a.run_shell(["cp", subvol2_metapath, createpath1], sudo=True) + self.mount_a.run_shell(['sudo', 'cp', subvol2_metapath, createpath1], omit_sudo=False) # Upgrade legacy subvol1 to v1 subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvol1) @@ -6396,7 +7756,7 @@ class TestMisc(TestVolumesHelper): # emulate a old-fashioned subvolume -- in a custom group createpath = os.path.join(".", "volumes", group, subvol) - self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True) + self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False) # add required xattrs to subvolume default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") @@ -6439,7 +7799,7 @@ class TestMisc(TestVolumesHelper): # emulate a old-fashioned subvolume -- in a custom group createpath = os.path.join(".", "volumes", group, subvol) - self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True) + self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False) # add required xattrs to subvolume default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") diff --git a/ceph/qa/tasks/mgr/dashboard/test_orchestrator.py b/ceph/qa/tasks/mgr/dashboard/test_orchestrator.py index 8395853e3..2a804c4c2 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_orchestrator.py +++ b/ceph/qa/tasks/mgr/dashboard/test_orchestrator.py @@ -8,7 +8,7 @@ class OrchestratorControllerTest(DashboardTestCase): AUTH_ROLES = ['cluster-manager'] - URL_STATUS = '/api/orchestrator/status' + URL_STATUS = '/ui-api/orchestrator/status' ORCHESTRATOR = True diff --git a/ceph/qa/tasks/mgr/dashboard/test_rbd.py b/ceph/qa/tasks/mgr/dashboard/test_rbd.py index b8df472a6..997d10f2a 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_rbd.py +++ b/ceph/qa/tasks/mgr/dashboard/test_rbd.py @@ -10,10 +10,12 @@ from .helper import DashboardTestCase, JLeaf, JList, JObj 
class RbdTest(DashboardTestCase): AUTH_ROLES = ['pool-manager', 'block-manager', 'cluster-manager'] + LIST_VERSION = '2.0' @DashboardTestCase.RunAs('test', 'test', [{'rbd-image': ['create', 'update', 'delete']}]) def test_read_access_permissions(self): - self._get('/api/block/image') + self._get('/api/block/image?offset=0&limit=-1&search=&sort=+name', + version=RbdTest.LIST_VERSION) self.assertStatus(403) self.get_image('pool', None, 'image') self.assertStatus(403) @@ -205,6 +207,7 @@ class RbdTest(DashboardTestCase): { "size": 1073741824, "obj_size": 4194304, + "mirror_mode": "journal", "num_objs": 256, "order": 22, "block_name_prefix": "rbd_data.10ae2ae8944a", @@ -245,6 +248,7 @@ class RbdTest(DashboardTestCase): 'source': JLeaf(int), 'value': JLeaf(str), })), + 'mirror_mode': JLeaf(str), }) self.assertSchema(img, schema) @@ -280,12 +284,12 @@ class RbdTest(DashboardTestCase): self.fail("Snapshot {} not found".format(snap_name)) def test_list(self): - data = self._view_cache_get('/api/block/image') + data = self._get('/api/block/image?offset=0&limit=-1&search=&sort=+name', + version=RbdTest.LIST_VERSION) self.assertStatus(200) self.assertEqual(len(data), 2) for pool_view in data: - self.assertEqual(pool_view['status'], 0) self.assertIsNotNone(pool_view['value']) self.assertIn('pool_name', pool_view) self.assertIn(pool_view['pool_name'], ['rbd', 'rbd_iscsi']) diff --git a/ceph/qa/tasks/mgr/dashboard/test_rgw.py b/ceph/qa/tasks/mgr/dashboard/test_rgw.py index dc972d3ed..53577a87a 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_rgw.py +++ b/ceph/qa/tasks/mgr/dashboard/test_rgw.py @@ -84,7 +84,7 @@ class RgwApiCredentialsTest(RgwTestCase): # Set the default credentials. 
self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin') self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin') - data = self._get('/api/rgw/status') + data = self._get('/ui-api/rgw/status') self.assertStatus(200) self.assertIn('available', data) self.assertIn('message', data) @@ -480,7 +480,7 @@ class RgwDaemonTest(RgwTestCase): self.assertTrue(data['rgw_metadata']) def test_status(self): - data = self._get('/api/rgw/status') + data = self._get('/ui-api/rgw/status') self.assertStatus(200) self.assertIn('available', data) self.assertIn('message', data) diff --git a/ceph/qa/tasks/rbd_pwl_cache_recovery.py b/ceph/qa/tasks/rbd_pwl_cache_recovery.py new file mode 100644 index 000000000..e13c1f664 --- /dev/null +++ b/ceph/qa/tasks/rbd_pwl_cache_recovery.py @@ -0,0 +1,96 @@ +""" +persistent write log cache recovery task +""" +import contextlib +import logging +import random +import json +import time + +from teuthology import misc as teuthology +from teuthology import contextutil + +DEFAULT_NUM_ITERATIONS = 20 +IO_PATTERNS = ("full-seq", "rand") +IO_SIZES = ('4K', '16K', '128K', '1024K') + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def thrashes_rbd_bench_on_persistent_cache(ctx, config): + """ + thrashes rbd bench on persistent write log cache. + It can test recovery feature of persistent write log cache. + """ + log.info("thrashes rbd bench on persistent write log cache") + + client, client_config = list(config.items())[0] + (remote,) = ctx.cluster.only(client).remotes.keys() + client_config = client_config if client_config is not None else dict() + image_name = client_config.get('image_name', 'testimage') + num_iterations = client_config.get('num_iterations', DEFAULT_NUM_ITERATIONS) + + for i in range(num_iterations): + log.info("start rbd bench") + # rbd bench could not specify the run time so set a large enough test size. 
+ remote.run( + args=[ + 'rbd', 'bench', + '--io-type', 'write', + '--io-pattern', random.choice(IO_PATTERNS), + '--io-size', random.choice(IO_SIZES), + '--io-total', '100G', + image_name, + ], + wait=False, + ) + # Wait a few seconds for the rbd bench process to run + # and complete the pwl cache initialization + time.sleep(10) + log.info("dump cache state when rbd bench running.") + remote.sh(['rbd', 'status', image_name, '--format=json']) + log.info("sleep...") + time.sleep(random.randint(10, 60)) + log.info("rbd bench crash.") + remote.run( + args=[ + 'killall', '-9', 'rbd', + ], + check_status=False, + ) + log.info("wait for watch timeout.") + time.sleep(40) + log.info("check cache state after crash.") + out = remote.sh(['rbd', 'status', image_name, '--format=json']) + rbd_status = json.loads(out) + assert len(rbd_status['watchers']) == 0 + assert rbd_status['persistent_cache']['present'] == True + assert rbd_status['persistent_cache']['empty'] == False + assert rbd_status['persistent_cache']['clean'] == False + log.info("check dirty cache file.") + remote.run( + args=[ + 'test', '-e', rbd_status['persistent_cache']['path'], + ] + ) + try: + yield + finally: + log.info("cleanup") + +@contextlib.contextmanager +def task(ctx, config): + """ + This is task for testing persistent write log cache recovery. 
+ """ + assert isinstance(config, dict), \ + "task rbd_pwl_cache_recovery only supports a dictionary for configuration" + + managers = [] + config = teuthology.replace_all_with_clients(ctx.cluster, config) + managers.append( + lambda: thrashes_rbd_bench_on_persistent_cache(ctx=ctx, config=config) + ) + + with contextutil.nested(*managers): + yield diff --git a/ceph/qa/tasks/rgw.py b/ceph/qa/tasks/rgw.py index 3d2542981..bde82de41 100644 --- a/ceph/qa/tasks/rgw.py +++ b/ceph/qa/tasks/rgw.py @@ -138,9 +138,12 @@ def start_rgw(ctx, config, clients): ctx.cluster.only(client).run(args=['sudo', 'chmod', '600', token_path]) ctx.cluster.only(client).run(args=['sudo', 'chown', 'ceph', token_path]) + vault_addr = "{}:{}".format(*ctx.vault.endpoints[vault_role]) rgw_cmd.extend([ - '--rgw_crypt_vault_addr', "{}:{}".format(*ctx.vault.endpoints[vault_role]), - '--rgw_crypt_vault_token_file', token_path + '--rgw_crypt_vault_addr', vault_addr, + '--rgw_crypt_vault_token_file', token_path, + '--rgw_crypt_sse_s3_vault_addr', vault_addr, + '--rgw_crypt_sse_s3_vault_token_file', token_path, ]) elif pykmip_role is not None: if not hasattr(ctx, 'pykmip'): diff --git a/ceph/qa/tasks/s3tests.py b/ceph/qa/tasks/s3tests.py index bd844c6c1..4ee388f5d 100644 --- a/ceph/qa/tasks/s3tests.py +++ b/ceph/qa/tasks/s3tests.py @@ -416,6 +416,7 @@ def run_tests(ctx, config): testdir = teuthology.get_testdir(ctx) for client, client_config in config.items(): client_config = client_config or {} + (cluster_name,_,_) = teuthology.split_role(client) (remote,) = ctx.cluster.only(client).remotes.keys() args = [ 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client), @@ -432,6 +433,12 @@ def run_tests(ctx, config): attrs = ["!fails_on_rgw", "!lifecycle_expiration", "!fails_strict_rfc2616","!test_of_sts","!webidentity_test"] if client_config.get('calling-format') != 'ordinary': attrs += ['!fails_with_subdomain'] + if not client_config.get('with-sse-s3'): + attrs += ['!sse-s3'] + 
elif client_config.get('with-sse-s3'): + pass + elif ctx.ceph[cluster_name].rgw_crypt_sse_s3_backend is None: + attrs += ['!sse-s3'] if 'extra_attrs' in client_config: attrs = client_config.get('extra_attrs') diff --git a/ceph/qa/workunits/cephadm/test_repos.sh b/ceph/qa/workunits/cephadm/test_repos.sh index 148972ceb..4b0749231 100755 --- a/ceph/qa/workunits/cephadm/test_repos.sh +++ b/ceph/qa/workunits/cephadm/test_repos.sh @@ -24,7 +24,7 @@ sudo $CEPHADM -v add-repo --release octopus test_install_uninstall sudo $CEPHADM -v rm-repo -sudo $CEPHADM -v add-repo --dev master +sudo $CEPHADM -v add-repo --dev main test_install_uninstall sudo $CEPHADM -v rm-repo diff --git a/ceph/qa/workunits/fs/full/subvolume_clone.sh b/ceph/qa/workunits/fs/full/subvolume_clone.sh new file mode 100755 index 000000000..a11131215 --- /dev/null +++ b/ceph/qa/workunits/fs/full/subvolume_clone.sh @@ -0,0 +1,114 @@ +#!/usr/bin/env bash +set -ex + +# This testcase tests the 'ceph fs subvolume snapshot clone' when the osd is full. +# The clone fails with 'MetadataMgrException: -28 (error in write)' and +# truncates the config file of corresponding subvolume while updating the config file. +# Hence the subsequent subvolume commands on the clone fails with +# 'MetadataMgrException: -2 (section 'GLOBAL' does not exist)' traceback. + +# The osd is of the size 1GB. The full-ratios are set so that osd is treated full +# at around 600MB. The subvolume is created and 100MB is written. +# The subvolume is snapshotted and cloned ten times. Since the clone delay is set to 15 seconds, +# all the clones reach pending state for sure. Among ten clones, only few succeed and rest fails +# with ENOSPACE. + +# At this stage, ".meta" config file of the failed clones are checked if it's truncated. +# and clone status command is checked for traceback. + +# Note that the failed clones would be in retry loop and it's state would be 'pending' or 'in-progress'. 
+# It's state is not updated to 'failed' as the config update to gets ENOSPACE too. + +set -e +ignore_failure() { + if "$@"; then return 0; else return 0; fi +} + +expect_failure() { + if "$@"; then return 1; else return 0; fi +} + +NUM_CLONES=10 + +ceph fs subvolume create cephfs sub_0 +subvol_path_0=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null) + +# For debugging +echo "Before ratios are set" +df $CEPH_MNT +ceph osd df + +ceph osd set-full-ratio 0.6 +ceph osd set-nearfull-ratio 0.50 +ceph osd set-backfillfull-ratio 0.55 + +# For debugging +echo "After ratios are set" +df -h +ceph osd df + +for i in {1..100};do sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path_0/1MB_file-$i status=progress bs=1M count=1 conv=fdatasync;done + +# For debugging +echo "After subvolumes are written" +df -h $CEPH_MNT +ceph osd df + +# snapshot +ceph fs subvolume snapshot create cephfs sub_0 snap_0 + +# Set clone snapshot delay +ceph config set mgr mgr/volumes/snapshot_clone_delay 15 + +# Schedule few clones, some would fail with no space +for i in $(eval echo {1..$NUM_CLONES});do ceph fs subvolume snapshot clone cephfs sub_0 snap_0 clone_$i;done + +# Wait for osd is full +timeout=90 +while [ $timeout -gt 0 ] +do + health=$(ceph health detail) + [[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break + echo "Wating for osd to be full: $timeout" + sleep 1 + let "timeout-=1" +done + +# For debugging +echo "After osd is full" +df -h $CEPH_MNT +ceph osd df + +# Check clone status, this should not crash +for i in $(eval echo {1..$NUM_CLONES}) +do + ignore_failure ceph fs clone status cephfs clone_$i >/tmp/out_${PID}_file 2>/tmp/error_${PID}_file + cat /tmp/error_${PID}_file + if grep "complete" /tmp/out_${PID}_file; then + echo "The clone_$i is completed" + else + #in-progress/pending clones, No traceback should be found in stderr + echo clone_$i in PENDING/IN-PROGRESS + expect_failure sudo grep "Traceback" /tmp/error_${PID}_file + #config file should not be truncated and GLOBAL 
section should be found + sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/clone_$i/.meta + fi +done + +# Hard cleanup +ignore_failure sudo rm -rf $CEPH_MNT/_index/clone/* +ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/clone_* +ignore_failure sudo rmdir $CEPH_MNT/volumes/_nogroup/sub_0/.snap/snap_0 +ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/sub_0 + +#Set the ratios back for other full tests to run +ceph osd set-full-ratio 0.95 +ceph osd set-nearfull-ratio 0.95 +ceph osd set-backfillfull-ratio 0.95 + +#After test +echo "After test" +df -h $CEPH_MNT +ceph osd df + +echo OK diff --git a/ceph/qa/workunits/fs/full/subvolume_rm.sh b/ceph/qa/workunits/fs/full/subvolume_rm.sh index d0f9e2403..a464e30f5 100755 --- a/ceph/qa/workunits/fs/full/subvolume_rm.sh +++ b/ceph/qa/workunits/fs/full/subvolume_rm.sh @@ -59,4 +59,14 @@ do let "timeout-=1" done +#Set the ratios back for other full tests to run +ceph osd set-full-ratio 0.95 +ceph osd set-nearfull-ratio 0.95 +ceph osd set-backfillfull-ratio 0.95 + +#After test +echo "After test" +df -h +ceph osd df + echo OK diff --git a/ceph/qa/workunits/fs/full/subvolume_snapshot_rm.sh b/ceph/qa/workunits/fs/full/subvolume_snapshot_rm.sh new file mode 100755 index 000000000..f6d0add9f --- /dev/null +++ b/ceph/qa/workunits/fs/full/subvolume_snapshot_rm.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash +set -ex + +# This testcase tests the 'ceph fs subvolume snapshot rm' when the osd is full. +# The snapshot rm fails with 'MetadataMgrException: -28 (error in write)' and +# truncates the config file of corresponding subvolume. Hence the subsequent +# snapshot rm of the same snapshot fails with 'MetadataMgrException: -2 (section 'GLOBAL' does not exist)' +# traceback. + +# The osd is of the size 1GB. The subvolume is created and 800MB file is written. +# Then full-ratios are set below 500MB such that the osd is treated as full. +# The subvolume snapshot is taken which succeeds as no extra space is required +# for snapshot. 
Now, the removal of the snapshot fails with ENOSPACE as it +# fails to remove the snapshot metadata set. The snapshot removal fails +# but should not traceback and truncate the config file. + +set -e +expect_failure() { + if "$@"; then return 1; else return 0; fi +} + +ignore_failure() { + if "$@"; then return 0; else return 0; fi +} + +ceph fs subvolume create cephfs sub_0 +subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null) + +#For debugging +echo "Before write" +df $CEPH_MNT +ceph osd df + +# Write 800MB file and set full ratio to around 200MB +ignore_failure sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/800MB_file-1 status=progress bs=1M count=800 conv=fdatasync + +ceph osd set-full-ratio 0.2 +ceph osd set-nearfull-ratio 0.16 +ceph osd set-backfillfull-ratio 0.18 + +timeout=30 +while [ $timeout -gt 0 ] +do + health=$(ceph health detail) + [[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break + echo "Wating for osd to be full: $timeout" + sleep 1 + let "timeout-=1" +done + +#Take snapshot +ceph fs subvolume snapshot create cephfs sub_0 snap_0 + +#Remove snapshot fails but should not throw traceback +expect_failure ceph fs subvolume snapshot rm cephfs sub_0 snap_0 2>/tmp/error_${PID}_file +cat /tmp/error_${PID}_file + +# No traceback should be found +expect_failure grep "Traceback" /tmp/error_${PID}_file + +# Validate config file is not truncated and GLOBAL section exists +sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/sub_0/.meta + +#For debugging +echo "After write" +df $CEPH_MNT +ceph osd df + +# Snapshot removal with force option should succeed +ceph fs subvolume snapshot rm cephfs sub_0 snap_0 --force + +#Cleanup from backend +ignore_failure sudo rm -f /tmp/error_${PID}_file +ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/sub_0 + +#Set the ratios back for other full tests to run +ceph osd set-full-ratio 0.95 +ceph osd set-nearfull-ratio 0.95 +ceph osd set-backfillfull-ratio 0.95 + +#After test +echo "After test" +df -h $CEPH_MNT 
+ceph osd df + +echo OK diff --git a/ceph/qa/workunits/fs/misc/dac_override.sh b/ceph/qa/workunits/fs/misc/dac_override.sh new file mode 100755 index 000000000..dfb1a9091 --- /dev/null +++ b/ceph/qa/workunits/fs/misc/dac_override.sh @@ -0,0 +1,19 @@ +#!/bin/sh -x + +expect_failure() { + if "$@"; then return 1; else return 0; fi +} + +set -e + +mkdir -p testdir +file=test_chmod.$$ + +echo "foo" > testdir/${file} +sudo chmod 600 testdir + +# only root can read +expect_failure cat testdir/${file} + +# directory read/write DAC override for root should allow read +sudo cat testdir/${file} diff --git a/ceph/qa/workunits/mon/config.sh b/ceph/qa/workunits/mon/config.sh index 36eab8f7b..1b00201ae 100755 --- a/ceph/qa/workunits/mon/config.sh +++ b/ceph/qa/workunits/mon/config.sh @@ -62,6 +62,17 @@ ceph config rm client.foo.bar debug_asok ceph config get client.foo.bar.baz debug_asok | grep 33 ceph config rm global debug_asok +# whitespace keys +ceph config set client.foo 'debug asok' 44 +ceph config get client.foo 'debug asok' | grep 44 +ceph config set client.foo debug_asok 55 +ceph config get client.foo 'debug asok' | grep 55 +ceph config set client.foo 'debug asok' 66 +ceph config get client.foo debug_asok | grep 66 +ceph config rm client.foo debug_asok +ceph config set client.foo debug_asok 66 +ceph config rm client.foo 'debug asok' + # help ceph config help debug_asok | grep debug_asok diff --git a/ceph/qa/workunits/rbd/cli_generic.sh b/ceph/qa/workunits/rbd/cli_generic.sh index 6b403c095..042d67fad 100755 --- a/ceph/qa/workunits/rbd/cli_generic.sh +++ b/ceph/qa/workunits/rbd/cli_generic.sh @@ -1310,6 +1310,20 @@ test_mirror_snapshot_schedule() { test "$(rbd mirror snapshot schedule status -p rbd2/ns1 --image test1 --format xml | $XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1' + rbd mirror image demote rbd2/ns1/test1 + for i in `seq 12`; do + rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' || break + sleep 10 + done + rbd mirror 
snapshot schedule status | expect_fail grep 'rbd2/ns1/test1' + + rbd mirror image promote rbd2/ns1/test1 + for i in `seq 12`; do + rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' && break + sleep 10 + done + rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' + rbd mirror snapshot schedule add 1h 00:15 test "$(rbd mirror snapshot schedule ls)" = 'every 1h starting at 00:15:00' rbd mirror snapshot schedule ls -R | grep 'every 1h starting at 00:15:00' @@ -1331,11 +1345,11 @@ test_mirror_snapshot_schedule() { test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m' rbd rm rbd2/ns1/test1 - for i in `seq 12`; do rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' || break sleep 10 done + rbd mirror snapshot schedule status | expect_fail grep 'rbd2/ns1/test1' rbd mirror snapshot schedule remove test "$(rbd mirror snapshot schedule ls -R --format json)" = "[]" @@ -1344,6 +1358,107 @@ test_mirror_snapshot_schedule() { ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it } +test_perf_image_iostat() { + echo "testing perf image iostat..." + remove_images + + ceph osd pool create rbd1 8 + rbd pool init rbd1 + rbd namespace create rbd1/ns + ceph osd pool create rbd2 8 + rbd pool init rbd2 + rbd namespace create rbd2/ns + + IMAGE_SPECS=("test1" "rbd1/test2" "rbd1/ns/test3" "rbd2/test4" "rbd2/ns/test5") + for spec in "${IMAGE_SPECS[@]}"; do + # ensure all images are created without a separate data pool + # as we filter iostat by specific pool specs below + rbd create $RBD_CREATE_ARGS --size 10G --rbd-default-data-pool '' $spec + done + + BENCH_PIDS=() + for spec in "${IMAGE_SPECS[@]}"; do + rbd bench --io-type write --io-pattern rand --io-total 10G --io-threads 1 \ + --rbd-cache false $spec >/dev/null 2>&1 & + BENCH_PIDS+=($!) 
+ done + + # test specifying pool spec via spec syntax + test "$(rbd perf image iostat --format json rbd1 | + jq -r 'map(.image) | sort | join(" ")')" = 'test2' + test "$(rbd perf image iostat --format json rbd1/ns | + jq -r 'map(.image) | sort | join(" ")')" = 'test3' + test "$(rbd perf image iostat --format json --rbd-default-pool rbd1 /ns | + jq -r 'map(.image) | sort | join(" ")')" = 'test3' + + # test specifying pool spec via options + test "$(rbd perf image iostat --format json --pool rbd2 | + jq -r 'map(.image) | sort | join(" ")')" = 'test4' + test "$(rbd perf image iostat --format json --pool rbd2 --namespace ns | + jq -r 'map(.image) | sort | join(" ")')" = 'test5' + test "$(rbd perf image iostat --format json --rbd-default-pool rbd2 --namespace ns | + jq -r 'map(.image) | sort | join(" ")')" = 'test5' + + # test omitting pool spec (-> GLOBAL_POOL_KEY) + test "$(rbd perf image iostat --format json | + jq -r 'map(.image) | sort | join(" ")')" = 'test1 test2 test3 test4 test5' + + for pid in "${BENCH_PIDS[@]}"; do + kill $pid + done + wait + + remove_images + ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it + ceph osd pool rm rbd1 rbd1 --yes-i-really-really-mean-it +} + +test_mirror_pool_peer_bootstrap_create() { + echo "testing mirror pool peer bootstrap create..." 
+ remove_images + + ceph osd pool create rbd1 8 + rbd pool init rbd1 + rbd mirror pool enable rbd1 image + ceph osd pool create rbd2 8 + rbd pool init rbd2 + rbd mirror pool enable rbd2 pool + + readarray -t MON_ADDRS < <(ceph mon dump | + sed -n 's/^[0-9]: \(.*\) mon\.[a-z]$/\1/p') + + # check that all monitors make it to the token even if only one + # valid monitor is specified + BAD_MON_ADDR="1.2.3.4:6789" + MON_HOST="${MON_ADDRS[0]},$BAD_MON_ADDR" + TOKEN="$(rbd mirror pool peer bootstrap create \ + --mon-host "$MON_HOST" rbd1 | base64 -d)" + TOKEN_FSID="$(jq -r '.fsid' <<< "$TOKEN")" + TOKEN_CLIENT_ID="$(jq -r '.client_id' <<< "$TOKEN")" + TOKEN_KEY="$(jq -r '.key' <<< "$TOKEN")" + TOKEN_MON_HOST="$(jq -r '.mon_host' <<< "$TOKEN")" + + test "$TOKEN_FSID" = "$(ceph fsid)" + test "$TOKEN_KEY" = "$(ceph auth get-key client.$TOKEN_CLIENT_ID)" + for addr in "${MON_ADDRS[@]}"; do + fgrep "$addr" <<< "$TOKEN_MON_HOST" + done + expect_fail fgrep "$BAD_MON_ADDR" <<< "$TOKEN_MON_HOST" + + # check that the token does not change, including across pools + test "$(rbd mirror pool peer bootstrap create \ + --mon-host "$MON_HOST" rbd1 | base64 -d)" = "$TOKEN" + test "$(rbd mirror pool peer bootstrap create \ + rbd1 | base64 -d)" = "$TOKEN" + test "$(rbd mirror pool peer bootstrap create \ + --mon-host "$MON_HOST" rbd2 | base64 -d)" = "$TOKEN" + test "$(rbd mirror pool peer bootstrap create \ + rbd2 | base64 -d)" = "$TOKEN" + + ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it + ceph osd pool rm rbd1 rbd1 --yes-i-really-really-mean-it +} + test_pool_image_args test_rename test_ls @@ -1366,5 +1481,7 @@ test_thick_provision test_namespace test_trash_purge_schedule test_mirror_snapshot_schedule +test_perf_image_iostat +test_mirror_pool_peer_bootstrap_create echo OK diff --git a/ceph/src/.git_version b/ceph/src/.git_version index 493a13236..00c39bb37 100644 --- a/ceph/src/.git_version +++ b/ceph/src/.git_version @@ -1,2 +1,2 @@ -dff484dfc9e19a9819f375586300b3b79d80034d 
-17.2.3 +1353ed37dec8d74973edc3d5d5908c20ad5a7332 +17.2.4 diff --git a/ceph/src/CMakeLists.txt b/ceph/src/CMakeLists.txt index 83a90e4ec..c779c8521 100644 --- a/ceph/src/CMakeLists.txt +++ b/ceph/src/CMakeLists.txt @@ -596,6 +596,7 @@ endif(NOT WITH_SYSTEM_ROCKSDB) if(WITH_MGR) add_subdirectory(mgr) + add_subdirectory(exporter) endif() set(librados_config_srcs @@ -889,6 +890,7 @@ add_custom_target(vstart-base DEPENDS ceph-mon ceph-authtool ceph-conf + ceph-exporter monmaptool crushtool rados) @@ -901,6 +903,7 @@ endif() if (WITH_MGR) add_dependencies(vstart-base ceph-mgr) + add_dependencies(vstart-base ceph-exporter) endif() add_custom_target(vstart DEPENDS vstart-base) diff --git a/ceph/src/SimpleRADOSStriper.cc b/ceph/src/SimpleRADOSStriper.cc index 3a64193d3..bbbf15527 100644 --- a/ceph/src/SimpleRADOSStriper.cc +++ b/ceph/src/SimpleRADOSStriper.cc @@ -488,7 +488,10 @@ ssize_t SimpleRADOSStriper::read(void* data, size_t len, uint64_t off) } size_t r = 0; - std::vector> reads; + // Don't use std::vector to store bufferlists (e.g for parallelizing aio_reads), + // as they are being moved whenever the vector resizes + // and will cause invalidated references. + std::deque> reads; while ((len-r) > 0) { auto ext = get_next_extent(off+r, len-r); auto& [bl, aiocp] = reads.emplace_back(); diff --git a/ceph/src/auth/Crypto.cc b/ceph/src/auth/Crypto.cc index c96222fea..ce666e8bd 100644 --- a/ceph/src/auth/Crypto.cc +++ b/ceph/src/auth/Crypto.cc @@ -33,6 +33,12 @@ #include "common/debug.h" #include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + using std::ostringstream; using std::string; @@ -40,6 +46,7 @@ using ceph::bufferlist; using ceph::bufferptr; using ceph::Formatter; + // use getentropy() if available. 
it uses the same source of randomness // as /dev/urandom without the filesystem overhead #ifdef HAVE_GETENTROPY @@ -603,3 +610,6 @@ CryptoHandler *CryptoHandler::create(int type) return NULL; } } + +#pragma clang diagnostic pop +#pragma GCC diagnostic pop diff --git a/ceph/src/blk/kernel/KernelDevice.cc b/ceph/src/blk/kernel/KernelDevice.cc index 1b03ce31c..ee06af61e 100644 --- a/ceph/src/blk/kernel/KernelDevice.cc +++ b/ceph/src/blk/kernel/KernelDevice.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -1078,7 +1079,17 @@ struct ExplicitHugePagePool { nullptr, buffer_size, PROT_READ | PROT_WRITE, +#if defined(__FreeBSD__) + // FreeBSD doesn't have MAP_HUGETLB nor MAP_POPULATE but it has + // a different, more automated / implicit mechanisms. However, + // we want to mimic the Linux behavior as closely as possible + // also in the matter of error handling which is the reason + // behind MAP_ALIGNED_SUPER. + // See: https://lists.freebsd.org/pipermail/freebsd-questions/2014-August/260578.html + MAP_PRIVATE | MAP_ANONYMOUS | MAP_PREFAULT_READ | MAP_ALIGNED_SUPER, +#else MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | MAP_HUGETLB, +#endif // __FreeBSD__ -1, 0); if (mmaped_region == MAP_FAILED) { diff --git a/ceph/src/ceph-volume/ceph_volume/api/lvm.py b/ceph/src/ceph-volume/ceph_volume/api/lvm.py index 9a5907c5d..b23466b7a 100644 --- a/ceph/src/ceph-volume/ceph_volume/api/lvm.py +++ b/ceph/src/ceph-volume/ceph_volume/api/lvm.py @@ -785,6 +785,17 @@ def get_device_vgs(device, name_prefix=''): return [VolumeGroup(**vg) for vg in vgs if vg['vg_name'] and vg['vg_name'].startswith(name_prefix)] +def get_all_devices_vgs(name_prefix=''): + vg_fields = f'pv_name,{VG_FIELDS}' + cmd = ['pvs'] + VG_CMD_OPTIONS + ['-o', vg_fields] + stdout, stderr, returncode = process.call( + cmd, + run_on_host=True, + verbose_on_failure=False + ) + vgs = _output_parser(stdout, vg_fields) + return [VolumeGroup(**vg) for vg in vgs] + ################################# # # 
Code for LVM Logical Volumes diff --git a/ceph/src/ceph-volume/ceph_volume/configuration.py b/ceph/src/ceph-volume/ceph_volume/configuration.py index 2fee47ffa..e0f7ef1f0 100644 --- a/ceph/src/ceph-volume/ceph_volume/configuration.py +++ b/ceph/src/ceph-volume/ceph_volume/configuration.py @@ -86,13 +86,14 @@ class Conf(conf_parentclass): s = '_'.join(s.split()) return s - def get_safe(self, section, key, default=None): + def get_safe(self, section, key, default=None, check_valid=True): """ Attempt to get a configuration value from a certain section in a ``cfg`` object but returning None if not found. Avoids the need to be doing try/except {ConfigParser Exceptions} every time. """ - self.is_valid() + if check_valid: + self.is_valid() try: return self.get(section, key) except (configparser.NoSectionError, configparser.NoOptionError): diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py index 6fa619d50..90c4c22c4 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/batch.py @@ -73,7 +73,8 @@ def get_physical_osds(devices, args): abs_size, args.osds_per_device, osd_id, - 'dmcrypt' if args.dmcrypt else None)) + 'dmcrypt' if args.dmcrypt else None, + dev.symlink)) return ret @@ -119,14 +120,10 @@ def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, ar continue # any LV present is considered a taken slot occupied_slots = len(dev.lvs) - # prior to v15.2.8, db/wal deployments were grouping multiple fast devices into single VGs - we need to - # multiply requested_slots (per device) by the number of devices in the VG in order to ensure that - # abs_size is calculated correctly from vg_size - slots_for_vg = len(vg_devices) * requested_slots dev_size = dev.vg_size[0] # this only looks at the first vg on device, unsure if there is a better # way - abs_size = disk.Size(b=int(dev_size / slots_for_vg)) + abs_size = 
disk.Size(b=int(dev_size / requested_slots)) free_size = dev.vg_free[0] relative_size = int(abs_size) / dev_size if requested_size: @@ -572,7 +569,8 @@ class Batch(object): abs_size, slots, id_, - encryption): + encryption, + symlink=None): self.id_ = id_ self.data = self.VolSpec(path=data_path, rel_size=rel_size, @@ -582,6 +580,7 @@ class Batch(object): self.fast = None self.very_fast = None self.encryption = encryption + self.symlink = symlink def add_fast_device(self, path, rel_size, abs_size, slots, type_): self.fast = self.VolSpec(path=path, @@ -633,9 +632,12 @@ class Batch(object): if self.encryption: report += templates.osd_encryption.format( enc=self.encryption) + path = self.data.path + if self.symlink: + path = f'{self.symlink} -> {self.data.path}' report += templates.osd_component.format( _type=self.data.type_, - path=self.data.path, + path=path, size=self.data.abs_size, percent=self.data.rel_size) if self.fast: diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/listing.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/listing.py index 44d5063ce..c16afdaa7 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/listing.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/listing.py @@ -101,6 +101,8 @@ class List(object): report = {} + pvs = api.get_pvs() + for lv in lvs: if not api.is_ceph_device(lv): continue @@ -109,8 +111,7 @@ class List(object): report.setdefault(osd_id, []) lv_report = lv.as_dict() - pvs = api.get_pvs(filters={'lv_uuid': lv.lv_uuid}) - lv_report['devices'] = [pv.name for pv in pvs] if pvs else [] + lv_report['devices'] = [pv.name for pv in pvs if pv.lv_uuid == lv.lv_uuid] if pvs else [] report[osd_id].append(lv_report) phys_devs = self.create_report_non_lv_device(lv) diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py index 9f8141d54..d6d778d16 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py +++ 
b/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py @@ -171,8 +171,8 @@ class Zap(object): pv = api.get_single_pv(filters={'lv_uuid': lv.lv_uuid}) self.unmount_lv(lv) - wipefs(device.abspath) - zap_data(device.abspath) + wipefs(device.path) + zap_data(device.path) if self.args.destroy: lvs = api.get_lvs(filters={'vg_name': device.vg_name}) @@ -188,8 +188,8 @@ class Zap(object): mlogger.info('More than 1 LV left in VG, will proceed to ' 'destroy LV only') mlogger.info('Removing LV because --destroy was given: %s', - device.abspath) - api.remove_lv(device.abspath) + device.path) + api.remove_lv(device.path) elif lv: # just remove all lvm metadata, leaving the LV around lv.clear_tags() @@ -209,15 +209,15 @@ class Zap(object): if os.path.realpath(mapper_path) in holders: self.dmcrypt_close(mapper_uuid) - if system.device_is_mounted(device.abspath): - mlogger.info("Unmounting %s", device.abspath) - system.unmount(device.abspath) + if system.device_is_mounted(device.path): + mlogger.info("Unmounting %s", device.path) + system.unmount(device.path) - wipefs(device.abspath) - zap_data(device.abspath) + wipefs(device.path) + zap_data(device.path) if self.args.destroy: - mlogger.info("Destroying partition since --destroy was used: %s" % device.abspath) + mlogger.info("Destroying partition since --destroy was used: %s" % device.path) disk.remove_partition(device) def zap_lvm_member(self, device): @@ -230,7 +230,7 @@ class Zap(object): """ for lv in device.lvs: if lv.lv_name: - mlogger.info('Zapping lvm member {}. lv_path is {}'.format(device.abspath, lv.lv_path)) + mlogger.info('Zapping lvm member {}. 
lv_path is {}'.format(device.path, lv.lv_path)) self.zap_lv(Device(lv.lv_path)) else: vg = api.get_single_vg(filters={'vg_name': lv.vg_name}) @@ -259,15 +259,15 @@ class Zap(object): for part_name in device.sys_api.get('partitions', {}).keys(): self.zap_partition(Device('/dev/%s' % part_name)) - wipefs(device.abspath) - zap_data(device.abspath) + wipefs(device.path) + zap_data(device.path) @decorators.needs_root def zap(self, devices=None): devices = devices or self.args.devices for device in devices: - mlogger.info("Zapping: %s", device.abspath) + mlogger.info("Zapping: %s", device.path) if device.is_mapper and not device.is_mpath: terminal.error("Refusing to zap the mapper device: {}".format(device)) raise SystemExit(1) diff --git a/ceph/src/ceph-volume/ceph_volume/devices/raw/list.py b/ceph/src/ceph-volume/ceph_volume/devices/raw/list.py index 50d2046da..06a2b3c22 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/raw/list.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/raw/list.py @@ -68,22 +68,17 @@ class List(object): def generate(self, devs=None): logger.debug('Listing block devices via lsblk...') + info_devices = disk.lsblk_all(abspath=True) if devs is None or devs == []: - devs = [] # If no devs are given initially, we want to list ALL devices including children and # parents. Parent disks with child partitions may be the appropriate device to return if # the parent disk has a bluestore header, but children may be the most appropriate # devices to return if the parent disk does not have a bluestore header. 
- out, err, ret = process.call([ - 'lsblk', '--paths', '--output=NAME', '--noheadings', '--list' - ]) - assert not ret - devs = out + devs = [device['NAME'] for device in info_devices if device.get('NAME',)] result = {} logger.debug('inspecting devices: {}'.format(devs)) for dev in devs: - info = disk.lsblk(dev, abspath=True) # Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret # bluestore's on-disk format as an Atari partition table. These false Atari partitions # can be interpreted as real OSDs if a bluestore OSD was previously created on the false @@ -93,28 +88,29 @@ class List(object): # parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to # determine whether a parent is bluestore, we should err on the side of not reporting # the child so as not to give a false negative. - if 'PKNAME' in info and info['PKNAME'] != "": - parent = info['PKNAME'] - try: - if disk.has_bluestore_label(parent): - logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(dev, parent), - 'device is likely a phantom Atari partition. device info: {}'.format(info))) + for info_device in info_devices: + if 'PKNAME' in info_device and info_device['PKNAME'] != "": + parent = info_device['PKNAME'] + try: + if disk.has_bluestore_label(parent): + logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(dev, parent), + 'device is likely a phantom Atari partition. device info: {}'.format(info_device))) + continue + except OSError as e: + logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(dev), + 'failed to determine if parent device {} is BlueStore. err: {}'.format(parent, e))) continue - except OSError as e: - logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(dev), - 'failed to determine if parent device {} is BlueStore. 
err: {}'.format(parent, e))) - continue - bs_info = _get_bluestore_info(dev) - if bs_info is None: - # None is also returned in the rare event that there is an issue reading info from - # a BlueStore disk, so be sure to log our assumption that it isn't bluestore - logger.info('device {} does not have BlueStore information'.format(dev)) - continue - uuid = bs_info['osd_uuid'] - if uuid not in result: - result[uuid] = {} - result[uuid].update(bs_info) + bs_info = _get_bluestore_info(dev) + if bs_info is None: + # None is also returned in the rare event that there is an issue reading info from + # a BlueStore disk, so be sure to log our assumption that it isn't bluestore + logger.info('device {} does not have BlueStore information'.format(dev)) + continue + uuid = bs_info['osd_uuid'] + if uuid not in result: + result[uuid] = {} + result[uuid].update(bs_info) return result diff --git a/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py b/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py index 70e5256d2..ff7040beb 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/simple/scan.py @@ -137,8 +137,8 @@ class Scan(object): osd_metadata[file_json_key] = content # we must scan the paths again because this might be a temporary mount - path_mounts = system.get_mounts(paths=True) - device = path_mounts.get(path) + path_mounts = system.Mounts(paths=True) + device = path_mounts.get_mounts().get(path) # it is possible to have more than one device, pick the first one, and # warn that it is possible that more than one device is 'data' @@ -360,8 +360,8 @@ class Scan(object): )) # Capture some environment status, so that it can be reused all over - self.device_mounts = system.get_mounts(devices=True) - self.path_mounts = system.get_mounts(paths=True) + self.device_mounts = system.Mounts(devices=True).get_mounts() + self.path_mounts = system.Mounts(paths=True).get_mounts() for path in paths: args.osd_path = path 
diff --git a/ceph/src/ceph-volume/ceph_volume/main.py b/ceph/src/ceph-volume/ceph_volume/main.py index 652b0f9c8..7868665ce 100644 --- a/ceph/src/ceph-volume/ceph_volume/main.py +++ b/ceph/src/ceph-volume/ceph_volume/main.py @@ -147,8 +147,8 @@ Ceph Conf: {ceph_path} # we warn only here, because it is possible that the configuration # file is not needed, or that it will be loaded by some other means # (like reading from lvm tags) - logger.exception('ignoring inability to load ceph.conf') - terminal.red(error) + logger.warning('ignoring inability to load ceph.conf', exc_info=1) + terminal.yellow(error) # dispatch to sub-commands terminal.dispatch(self.mapper, subcommand_args) diff --git a/ceph/src/ceph-volume/ceph_volume/tests/conftest.py b/ceph/src/ceph-volume/ceph_volume/tests/conftest.py index c41a46074..f060f78d4 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/conftest.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/conftest.py @@ -58,32 +58,28 @@ def mock_lv_device_generator(): return dev return mock_lv - -@pytest.fixture -def mock_devices_available(): +def mock_device(): dev = create_autospec(device.Device) dev.path = '/dev/foo' dev.vg_name = 'vg_foo' dev.lv_name = 'lv_foo' + dev.symlink = None dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)] dev.available_lvm = True dev.vg_size = [21474836480] dev.vg_free = dev.vg_size - return [dev] + dev.lvs = [] + return dev + +@pytest.fixture(params=range(1,3)) +def mock_devices_available(request): + ret = [] + for _ in range(request.param): + ret.append(mock_device()) + return ret @pytest.fixture def mock_device_generator(): - def mock_device(): - dev = create_autospec(device.Device) - dev.path = '/dev/foo' - dev.vg_name = 'vg_foo' - dev.lv_name = 'lv_foo' - dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)] - dev.available_lvm = True - dev.vg_size = [21474836480] - dev.vg_free = dev.vg_size - dev.lvs = [] - return dev return mock_device @@ -238,14 +234,15 @@ def 
ceph_parttype(request): @pytest.fixture def lsblk_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype): monkeypatch.setattr("ceph_volume.util.device.disk.lsblk", - lambda path: {'TYPE': 'disk', 'PARTLABEL': ceph_partlabel}) - # setting blkid here too in order to be able to fall back to PARTTYPE based - # membership - monkeypatch.setattr("ceph_volume.util.device.disk.blkid", lambda path: {'TYPE': 'disk', - 'PARTLABEL': '', + 'NAME': 'sda', + 'PARTLABEL': ceph_partlabel, 'PARTTYPE': ceph_parttype}) - + monkeypatch.setattr("ceph_volume.util.device.disk.lsblk_all", + lambda: [{'TYPE': 'disk', + 'NAME': 'sda', + 'PARTLABEL': ceph_partlabel, + 'PARTTYPE': ceph_parttype}]) @pytest.fixture def blkid_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype): @@ -264,14 +261,19 @@ def blkid_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype): def device_info_not_ceph_disk_member(monkeypatch, request): monkeypatch.setattr("ceph_volume.util.device.disk.lsblk", lambda path: {'TYPE': 'disk', + 'NAME': 'sda', 'PARTLABEL': request.param[0]}) + monkeypatch.setattr("ceph_volume.util.device.disk.lsblk_all", + lambda: [{'TYPE': 'disk', + 'NAME': 'sda', + 'PARTLABEL': request.param[0]}]) monkeypatch.setattr("ceph_volume.util.device.disk.blkid", lambda path: {'TYPE': 'disk', 'PARTLABEL': request.param[1]}) @pytest.fixture -def patched_get_block_devs_lsblk(): - with patch('ceph_volume.util.disk.get_block_devs_lsblk') as p: +def patched_get_block_devs_sysfs(): + with patch('ceph_volume.util.disk.get_block_devs_sysfs') as p: yield p @pytest.fixture @@ -285,7 +287,11 @@ def patch_bluestore_label(): def device_info(monkeypatch, patch_bluestore_label): def apply(devices=None, lsblk=None, lv=None, blkid=None, udevadm=None, has_bluestore_label=False): - devices = devices if devices else {} + if devices: + for dev in devices.keys(): + devices[dev]['device_nodes'] = os.path.basename(dev) + else: + devices = {} lsblk = lsblk if lsblk else {} blkid = 
blkid if blkid else {} udevadm = udevadm if udevadm else {} @@ -305,3 +311,11 @@ def device_info(monkeypatch, patch_bluestore_label): @pytest.fixture(params=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.999, 1.0]) def data_allocate_fraction(request): return request.param + +@pytest.fixture +def fake_filesystem(fs): + + fs.create_dir('/sys/block/sda/slaves') + fs.create_dir('/sys/block/sda/queue') + fs.create_dir('/sys/block/rbd0') + yield fs diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py index 96a5b5d74..e1578e33d 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py @@ -225,6 +225,16 @@ class TestBatch(object): 'block_db', 2, 2, args) assert len(fast) == 2 + def test_get_physical_fast_allocs_abs_size(self, factory, + conf_ceph_stub, + mock_devices_available): + conf_ceph_stub('[global]\nfsid=asdf-lkjh') + args = factory(block_db_slots=None, get_block_db_size=None) + fasts = batch.get_physical_fast_allocs(mock_devices_available, + 'block_db', 2, 2, args) + for fast, dev in zip(fasts, mock_devices_available): + assert fast[2] == int(dev.vg_size[0] / 2) + def test_batch_fast_allocations_one_block_db_length(self, factory, conf_ceph_stub, mock_lv_device_generator): conf_ceph_stub('[global]\nfsid=asdf-lkjh') diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py index 1faaee05e..4c86d0ca1 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py @@ -69,7 +69,7 @@ class TestFindAssociatedDevices(object): result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234') assert len(result) == 1 - assert result[0][0].abspath == '/dev/VolGroup/lv1' + assert result[0][0].path == 
'/dev/VolGroup/lv1' assert result[0][0].lvs == [vol] assert result[0][1] == 'block' @@ -96,10 +96,10 @@ class TestFindAssociatedDevices(object): assert len(result) == 2 for d in result: if d[1] == 'block': - assert d[0].abspath == '/dev/VolGroup/lv1' + assert d[0].path == '/dev/VolGroup/lv1' assert d[0].lvs == [vol] elif d[1] == 'wal': - assert d[0].abspath == '/dev/VolGroup/lv2' + assert d[0].path == '/dev/VolGroup/lv2' assert d[0].lvs == [vol2] else: assert False @@ -133,13 +133,13 @@ class TestFindAssociatedDevices(object): assert len(result) == 3 for d in result: if d[1] == 'block': - assert d[0].abspath == '/dev/VolGroup/lv1' + assert d[0].path == '/dev/VolGroup/lv1' assert d[0].lvs == [vol] elif d[1] == 'wal': - assert d[0].abspath == '/dev/VolGroup/lv2' + assert d[0].path == '/dev/VolGroup/lv2' assert d[0].lvs == [vol2] elif d[1] == 'db': - assert d[0].abspath == '/dev/VolGroup/lv3' + assert d[0].path == '/dev/VolGroup/lv3' assert d[0].lvs == [vol3] else: assert False diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py index 53c694633..64016111c 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py @@ -69,7 +69,7 @@ class TestFindAssociatedDevices(object): monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) result = zap.find_associated_devices(osd_id='0') - assert result[0].abspath == '/dev/VolGroup/lv' + assert result[0].path == '/dev/VolGroup/lv' def test_lv_is_matched_fsid(self, monkeypatch): tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\ @@ -82,7 +82,7 @@ class TestFindAssociatedDevices(object): monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) result = zap.find_associated_devices(osd_fsid='asdf-lkjh') - assert result[0].abspath == '/dev/VolGroup/lv' + assert result[0].path == '/dev/VolGroup/lv' def 
test_lv_is_matched_id_fsid(self, monkeypatch): tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\ @@ -95,7 +95,7 @@ class TestFindAssociatedDevices(object): monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) result = zap.find_associated_devices(osd_id='0', osd_fsid='asdf-lkjh') - assert result[0].abspath == '/dev/VolGroup/lv' + assert result[0].path == '/dev/VolGroup/lv' class TestEnsureAssociatedLVs(object): diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py index d5ccee5c9..5ad501bab 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py @@ -48,19 +48,19 @@ def _devices_side_effect(): "/dev/mapper/ceph--osd--block--2": {}, } -def _lsblk_list_output(): +def _lsblk_all_devices(abspath=True): return [ - '/dev/sda', - '/dev/sda1', - '/dev/sda2', - '/dev/sda3', - '/dev/sdb', - '/dev/sdb2', - '/dev/sdb3', - '/dev/sdc', - '/dev/sdd', - '/dev/mapper/ceph--osd--block--1', - '/dev/mapper/ceph--osd--block--2', + {"NAME": "/dev/sda", "KNAME": "/dev/sda", "PKNAME": ""}, + {"NAME": "/dev/sda1", "KNAME": "/dev/sda1", "PKNAME": "/dev/sda"}, + {"NAME": "/dev/sda2", "KNAME": "/dev/sda2", "PKNAME": "/dev/sda"}, + {"NAME": "/dev/sda3", "KNAME": "/dev/sda3", "PKNAME": "/dev/sda"}, + {"NAME": "/dev/sdb", "KNAME": "/dev/sdb", "PKNAME": ""}, + {"NAME": "/dev/sdb2", "KNAME": "/dev/sdb2", "PKNAME": "/dev/sdb"}, + {"NAME": "/dev/sdb3", "KNAME": "/dev/sdb3", "PKNAME": "/dev/sdb"}, + {"NAME": "/dev/sdc", "KNAME": "/dev/sdc", "PKNAME": ""}, + {"NAME": "/dev/sdd", "KNAME": "/dev/sdd", "PKNAME": ""}, + {"NAME": "/dev/mapper/ceph--osd--block--1", "KNAME": "/dev/mapper/ceph--osd--block--1", "PKNAME": "/dev/sdd"}, + {"NAME": "/dev/mapper/ceph--osd--block--2", "KNAME": "/dev/mapper/ceph--osd--block--2", "PKNAME": "/dev/sdd"}, ] # dummy lsblk output for device with optional parent 
output @@ -153,7 +153,7 @@ def _process_call_side_effect(command, **kw): return _lsblk_output(dev, parent="/dev/sdd"), '', 0 pytest.fail('dev {} needs behavior specified for it'.format(dev)) if "/dev/" not in command: - return _lsblk_list_output(), '', 0 + return _lsblk_all_devices(), '', 0 pytest.fail('command {} needs behavior specified for it'.format(command)) if "ceph-bluestore-tool" in command: @@ -192,15 +192,16 @@ class TestList(object): @patch('ceph_volume.util.device.disk.get_devices') @patch('ceph_volume.util.disk.has_bluestore_label') @patch('ceph_volume.process.call') - def test_raw_list(self, patched_call, patched_bluestore_label, patched_get_devices): + @patch('ceph_volume.util.disk.lsblk_all') + def test_raw_list(self, patched_disk_lsblk, patched_call, patched_bluestore_label, patched_get_devices): raw.list.logger.setLevel("DEBUG") patched_call.side_effect = _process_call_side_effect + patched_disk_lsblk.side_effect = _lsblk_all_devices patched_bluestore_label.side_effect = _has_bluestore_label_side_effect patched_get_devices.side_effect = _devices_side_effect result = raw.list.List([]).generate() - patched_call.assert_any_call(['lsblk', '--paths', '--output=NAME', '--noheadings', '--list']) - assert len(result) == 2 + assert len(result) == 3 sdb = result['sdb-uuid'] assert sdb['osd_uuid'] == 'sdb-uuid' @@ -219,17 +220,19 @@ class TestList(object): @patch('ceph_volume.util.device.disk.get_devices') @patch('ceph_volume.util.disk.has_bluestore_label') @patch('ceph_volume.process.call') - def test_raw_list_with_OSError(self, patched_call, patched_bluestore_label, patched_get_devices): + @patch('ceph_volume.util.disk.lsblk_all') + def test_raw_list_with_OSError(self, patched_disk_lsblk, patched_call, patched_bluestore_label, patched_get_devices): def _has_bluestore_label_side_effect_with_OSError(device_path): if device_path == "/dev/sdd": raise OSError('fake OSError') return _has_bluestore_label_side_effect(device_path) raw.list.logger.setLevel("DEBUG") 
+ patched_disk_lsblk.side_effect = _lsblk_all_devices patched_call.side_effect = _process_call_side_effect patched_bluestore_label.side_effect = _has_bluestore_label_side_effect_with_OSError patched_get_devices.side_effect = _devices_side_effect result = raw.list.List([]).generate() - assert len(result) == 1 + assert len(result) == 3 assert 'sdb-uuid' in result diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py index ac2dd0e7b..5c7bd3117 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py @@ -5,9 +5,9 @@ from ceph_volume.devices.simple import activate class TestActivate(object): - def test_no_data_uuid(self, factory, tmpfile, is_root, monkeypatch, capture): - json_config = tmpfile(contents='{}') - args = factory(osd_id='0', osd_fsid='1234', json_config=json_config) + def test_no_data_uuid(self, factory, is_root, monkeypatch, capture, fake_filesystem): + fake_filesystem.create_file('/tmp/json-config', contents='{}') + args = factory(osd_id='0', osd_fsid='1234', json_config='/tmp/json-config') with pytest.raises(RuntimeError): activate.Activate([]).activate(args) @@ -45,9 +45,9 @@ class TestActivate(object): class TestEnableSystemdUnits(object): - def test_nothing_is_activated(self, tmpfile, is_root, capsys): - json_config = tmpfile(contents='{}') - activation = activate.Activate(['--no-systemd', '--file', json_config, '0', '1234'], from_trigger=True) + def test_nothing_is_activated(self, is_root, capsys, fake_filesystem): + fake_filesystem.create_file('/tmp/json-config', contents='{}') + activation = activate.Activate(['--no-systemd', '--file', '/tmp/json-config', '0', '1234'], from_trigger=True) activation.activate = lambda x: True activation.main() activation.enable_systemd_units('0', '1234') @@ -56,69 +56,69 @@ class TestEnableSystemdUnits(object): 
assert 'Skipping masking of ceph-disk' in stderr assert 'Skipping enabling and starting OSD simple' in stderr - def test_no_systemd_flag_is_true(self, tmpfile, is_root): - json_config = tmpfile(contents='{}') - activation = activate.Activate(['--no-systemd', '--file', json_config, '0', '1234'], from_trigger=True) + def test_no_systemd_flag_is_true(self, is_root, fake_filesystem): + fake_filesystem.create_file('/tmp/json-config', contents='{}') + activation = activate.Activate(['--no-systemd', '--file', '/tmp/json-config', '0', '1234'], from_trigger=True) activation.activate = lambda x: True activation.main() assert activation.skip_systemd is True - def test_no_systemd_flag_is_false(self, tmpfile, is_root): - json_config = tmpfile(contents='{}') - activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=True) + def test_no_systemd_flag_is_false(self, is_root, fake_filesystem): + fake_filesystem.create_file('/tmp/json-config', contents='{}') + activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=True) activation.activate = lambda x: True activation.main() assert activation.skip_systemd is False - def test_masks_ceph_disk(self, tmpfile, is_root, monkeypatch, capture): + def test_masks_ceph_disk(self, is_root, monkeypatch, capture, fake_filesystem): monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', capture) monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True) monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True) monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True) - json_config = tmpfile(contents='{}') - activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False) + fake_filesystem.create_file('/tmp/json-config', contents='{}') + activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False) activation.activate = lambda x: True activation.main() 
activation.enable_systemd_units('0', '1234') assert len(capture.calls) == 1 - def test_enables_simple_unit(self, tmpfile, is_root, monkeypatch, capture): + def test_enables_simple_unit(self, is_root, monkeypatch, capture, fake_filesystem): monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True) monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', capture) monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True) monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True) - json_config = tmpfile(contents='{}') - activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False) + fake_filesystem.create_file('/tmp/json-config', contents='{}') + activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False) activation.activate = lambda x: True activation.main() activation.enable_systemd_units('0', '1234') assert len(capture.calls) == 1 assert capture.calls[0]['args'] == ('0', '1234', 'simple') - def test_enables_osd_unit(self, tmpfile, is_root, monkeypatch, capture): + def test_enables_osd_unit(self, is_root, monkeypatch, capture, fake_filesystem): monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True) monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True) monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', capture) monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True) - json_config = tmpfile(contents='{}') - activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False) + fake_filesystem.create_file('/tmp/json-config', contents='{}') + activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False) activation.activate = lambda x: True activation.main() activation.enable_systemd_units('0', '1234') assert len(capture.calls) == 1 assert capture.calls[0]['args'] == ('0',) - def 
test_starts_osd_unit(self, tmpfile, is_root, monkeypatch, capture): + def test_starts_osd_unit(self, is_root, monkeypatch, capture, fake_filesystem): monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True) monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True) monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True) monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', capture) - json_config = tmpfile(contents='{}') - activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False) + fake_filesystem.create_file('/tmp/json-config', contents='{}') + activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False) activation.activate = lambda x: True activation.main() activation.enable_systemd_units('0', '1234') diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py index 118493625..b5d120655 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py @@ -5,20 +5,23 @@ from ceph_volume.devices.simple import scan class TestGetContents(object): - def test_multiple_lines_are_left_as_is(self, tmpfile): - magic_file = tmpfile(contents='first\nsecond\n') + def setup(self): + self.magic_file_name = '/tmp/magic-file' + + def test_multiple_lines_are_left_as_is(self, fake_filesystem): + magic_file = fake_filesystem.create_file(self.magic_file_name, contents='first\nsecond\n') scanner = scan.Scan([]) - assert scanner.get_contents(magic_file) == 'first\nsecond\n' + assert scanner.get_contents(magic_file.path) == 'first\nsecond\n' - def test_extra_whitespace_gets_removed(self, tmpfile): - magic_file = tmpfile(contents='first ') + def test_extra_whitespace_gets_removed(self, fake_filesystem): + magic_file = 
fake_filesystem.create_file(self.magic_file_name, contents='first ') scanner = scan.Scan([]) - assert scanner.get_contents(magic_file) == 'first' + assert scanner.get_contents(magic_file.path) == 'first' - def test_single_newline_values_are_trimmed(self, tmpfile): - magic_file = tmpfile(contents='first\n') + def test_single_newline_values_are_trimmed(self, fake_filesystem): + magic_file = fake_filesystem.create_file(self.magic_file_name, contents='first\n') scanner = scan.Scan([]) - assert scanner.get_contents(magic_file) == 'first' + assert scanner.get_contents(magic_file.path) == 'first' class TestEtcPath(object): @@ -36,10 +39,10 @@ class TestEtcPath(object): assert scanner.etc_path == path assert os.path.isdir(path) - def test_complains_when_file(self, tmpfile): - path = tmpfile() + def test_complains_when_file(self, fake_filesystem): + etc_dir = fake_filesystem.create_file('/etc/ceph/osd') scanner = scan.Scan([]) - scanner._etc_path = path + scanner._etc_path = etc_dir.path with pytest.raises(RuntimeError): scanner.etc_path diff --git a/ceph/src/ceph-volume/ceph_volume/tests/test_inventory.py b/ceph/src/ceph-volume/ceph_volume/tests/test_inventory.py index b00bd668d..785d8b56e 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/test_inventory.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/test_inventory.py @@ -3,10 +3,12 @@ import pytest from ceph_volume.util.device import Devices from ceph_volume.util.lsmdisk import LSMDisk +from mock.mock import patch import ceph_volume.util.lsmdisk as lsmdisk @pytest.fixture +@patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def device_report_keys(device_info): device_info(devices={ # example output of disk.get_devices() @@ -28,13 +30,15 @@ def device_report_keys(device_info): 'size': 1999844147200.0, 'support_discard': '', 'vendor': 'DELL', - 'device_id': 'Vendor-Model-Serial'} + 'device_id': 'Vendor-Model-Serial', + 'device_nodes': 'sdb'} } ) report = Devices().json_report()[0] return 
list(report.keys()) @pytest.fixture +@patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def device_sys_api_keys(device_info): device_info(devices={ # example output of disk.get_devices() @@ -55,13 +59,15 @@ def device_sys_api_keys(device_info): 'sectorsize': '512', 'size': 1999844147200.0, 'support_discard': '', - 'vendor': 'DELL'} + 'vendor': 'DELL', + 'device_nodes': 'sdb'} } ) report = Devices().json_report()[0] return list(report['sys_api'].keys()) @pytest.fixture +@patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def device_data(device_info): device_info( devices={ @@ -85,6 +91,7 @@ def device_data(device_info): 'size': 1999844147200.0, 'support_discard': '', 'vendor': 'DELL', + 'device_nodes': 'sdb' } } ) @@ -140,6 +147,7 @@ class TestInventory(object): 'size', 'support_discard', 'vendor', + 'device_nodes' ] expected_lsm_keys = [ diff --git a/ceph/src/ceph-volume/ceph_volume/tests/test_main.py b/ceph/src/ceph-volume/ceph_volume/tests/test_main.py index afe9a2344..d03d405d5 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/test_main.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/test_main.py @@ -39,7 +39,7 @@ class TestVolume(object): assert error.value.code == 0 log = caplog.records[-1] assert log.message == 'ignoring inability to load ceph.conf' - assert log.levelname == 'ERROR' + assert log.levelname == 'WARNING' def test_logs_current_command(self, caplog): with pytest.raises(SystemExit) as error: @@ -50,15 +50,15 @@ class TestVolume(object): assert log.message == 'Running command: ceph-volume --cluster barnacle lvm --help' assert log.levelname == 'INFO' - def test_logs_set_level_error(self, caplog): + def test_logs_set_level_warning(self, caplog): with pytest.raises(SystemExit) as error: - main.Volume(argv=['ceph-volume', '--log-level', 'error', '--cluster', 'barnacle', 'lvm', '--help']) + main.Volume(argv=['ceph-volume', '--log-level', 'warning', '--cluster', 'barnacle', 'lvm', '--help']) # make sure we aren't 
causing an actual error assert error.value.code == 0 assert caplog.records - # only log levels of 'ERROR' or above should be captured + # only log levels of 'WARNING' for log in caplog.records: - assert log.levelname in ['ERROR', 'CRITICAL'] + assert log.levelname == 'WARNING' def test_logs_incorrect_log_level(self, capsys): with pytest.raises(SystemExit) as error: diff --git a/ceph/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py b/ceph/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py index 19aaaa3bd..b0446a13b 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py @@ -16,11 +16,12 @@ class TestOSDPath(object): with pytest.raises(exceptions.SuperUserError): self.validator('') - def test_path_is_not_a_directory(self, is_root, tmpfile, monkeypatch): + def test_path_is_not_a_directory(self, is_root, monkeypatch, fake_filesystem): + fake_file = fake_filesystem.create_file('/tmp/foo') monkeypatch.setattr(arg_validators.disk, 'is_partition', lambda x: False) validator = arg_validators.OSDPath() with pytest.raises(argparse.ArgumentError): - validator(tmpfile()) + validator(fake_file.path) def test_files_are_missing(self, is_root, tmpdir, monkeypatch): tmppath = str(tmpdir) @@ -78,16 +79,25 @@ class TestExcludeGroupOptions(object): class TestValidDevice(object): - def setup(self): + def setup(self, fake_filesystem): self.validator = arg_validators.ValidDevice() @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False) - def test_path_is_valid(self, m_has_bs_label, fake_call, patch_bluestore_label): - result = self.validator('/') - assert result.abspath == '/' + def test_path_is_valid(self, m_has_bs_label, + fake_call, patch_bluestore_label, + device_info, monkeypatch): + monkeypatch.setattr('ceph_volume.util.device.Device.exists', lambda: True) + lsblk = {"TYPE": "disk", "NAME": "sda"} + device_info(lsblk=lsblk) + result 
= self.validator('/dev/sda') + assert result.path == '/dev/sda' @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False) - def test_path_is_invalid(self, m_has_bs_label, fake_call, patch_bluestore_label): + def test_path_is_invalid(self, m_has_bs_label, + fake_call, patch_bluestore_label, + device_info): + lsblk = {"TYPE": "disk", "NAME": "sda"} + device_info(lsblk=lsblk) with pytest.raises(argparse.ArgumentError): self.validator('/device/does/not/exist') diff --git a/ceph/src/ceph-volume/ceph_volume/tests/util/test_device.py b/ceph/src/ceph-volume/ceph_volume/tests/util/test_device.py index f6e439279..8eef3ff00 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/util/test_device.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/util/test_device.py @@ -1,3 +1,4 @@ +import os import pytest from copy import deepcopy from ceph_volume.util import device @@ -16,8 +17,8 @@ class TestDevice(object): deepcopy(volumes)) data = {"/dev/sda": {"foo": "bar"}} - lsblk = {"TYPE": "disk"} - device_info(devices=data,lsblk=lsblk) + lsblk = {"TYPE": "disk", "NAME": "sda"} + device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.sys_api assert "foo" in disk.sys_api @@ -32,157 +33,196 @@ class TestDevice(object): # 5GB in size data = {"/dev/sda": {"size": "5368709120"}} - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sda"} device_info(devices=data,lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.lvm_size.gb == 4 + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_lvm_size_rounds_down(self, fake_call, device_info): # 5.5GB in size data = {"/dev/sda": {"size": "5905580032"}} - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sda"} device_info(devices=data,lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.lvm_size.gb == 4 def test_is_lv(self, fake_call, device_info): data = {"lv_path": "vg/lv", "vg_name": "vg", "name": "lv"} - lsblk = {"TYPE": "lvm"} + lsblk = 
{"TYPE": "lvm", "NAME": "vg-lv"} device_info(lv=data,lsblk=lsblk) disk = device.Device("vg/lv") assert disk.is_lv + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_vgs_is_empty(self, fake_call, device_info, monkeypatch): BarPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", pv_tags={}) pvolumes = [] pvolumes.append(BarPVolume) - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sda"} device_info(lsblk=lsblk) monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: {}) disk = device.Device("/dev/nvme0n1") assert disk.vgs == [] + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_vgs_is_not_empty(self, fake_call, device_info, monkeypatch): - vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6, + vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=6, vg_extent_size=1073741824) - monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg]) - lsblk = {"TYPE": "disk"} + monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg]) + lsblk = {"TYPE": "disk", "NAME": "nvme0n1"} device_info(lsblk=lsblk) disk = device.Device("/dev/nvme0n1") assert len(disk.vgs) == 1 + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_device_is_device(self, fake_call, device_info): data = {"/dev/sda": {"foo": "bar"}} - lsblk = {"TYPE": "device"} + lsblk = {"TYPE": "device", "NAME": "sda"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.is_device is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) + def test_loop_device_is_not_device(self, fake_call, device_info): + data = {"/dev/loop0": {"foo": "bar"}} + lsblk = {"TYPE": "loop"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/loop0") + assert disk.is_device is False + + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) + def test_loop_device_is_device(self, fake_call, device_info): + data = {"/dev/loop0": {"foo": 
"bar"}} + lsblk = {"TYPE": "loop"} + os.environ["CEPH_VOLUME_ALLOW_LOOP_DEVICES"] = "1" + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/loop0") + assert disk.is_device is True + del os.environ["CEPH_VOLUME_ALLOW_LOOP_DEVICES"] + + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_device_is_rotational(self, fake_call, device_info): data = {"/dev/sda": {"rotational": "1"}} - lsblk = {"TYPE": "device"} + lsblk = {"TYPE": "device", "NAME": "sda"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.rotational + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_device_is_not_rotational(self, fake_call, device_info): data = {"/dev/sda": {"rotational": "0"}} - lsblk = {"TYPE": "device"} + lsblk = {"TYPE": "device", "NAME": "sda"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda") assert not disk.rotational + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_device_is_rotational_lsblk(self, fake_call, device_info): data = {"/dev/sda": {"foo": "bar"}} - lsblk = {"TYPE": "device", "ROTA": "1"} + lsblk = {"TYPE": "device", "ROTA": "1", "NAME": "sda"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.rotational + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_device_is_not_rotational_lsblk(self, fake_call, device_info): data = {"/dev/sda": {"rotational": "0"}} - lsblk = {"TYPE": "device", "ROTA": "0"} + lsblk = {"TYPE": "device", "ROTA": "0", "NAME": "sda"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda") assert not disk.rotational + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_device_is_rotational_defaults_true(self, fake_call, device_info): # rotational will default true if no info from sys_api or lsblk is found data = {"/dev/sda": {"foo": "bar"}} - lsblk = {"TYPE": "device", "foo": "bar"} + lsblk = 
{"TYPE": "device", "foo": "bar", "NAME": "sda"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.rotational + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_disk_is_device(self, fake_call, device_info): data = {"/dev/sda": {"foo": "bar"}} - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sda"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.is_device is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_is_partition(self, fake_call, device_info): data = {"/dev/sda1": {"foo": "bar"}} - lsblk = {"TYPE": "part", "PKNAME": "sda"} + lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda1") assert disk.is_partition + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_mpath_device_is_device(self, fake_call, device_info): data = {"/dev/foo": {"foo": "bar"}} - lsblk = {"TYPE": "mpath"} + lsblk = {"TYPE": "mpath", "NAME": "foo"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/foo") assert disk.is_device is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_is_not_lvm_member(self, fake_call, device_info): data = {"/dev/sda1": {"foo": "bar"}} - lsblk = {"TYPE": "part", "PKNAME": "sda"} + lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda1") assert not disk.is_lvm_member + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_is_lvm_member(self, fake_call, device_info): data = {"/dev/sda1": {"foo": "bar"}} - lsblk = {"TYPE": "part", "PKNAME": "sda"} + lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/sda1") assert not disk.is_lvm_member + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def 
test_is_mapper_device(self, fake_call, device_info): - lsblk = {"TYPE": "lvm"} + lsblk = {"TYPE": "lvm", "NAME": "foo"} device_info(lsblk=lsblk) disk = device.Device("/dev/mapper/foo") assert disk.is_mapper + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_dm_is_mapper_device(self, fake_call, device_info): - lsblk = {"TYPE": "lvm"} + lsblk = {"TYPE": "lvm", "NAME": "dm-4"} device_info(lsblk=lsblk) disk = device.Device("/dev/dm-4") assert disk.is_mapper + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_is_not_mapper_device(self, fake_call, device_info): - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sda"} device_info(lsblk=lsblk) disk = device.Device("/dev/sda") assert not disk.is_mapper @pytest.mark.usefixtures("lsblk_ceph_disk_member", "disable_kernel_queries") + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_is_ceph_disk_lsblk(self, fake_call, monkeypatch, patch_bluestore_label): disk = device.Device("/dev/sda") assert disk.is_ceph_disk_member @pytest.mark.usefixtures("blkid_ceph_disk_member", + "lsblk_ceph_disk_member", "disable_kernel_queries") + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_is_ceph_disk_blkid(self, fake_call, monkeypatch, patch_bluestore_label): disk = device.Device("/dev/sda") assert disk.is_ceph_disk_member @pytest.mark.usefixtures("lsblk_ceph_disk_member", "disable_kernel_queries") + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_is_ceph_disk_member_not_available_lsblk(self, fake_call, monkeypatch, patch_bluestore_label): disk = device.Device("/dev/sda") assert disk.is_ceph_disk_member @@ -190,23 +230,27 @@ class TestDevice(object): assert "Used by ceph-disk" in disk.rejected_reasons @pytest.mark.usefixtures("blkid_ceph_disk_member", + "lsblk_ceph_disk_member", "disable_kernel_queries") + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def 
test_is_ceph_disk_member_not_available_blkid(self, fake_call, monkeypatch, patch_bluestore_label): disk = device.Device("/dev/sda") assert disk.is_ceph_disk_member assert not disk.available assert "Used by ceph-disk" in disk.rejected_reasons + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_reject_removable_device(self, fake_call, device_info): data = {"/dev/sdb": {"removable": 1}} - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sdb"} device_info(devices=data,lsblk=lsblk) disk = device.Device("/dev/sdb") assert not disk.available + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_reject_device_with_gpt_headers(self, fake_call, device_info): data = {"/dev/sdb": {"removable": 0, "size": 5368709120}} - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sdb"} blkid= {"PTTYPE": "gpt"} device_info( devices=data, @@ -216,96 +260,145 @@ class TestDevice(object): disk = device.Device("/dev/sdb") assert not disk.available + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_accept_non_removable_device(self, fake_call, device_info): data = {"/dev/sdb": {"removable": 0, "size": 5368709120}} - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sdb"} device_info(devices=data,lsblk=lsblk) disk = device.Device("/dev/sdb") assert disk.available + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_reject_not_acceptable_device(self, fake_call, device_info): data = {"/dev/dm-0": {"foo": "bar"}} - lsblk = {"TYPE": "mpath"} + lsblk = {"TYPE": "mpath", "NAME": "dm-0"} device_info(devices=data, lsblk=lsblk) disk = device.Device("/dev/dm-0") assert not disk.available + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) + @patch('ceph_volume.util.device.os.path.realpath') + @patch('ceph_volume.util.device.os.path.islink') + def test_accept_symlink_to_device(self, + m_os_path_islink, + m_os_path_realpath, + device_info, + 
fake_call): + m_os_path_islink.return_value = True + m_os_path_realpath.return_value = '/dev/sdb' + data = {"/dev/sdb": {"ro": 0, "size": 5368709120}} + lsblk = {"TYPE": "disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/test_symlink") + print(disk) + print(disk.sys_api) + assert disk.available + + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) + @patch('ceph_volume.util.device.os.readlink') + @patch('ceph_volume.util.device.os.path.islink') + def test_reject_symlink_to_device_mapper(self, + m_os_path_islink, + m_os_readlink, + device_info, + fake_call): + m_os_path_islink.return_value = True + m_os_readlink.return_value = '/dev/dm-0' + data = {"/dev/mapper/mpatha": {"ro": 0, "size": 5368709120}} + lsblk = {"TYPE": "disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/mapper/mpatha") + assert disk.available + + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_reject_readonly_device(self, fake_call, device_info): data = {"/dev/cdrom": {"ro": 1}} - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "cdrom"} device_info(devices=data,lsblk=lsblk) disk = device.Device("/dev/cdrom") assert not disk.available + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_reject_smaller_than_5gb(self, fake_call, device_info): data = {"/dev/sda": {"size": 5368709119}} - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sda"} device_info(devices=data,lsblk=lsblk) disk = device.Device("/dev/sda") assert not disk.available, 'too small device is available' + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_accept_non_readonly_device(self, fake_call, device_info): data = {"/dev/sda": {"ro": 0, "size": 5368709120}} - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sda"} device_info(devices=data,lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.available + 
@patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_reject_bluestore_device(self, fake_call, monkeypatch, patch_bluestore_label, device_info): patch_bluestore_label.return_value = True - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sda"} device_info(lsblk=lsblk) disk = device.Device("/dev/sda") assert not disk.available assert "Has BlueStore device label" in disk.rejected_reasons + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_reject_device_with_oserror(self, fake_call, monkeypatch, patch_bluestore_label, device_info): patch_bluestore_label.side_effect = OSError('test failure') - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sda"} device_info(lsblk=lsblk) disk = device.Device("/dev/sda") assert not disk.available assert "Failed to determine if device is BlueStore" in disk.rejected_reasons - @pytest.mark.usefixtures("device_info_not_ceph_disk_member", + @pytest.mark.usefixtures("lsblk_ceph_disk_member", + "device_info_not_ceph_disk_member", "disable_kernel_queries") + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_is_not_ceph_disk_member_lsblk(self, fake_call, patch_bluestore_label): disk = device.Device("/dev/sda") assert disk.is_ceph_disk_member is False + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_existing_vg_available(self, fake_call, monkeypatch, device_info): - vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1536, + vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1536, vg_extent_size=4194304) - monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg]) - lsblk = {"TYPE": "disk"} + monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg]) + lsblk = {"TYPE": "disk", "NAME": "nvme0n1"} data = {"/dev/nvme0n1": {"size": "6442450944"}} - device_info(devices=data, lsblk=lsblk) + lv = {"tags": {"ceph.osd_id": "1"}} + device_info(devices=data, lsblk=lsblk, lv=lv) disk = 
device.Device("/dev/nvme0n1") assert disk.available_lvm assert not disk.available assert not disk.available_raw + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_existing_vg_too_small(self, fake_call, monkeypatch, device_info): - vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=4, + vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=4, vg_extent_size=1073741824) - monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg]) - lsblk = {"TYPE": "disk"} + monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg]) + lsblk = {"TYPE": "disk", "NAME": "nvme0n1"} data = {"/dev/nvme0n1": {"size": "6442450944"}} - device_info(devices=data, lsblk=lsblk) + lv = {"tags": {"ceph.osd_id": "1"}} + device_info(devices=data, lsblk=lsblk, lv=lv) disk = device.Device("/dev/nvme0n1") assert not disk.available_lvm assert not disk.available assert not disk.available_raw + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_multiple_existing_vgs(self, fake_call, monkeypatch, device_info): - vg1 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1000, + vg1 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1000, vg_extent_size=4194304) - vg2 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=536, + vg2 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=536, vg_extent_size=4194304) - monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg1, vg2]) - lsblk = {"TYPE": "disk"} + monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg1, vg2]) + lsblk = {"TYPE": "disk", "NAME": "nvme0n1"} data = {"/dev/nvme0n1": {"size": "6442450944"}} - device_info(devices=data, lsblk=lsblk) + lv = {"tags": {"ceph.osd_id": "1"}} + device_info(devices=data, lsblk=lsblk, lv=lv) disk = device.Device("/dev/nvme0n1") assert disk.available_lvm assert not disk.available @@ -315,7 +408,7 @@ class TestDevice(object): def test_used_by_ceph(self, fake_call, device_info, 
monkeypatch, ceph_type): data = {"/dev/sda": {"foo": "bar"}} - lsblk = {"TYPE": "part", "PKNAME": "sda"} + lsblk = {"TYPE": "part", "NAME": "sda", "PKNAME": "sda"} FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000", pv_tags={}, vg_name="vg") pvolumes = [] @@ -337,12 +430,13 @@ class TestDevice(object): disk = device.Device("/dev/sda") assert disk.used_by_ceph + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_not_used_by_ceph(self, fake_call, device_info, monkeypatch): FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000", pv_tags={}, vg_name="vg") pvolumes = [] pvolumes.append(FooPVolume) data = {"/dev/sda": {"foo": "bar"}} - lsblk = {"TYPE": "part", "PKNAME": "sda"} + lsblk = {"TYPE": "part", "NAME": "sda", "PKNAME": "sda"} lv_data = {"lv_path": "vg/lv", "vg_name": "vg", "lv_uuid": "0000", "tags": {"ceph.osd_id": 0, "ceph.type": "journal"}} monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes) @@ -350,9 +444,10 @@ class TestDevice(object): disk = device.Device("/dev/sda") assert not disk.used_by_ceph + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_get_device_id(self, fake_call, device_info): udev = {k:k for k in ['ID_VENDOR', 'ID_MODEL', 'ID_SCSI_SERIAL']} - lsblk = {"TYPE": "disk"} + lsblk = {"TYPE": "disk", "NAME": "sda"} device_info(udevadm=udev,lsblk=lsblk) disk = device.Device("/dev/sda") assert disk._get_device_id() == 'ID_VENDOR_ID_MODEL_ID_SCSI_SERIAL' @@ -362,7 +457,7 @@ class TestDevice(object): # low-level behavior of has_bluestore_label with patch.object(device.Device, "__init__", lambda self, path, with_lsm=False: None): disk = device.Device("/dev/sda") - disk.abspath = "/dev/sda" + disk.path = "/dev/sda" with patch('builtins.open', mock_open(read_data=b'bluestore block device\n')): assert disk.has_bluestore_label with patch('builtins.open', mock_open(read_data=b'not a bluestore block device\n')): @@ -371,109 +466,123 @@ class 
TestDevice(object): class TestDeviceEncryption(object): + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_partition_is_not_encrypted_lsblk(self, fake_call, device_info): - lsblk = {'TYPE': 'part', 'FSTYPE': 'xfs', 'PKNAME': 'sda'} + lsblk = {'TYPE': 'part', 'FSTYPE': 'xfs', 'NAME': 'sda', 'PKNAME': 'sda'} device_info(lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.is_encrypted is False + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_partition_is_encrypted_lsblk(self, fake_call, device_info): - lsblk = {'TYPE': 'part', 'FSTYPE': 'crypto_LUKS', 'PKNAME': 'sda'} + lsblk = {'TYPE': 'part', 'FSTYPE': 'crypto_LUKS', 'NAME': 'sda', 'PKNAME': 'sda'} device_info(lsblk=lsblk) disk = device.Device("/dev/sda") assert disk.is_encrypted is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_partition_is_not_encrypted_blkid(self, fake_call, device_info): - lsblk = {'TYPE': 'part', 'PKNAME': 'sda'} + lsblk = {'TYPE': 'part', 'NAME': 'sda', 'PKNAME': 'sda'} blkid = {'TYPE': 'ceph data'} device_info(lsblk=lsblk, blkid=blkid) disk = device.Device("/dev/sda") assert disk.is_encrypted is False + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_partition_is_encrypted_blkid(self, fake_call, device_info): - lsblk = {'TYPE': 'part', 'PKNAME': 'sda'} + lsblk = {'TYPE': 'part', 'NAME': 'sda' ,'PKNAME': 'sda'} blkid = {'TYPE': 'crypto_LUKS'} device_info(lsblk=lsblk, blkid=blkid) disk = device.Device("/dev/sda") assert disk.is_encrypted is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_mapper_is_encrypted_luks1(self, fake_call, device_info, monkeypatch): status = {'type': 'LUKS1'} monkeypatch.setattr(device, 'encryption_status', lambda x: status) - lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid','TYPE': 'lvm'} blkid = {'TYPE': 'mapper'} device_info(lsblk=lsblk, blkid=blkid) disk = 
device.Device("/dev/mapper/uuid") assert disk.is_encrypted is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_mapper_is_encrypted_luks2(self, fake_call, device_info, monkeypatch): status = {'type': 'LUKS2'} monkeypatch.setattr(device, 'encryption_status', lambda x: status) - lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'} blkid = {'TYPE': 'mapper'} device_info(lsblk=lsblk, blkid=blkid) disk = device.Device("/dev/mapper/uuid") assert disk.is_encrypted is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_mapper_is_encrypted_plain(self, fake_call, device_info, monkeypatch): status = {'type': 'PLAIN'} monkeypatch.setattr(device, 'encryption_status', lambda x: status) - lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'} blkid = {'TYPE': 'mapper'} device_info(lsblk=lsblk, blkid=blkid) disk = device.Device("/dev/mapper/uuid") assert disk.is_encrypted is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_mapper_is_not_encrypted_plain(self, fake_call, device_info, monkeypatch): monkeypatch.setattr(device, 'encryption_status', lambda x: {}) - lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'} blkid = {'TYPE': 'mapper'} device_info(lsblk=lsblk, blkid=blkid) disk = device.Device("/dev/mapper/uuid") assert disk.is_encrypted is False + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_lv_is_encrypted_blkid(self, fake_call, device_info): - lsblk = {'TYPE': 'lvm'} + lsblk = {'TYPE': 'lvm', 'NAME': 'sda'} blkid = {'TYPE': 'crypto_LUKS'} device_info(lsblk=lsblk, blkid=blkid) disk = device.Device("/dev/sda") disk.lv_api = {} assert disk.is_encrypted is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_lv_is_not_encrypted_blkid(self, fake_call, factory, device_info): - 
lsblk = {'TYPE': 'lvm'} + lsblk = {'TYPE': 'lvm', 'NAME': 'sda'} blkid = {'TYPE': 'xfs'} device_info(lsblk=lsblk, blkid=blkid) disk = device.Device("/dev/sda") disk.lv_api = factory(encrypted=None) assert disk.is_encrypted is False + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_lv_is_encrypted_lsblk(self, fake_call, device_info): - lsblk = {'FSTYPE': 'crypto_LUKS', 'TYPE': 'lvm'} + lsblk = {'FSTYPE': 'crypto_LUKS', 'NAME': 'sda', 'TYPE': 'lvm'} blkid = {'TYPE': 'mapper'} device_info(lsblk=lsblk, blkid=blkid) disk = device.Device("/dev/sda") disk.lv_api = {} assert disk.is_encrypted is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_lv_is_not_encrypted_lsblk(self, fake_call, factory, device_info): - lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'} blkid = {'TYPE': 'mapper'} device_info(lsblk=lsblk, blkid=blkid) disk = device.Device("/dev/sda") disk.lv_api = factory(encrypted=None) assert disk.is_encrypted is False + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_lv_is_encrypted_lvm_api(self, fake_call, factory, device_info): - lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'} blkid = {'TYPE': 'mapper'} device_info(lsblk=lsblk, blkid=blkid) disk = device.Device("/dev/sda") disk.lv_api = factory(encrypted=True) assert disk.is_encrypted is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_lv_is_not_encrypted_lvm_api(self, fake_call, factory, device_info): - lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'} blkid = {'TYPE': 'mapper'} device_info(lsblk=lsblk, blkid=blkid) disk = device.Device("/dev/sda") @@ -491,28 +600,37 @@ class TestDeviceOrdering(object): "/dev/sdd": {"removable": 1}, # invalid } + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def 
test_valid_before_invalid(self, fake_call, device_info): - lsblk = {"TYPE": "disk"} - device_info(devices=self.data,lsblk=lsblk) + lsblk_sda = {"NAME": "sda", "TYPE": "disk"} + lsblk_sdb = {"NAME": "sdb", "TYPE": "disk"} + device_info(devices=self.data,lsblk=lsblk_sda) sda = device.Device("/dev/sda") + device_info(devices=self.data,lsblk=lsblk_sdb) sdb = device.Device("/dev/sdb") assert sda < sdb assert sdb > sda + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_valid_alphabetical_ordering(self, fake_call, device_info): - lsblk = {"TYPE": "disk"} - device_info(devices=self.data,lsblk=lsblk) + lsblk_sda = {"NAME": "sda", "TYPE": "disk"} + lsblk_sdc = {"NAME": "sdc", "TYPE": "disk"} + device_info(devices=self.data,lsblk=lsblk_sda) sda = device.Device("/dev/sda") + device_info(devices=self.data,lsblk=lsblk_sdc) sdc = device.Device("/dev/sdc") assert sda < sdc assert sdc > sda + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_invalid_alphabetical_ordering(self, fake_call, device_info): - lsblk = {"TYPE": "disk"} - device_info(devices=self.data,lsblk=lsblk) + lsblk_sdb = {"NAME": "sdb", "TYPE": "disk"} + lsblk_sdd = {"NAME": "sdd", "TYPE": "disk"} + device_info(devices=self.data,lsblk=lsblk_sdb) sdb = device.Device("/dev/sdb") + device_info(devices=self.data,lsblk=lsblk_sdd) sdd = device.Device("/dev/sdd") assert sdb < sdd @@ -521,38 +639,45 @@ class TestDeviceOrdering(object): class TestCephDiskDevice(object): + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_partlabel_lsblk(self, fake_call, device_info): - lsblk = {"TYPE": "disk", "PARTLABEL": ""} + lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": ""} device_info(lsblk=lsblk) disk = device.CephDiskDevice(device.Device("/dev/sda")) assert disk.partlabel == '' + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_partlabel_blkid(self, fake_call, device_info): + lsblk = {"TYPE": "disk", "NAME": "sda", 
"PARTLABEL": "ceph data"} blkid = {"TYPE": "disk", "PARTLABEL": "ceph data"} - device_info(blkid=blkid) + device_info(blkid=blkid, lsblk=lsblk) disk = device.CephDiskDevice(device.Device("/dev/sda")) assert disk.partlabel == 'ceph data' - @pytest.mark.usefixtures("blkid_ceph_disk_member", + @pytest.mark.usefixtures("lsblk_ceph_disk_member", + "blkid_ceph_disk_member", "disable_kernel_queries") - def test_is_member_blkid(self, fake_call, monkeypatch, patch_bluestore_label): + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) + def test_is_member_blkid(self, fake_call, monkeypatch): disk = device.CephDiskDevice(device.Device("/dev/sda")) assert disk.is_member is True @pytest.mark.usefixtures("lsblk_ceph_disk_member", "disable_kernel_queries") + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_is_member_lsblk(self, fake_call, patch_bluestore_label, device_info): - lsblk = {"TYPE": "disk", "PARTLABEL": "ceph"} + lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": "ceph"} device_info(lsblk=lsblk) disk = device.CephDiskDevice(device.Device("/dev/sda")) assert disk.is_member is True + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_unknown_type(self, fake_call, device_info): - lsblk = {"TYPE": "disk", "PARTLABEL": "gluster"} + lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": "gluster"} device_info(lsblk=lsblk) disk = device.CephDiskDevice(device.Device("/dev/sda")) @@ -560,7 +685,9 @@ class TestCephDiskDevice(object): ceph_types = ['data', 'wal', 'db', 'lockbox', 'journal', 'block'] - @pytest.mark.usefixtures("blkid_ceph_disk_member", + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) + @pytest.mark.usefixtures("lsblk_ceph_disk_member", + "blkid_ceph_disk_member", "disable_kernel_queries") def test_type_blkid(self, monkeypatch, fake_call, device_info, ceph_partlabel): disk = device.CephDiskDevice(device.Device("/dev/sda")) @@ -570,6 +697,7 @@ class 
TestCephDiskDevice(object): @pytest.mark.usefixtures("blkid_ceph_disk_member", "lsblk_ceph_disk_member", "disable_kernel_queries") + @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False) def test_type_lsblk(self, fake_call, device_info, ceph_partlabel): disk = device.CephDiskDevice(device.Device("/dev/sda")) diff --git a/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py b/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py index 44f19e036..fcd644a86 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py @@ -1,7 +1,7 @@ import os import pytest -from mock.mock import patch from ceph_volume.util import disk +from mock.mock import patch class TestLsblkParser(object): @@ -218,119 +218,74 @@ class TestSizeParse(object): assert result == disk.Size(tb=1.8) -class TestGetBlockDevsLsblk(object): - - @patch('ceph_volume.process.call') - def test_return_structure(self, patched_call): - lsblk_stdout = [ - '/dev/dm-0 /dev/mapper/ceph--8b2684eb--56ff--49e4--8f28--522e04cbd6ab-osd--data--9fc29fbf--3b5b--4066--be10--61042569b5a7 lvm', - '/dev/vda /dev/vda disk', - '/dev/vda1 /dev/vda1 part', - '/dev/vdb /dev/vdb disk',] - patched_call.return_value = (lsblk_stdout, '', 0) - disks = disk.get_block_devs_lsblk() - assert len(disks) == len(lsblk_stdout) - assert len(disks[0]) == 3 - - @patch('ceph_volume.process.call') - def test_empty_lsblk(self, patched_call): - patched_call.return_value = ([], '', 0) - disks = disk.get_block_devs_lsblk() - assert len(disks) == 0 - - @patch('ceph_volume.process.call') - def test_raise_on_failure(self, patched_call): - patched_call.return_value = ([], 'error', 1) - with pytest.raises(OSError): - disk.get_block_devs_lsblk() - - class TestGetDevices(object): - def setup_path(self, tmpdir): - path = os.path.join(str(tmpdir), 'block') - os.makedirs(path) - return path - - def test_no_devices_are_found(self, tmpdir, patched_get_block_devs_lsblk): - 
patched_get_block_devs_lsblk.return_value = [] + def test_no_devices_are_found(self, tmpdir, patched_get_block_devs_sysfs): + patched_get_block_devs_sysfs.return_value = [] result = disk.get_devices(_sys_block_path=str(tmpdir)) assert result == {} - def test_sda_block_is_found(self, tmpdir, patched_get_block_devs_lsblk): + @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) + def test_sda_block_is_found(self, patched_get_block_devs_sysfs, fake_filesystem): sda_path = '/dev/sda' - patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] - block_path = self.setup_path(tmpdir) - os.makedirs(os.path.join(block_path, 'sda')) - result = disk.get_devices(_sys_block_path=block_path) + patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']] + result = disk.get_devices() assert len(result.keys()) == 1 assert result[sda_path]['human_readable_size'] == '0.00 B' assert result[sda_path]['model'] == '' assert result[sda_path]['partitions'] == {} - - def test_sda_size(self, tmpfile, tmpdir, patched_get_block_devs_lsblk): + @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) + def test_sda_size(self, patched_get_block_devs_sysfs, fake_filesystem): sda_path = '/dev/sda' - patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] - block_path = self.setup_path(tmpdir) - block_sda_path = os.path.join(block_path, 'sda') - os.makedirs(block_sda_path) - tmpfile('size', '1024', directory=block_sda_path) - result = disk.get_devices(_sys_block_path=block_path) + patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']] + fake_filesystem.create_file('/sys/block/sda/size', contents = '1024') + result = disk.get_devices() assert list(result.keys()) == [sda_path] assert result[sda_path]['human_readable_size'] == '512.00 KB' - def test_sda_sectorsize_fallsback(self, tmpfile, tmpdir, patched_get_block_devs_lsblk): + @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) + def 
test_sda_sectorsize_fallsback(self, patched_get_block_devs_sysfs, fake_filesystem): # if no sectorsize, it will use queue/hw_sector_size sda_path = '/dev/sda' - patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] - block_path = self.setup_path(tmpdir) - block_sda_path = os.path.join(block_path, 'sda') - sda_queue_path = os.path.join(block_sda_path, 'queue') - os.makedirs(block_sda_path) - os.makedirs(sda_queue_path) - tmpfile('hw_sector_size', contents='1024', directory=sda_queue_path) - result = disk.get_devices(_sys_block_path=block_path) + patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']] + fake_filesystem.create_file('/sys/block/sda/queue/hw_sector_size', contents = '1024') + result = disk.get_devices() assert list(result.keys()) == [sda_path] assert result[sda_path]['sectorsize'] == '1024' - def test_sda_sectorsize_from_logical_block(self, tmpfile, tmpdir, patched_get_block_devs_lsblk): + @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) + def test_sda_sectorsize_from_logical_block(self, patched_get_block_devs_sysfs, fake_filesystem): sda_path = '/dev/sda' - patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] - block_path = self.setup_path(tmpdir) - block_sda_path = os.path.join(block_path, 'sda') - sda_queue_path = os.path.join(block_sda_path, 'queue') - os.makedirs(block_sda_path) - os.makedirs(sda_queue_path) - tmpfile('logical_block_size', contents='99', directory=sda_queue_path) - result = disk.get_devices(_sys_block_path=block_path) + patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']] + fake_filesystem.create_file('/sys/block/sda/queue/logical_block_size', contents = '99') + result = disk.get_devices() assert result[sda_path]['sectorsize'] == '99' - def test_sda_sectorsize_does_not_fallback(self, tmpfile, tmpdir, patched_get_block_devs_lsblk): + @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) + def 
test_sda_sectorsize_does_not_fallback(self, patched_get_block_devs_sysfs, fake_filesystem): sda_path = '/dev/sda' - patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] - block_path = self.setup_path(tmpdir) - block_sda_path = os.path.join(block_path, 'sda') - sda_queue_path = os.path.join(block_sda_path, 'queue') - os.makedirs(block_sda_path) - os.makedirs(sda_queue_path) - tmpfile('logical_block_size', contents='99', directory=sda_queue_path) - tmpfile('hw_sector_size', contents='1024', directory=sda_queue_path) - result = disk.get_devices(_sys_block_path=block_path) + patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']] + fake_filesystem.create_file('/sys/block/sda/queue/logical_block_size', contents = '99') + fake_filesystem.create_file('/sys/block/sda/queue/hw_sector_size', contents = '1024') + result = disk.get_devices() assert result[sda_path]['sectorsize'] == '99' - def test_is_rotational(self, tmpfile, tmpdir, patched_get_block_devs_lsblk): + @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) + def test_is_rotational(self, patched_get_block_devs_sysfs, fake_filesystem): sda_path = '/dev/sda' - patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] - block_path = self.setup_path(tmpdir) - block_sda_path = os.path.join(block_path, 'sda') - sda_queue_path = os.path.join(block_sda_path, 'queue') - os.makedirs(block_sda_path) - os.makedirs(sda_queue_path) - tmpfile('rotational', contents='1', directory=sda_queue_path) - result = disk.get_devices(_sys_block_path=block_path) + patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']] + fake_filesystem.create_file('/sys/block/sda/queue/rotational', contents = '1') + result = disk.get_devices() assert result[sda_path]['rotational'] == '1' + @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) + def test_is_ceph_rbd(self, patched_get_block_devs_sysfs, fake_filesystem): + rbd_path = '/dev/rbd0' + 
patched_get_block_devs_sysfs.return_value = [[rbd_path, rbd_path, 'disk']] + result = disk.get_devices() + assert rbd_path not in result + class TestSizeCalculations(object): @@ -549,3 +504,21 @@ class TestSizeSpecificFormatting(object): result = "%s" % size.tb assert "%s" % size.tb == "%s" % size.terabytes assert result == "1027.00 TB" + + +class TestAllowLoopDevsWarning(object): + def test_loop_dev_warning(self, fake_call, caplog): + assert disk.allow_loop_devices() is False + assert not caplog.records + os.environ['CEPH_VOLUME_ALLOW_LOOP_DEVICES'] = "y" + assert disk.allow_loop_devices() is True + log = caplog.records[0] + assert log.levelname == "WARNING" + assert "will never be supported in production" in log.message + + +class TestHasBlueStoreLabel(object): + def test_device_path_is_a_path(self, fake_filesystem): + device_path = '/var/lib/ceph/osd/ceph-0' + fake_filesystem.create_dir(device_path) + assert not disk.has_bluestore_label(device_path) \ No newline at end of file diff --git a/ceph/src/ceph-volume/ceph_volume/tests/util/test_encryption.py b/ceph/src/ceph-volume/ceph_volume/tests/util/test_encryption.py index fc0991cf7..cd2ea8f18 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/util/test_encryption.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/util/test_encryption.py @@ -48,9 +48,9 @@ class TestStatus(object): class TestDmcryptClose(object): - def test_mapper_exists(self, fake_run, tmpfile): - file_name = tmpfile(name='mapper-device') - encryption.dmcrypt_close(file_name) + def test_mapper_exists(self, fake_run, fake_filesystem): + file_name = fake_filesystem.create_file('mapper-device') + encryption.dmcrypt_close(file_name.path) arguments = fake_run.calls[0]['args'][0] assert arguments[0] == 'cryptsetup' assert arguments[1] == 'remove' diff --git a/ceph/src/ceph-volume/ceph_volume/tests/util/test_system.py b/ceph/src/ceph-volume/ceph_volume/tests/util/test_system.py index e7a124b8d..5746f7023 100644 --- 
a/ceph/src/ceph-volume/ceph_volume/tests/util/test_system.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/util/test_system.py @@ -145,27 +145,28 @@ class TestGetMounts(object): with open(proc_path, 'w') as f: f.write('') monkeypatch.setattr(system, 'PROCDIR', PROCDIR) - assert system.get_mounts() == {} + m = system.Mounts() + assert m.get_mounts() == {} def test_is_mounted_(self, fake_proc): - result = system.get_mounts() - assert result['/dev/sdc2'] == ['/boot'] + m = system.Mounts() + assert m.get_mounts()['/dev/sdc2'] == ['/boot'] def test_ignores_two_fields(self, fake_proc): - result = system.get_mounts() - assert result.get('/dev/sde4') is None + m = system.Mounts() + assert m.get_mounts().get('/dev/sde4') is None def test_tmpfs_is_reported(self, fake_proc): - result = system.get_mounts() - assert result['tmpfs'][0] == '/dev/shm' + m = system.Mounts() + assert m.get_mounts()['tmpfs'][0] == '/dev/shm' def test_non_skip_devs_arent_reported(self, fake_proc): - result = system.get_mounts() - assert result.get('cgroup') is None + m = system.Mounts() + assert m.get_mounts().get('cgroup') is None def test_multiple_mounts_are_appended(self, fake_proc): - result = system.get_mounts() - assert len(result['tmpfs']) == 7 + m = system.Mounts() + assert len(m.get_mounts()['tmpfs']) == 7 def test_nonexistent_devices_are_skipped(self, tmpdir, monkeypatch): PROCDIR = str(tmpdir) @@ -176,19 +177,19 @@ class TestGetMounts(object): /dev/sda2 /far/lib/ceph/osd/ceph-1 xfs rw,attr2,inode64,noquota 0 0""")) monkeypatch.setattr(system, 'PROCDIR', PROCDIR) monkeypatch.setattr(os.path, 'exists', lambda x: False if x == '/dev/sda1' else True) - result = system.get_mounts() - assert result.get('/dev/sda1') is None + m = system.Mounts() + assert m.get_mounts().get('/dev/sda1') is None class TestIsBinary(object): - def test_is_binary(self, tmpfile): - binary_path = tmpfile(contents='asd\n\nlkjh\x00') - assert system.is_binary(binary_path) + def test_is_binary(self, fake_filesystem): + 
binary_path = fake_filesystem.create_file('/tmp/fake-file', contents='asd\n\nlkjh\x00') + assert system.is_binary(binary_path.path) - def test_is_not_binary(self, tmpfile): - binary_path = tmpfile(contents='asd\n\nlkjh0') - assert system.is_binary(binary_path) is False + def test_is_not_binary(self, fake_filesystem): + binary_path = fake_filesystem.create_file('/tmp/fake-file', contents='asd\n\nlkjh0') + assert system.is_binary(binary_path.path) is False class TestGetFileContents(object): @@ -197,21 +198,20 @@ class TestGetFileContents(object): filepath = os.path.join(str(tmpdir), 'doesnotexist') assert system.get_file_contents(filepath, 'default') == 'default' - def test_path_has_contents(self, tmpfile): - interesting_file = tmpfile(contents="1") - result = system.get_file_contents(interesting_file) + def test_path_has_contents(self, fake_filesystem): + interesting_file = fake_filesystem.create_file('/tmp/fake-file', contents="1") + result = system.get_file_contents(interesting_file.path) assert result == "1" - def test_path_has_multiline_contents(self, tmpfile): - interesting_file = tmpfile(contents="0\n1") - result = system.get_file_contents(interesting_file) + def test_path_has_multiline_contents(self, fake_filesystem): + interesting_file = fake_filesystem.create_file('/tmp/fake-file', contents="0\n1") + result = system.get_file_contents(interesting_file.path) assert result == "0\n1" - def test_exception_returns_default(self, tmpfile): - interesting_file = tmpfile(contents="0") - # remove read, causes IOError - os.chmod(interesting_file, 0o000) - result = system.get_file_contents(interesting_file) + def test_exception_returns_default(self): + with patch('builtins.open') as mocked_open: + mocked_open.side_effect = Exception() + result = system.get_file_contents('/tmp/fake-file') assert result == '' diff --git a/ceph/src/ceph-volume/ceph_volume/util/device.py b/ceph/src/ceph-volume/ceph_volume/util/device.py index edd6e804f..015cfe6ff 100644 --- 
a/ceph/src/ceph-volume/ceph_volume/util/device.py +++ b/ceph/src/ceph-volume/ceph_volume/util/device.py @@ -3,18 +3,19 @@ import logging import os from functools import total_ordering -from ceph_volume import sys_info, process +from ceph_volume import sys_info from ceph_volume.api import lvm from ceph_volume.util import disk, system from ceph_volume.util.lsmdisk import LSMDisk from ceph_volume.util.constants import ceph_disk_guids +from ceph_volume.util.disk import allow_loop_devices logger = logging.getLogger(__name__) report_template = """ -{dev:<25} {size:<12} {rot!s:<7} {available!s:<9} {model}""" +{dev:<25} {size:<12} {device_nodes:<15} {rot!s:<7} {available!s:<9} {model}""" def encryption_status(abspath): @@ -33,10 +34,17 @@ class Devices(object): """ def __init__(self, filter_for_batch=False, with_lsm=False): + lvs = lvm.get_lvs() + lsblk_all = disk.lsblk_all() + all_devices_vgs = lvm.get_all_devices_vgs() if not sys_info.devices: sys_info.devices = disk.get_devices() - self.devices = [Device(k, with_lsm) for k in - sys_info.devices.keys()] + self.devices = [Device(k, + with_lsm, + lvs=lvs, + lsblk_all=lsblk_all, + all_devices_vgs=all_devices_vgs) for k in + sys_info.devices.keys()] if filter_for_batch: self.devices = [d for d in self.devices if d.available_lvm_batch] @@ -48,6 +56,8 @@ class Devices(object): rot='rotates', model='Model name', available='available', + device_nodes='Device nodes', + )] for device in sorted(self.devices): output.append(device.report()) @@ -89,23 +99,37 @@ class Device(object): # unittests lvs = [] - def __init__(self, path, with_lsm=False): + def __init__(self, path, with_lsm=False, lvs=None, lsblk_all=None, all_devices_vgs=None): self.path = path # LVs can have a vg/lv path, while disks will have /dev/sda - self.abspath = path + self.symlink = None + # check if we are a symlink + if os.path.islink(self.path): + self.symlink = self.path + real_path = os.path.realpath(self.path) + # check if we are not a device mapper + if "dm-" 
not in real_path: + self.path = real_path + if not sys_info.devices: + sys_info.devices = disk.get_devices() + if sys_info.devices.get(self.path, {}): + self.device_nodes = sys_info.devices[self.path]['device_nodes'] + self.sys_api = sys_info.devices.get(self.path, {}) + self.partitions = self._get_partitions() self.lv_api = None - self.lvs = [] + self.lvs = [] if not lvs else lvs + self.lsblk_all = lsblk_all + self.all_devices_vgs = all_devices_vgs self.vgs = [] self.vg_name = None self.lv_name = None self.disk_api = {} - self.blkid_api = {} - self.sys_api = {} + self.blkid_api = None self._exists = None self._is_lvm_member = None + self.ceph_device = False self._parse() self.lsm_data = self.fetch_lsm(with_lsm) - self.ceph_device = None self.available_lvm, self.rejected_reasons_lvm = self._check_lvm_reject_reasons() self.available_raw, self.rejected_reasons_raw = self._check_raw_reject_reasons() @@ -148,47 +172,65 @@ class Device(object): def __hash__(self): return hash(self.path) + def load_blkid_api(self): + if self.blkid_api is None: + self.blkid_api = disk.blkid(self.path) + def _parse(self): - if not sys_info.devices: - sys_info.devices = disk.get_devices() - self.sys_api = sys_info.devices.get(self.abspath, {}) + lv = None if not self.sys_api: # if no device was found check if we are a partition - partname = self.abspath.split('/')[-1] + partname = self.path.split('/')[-1] for device, info in sys_info.devices.items(): part = info['partitions'].get(partname, {}) if part: self.sys_api = part break - # if the path is not absolute, we have 'vg/lv', let's use LV name - # to get the LV. - if self.path[0] == '/': - lv = lvm.get_single_lv(filters={'lv_path': self.path}) + if self.lvs: + for _lv in self.lvs: + # if the path is not absolute, we have 'vg/lv', let's use LV name + # to get the LV. 
+ if self.path[0] == '/': + if _lv.lv_path == self.path: + lv = _lv + break + else: + vgname, lvname = self.path.split('/') + if _lv.lv_name == lvname and _lv.vg_name == vgname: + lv = _lv + break else: - vgname, lvname = self.path.split('/') - lv = lvm.get_single_lv(filters={'lv_name': lvname, - 'vg_name': vgname}) + if self.path[0] == '/': + lv = lvm.get_single_lv(filters={'lv_path': self.path}) + else: + vgname, lvname = self.path.split('/') + lv = lvm.get_single_lv(filters={'lv_name': lvname, + 'vg_name': vgname}) + if lv: self.lv_api = lv self.lvs = [lv] - self.abspath = lv.lv_path + self.path = lv.lv_path self.vg_name = lv.vg_name self.lv_name = lv.name self.ceph_device = lvm.is_ceph_device(lv) else: - dev = disk.lsblk(self.path) - self.blkid_api = disk.blkid(self.path) + self.lvs = [] + if self.lsblk_all: + for dev in self.lsblk_all: + if dev['NAME'] == os.path.basename(self.path): + break + else: + dev = disk.lsblk(self.path) self.disk_api = dev device_type = dev.get('TYPE', '') # always check is this is an lvm member - if device_type in ['part', 'disk']: + valid_types = ['part', 'disk'] + if allow_loop_devices(): + valid_types.append('loop') + if device_type in valid_types: self._set_lvm_membership() - out, err, rc = process.call([ - 'ceph-bluestore-tool', 'show-label', - '--dev', self.path], verbose_on_failure=False) - if rc: - self.ceph_device = True self.ceph_disk = CephDiskDevice(self) @@ -200,7 +242,7 @@ class Device(object): prefix = 'Partition' elif self.is_device: prefix = 'Raw Device' - return '<%s: %s>' % (prefix, self.abspath) + return '<%s: %s>' % (prefix, self.path) def pretty_report(self): def format_value(v): @@ -232,11 +274,12 @@ class Device(object): def report(self): return report_template.format( - dev=self.abspath, + dev=self.path, size=self.size_human, rot=self.rotational, available=self.available, model=self.model, + device_nodes=self.device_nodes ) def json_report(self): @@ -252,7 +295,7 @@ class Device(object): """ props = 
['ID_VENDOR', 'ID_MODEL', 'ID_MODEL_ENC', 'ID_SERIAL_SHORT', 'ID_SERIAL', 'ID_SCSI_SERIAL'] - p = disk.udevadm_property(self.abspath, props) + p = disk.udevadm_property(self.path, props) if p.get('ID_MODEL','').startswith('LVM PV '): p['ID_MODEL'] = p.get('ID_MODEL_ENC', '').replace('\\x20', ' ').strip() if 'ID_VENDOR' in p and 'ID_MODEL' in p and 'ID_SCSI_SERIAL' in p: @@ -281,40 +324,51 @@ class Device(object): # VGs, should we consider it as part of LVM? We choose not to # here, because most likely, we need to use VGs from this PV. self._is_lvm_member = False - for path in self._get_pv_paths(): - vgs = lvm.get_device_vgs(path) + device_to_check = [self.path] + device_to_check.extend(self.partitions) + + # a pv can only be in one vg, so this should be safe + # FIXME: While the above assumption holds, sda1 and sda2 + # can each host a PV and VG. I think the vg_name property is + # actually unused (not 100% sure) and can simply be removed + vgs = None + if not self.all_devices_vgs: + self.all_devices_vgs = lvm.get_all_devices_vgs() + for path in device_to_check: + for dev_vg in self.all_devices_vgs: + if dev_vg.pv_name == path: + vgs = [dev_vg] if vgs: self.vgs.extend(vgs) - # a pv can only be in one vg, so this should be safe - # FIXME: While the above assumption holds, sda1 and sda2 - # can each host a PV and VG. I think the vg_name property is - # actually unused (not 100% sure) and can simply be removed self.vg_name = vgs[0] self._is_lvm_member = True self.lvs.extend(lvm.get_device_lvs(path)) - return self._is_lvm_member + if self.lvs: + self.ceph_device = any([True if lv.tags.get('ceph.osd_id') else False for lv in self.lvs]) - def _get_pv_paths(self): + def _get_partitions(self): """ For block devices LVM can reside on the raw block device or on a partition. Return a list of paths to be checked for a pv. 
""" - paths = [self.abspath] - path_dir = os.path.dirname(self.abspath) - for part in self.sys_api.get('partitions', {}).keys(): - paths.append(os.path.join(path_dir, part)) - return paths + partitions = [] + path_dir = os.path.dirname(self.path) + for partition in self.sys_api.get('partitions', {}).keys(): + partitions.append(os.path.join(path_dir, partition)) + return partitions @property def exists(self): - return os.path.exists(self.abspath) + return os.path.exists(self.path) @property def has_fs(self): + self.load_blkid_api() return 'TYPE' in self.blkid_api @property def has_gpt_headers(self): + self.load_blkid_api() return self.blkid_api.get("PTTYPE") == "gpt" @property @@ -362,18 +416,27 @@ class Device(object): @property def is_ceph_disk_member(self): - is_member = self.ceph_disk.is_member + def is_member(device): + return 'ceph' in device.get('PARTLABEL', '') or \ + device.get('PARTTYPE', '') in ceph_disk_guids.keys() + # If we come from Devices(), self.lsblk_all is set already. + # Otherwise, we have to grab the data. 
+ details = self.lsblk_all or disk.lsblk_all() + _is_member = False if self.sys_api.get("partitions"): for part in self.sys_api.get("partitions").keys(): - part = Device("/dev/%s" % part) - if part.is_ceph_disk_member: - is_member = True - break - return is_member + for dev in details: + if part.startswith(dev['NAME']): + if is_member(dev): + _is_member = True + return _is_member + else: + return is_member(self.disk_api) + raise RuntimeError(f"Couln't check if device {self.path} is a ceph-disk member.") @property def has_bluestore_label(self): - return disk.has_bluestore_label(self.abspath) + return disk.has_bluestore_label(self.path) @property def is_mapper(self): @@ -381,7 +444,10 @@ class Device(object): @property def device_type(self): - if self.disk_api: + self.load_blkid_api() + if 'type' in self.sys_api: + return self.sys_api['type'] + elif self.disk_api: return self.disk_api['TYPE'] elif self.blkid_api: return self.blkid_api['TYPE'] @@ -396,6 +462,7 @@ class Device(object): @property def is_partition(self): + self.load_blkid_api() if self.disk_api: return self.disk_api['TYPE'] == 'part' elif self.blkid_api: @@ -404,13 +471,17 @@ class Device(object): @property def is_device(self): + self.load_blkid_api() api = None if self.disk_api: api = self.disk_api elif self.blkid_api: api = self.blkid_api if api: - return self.device_type in ['disk', 'device', 'mpath'] + valid_types = ['disk', 'device', 'mpath'] + if allow_loop_devices(): + valid_types.append('loop') + return self.device_type in valid_types return False @property @@ -423,6 +494,7 @@ class Device(object): Only correct for LVs, device mappers, and partitions. Will report a ``None`` for raw devices. 
""" + self.load_blkid_api() crypt_reports = [self.blkid_api.get('TYPE', ''), self.disk_api.get('FSTYPE', '')] if self.is_lv: # if disk APIs are reporting this is encrypted use that: @@ -435,7 +507,7 @@ class Device(object): elif self.is_partition: return 'crypto_LUKS' in crypt_reports elif self.is_mapper: - active_mapper = encryption_status(self.abspath) + active_mapper = encryption_status(self.path) if active_mapper: # normalize a bit to ensure same values regardless of source encryption_type = active_mapper['type'].lower().strip('12') # turn LUKS1 or LUKS2 into luks @@ -520,7 +592,7 @@ class Device(object): except OSError as e: # likely failed to open the device. assuming it is BlueStore is the safest option # so that a possibly-already-existing OSD doesn't get overwritten - logger.error('failed to determine if device {} is BlueStore. device should not be used to avoid false negatives. err: {}'.format(self.abspath, e)) + logger.error('failed to determine if device {} is BlueStore. device should not be used to avoid false negatives. err: {}'.format(self.path, e)) rejected.append('Failed to determine if device is BlueStore') if self.is_partition: @@ -530,7 +602,7 @@ class Device(object): except OSError as e: # likely failed to open the device. assuming the parent is BlueStore is the safest # option so that a possibly-already-existing OSD doesn't get overwritten - logger.error('failed to determine if partition {} (parent: {}) has a BlueStore parent. partition should not be used to avoid false negatives. err: {}'.format(self.abspath, self.parent_device, e)) + logger.error('failed to determine if partition {} (parent: {}) has a BlueStore parent. partition should not be used to avoid false negatives. 
err: {}'.format(self.path, self.parent_device, e)) rejected.append('Failed to determine if parent device is BlueStore') if self.has_gpt_headers: diff --git a/ceph/src/ceph-volume/ceph_volume/util/disk.py b/ceph/src/ceph-volume/ceph_volume/util/disk.py index 88db05138..d2459e120 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/disk.py +++ b/ceph/src/ceph-volume/ceph_volume/util/disk.py @@ -134,10 +134,10 @@ def remove_partition(device): :param device: A ``Device()`` object """ - udev_info = udevadm_property(device.abspath) + udev_info = udevadm_property(device.path) partition_number = udev_info.get('ID_PART_ENTRY_NUMBER') if not partition_number: - raise RuntimeError('Unable to detect the partition number for device: %s' % device.abspath) + raise RuntimeError('Unable to detect the partition number for device: %s' % device.path) process.run( ['parted', device.parent_device, '--script', '--', 'rm', partition_number] @@ -229,6 +229,11 @@ def _udevadm_info(device): def lsblk(device, columns=None, abspath=False): + return lsblk_all(device=device, + columns=columns, + abspath=abspath) + +def lsblk_all(device='', columns=None, abspath=False): """ Create a dictionary of identifying values for a device using ``lsblk``. Each supported column is a key, in its *raw* format (all uppercase @@ -241,6 +246,7 @@ def lsblk(device, columns=None, abspath=False): NAME device name KNAME internal kernel device name + PKNAME internal kernel parent device name MAJ:MIN major:minor device number FSTYPE filesystem type MOUNTPOINT where the device is mounted @@ -284,38 +290,46 @@ def lsblk(device, columns=None, abspath=False): Normal CLI output, as filtered by the flags in this function will look like :: - $ lsblk --nodeps -P -o NAME,KNAME,MAJ:MIN,FSTYPE,MOUNTPOINT + $ lsblk -P -o NAME,KNAME,PKNAME,MAJ:MIN,FSTYPE,MOUNTPOINT NAME="sda1" KNAME="sda1" MAJ:MIN="8:1" FSTYPE="ext4" MOUNTPOINT="/" :param columns: A list of columns to report as keys in its original form. 
:param abspath: Set the flag for absolute paths on the report """ default_columns = [ - 'NAME', 'KNAME', 'MAJ:MIN', 'FSTYPE', 'MOUNTPOINT', 'LABEL', 'UUID', - 'RO', 'RM', 'MODEL', 'SIZE', 'STATE', 'OWNER', 'GROUP', 'MODE', + 'NAME', 'KNAME', 'PKNAME', 'MAJ:MIN', 'FSTYPE', 'MOUNTPOINT', 'LABEL', + 'UUID', 'RO', 'RM', 'MODEL', 'SIZE', 'STATE', 'OWNER', 'GROUP', 'MODE', 'ALIGNMENT', 'PHY-SEC', 'LOG-SEC', 'ROTA', 'SCHED', 'TYPE', 'DISC-ALN', 'DISC-GRAN', 'DISC-MAX', 'DISC-ZERO', 'PKNAME', 'PARTLABEL' ] - device = device.rstrip('/') columns = columns or default_columns - # --nodeps -> Avoid adding children/parents to the device, only give information - # on the actual device we are querying for # -P -> Produce pairs of COLUMN="value" # -p -> Return full paths to devices, not just the names, when ``abspath`` is set # -o -> Use the columns specified or default ones provided by this function - base_command = ['lsblk', '--nodeps', '-P'] + base_command = ['lsblk', '-P'] if abspath: base_command.append('-p') base_command.append('-o') base_command.append(','.join(columns)) - base_command.append(device) + out, err, rc = process.call(base_command) if rc != 0: - return {} + raise RuntimeError(f"Error: {err}") + + result = [] + + for line in out: + result.append(_lsblk_parser(line)) + + if not device: + return result - return _lsblk_parser(' '.join(out)) + for dev in result: + if dev['NAME'] == os.path.basename(device): + return dev + return {} def is_device(dev): """ @@ -326,16 +340,14 @@ def is_device(dev): """ if not os.path.exists(dev): return False - # use lsblk first, fall back to using stat - TYPE = lsblk(dev).get('TYPE') - if TYPE: - return TYPE in ['disk', 'mpath'] + if not dev.startswith('/dev/'): + return False + if dev[len('/dev/'):].startswith('loop'): + if not allow_loop_devices(): + return False # fallback to stat return _stat_is_device(os.lstat(dev).st_mode) - if stat.S_ISBLK(os.lstat(dev)): - return True - return False def is_partition(dev): @@ -361,6 +373,13 @@ 
def is_partition(dev): return False +def is_ceph_rbd(dev): + """ + Boolean to determine if a given device is a ceph RBD device, like /dev/rbd0 + """ + return dev.startswith(('/dev/rbd')) + + class BaseFloatUnit(float): """ Base class to support float representations of size values. Suffix is @@ -724,24 +743,78 @@ def is_locked_raw_device(disk_path): return 0 -def get_block_devs_lsblk(): - ''' - This returns a list of lists with 3 items per inner list. - KNAME - reflects the kernel device name , for example /dev/sda or /dev/dm-0 - NAME - the device name, for example /dev/sda or - /dev/mapper/- - TYPE - the block device type: disk, partition, lvm and such +class AllowLoopDevices(object): + allow = False + warned = False - ''' - cmd = ['lsblk', '-plno', 'KNAME,NAME,TYPE'] - stdout, stderr, rc = process.call(cmd) - # lsblk returns 1 on failure - if rc == 1: - raise OSError('lsblk returned failure, stderr: {}'.format(stderr)) - return [re.split(r'\s+', line) for line in stdout] + @classmethod + def __call__(cls): + val = os.environ.get("CEPH_VOLUME_ALLOW_LOOP_DEVICES", "false").lower() + if val not in ("false", 'no', '0'): + cls.allow = True + if not cls.warned: + logger.warning( + "CEPH_VOLUME_ALLOW_LOOP_DEVICES is set in your " + "environment, so we will allow the use of unattached loop" + " devices as disks. This feature is intended for " + "development purposes only and will never be supported in" + " production. Issues filed based on this behavior will " + "likely be ignored." 
+ ) + cls.warned = True + return cls.allow + + +allow_loop_devices = AllowLoopDevices() + + +def get_block_devs_sysfs(_sys_block_path='/sys/block', _sys_dev_block_path='/sys/dev/block'): + def holder_inner_loop(): + for holder in holders: + # /sys/block/sdy/holders/dm-8/dm/uuid + holder_dm_type = get_file_contents(os.path.join(_sys_block_path, dev, f'holders/{holder}/dm/uuid')).split('-')[0].lower() + if holder_dm_type == 'mpath': + return True + + # First, get devices that are _not_ partitions + result = list() + dev_names = os.listdir(_sys_block_path) + for dev in dev_names: + name = kname = os.path.join("/dev", dev) + if not os.path.exists(name): + continue + type_ = 'disk' + holders = os.listdir(os.path.join(_sys_block_path, dev, 'holders')) + if get_file_contents(os.path.join(_sys_block_path, dev, 'removable')) == "1": + continue + if holder_inner_loop(): + continue + dm_dir_path = os.path.join(_sys_block_path, dev, 'dm') + if os.path.isdir(dm_dir_path): + dm_type = get_file_contents(os.path.join(dm_dir_path, 'uuid')) + type_ = dm_type.split('-')[0].lower() + basename = get_file_contents(os.path.join(dm_dir_path, 'name')) + name = os.path.join("/dev/mapper", basename) + if dev.startswith('loop'): + if not allow_loop_devices(): + continue + # Skip loop devices that are not attached + if not os.path.exists(os.path.join(_sys_block_path, dev, 'loop')): + continue + type_ = 'loop' + result.append([kname, name, type_]) + # Next, look for devices that _are_ partitions + for item in os.listdir(_sys_dev_block_path): + is_part = get_file_contents(os.path.join(_sys_dev_block_path, item, 'partition')) == "1" + dev = os.path.basename(os.readlink(os.path.join(_sys_dev_block_path, item))) + if not is_part: + continue + name = kname = os.path.join("/dev", dev) + result.append([name, kname, "part"]) + return sorted(result, key=lambda x: x[0]) -def get_devices(_sys_block_path='/sys/block'): +def get_devices(_sys_block_path='/sys/block', device=''): """ Captures all available 
block devices as reported by lsblk. Additional interesting metadata like sectors, size, vendor, @@ -754,16 +827,24 @@ def get_devices(_sys_block_path='/sys/block'): device_facts = {} - block_devs = get_block_devs_lsblk() + block_devs = get_block_devs_sysfs(_sys_block_path) + + block_types = ['disk', 'mpath'] + if allow_loop_devices(): + block_types.append('loop') for block in block_devs: devname = os.path.basename(block[0]) diskname = block[1] - if block[2] not in ['disk', 'mpath']: + if block[2] not in block_types: continue sysdir = os.path.join(_sys_block_path, devname) metadata = {} + # If the device is ceph rbd it gets excluded + if is_ceph_rbd(diskname): + continue + # If the mapper device is a logical volume it gets excluded if is_mapper_device(diskname): if lvm.get_device_lvs(diskname): @@ -785,6 +866,12 @@ def get_devices(_sys_block_path='/sys/block'): for key, file_ in facts: metadata[key] = get_file_contents(os.path.join(sysdir, file_)) + device_slaves = os.listdir(os.path.join(sysdir, 'slaves')) + if device_slaves: + metadata['device_nodes'] = ','.join(device_slaves) + else: + metadata['device_nodes'] = devname + metadata['scheduler_mode'] = "" scheduler = get_file_contents(sysdir + "/queue/scheduler") if scheduler is not None: @@ -805,6 +892,7 @@ def get_devices(_sys_block_path='/sys/block'): metadata['human_readable_size'] = human_readable_size(metadata['size']) metadata['path'] = diskname metadata['locked'] = is_locked_raw_device(metadata['path']) + metadata['type'] = block[2] device_facts[diskname] = metadata return device_facts @@ -815,10 +903,13 @@ def has_bluestore_label(device_path): # throws OSError on failure logger.info("opening device {} to check for BlueStore label".format(device_path)) - with open(device_path, "rb") as fd: - # read first 22 bytes looking for bluestore disk signature - signature = fd.read(22) - if signature.decode('ascii', 'replace') == bluestoreDiskSignature: - isBluestore = True + try: + with open(device_path, "rb") as fd: 
+ # read first 22 bytes looking for bluestore disk signature + signature = fd.read(22) + if signature.decode('ascii', 'replace') == bluestoreDiskSignature: + isBluestore = True + except IsADirectoryError: + logger.info(f'{device_path} is a directory, skipping.') return isBluestore diff --git a/ceph/src/ceph-volume/ceph_volume/util/encryption.py b/ceph/src/ceph-volume/ceph_volume/util/encryption.py index 2a2c03337..cefd6094b 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/encryption.py +++ b/ceph/src/ceph-volume/ceph_volume/util/encryption.py @@ -1,13 +1,14 @@ import base64 import os import logging -from ceph_volume import process, conf +from ceph_volume import process, conf, terminal from ceph_volume.util import constants, system from ceph_volume.util.device import Device from .prepare import write_keyring from .disk import lsblk, device_family, get_part_entry_type logger = logging.getLogger(__name__) +mlogger = terminal.MultiLogger(__name__) def get_key_size_from_conf(): """ @@ -18,7 +19,7 @@ def get_key_size_from_conf(): key_size = conf.ceph.get_safe( 'osd', 'osd_dmcrypt_key_size', - default='512') + default='512', check_valid=False) if key_size not in ['256', '512']: logger.warning(("Invalid value set for osd_dmcrypt_key_size ({}). " @@ -135,6 +136,7 @@ def get_dmcrypt_key(osd_id, osd_fsid, lockbox_keyring=None): name = 'client.osd-lockbox.%s' % osd_fsid config_key = 'dm-crypt/osd/%s/luks' % osd_fsid + mlogger.info(f'Running ceph config-key get {config_key}') stdout, stderr, returncode = process.call( [ 'ceph', @@ -145,7 +147,8 @@ def get_dmcrypt_key(osd_id, osd_fsid, lockbox_keyring=None): 'get', config_key ], - show_command=True + show_command=True, + logfile_verbose=False ) if returncode != 0: raise RuntimeError('Unable to retrieve dmcrypt secret') @@ -232,7 +235,7 @@ def legacy_encrypted(device): This function assumes that ``device`` will be a partition. 
""" if os.path.isdir(device): - mounts = system.get_mounts(paths=True) + mounts = system.Mounts(paths=True).get_mounts() # yes, rebind the device variable here because a directory isn't going # to help with parsing device = mounts.get(device, [None])[0] @@ -270,6 +273,6 @@ def legacy_encrypted(device): devices = [Device(i['NAME']) for i in device_family(parent_device)] for d in devices: if d.ceph_disk.type == 'lockbox': - metadata['lockbox'] = d.abspath + metadata['lockbox'] = d.path break return metadata diff --git a/ceph/src/ceph-volume/ceph_volume/util/prepare.py b/ceph/src/ceph-volume/ceph_volume/util/prepare.py index df6d8c704..ff7427eed 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/prepare.py +++ b/ceph/src/ceph-volume/ceph_volume/util/prepare.py @@ -19,7 +19,8 @@ mlogger = terminal.MultiLogger(__name__) def create_key(): stdout, stderr, returncode = process.call( ['ceph-authtool', '--gen-print-key'], - show_command=True) + show_command=True, + logfile_verbose=False) if returncode != 0: raise RuntimeError('Unable to generate a new auth key') return ' '.join(stdout).strip() @@ -40,13 +41,15 @@ def write_keyring(osd_id, secret, keyring_name='keyring', name=None): """ osd_keyring = '/var/lib/ceph/osd/%s-%s/%s' % (conf.cluster, osd_id, keyring_name) name = name or 'osd.%s' % str(osd_id) - process.run( + mlogger.info(f'Creating keyring file for {name}') + process.call( [ 'ceph-authtool', osd_keyring, '--create-keyring', '--name', name, '--add-key', secret - ]) + ], + logfile_verbose=False) system.chown(osd_keyring) diff --git a/ceph/src/ceph-volume/ceph_volume/util/system.py b/ceph/src/ceph-volume/ceph_volume/util/system.py index ed1fb8ed2..590a0599b 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/system.py +++ b/ceph/src/ceph-volume/ceph_volume/util/system.py @@ -6,6 +6,7 @@ import platform import tempfile import uuid import subprocess +import threading from ceph_volume import process, terminal from . 
import as_string @@ -236,7 +237,8 @@ def path_is_mounted(path, destination=None): """ Check if the given path is mounted """ - mounts = get_mounts(paths=True) + m = Mounts(paths=True) + mounts = m.get_mounts() realpath = os.path.realpath(path) mounted_locations = mounts.get(realpath, []) @@ -250,16 +252,17 @@ def device_is_mounted(dev, destination=None): Check if the given device is mounted, optionally validating that a destination exists """ - plain_mounts = get_mounts(devices=True) - realpath_mounts = get_mounts(devices=True, realpath=True) + plain_mounts = Mounts(devices=True) + realpath_mounts = Mounts(devices=True, realpath=True) + realpath_dev = os.path.realpath(dev) if dev.startswith('/') else dev destination = os.path.realpath(destination) if destination else None # plain mounts - plain_dev_mounts = plain_mounts.get(dev, []) - realpath_dev_mounts = plain_mounts.get(realpath_dev, []) + plain_dev_mounts = plain_mounts.get_mounts().get(dev, []) + realpath_dev_mounts = plain_mounts.get_mounts().get(realpath_dev, []) # realpath mounts - plain_dev_real_mounts = realpath_mounts.get(dev, []) - realpath_dev_real_mounts = realpath_mounts.get(realpath_dev, []) + plain_dev_real_mounts = realpath_mounts.get_mounts().get(dev, []) + realpath_dev_real_mounts = realpath_mounts.get_mounts().get(realpath_dev, []) mount_locations = [ plain_dev_mounts, @@ -282,61 +285,97 @@ def device_is_mounted(dev, destination=None): logger.info('%s was not found as mounted', dev) return False - -def get_mounts(devices=False, paths=False, realpath=False): - """ - Create a mapping of all available system mounts so that other helpers can - detect nicely what path or device is mounted - - It ignores (most of) non existing devices, but since some setups might need - some extra device information, it will make an exception for: - - - tmpfs - - devtmpfs - - /dev/root - - If ``devices`` is set to ``True`` the mapping will be a device-to-path(s), - if ``paths`` is set to ``True`` then the mapping 
will be - a path-to-device(s) - - :param realpath: Resolve devices to use their realpaths. This is useful for - paths like LVM where more than one path can point to the same device - """ - devices_mounted = {} - paths_mounted = {} - do_not_skip = ['tmpfs', 'devtmpfs', '/dev/root'] - default_to_devices = devices is False and paths is False - - with open(PROCDIR + '/mounts', 'rb') as mounts: - proc_mounts = mounts.readlines() - - for line in proc_mounts: - fields = [as_string(f) for f in line.split()] - if len(fields) < 3: - continue - if realpath: - device = os.path.realpath(fields[0]) if fields[0].startswith('/') else fields[0] - else: - device = fields[0] - path = os.path.realpath(fields[1]) - # only care about actual existing devices - if not os.path.exists(device) or not device.startswith('/'): - if device not in do_not_skip: +class Mounts(object): + excluded_paths = [] + + def __init__(self, devices=False, paths=False, realpath=False): + self.devices = devices + self.paths = paths + self.realpath = realpath + + def safe_realpath(self, path, timeout=0.2): + def _realpath(path, result): + p = os.path.realpath(path) + result.append(p) + + result = [] + t = threading.Thread(target=_realpath, args=(path, result)) + t.setDaemon(True) + t.start() + t.join(timeout) + if t.is_alive(): + return None + return result[0] + + def get_mounts(self): + """ + Create a mapping of all available system mounts so that other helpers can + detect nicely what path or device is mounted + + It ignores (most of) non existing devices, but since some setups might need + some extra device information, it will make an exception for: + + - tmpfs + - devtmpfs + - /dev/root + + If ``devices`` is set to ``True`` the mapping will be a device-to-path(s), + if ``paths`` is set to ``True`` then the mapping will be + a path-to-device(s) + + :param realpath: Resolve devices to use their realpaths. 
This is useful for + paths like LVM where more than one path can point to the same device + """ + devices_mounted = {} + paths_mounted = {} + do_not_skip = ['tmpfs', 'devtmpfs', '/dev/root'] + default_to_devices = self.devices is False and self.paths is False + + + with open(PROCDIR + '/mounts', 'rb') as mounts: + proc_mounts = mounts.readlines() + + for line in proc_mounts: + fields = [as_string(f) for f in line.split()] + if len(fields) < 3: continue - if device in devices_mounted.keys(): - devices_mounted[device].append(path) - else: - devices_mounted[device] = [path] - if path in paths_mounted.keys(): - paths_mounted[path].append(device) - else: - paths_mounted[path] = [device] + if fields[0] in Mounts.excluded_paths or \ + fields[1] in Mounts.excluded_paths: + continue + if self.realpath: + if fields[0].startswith('/'): + device = self.safe_realpath(fields[0]) + if device is None: + logger.warning(f"Can't get realpath on {fields[0]}, skipping.") + Mounts.excluded_paths.append(fields[0]) + continue + else: + device = fields[0] + else: + device = fields[0] + path = self.safe_realpath(fields[1]) + if path is None: + logger.warning(f"Can't get realpath on {fields[1]}, skipping.") + Mounts.excluded_paths.append(fields[1]) + continue + # only care about actual existing devices + if not os.path.exists(device) or not device.startswith('/'): + if device not in do_not_skip: + continue + if device in devices_mounted.keys(): + devices_mounted[device].append(path) + else: + devices_mounted[device] = [path] + if path in paths_mounted.keys(): + paths_mounted[path].append(device) + else: + paths_mounted[path] = [device] - # Default to returning information for devices if - if devices is True or default_to_devices: - return devices_mounted - else: - return paths_mounted + # Default to returning information for devices if + if self.devices is True or default_to_devices: + return devices_mounted + else: + return paths_mounted def set_context(path, recursive=False): diff --git 
a/ceph/src/ceph-volume/tox.ini b/ceph/src/ceph-volume/tox.ini index c58951a9b..820cf6fb3 100644 --- a/ceph/src/ceph-volume/tox.ini +++ b/ceph/src/ceph-volume/tox.ini @@ -7,6 +7,7 @@ deps= pytest pytest-xdist mock + pyfakefs install_command=./tox_install_command.sh {opts} {packages} commands=py.test --numprocesses=auto -vv {posargs:ceph_volume/tests} --ignore=ceph_volume/tests/functional diff --git a/ceph/src/ceph-volume/tox_install_command.sh b/ceph/src/ceph-volume/tox_install_command.sh index 79343a4c2..c13c95533 100755 --- a/ceph/src/ceph-volume/tox_install_command.sh +++ b/ceph/src/ceph-volume/tox_install_command.sh @@ -1,3 +1,3 @@ -#!/bin/bash +#!/usr/bin/env bash python -m pip install --editable="file://`pwd`/../python-common" python -m pip install $@ diff --git a/ceph/src/cephadm/cephadm b/ceph/src/cephadm/cephadm index 2353e7b1e..ed5a48d87 100755 --- a/ceph/src/cephadm/cephadm +++ b/ceph/src/cephadm/cephadm @@ -53,8 +53,8 @@ DEFAULT_PROMTAIL_IMAGE = 'docker.io/grafana/promtail:2.4.0' DEFAULT_NODE_EXPORTER_IMAGE = 'quay.io/prometheus/node-exporter:v1.3.1' DEFAULT_ALERT_MANAGER_IMAGE = 'quay.io/prometheus/alertmanager:v0.23.0' DEFAULT_GRAFANA_IMAGE = 'quay.io/ceph/ceph-grafana:8.3.5' -DEFAULT_HAPROXY_IMAGE = 'docker.io/library/haproxy:2.3' -DEFAULT_KEEPALIVED_IMAGE = 'docker.io/arcts/keepalived' +DEFAULT_HAPROXY_IMAGE = 'quay.io/ceph/haproxy:2.3' +DEFAULT_KEEPALIVED_IMAGE = 'quay.io/ceph/keepalived:2.1.5' DEFAULT_SNMP_GATEWAY_IMAGE = 'docker.io/maxwo/snmp-notifier:v1.2.1' DEFAULT_REGISTRY = 'docker.io' # normalize unqualified digests to this # ------------------------------------------------------------------------------ @@ -82,6 +82,7 @@ CUSTOM_PS1 = r'[ceph: \u@\h \W]\$ ' DEFAULT_TIMEOUT = None # in seconds DEFAULT_RETRY = 15 DATEFMT = '%Y-%m-%dT%H:%M:%S.%fZ' +QUIET_LOG_LEVEL = 9 # DEBUG is 10, so using 9 to be lower level than DEBUG logger: logging.Logger = None # type: ignore @@ -107,9 +108,26 @@ You can invoke cephadm in two ways: """ cached_stdin = None 
+ ################################## +async def run_func(func: Callable, cmd: str) -> subprocess.CompletedProcess: + logger.debug(f'running function {func.__name__}, with parms: {cmd}') + response = func(cmd) + return response + + +async def concurrent_tasks(func: Callable, cmd_list: List[str]) -> List[Any]: + tasks = [] + for cmd in cmd_list: + tasks.append(run_func(func, cmd)) + + data = await asyncio.gather(*tasks) + + return data + + class EndPoint: """EndPoint representing an ip:port format""" @@ -234,7 +252,7 @@ class Podman(ContainerEngine): return self._version def get_version(self, ctx: CephadmContext) -> None: - out, _, _ = call_throws(ctx, [self.path, 'version', '--format', '{{.Client.Version}}']) + out, _, _ = call_throws(ctx, [self.path, 'version', '--format', '{{.Client.Version}}'], verbosity=CallVerbosity.QUIET) self._version = _parse_podman_version(out) def __str__(self) -> str: @@ -615,14 +633,14 @@ class Monitoring(object): _, err, code = call(ctx, [ ctx.container_engine.path, 'exec', container_id, cmd, '--version' - ], verbosity=CallVerbosity.DEBUG) + ], verbosity=CallVerbosity.QUIET) if code == 0: break cmd = 'alertmanager' # reset cmd for version extraction else: _, err, code = call(ctx, [ ctx.container_engine.path, 'exec', container_id, cmd, '--version' - ], verbosity=CallVerbosity.DEBUG) + ], verbosity=CallVerbosity.QUIET) if code == 0 and \ err.startswith('%s, version ' % cmd): version = err.split(' ')[2] @@ -713,7 +731,7 @@ class NFSGanesha(object): out, err, code = call(ctx, [ctx.container_engine.path, 'exec', container_id, NFSGanesha.entrypoint, '-v'], - verbosity=CallVerbosity.DEBUG) + verbosity=CallVerbosity.QUIET) if code == 0: match = re.search(r'NFS-Ganesha Release\s*=\s*[V]*([\d.]+)', out) if match: @@ -845,7 +863,7 @@ class CephIscsi(object): out, err, code = call(ctx, [ctx.container_engine.path, 'exec', container_id, '/usr/bin/python3', '-c', "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)"], - 
verbosity=CallVerbosity.DEBUG) + verbosity=CallVerbosity.QUIET) if code == 0: version = out.strip() return version @@ -1442,20 +1460,21 @@ class FileLock(object): try: while True: if not self.is_locked: - logger.debug('Acquiring lock %s on %s', lock_id, - lock_filename) + logger.log(QUIET_LOG_LEVEL, 'Acquiring lock %s on %s', lock_id, + lock_filename) self._acquire() if self.is_locked: - logger.debug('Lock %s acquired on %s', lock_id, - lock_filename) + logger.log(QUIET_LOG_LEVEL, 'Lock %s acquired on %s', lock_id, + lock_filename) break elif timeout >= 0 and time.time() - start_time > timeout: logger.warning('Timeout acquiring lock %s on %s', lock_id, lock_filename) raise Timeout(self._lock_file) else: - logger.debug( + logger.log( + QUIET_LOG_LEVEL, 'Lock %s not acquired on %s, waiting %s seconds ...', lock_id, lock_filename, poll_intervall ) @@ -1534,14 +1553,47 @@ class FileLock(object): # Popen wrappers, lifted from ceph-volume class CallVerbosity(Enum): + ##### + # Format: + # Normal Operation: , Errors: + # + # NOTE: QUIET log level is custom level only used when --verbose is passed + ##### + + # Normal Operation: None, Errors: None SILENT = 0 - # log stdout/stderr to logger.debug - DEBUG = 1 - # On a non-zero exit status, it will forcefully set - # logging ON for the terminal - VERBOSE_ON_FAILURE = 2 - # log at info (instead of debug) level. 
- VERBOSE = 3 + # Normal Operation: QUIET, Error: QUIET + QUIET = 1 + # Normal Operation: DEBUG, Error: DEBUG + DEBUG = 2 + # Normal Operation: QUIET, Error: INFO + QUIET_UNLESS_ERROR = 3 + # Normal Operation: DEBUG, Error: INFO + VERBOSE_ON_FAILURE = 4 + # Normal Operation: INFO, Error: INFO + VERBOSE = 5 + + def success_log_level(self) -> int: + _verbosity_level_to_log_level = { + self.SILENT: 0, + self.QUIET: QUIET_LOG_LEVEL, + self.DEBUG: logging.DEBUG, + self.QUIET_UNLESS_ERROR: QUIET_LOG_LEVEL, + self.VERBOSE_ON_FAILURE: logging.DEBUG, + self.VERBOSE: logging.INFO + } + return _verbosity_level_to_log_level[self] # type: ignore + + def error_log_level(self) -> int: + _verbosity_level_to_log_level = { + self.SILENT: 0, + self.QUIET: QUIET_LOG_LEVEL, + self.DEBUG: logging.DEBUG, + self.QUIET_UNLESS_ERROR: logging.INFO, + self.VERBOSE_ON_FAILURE: logging.INFO, + self.VERBOSE: logging.INFO + } + return _verbosity_level_to_log_level[self] # type: ignore if sys.version_info < (3, 8): @@ -1689,10 +1741,6 @@ def call(ctx: CephadmContext, async for line in reader: message = line.decode('utf-8') collected.write(message) - if verbosity == CallVerbosity.VERBOSE: - logger.info(prefix + message.rstrip()) - elif verbosity != CallVerbosity.SILENT: - logger.debug(prefix + message.rstrip()) return collected.getvalue() async def run_with_timeout() -> Tuple[str, str, int]: @@ -1714,13 +1762,14 @@ def call(ctx: CephadmContext, return stdout, stderr, returncode stdout, stderr, returncode = async_run(run_with_timeout()) - if returncode != 0 and verbosity == CallVerbosity.VERBOSE_ON_FAILURE: - logger.info('Non-zero exit code %d from %s', - returncode, ' '.join(command)) - for line in stdout.splitlines(): - logger.info(prefix + 'stdout ' + line) - for line in stderr.splitlines(): - logger.info(prefix + 'stderr ' + line) + log_level = verbosity.success_log_level() + if returncode != 0: + log_level = verbosity.error_log_level() + logger.log(log_level, f'Non-zero exit code {returncode} 
from {" ".join(command)}') + for line in stdout.splitlines(): + logger.log(log_level, prefix + 'stdout ' + line) + for line in stderr.splitlines(): + logger.log(log_level, prefix + 'stderr ' + line) return stdout, stderr, returncode @@ -2395,7 +2444,7 @@ def check_unit(ctx, unit_name): installed = False try: out, err, code = call(ctx, ['systemctl', 'is-enabled', unit_name], - verbosity=CallVerbosity.DEBUG) + verbosity=CallVerbosity.QUIET) if code == 0: enabled = True installed = True @@ -2409,7 +2458,7 @@ def check_unit(ctx, unit_name): state = 'unknown' try: out, err, code = call(ctx, ['systemctl', 'is-active', unit_name], - verbosity=CallVerbosity.DEBUG) + verbosity=CallVerbosity.QUIET) out = out.strip() if out in ['active']: state = 'running' @@ -2555,10 +2604,8 @@ def get_daemon_args(ctx, fsid, daemon_type, daemon_id): r += ['--cluster.peer={}'.format(peer)] # some alertmanager, by default, look elsewhere for a config r += ['--config.file=/etc/alertmanager/alertmanager.yml'] - if daemon_type == 'loki': - r += ['--config.file=/etc/loki/loki.yml'] if daemon_type == 'promtail': - r += ['--config.file=/etc/promtail/promtail.yml'] + r += ['--config.expand-env'] if daemon_type == 'node-exporter': r += ['--path.procfs=/host/proc', '--path.sysfs=/host/sys', @@ -2683,10 +2730,46 @@ def create_daemon_dirs(ctx, fsid, daemon_type, daemon_id, uid, gid, sg = SNMPGateway.init(ctx, fsid, daemon_id) sg.create_daemon_conf() + _write_custom_conf_files(ctx, daemon_type, str(daemon_id), fsid, uid, gid) + -def get_parm(option): - # type: (str) -> Dict[str, str] +def _write_custom_conf_files(ctx: CephadmContext, daemon_type: str, daemon_id: str, fsid: str, uid: int, gid: int) -> None: + # mostly making this its own function to make unit testing easier + if 'config_json' not in ctx or not ctx.config_json: + return + config_json = get_custom_config_files(ctx.config_json) + custom_config_dir = os.path.join(ctx.data_dir, fsid, 'custom_config_files', f'{daemon_type}.{daemon_id}') + if not 
os.path.exists(custom_config_dir): + makedirs(custom_config_dir, uid, gid, 0o755) + mandatory_keys = ['mount_path', 'content'] + for ccf in config_json['custom_config_files']: + if all(k in ccf for k in mandatory_keys): + file_path = os.path.join(custom_config_dir, os.path.basename(ccf['mount_path'])) + with open(file_path, 'w+', encoding='utf-8') as f: + os.fchown(f.fileno(), uid, gid) + os.fchmod(f.fileno(), 0o600) + f.write(ccf['content']) + +def get_parm(option: str) -> Dict[str, str]: + js = _get_config_json(option) + # custom_config_files is a special field that may be in the config + # dict. It is used for mounting custom config files into daemon's containers + # and should be accessed through the "get_custom_config_files" function. + # For get_parm we need to discard it. + js.pop('custom_config_files', None) + return js + + +def get_custom_config_files(option: str) -> Dict[str, List[Dict[str, str]]]: + js = _get_config_json(option) + res: Dict[str, List[Dict[str, str]]] = {'custom_config_files': []} + if 'custom_config_files' in js: + res['custom_config_files'] = js['custom_config_files'] + return res + + +def _get_config_json(option: str) -> Dict[str, Any]: if not option: return dict() @@ -3045,7 +3128,7 @@ def extract_uid_gid(ctx, img='', file_path='/var/lib/ceph'): image=img, entrypoint='stat', args=['-c', '%u %g', fp] - ).run() + ).run(verbosity=CallVerbosity.QUIET_UNLESS_ERROR) uid, gid = out.split(' ') return int(uid), int(gid) except RuntimeError as e: @@ -3288,16 +3371,6 @@ def deploy_daemon_units( bind_mounts=get_container_binds(ctx, fsid, daemon_type, daemon_id), cname='ceph-%s-%s.%s-activate' % (fsid, daemon_type, daemon_id), ) - if 'cluster' in ctx and ctx.cluster: - # ctx.cluster is only set during adoption of a daemon from a cluster - # with a custom name (not "ceph"). 
The initial activate command the first - # time we start the new cephadm based systemd unit for this osd must account - # for this by mounting to the correct data dir in the container. Otherwise - # necessary files from the old data dir of the daemon won't be copied over - # to the new data dir on the host. After the first start (e.g. on any redeploys) - # this is no longer necessary as we will have these files in the data dir on the host - if data_dir in prestart.volume_mounts: - prestart.volume_mounts[data_dir] = f'/var/lib/ceph/osd/{ctx.cluster}-{daemon_id}' _write_container_cmd_to_bash(ctx, f, prestart, 'LVM OSDs use ceph-volume lvm activate') elif daemon_type == CephIscsi.daemon_type: f.write(' '.join(CephIscsi.configfs_mount_umount(data_dir, mount=True)) + '\n') @@ -3947,10 +4020,10 @@ class CephContainer: ] return ret - def run(self, timeout=DEFAULT_TIMEOUT): - # type: (Optional[int]) -> str + def run(self, timeout=DEFAULT_TIMEOUT, verbosity=CallVerbosity.VERBOSE_ON_FAILURE): + # type: (Optional[int], CallVerbosity) -> str out, _, _ = call_throws(self.ctx, self.run_cmd(), - desc=self.entrypoint, timeout=timeout) + desc=self.entrypoint, timeout=timeout, verbosity=verbosity) return out @@ -4497,7 +4570,7 @@ def _pull_image(ctx, image, insecure=False): cmd_str = ' '.join(cmd) for sleep_secs in [1, 4, 25]: - out, err, ret = call(ctx, cmd) + out, err, ret = call(ctx, cmd, verbosity=CallVerbosity.QUIET_UNLESS_ERROR) if not ret: return @@ -4980,7 +5053,8 @@ def wait_for_mon( timeout = ctx.timeout if ctx.timeout else 60 # seconds out, err, ret = call(ctx, c.run_cmd(), desc=c.entrypoint, - timeout=timeout) + timeout=timeout, + verbosity=CallVerbosity.QUIET_UNLESS_ERROR) return ret == 0 is_available(ctx, 'mon', is_mon_available) @@ -5007,7 +5081,9 @@ def create_mgr( # type: () -> bool timeout = ctx.timeout if ctx.timeout else 60 # seconds try: - out = clifunc(['status', '-f', 'json-pretty'], timeout=timeout) + out = clifunc(['status', '-f', 'json-pretty'], + 
timeout=timeout, + verbosity=CallVerbosity.QUIET_UNLESS_ERROR) j = json.loads(out) return j.get('mgrmap', {}).get('available', False) except Exception as e: @@ -5079,6 +5155,11 @@ def prepare_ssh( logger.info('Deploying %s service with default placement...' % t) cli(['orch', 'apply', t]) + if ctx.with_centralized_logging: + for t in ['loki', 'promtail']: + logger.info('Deploying %s service with default placement...' % t) + cli(['orch', 'apply', t]) + def enable_cephadm_mgr_module( cli: Callable, wait_for_mgr_restart: Callable @@ -5344,6 +5425,8 @@ def save_cluster_config(ctx: CephadmContext, uid: int, gid: int, fsid: str) -> N def command_bootstrap(ctx): # type: (CephadmContext) -> int + ctx.error_code = 0 + if not ctx.output_config: ctx.output_config = os.path.join(ctx.output_dir, CEPH_CONF) if not ctx.output_keyring: @@ -5450,8 +5533,8 @@ def command_bootstrap(ctx): tmp_config = write_tmp(config, uid, gid) # a CLI helper to reduce our typing - def cli(cmd, extra_mounts={}, timeout=DEFAULT_TIMEOUT): - # type: (List[str], Dict[str, str], Optional[int]) -> str + def cli(cmd, extra_mounts={}, timeout=DEFAULT_TIMEOUT, verbosity=CallVerbosity.VERBOSE_ON_FAILURE): + # type: (List[str], Dict[str, str], Optional[int], CallVerbosity) -> str mounts = { log_dir: '/var/log/ceph:z', admin_keyring.name: '/etc/ceph/ceph.client.admin.keyring:z', @@ -5466,7 +5549,7 @@ def command_bootstrap(ctx): entrypoint='/usr/bin/ceph', args=cmd, volume_mounts=mounts, - ).run(timeout=timeout) + ).run(timeout=timeout, verbosity=verbosity) wait_for_mon(ctx, mon_id, mon_dir, admin_keyring.name, tmp_config.name) @@ -5503,9 +5586,9 @@ def command_bootstrap(ctx): # stat' command first, then fall back to 'mgr dump' if # necessary try: - j = json_loads_retry(lambda: cli(['mgr', 'stat'])) + j = json_loads_retry(lambda: cli(['mgr', 'stat'], verbosity=CallVerbosity.QUIET_UNLESS_ERROR)) except Exception: - j = json_loads_retry(lambda: cli(['mgr', 'dump'])) + j = json_loads_retry(lambda: cli(['mgr', 
'dump'], verbosity=CallVerbosity.QUIET_UNLESS_ERROR)) epoch = j['epoch'] # wait for mgr to have it @@ -5562,6 +5645,7 @@ def command_bootstrap(ctx): out = cli(['orch', 'apply', '-i', '/tmp/spec.yml'], extra_mounts=mounts) logger.info(out) except Exception: + ctx.error_code = -errno.EINVAL logger.info('\nApplying %s to cluster failed!\n' % ctx.apply_spec) save_cluster_config(ctx, uid, gid, fsid) @@ -5587,7 +5671,7 @@ def command_bootstrap(ctx): 'For more information see:\n\n' '\thttps://docs.ceph.com/docs/master/mgr/telemetry/\n') logger.info('Bootstrap complete.') - return 0 + return ctx.error_code ################################## @@ -5656,16 +5740,30 @@ def extract_uid_gid_monitoring(ctx, daemon_type): return uid, gid -def get_container_with_extra_args(ctx: CephadmContext, - fsid: str, daemon_type: str, daemon_id: Union[int, str], - privileged: bool = False, - ptrace: bool = False, - container_args: Optional[List[str]] = None) -> 'CephContainer': - # wrapper for get_container that additionally adds extra_container_args if present - # used for deploying daemons with additional podman/docker container arguments +def get_deployment_container(ctx: CephadmContext, + fsid: str, daemon_type: str, daemon_id: Union[int, str], + privileged: bool = False, + ptrace: bool = False, + container_args: Optional[List[str]] = None) -> 'CephContainer': + # wrapper for get_container specifically for containers made during the `cephadm deploy` + # command. 
Adds some extra things such as extra container args and custom config files c = get_container(ctx, fsid, daemon_type, daemon_id, privileged, ptrace, container_args) if 'extra_container_args' in ctx and ctx.extra_container_args: c.container_args.extend(ctx.extra_container_args) + if 'config_json' in ctx and ctx.config_json: + conf_files = get_custom_config_files(ctx.config_json) + mandatory_keys = ['mount_path', 'content'] + for conf in conf_files['custom_config_files']: + if all(k in conf for k in mandatory_keys): + mount_path = conf['mount_path'] + file_path = os.path.join( + ctx.data_dir, + fsid, + 'custom_config_files', + f'{daemon_type}.{daemon_id}', + os.path.basename(mount_path) + ) + c.volume_mounts[file_path] = mount_path return c @@ -5710,8 +5808,8 @@ def command_deploy(ctx): uid, gid = extract_uid_gid(ctx) make_var_run(ctx, ctx.fsid, uid, gid) - c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id, - ptrace=ctx.allow_ptrace) + c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id, + ptrace=ctx.allow_ptrace) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, config=config, keyring=keyring, osd_fsid=ctx.osd_fsid, @@ -5735,7 +5833,7 @@ def command_deploy(ctx): 'contain arg for {}'.format(daemon_type.capitalize(), ', '.join(required_args))) uid, gid = extract_uid_gid_monitoring(ctx, daemon_type) - c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id) + c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, reconfig=ctx.reconfig, ports=daemon_ports) @@ -5747,7 +5845,7 @@ def command_deploy(ctx): config, keyring = get_config_and_keyring(ctx) # TODO: extract ganesha uid/gid (997, 994) ? 
uid, gid = extract_uid_gid(ctx) - c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id) + c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, config=config, keyring=keyring, reconfig=ctx.reconfig, @@ -5756,7 +5854,7 @@ def command_deploy(ctx): elif daemon_type == CephIscsi.daemon_type: config, keyring = get_config_and_keyring(ctx) uid, gid = extract_uid_gid(ctx) - c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id) + c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, config=config, keyring=keyring, reconfig=ctx.reconfig, @@ -5765,7 +5863,7 @@ def command_deploy(ctx): elif daemon_type == HAproxy.daemon_type: haproxy = HAproxy.init(ctx, ctx.fsid, daemon_id) uid, gid = haproxy.extract_uid_gid_haproxy() - c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id) + c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, reconfig=ctx.reconfig, ports=daemon_ports) @@ -5773,7 +5871,7 @@ def command_deploy(ctx): elif daemon_type == Keepalived.daemon_type: keepalived = Keepalived.init(ctx, ctx.fsid, daemon_id) uid, gid = keepalived.extract_uid_gid_keepalived() - c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id) + c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, reconfig=ctx.reconfig, ports=daemon_ports) @@ -5782,9 +5880,9 @@ def command_deploy(ctx): cc = CustomContainer.init(ctx, ctx.fsid, daemon_id) if not ctx.reconfig and not redeploy: daemon_ports.extend(cc.ports) - c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id, - privileged=cc.privileged, - ptrace=ctx.allow_ptrace) + c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id, + 
privileged=cc.privileged, + ptrace=ctx.allow_ptrace) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid=cc.uid, gid=cc.gid, config=None, keyring=None, reconfig=ctx.reconfig, @@ -5799,7 +5897,7 @@ def command_deploy(ctx): elif daemon_type == SNMPGateway.daemon_type: sc = SNMPGateway.init(ctx, ctx.fsid, daemon_id) - c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id) + c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, sc.uid, sc.gid, ports=daemon_ports) @@ -5992,7 +6090,7 @@ def command_ceph_volume(ctx): volume_mounts=mounts, ) - out, err, code = call_throws(ctx, c.run_cmd()) + out, err, code = call_throws(ctx, c.run_cmd(), verbosity=CallVerbosity.QUIET_UNLESS_ERROR) if not code: print(out) @@ -6057,7 +6155,7 @@ def _list_ipv4_networks(ctx: CephadmContext) -> Dict[str, Dict[str, Set[str]]]: execstr: Optional[str] = find_executable('ip') if not execstr: raise FileNotFoundError("unable to find 'ip' command") - out, _, _ = call_throws(ctx, [execstr, 'route', 'ls']) + out, _, _ = call_throws(ctx, [execstr, 'route', 'ls'], verbosity=CallVerbosity.QUIET_UNLESS_ERROR) return _parse_ipv4_route(out) @@ -6085,8 +6183,8 @@ def _list_ipv6_networks(ctx: CephadmContext) -> Dict[str, Dict[str, Set[str]]]: execstr: Optional[str] = find_executable('ip') if not execstr: raise FileNotFoundError("unable to find 'ip' command") - routes, _, _ = call_throws(ctx, [execstr, '-6', 'route', 'ls']) - ips, _, _ = call_throws(ctx, [execstr, '-6', 'addr', 'ls']) + routes, _, _ = call_throws(ctx, [execstr, '-6', 'route', 'ls'], verbosity=CallVerbosity.QUIET_UNLESS_ERROR) + ips, _, _ = call_throws(ctx, [execstr, '-6', 'addr', 'ls'], verbosity=CallVerbosity.QUIET_UNLESS_ERROR) return _parse_ipv6_route(routes, ips) @@ -6124,7 +6222,7 @@ def _parse_ipv6_route(routes: str, ips: str) -> Dict[str, Dict[str, Set[str]]]: net = [n for n in r.keys() if ipaddress.ip_address(ip) in ipaddress.ip_network(n)] if 
net and iface in r[net[0]]: - assert(iface) + assert iface r[net[0]][iface].add(ip) return r @@ -6192,14 +6290,14 @@ def list_daemons(ctx, detail=True, legacy_dir=None): out, err, code = call( ctx, [container_path, 'stats', '--format', '{{.ID}},{{.MemUsage}}', '--no-stream'], - verbosity=CallVerbosity.DEBUG + verbosity=CallVerbosity.QUIET ) seen_memusage_cid_len, seen_memusage = _parse_mem_usage(code, out) out, err, code = call( ctx, [container_path, 'stats', '--format', '{{.ID}},{{.CPUPerc}}', '--no-stream'], - verbosity=CallVerbosity.DEBUG + verbosity=CallVerbosity.QUIET ) seen_cpuperc_cid_len, seen_cpuperc = _parse_cpu_perc(code, out) @@ -6228,7 +6326,7 @@ def list_daemons(ctx, detail=True, legacy_dir=None): try: out, err, code = call(ctx, ['ceph', '-v'], - verbosity=CallVerbosity.DEBUG) + verbosity=CallVerbosity.QUIET) if not code and out.startswith('ceph version '): host_version = out.split(' ')[2] except Exception: @@ -6279,7 +6377,7 @@ def list_daemons(ctx, detail=True, legacy_dir=None): container_path, 'image', 'inspect', image_id, '--format', '{{.RepoDigests}}', ], - verbosity=CallVerbosity.DEBUG) + verbosity=CallVerbosity.QUIET) if not code: image_digests = list(set(map( normalize_image_digest, @@ -6298,7 +6396,7 @@ def list_daemons(ctx, detail=True, legacy_dir=None): out, err, code = call(ctx, [container_path, 'exec', container_id, 'ceph', '-v'], - verbosity=CallVerbosity.DEBUG) + verbosity=CallVerbosity.QUIET) if not code and \ out.startswith('ceph version '): version = out.split(' ')[2] @@ -6307,7 +6405,7 @@ def list_daemons(ctx, detail=True, legacy_dir=None): out, err, code = call(ctx, [container_path, 'exec', container_id, 'grafana-server', '-v'], - verbosity=CallVerbosity.DEBUG) + verbosity=CallVerbosity.QUIET) if not code and \ out.startswith('Version '): version = out.split(' ')[1] @@ -6323,7 +6421,7 @@ def list_daemons(ctx, detail=True, legacy_dir=None): out, err, code = call(ctx, [container_path, 'exec', container_id, 'haproxy', '-v'], - 
verbosity=CallVerbosity.DEBUG) + verbosity=CallVerbosity.QUIET) if not code and \ out.startswith('HA-Proxy version '): version = out.split(' ')[2] @@ -6332,7 +6430,7 @@ def list_daemons(ctx, detail=True, legacy_dir=None): out, err, code = call(ctx, [container_path, 'exec', container_id, 'keepalived', '--version'], - verbosity=CallVerbosity.DEBUG) + verbosity=CallVerbosity.QUIET) if not code and \ err.startswith('Keepalived '): version = err.split(' ')[1] @@ -6442,7 +6540,7 @@ def get_container_stats(ctx: CephadmContext, container_path: str, fsid: str, dae '--format', '{{.Id}},{{.Config.Image}},{{.Image}},{{.Created}},{{index .Config.Labels "io.ceph.version"}}', name ] - out, err, code = call(ctx, cmd, verbosity=CallVerbosity.DEBUG) + out, err, code = call(ctx, cmd, verbosity=CallVerbosity.QUIET) if not code: break return out, err, code @@ -6569,6 +6667,37 @@ class AdoptOsd(object): return osd_fsid, osd_type + def change_cluster_name(self) -> None: + logger.info('Attempting to convert osd cluster name to ceph . . 
.') + c = get_ceph_volume_container( + self.ctx, + args=['lvm', 'list', '{}'.format(self.osd_id), '--format=json'], + ) + out, err, code = call_throws(self.ctx, c.run_cmd()) + if code: + raise Exception(f'Failed to get list of LVs: {err}\nceph-volume failed with rc {code}') + try: + js = json.loads(out) + if not js: + raise RuntimeError(f'Failed to find osd.{self.osd_id}') + device: Optional[Dict[Any, Any]] = None + for d in js[self.osd_id]: + if d['type'] == 'block': + device = d + break + if not device: + raise RuntimeError(f'Failed to find block device for osd.{self.osd_id}') + vg = device['vg_name'] + out, err, code = call_throws(self.ctx, ['lvchange', '--deltag', f'ceph.cluster_name={self.ctx.cluster}', vg]) + if code: + raise RuntimeError(f"Can't delete tag ceph.cluster_name={self.ctx.cluster} on osd.{self.osd_id}.\nlvchange failed with rc {code}") + out, err, code = call_throws(self.ctx, ['lvchange', '--addtag', 'ceph.cluster_name=ceph', vg]) + if code: + raise RuntimeError(f"Can't add tag ceph.cluster_name=ceph on osd.{self.osd_id}.\nlvchange failed with rc {code}") + logger.info('Successfully converted osd cluster name') + except (Exception, RuntimeError) as e: + logger.info(f'Failed to convert osd cluster name: {e}') + def command_adopt_ceph(ctx, daemon_type, daemon_id, fsid): # type: (CephadmContext, str, str, str) -> None @@ -6594,6 +6723,8 @@ def command_adopt_ceph(ctx, daemon_type, daemon_id, fsid): osd_fsid, osd_type = adopt_osd.check_offline_simple_osd() if not osd_fsid: raise Error('Unable to find OSD {}'.format(daemon_id)) + elif ctx.cluster != 'ceph': + adopt_osd.change_cluster_name() logger.info('objectstore_type is %s' % osd_type) assert osd_type if osd_type == 'filestore': @@ -6919,7 +7050,7 @@ def _zap_osds(ctx: CephadmContext) -> None: raise Error(f'Invalid JSON in ceph-volume inventory: {e}') for i in ls: - matches = [lv.get('cluster_fsid') == ctx.fsid for lv in i.get('lvs', [])] + matches = [lv.get('cluster_fsid') == ctx.fsid and 
i.get('ceph_device') for lv in i.get('lvs', [])] if any(matches) and all(matches): _zap(ctx, i.get('path')) elif any(matches): @@ -7378,7 +7509,7 @@ class Packager(object): def repo_gpgkey(self) -> Tuple[str, str]: if self.ctx.gpg_url: - return self.ctx.gpg_url + return self.ctx.gpg_url, 'manual' if self.stable or self.version: return 'https://download.ceph.com/keys/release.gpg', 'release' else: @@ -7442,7 +7573,7 @@ class Apt(Packager): self.update() def rm_repo(self) -> None: - for name in ['autobuild', 'release']: + for name in ['autobuild', 'release', 'manual']: p = '/etc/apt/trusted.gpg.d/ceph.%s.gpg' % name if os.path.exists(p): logger.info('Removing repo GPG key %s...' % p) @@ -7526,6 +7657,7 @@ class YumDnf(Packager): 'scientific': ('centos', 'el'), 'rocky': ('centos', 'el'), 'almalinux': ('centos', 'el'), + 'ol': ('centos', 'el'), 'fedora': ('fedora', 'fc'), 'mariner': ('mariner', 'cm'), } @@ -7829,6 +7961,50 @@ def command_install(ctx: CephadmContext) -> None: pkg = create_packager(ctx) pkg.install(ctx.packages) + +def command_rescan_disks(ctx: CephadmContext) -> str: + + def probe_hba(scan_path: str) -> None: + """Tell the adapter to rescan""" + with open(scan_path, 'w') as f: + f.write('- - -') + + cmd = ctx.func.__name__.replace('command_', '') + logger.info(f'{cmd}: starting') + start = time.time() + + all_scan_files = glob('/sys/class/scsi_host/*/scan') + scan_files = [] + skipped = [] + for scan_path in all_scan_files: + adapter_name = os.path.basename(os.path.dirname(scan_path)) + proc_name = read_file([os.path.join(os.path.dirname(scan_path), 'proc_name')]) + if proc_name in ['unknown', 'usb-storage']: + skipped.append(os.path.basename(scan_path)) + logger.info(f'{cmd}: rescan skipping incompatible host adapter {adapter_name} : {proc_name}') + continue + + scan_files.append(scan_path) + + if not scan_files: + logger.info(f'{cmd}: no compatible HBAs found') + return 'Ok. 
No compatible HBAs found' + + responses = async_run(concurrent_tasks(probe_hba, scan_files)) + failures = [r for r in responses if r] + + logger.info(f'{cmd}: Complete. {len(scan_files)} adapters rescanned, {len(failures)} failures, {len(skipped)} skipped') + + elapsed = time.time() - start + if failures: + plural = 's' if len(failures) > 1 else '' + if len(failures) == len(scan_files): + return f'Failed. All {len(scan_files)} rescan requests failed' + else: + return f'Partial. {len(scan_files) - len(failures)} successful, {len(failures)} failure{plural} against: {", ".join(failures)}' + + return f'Ok. {len(all_scan_files)} adapters detected: {len(scan_files)} rescanned, {len(skipped)} skipped, {len(failures)} failed ({elapsed:.2f}s)' + ################################## @@ -8295,7 +8471,7 @@ class HostFacts(): security = {} try: out, err, code = call(self.ctx, ['sestatus'], - verbosity=CallVerbosity.DEBUG) + verbosity=CallVerbosity.QUIET) security['type'] = 'SELinux' status, mode, policy = '', '', '' for line in out.split('\n'): @@ -8972,6 +9148,10 @@ def _get_parser(): '--skip-monitoring-stack', action='store_true', help='Do not automatically provision monitoring stack (prometheus, grafana, alertmanager, node-exporter)') + parser_bootstrap.add_argument( + '--with-centralized-logging', + action='store_true', + help='Automatically provision centralized logging (promtail, loki)') parser_bootstrap.add_argument( '--apply-spec', help='Apply cluster spec after bootstrap (copy ssh key, add hosts and apply services)') @@ -9171,6 +9351,10 @@ def _get_parser(): '--daemon-id', help='daemon id for agent') + parser_disk_rescan = subparsers.add_parser( + 'disk-rescan', help='rescan all HBAs to detect new/removed devices') + parser_disk_rescan.set_defaults(func=command_rescan_disks) + return parser @@ -9205,6 +9389,7 @@ def cephadm_init_logging(ctx: CephadmContext, args: List[str]) -> None: """Configure the logging for cephadm as well as updating the system to have the expected 
log dir and logrotate configuration. """ + logging.addLevelName(QUIET_LOG_LEVEL, 'QUIET') global logger if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR) @@ -9215,6 +9400,7 @@ def cephadm_init_logging(ctx: CephadmContext, args: List[str]) -> None: dictConfig(logging_config) logger = logging.getLogger() + logger.setLevel(QUIET_LOG_LEVEL) if not os.path.exists(ctx.logrotate_dir + '/cephadm'): with open(ctx.logrotate_dir + '/cephadm', 'w') as f: @@ -9225,13 +9411,14 @@ def cephadm_init_logging(ctx: CephadmContext, args: List[str]) -> None: compress missingok notifempty + su root root } """) if ctx.verbose: for handler in logger.handlers: - if handler.name == 'console': - handler.setLevel(logging.DEBUG) + if handler.name in ['console', 'log_file', 'console_stdout']: + handler.setLevel(QUIET_LOG_LEVEL) logger.debug('%s\ncephadm %s' % ('-' * 80, args)) diff --git a/ceph/src/cephadm/containers/keepalived/Dockerfile b/ceph/src/cephadm/containers/keepalived/Dockerfile new file mode 100644 index 000000000..ac305f72b --- /dev/null +++ b/ceph/src/cephadm/containers/keepalived/Dockerfile @@ -0,0 +1,24 @@ +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest + +RUN microdnf install --nodocs \ + bash \ + curl \ + iproute \ + keepalived-2.1.5 \ + && rm /etc/keepalived/keepalived.conf && microdnf clean all + +COPY /skel / + +RUN chmod +x init.sh + +CMD ["./init.sh"] + +# Build specific labels +LABEL maintainer="Guillaume Abrioux " +LABEL com.redhat.component="keepalived-container" +LABEL version=2.1.5 +LABEL name="keepalived" +LABEL description="keepalived for Ceph" +LABEL summary="Provides keepalived on RHEL 8 for Ceph." 
+LABEL io.k8s.display-name="Keepalived on RHEL 8" +LABEL io.openshift.tags="Ceph keepalived" diff --git a/ceph/src/cephadm/containers/keepalived/LICENSE b/ceph/src/cephadm/containers/keepalived/LICENSE new file mode 100644 index 000000000..74b10b143 --- /dev/null +++ b/ceph/src/cephadm/containers/keepalived/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 University of Michigan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/ceph/src/cephadm/containers/keepalived/README.md b/ceph/src/cephadm/containers/keepalived/README.md new file mode 100644 index 000000000..a70d61e31 --- /dev/null +++ b/ceph/src/cephadm/containers/keepalived/README.md @@ -0,0 +1,233 @@ +# arcts/keepalived + +A small [ubi8-minimal](https://catalog.redhat.com/software/containers/registry/registry.access.redhat.com/repository/ubi8/ubi-minimal) based Docker container that provides a method of IP high availability via [keepalived](http://www.keepalived.org/) (VRRP failover), and optional Kubernetes API Server monitoring. If allowed to auto configure (default behaviour) it will automatically generate a unicast based failover configuration with a minimal amount of user supplied information. + +For specific information on Keepalived, please see the man page on [keepalived.conf](http://linux.die.net/man/5/keepalived.conf) or the [Keepalived User Guide](http://www.keepalived.org/pdf/UserGuide.pdf). + + +## Index +- [arcts/keepalived](#arctskeepalived) + - [Index](#index) + - [Prerequisites](#prerequisites) + - [Configuration](#configuration) + - [Execution Control](#execution-control) + - [Autoconfiguration Options](#autoconfiguration-options) + - [Kubernetes Options](#kubernetes-options) + - [Suggested Kubernetes Settings](#suggested-kubernetes-settings) + - [Example Keepalived Configs](#example-keepalived-configs) + - [Example Autogenerated Keepalived Master Config](#example-autogenerated-keepalived-master-config) + - [Example Autogenerated Keepalived Backup Config](#example-autogenerated-keepalived-backup-config) + - [Example Run Commands](#example-run-commands) + - [Example Master Run Command](#example-master-run-command) + - [Example Backup Run Command](#example-backup-run-command) + + +## Prerequisites + +Before attempting to deploy the keepalived container, the host must allow non local binding of ipv4 addresses. To do this, configure the sysctl tunable `net.ipv4.ip_nonlocal_bind=1`. 
+ +In addition to enabling the nonlocal binds, the container must be run with both host networking (`--net=host`) and security setting CAP_NET_ADMIN (`--cap-add NET_ADMIN`) capability. These allow the container to manage the host's networking configuration, and this is essential to the function of keepalived. + + +## Configuration +### Execution Control + +| Variable | Default | +|:---------------------:|:------------------------------------------------:| +| `KEEPALIVED_AUTOCONF` | `true` | +| `KEEPALIVED_CONF` | `/etc/keepalived/keepalived.conf` | +| `KEEPALIVED_CMD` | `/usr/sbin/keepalived -n -l -f $KEEPALIVED_CONF` | +| `KEEPALIVED_DEBUG` | `false` | + +* `KEEPALIVED_AUTOCONF` - Enables or disables the auto-configuration of keepalived. + +* `KEEPALIVED_CONF` - The path to the keepalived configuration file. + +* `KEEPALIVED_CMD` - The command called to execute keepalived. + +* `KEEPALIVED_DEBUG` - Enables or disables debug level logging for keepalived (adds `-D` to `KEEPALIVED_CMD`. + + +### Autoconfiguration Options + +| Variable | Default | +|:-------------------------------------------:|:----------------------------------:| +| `KEEPALIVED_ADVERT_INT` | `1` | +| `KEEPALIVED_AUTH_PASS` | `pwd$KEEPALIVED_VIRTUAL_ROUTER_ID` | +| `KEEPALIVED_INTERFACE` | `eth0` | +| `KEEPALIVED_PRIORITY` | `200` | +| `KEEPALIVED_STATE` | `MASTER` | +| `KEEPALIVED_TRACK_INTERFACE_###` | | +| `KEEPALIVED_UNICAST_SRC_IP` | | +| `KEEPALIVED_UNICAST_PEER_###` | | +| `KEEPALIVED_VIRTUAL_IPADDRESS_###` | | +| `KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_###` | | +| `KEEPALIVED_VIRTUAL_ROUTER_ID` | `1` | +| `KEEPALIVED_KUBE_APISERVER_CHECK` | `false` | + +* `KEEPALIVED_ADVERT_INT` - The VRRP advertisement interval (in seconds). + +* `KEEPALIVED_AUTH_PASS` - A shared password used to authenticate each node in a VRRP group (**Note:** If password is longer than 8 characters, only the first 8 characters are used). 
+ +* `KEEPALIVED_INTERFACE` - The host interface that keepalived will monitor and use for VRRP traffic. + +* `KEEPALIVED_PRIORITY` - Election value, the server configured with the highest priority will become the Master. + +* `KEEPALIVED_STATE` - Defines the server role as Master or Backup. (**Options:** `MASTER` or `BACKUP`). + +* `KEEPALIVED_TRACK_INTERFACE_###` - An interface that's state should be monitored (e.g. eth0). More than one can be supplied as long as the variable name ends in a number from 0-999. + +* `KEEPALIVED_UNICAST_SRC_IP` - The IP on the host that the keepalived daemon should bind to. **Note:** If not specified, it will be the first IP bound to the interface specified in `KEEPALIVED_INTERFACE`. + +* `KEEPALIVED_UNICAST_PEER_###` - An IP of a peer participating in the VRRP group. More tha one can be supplied as long as the variable name ends in a number from 0-999. + +* `KEEPALIVED_VIRTUAL_IPADDRESS_###` - An instance of an address that will be monitored and failed over from one host to another. These should be a quoted string in the form of: `/ brd dev scope label

Metrics

" + "\n" + "\n"); + response_.body() = body; + } else if (request_.target() == "/metrics") { + response_.set(http::field::content_type, "text/plain; charset=utf-8"); + DaemonMetricCollector &collector = collector_instance(); + std::string metrics = collector.get_metrics(); + response_.body() = metrics; + } else { + response_.result(http::status::method_not_allowed); + response_.set(http::field::content_type, "text/plain"); + response_.body() = "File not found \n"; + } + } + + // Asynchronously transmit the response message. + void write_response() { + auto self = shared_from_this(); + + response_.prepare_payload(); + + http::async_write(socket_, response_, + [self](beast::error_code ec, std::size_t) { + self->socket_.shutdown(tcp::socket::shutdown_send, ec); + self->deadline_.cancel(); + if (ec) { + dout(1) << "ERROR: " << ec.message() << dendl; + return; + } + }); + } + + // Check whether we have spent enough time on this connection. + void check_deadline() { + auto self = shared_from_this(); + + deadline_.async_wait([self](beast::error_code ec) { + if (!ec) { + // Close socket to cancel any outstanding operation. + self->socket_.close(ec); + } + }); + } +}; + +// "Loop" forever accepting new connections. 
+void http_server(tcp::acceptor &acceptor, tcp::socket &socket) { + acceptor.async_accept(socket, [&](beast::error_code ec) { + if (!ec) + std::make_shared(std::move(socket))->start(); + http_server(acceptor, socket); + }); +} + +void http_server_thread_entrypoint() { + try { + std::string exporter_addr = g_conf().get_val("exporter_addr"); + auto const address = net::ip::make_address(exporter_addr); + unsigned short port = g_conf().get_val("exporter_http_port"); + + net::io_context ioc{1}; + + tcp::acceptor acceptor{ioc, {address, port}}; + tcp::socket socket{ioc}; + http_server(acceptor, socket); + dout(1) << "Http server running on " << exporter_addr << ":" << port << dendl; + ioc.run(); + } catch (std::exception const &e) { + dout(1) << "Error: " << e.what() << dendl; + exit(EXIT_FAILURE); + } +} diff --git a/ceph/src/exporter/http_server.h b/ceph/src/exporter/http_server.h new file mode 100644 index 000000000..0d0502f57 --- /dev/null +++ b/ceph/src/exporter/http_server.h @@ -0,0 +1,5 @@ +#pragma once + +#include + +void http_server_thread_entrypoint(); diff --git a/ceph/src/exporter/util.cc b/ceph/src/exporter/util.cc new file mode 100644 index 000000000..0ae190cc6 --- /dev/null +++ b/ceph/src/exporter/util.cc @@ -0,0 +1,48 @@ +#include "util.h" +#include "common/debug.h" +#include +#include +#include +#include +#include +#include + +#define dout_context g_ceph_context +#define dout_subsys ceph_subsys_ceph_exporter + +BlockTimer::BlockTimer(std::string file, std::string function) + : file(file), function(function), stopped(false) { + t1 = std::chrono::high_resolution_clock::now(); +} +BlockTimer::~BlockTimer() { + dout(20) << file << ":" << function << ": " << ms.count() << "ms" << dendl; +} + +// useful with stop +double BlockTimer::get_ms() { + return ms.count(); +} + +// Manually stop the timer as you might want to get the time +void BlockTimer::stop() { + if (!stopped) { + stopped = true; + t2 = std::chrono::high_resolution_clock::now(); + ms = t2 - t1; + } 
+} + +bool string_is_digit(std::string s) { + size_t i = 0; + while (std::isdigit(s[i]) && i < s.size()) { + i++; + } + return i >= s.size(); +} + +std::string read_file_to_string(std::string path) { + std::ifstream is(path); + std::stringstream buffer; + buffer << is.rdbuf(); + return buffer.str(); +} diff --git a/ceph/src/exporter/util.h b/ceph/src/exporter/util.h new file mode 100644 index 000000000..b1fb83ad8 --- /dev/null +++ b/ceph/src/exporter/util.h @@ -0,0 +1,22 @@ +#include "common/hostname.h" +#include +#include + +#define TIMED_FUNCTION() BlockTimer timer(__FILE__, __FUNCTION__) + +class BlockTimer { + public: + BlockTimer(std::string file, std::string function); + ~BlockTimer(); + void stop(); + double get_ms(); + private: + std::chrono::duration ms; + std::string file, function; + bool stopped; + std::chrono::time_point t1, t2; +}; + +bool string_is_digit(std::string s); +std::string read_file_to_string(std::string path); +std::string get_hostname(std::string path); diff --git a/ceph/src/include/buffer.h b/ceph/src/include/buffer.h index 0c89367dd..ef4add9d7 100644 --- a/ceph/src/include/buffer.h +++ b/ceph/src/include/buffer.h @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #if __cplusplus >= 201703L @@ -863,7 +864,9 @@ struct error_code; if (first_round) { impl_f(first_round); } - if (const auto second_round = len - first_round; second_round) { + // no C++17 for the sake of the C++11 guarantees of librados, sorry. 
+ const auto second_round = len - first_round; + if (second_round) { _refill(second_round); impl_f(second_round); } diff --git a/ceph/src/include/cephfs/ceph_ll_client.h b/ceph/src/include/cephfs/ceph_ll_client.h index 5bc693846..ac5b7c224 100644 --- a/ceph/src/include/cephfs/ceph_ll_client.h +++ b/ceph/src/include/cephfs/ceph_ll_client.h @@ -92,15 +92,24 @@ struct ceph_statx { /* * Compatibility macros until these defines make their way into glibc */ -#ifndef AT_NO_ATTR_SYNC -#define AT_NO_ATTR_SYNC 0x4000 /* Don't sync attributes with the server */ +#ifndef AT_STATX_DONT_SYNC +#define AT_STATX_SYNC_TYPE 0x6000 +#define AT_STATX_SYNC_AS_STAT 0x0000 +#define AT_STATX_FORCE_SYNC 0x2000 +#define AT_STATX_DONT_SYNC 0x4000 /* Don't sync attributes with the server */ #endif +/* + * This is deprecated and just for backwards compatibility. + * Please use AT_STATX_DONT_SYNC instead. + */ +#define AT_NO_ATTR_SYNC AT_STATX_DONT_SYNC /* Deprecated */ + /* * The statx interfaces only allow these flags. In order to allow us to add * others in the future, we disallow setting any that aren't recognized. */ -#define CEPH_REQ_FLAG_MASK (AT_SYMLINK_NOFOLLOW|AT_NO_ATTR_SYNC) +#define CEPH_REQ_FLAG_MASK (AT_SYMLINK_NOFOLLOW|AT_STATX_DONT_SYNC) /* fallocate mode flags */ #ifndef FALLOC_FL_KEEP_SIZE diff --git a/ceph/src/include/cephfs/libcephfs.h b/ceph/src/include/cephfs/libcephfs.h index a348a5b6c..344d763e8 100644 --- a/ceph/src/include/cephfs/libcephfs.h +++ b/ceph/src/include/cephfs/libcephfs.h @@ -852,7 +852,7 @@ int ceph_rename(struct ceph_mount_info *cmount, const char *from, const char *to * @param fd the file descriptor of the file to get statistics of. * @param stx the ceph_statx struct that will be filled in with the file's statistics. 
* @param want bitfield of CEPH_STATX_* flags showing designed attributes - * @param flags bitfield that can be used to set AT_* modifier flags (only AT_NO_ATTR_SYNC and AT_SYMLINK_NOFOLLOW) + * @param flags bitfield that can be used to set AT_* modifier flags (AT_STATX_SYNC_AS_STAT, AT_STATX_FORCE_SYNC, AT_STATX_DONT_SYNC and AT_SYMLINK_NOFOLLOW) * @returns 0 on success or negative error code on failure. */ int ceph_fstatx(struct ceph_mount_info *cmount, int fd, struct ceph_statx *stx, @@ -866,7 +866,7 @@ int ceph_fstatx(struct ceph_mount_info *cmount, int fd, struct ceph_statx *stx, * @param relpath to the file/directory to get statistics of * @param stx the ceph_statx struct that will be filled in with the file's statistics. * @param want bitfield of CEPH_STATX_* flags showing designed attributes - * @param flags bitfield that can be used to set AT_* modifier flags (only AT_NO_ATTR_SYNC and AT_SYMLINK_NOFOLLOW) + * @param flags bitfield that can be used to set AT_* modifier flags (AT_STATX_SYNC_AS_STAT, AT_STATX_FORCE_SYNC, AT_STATX_DONT_SYNC and AT_SYMLINK_NOFOLLOW) * @returns 0 on success or negative error code on failure. */ int ceph_statxat(struct ceph_mount_info *cmount, int dirfd, const char *relpath, @@ -879,7 +879,7 @@ int ceph_statxat(struct ceph_mount_info *cmount, int dirfd, const char *relpath, * @param path the file or directory to get the statistics of. * @param stx the ceph_statx struct that will be filled in with the file's statistics. * @param want bitfield of CEPH_STATX_* flags showing designed attributes - * @param flags bitfield that can be used to set AT_* modifier flags (only AT_NO_ATTR_SYNC and AT_SYMLINK_NOFOLLOW) + * @param flags bitfield that can be used to set AT_* modifier flags (AT_STATX_SYNC_AS_STAT, AT_STATX_FORCE_SYNC, AT_STATX_DONT_SYNC and AT_SYMLINK_NOFOLLOW) * @returns 0 on success or negative error code on failure. 
*/ int ceph_statx(struct ceph_mount_info *cmount, const char *path, struct ceph_statx *stx, diff --git a/ceph/src/include/cephfs/metrics/Types.h b/ceph/src/include/cephfs/metrics/Types.h index 7f5a40e24..d7cf56138 100644 --- a/ceph/src/include/cephfs/metrics/Types.h +++ b/ceph/src/include/cephfs/metrics/Types.h @@ -27,6 +27,12 @@ enum ClientMetricType { CLIENT_METRIC_TYPE_OPENED_INODES, CLIENT_METRIC_TYPE_READ_IO_SIZES, CLIENT_METRIC_TYPE_WRITE_IO_SIZES, + CLIENT_METRIC_TYPE_AVG_READ_LATENCY, + CLIENT_METRIC_TYPE_STDEV_READ_LATENCY, + CLIENT_METRIC_TYPE_AVG_WRITE_LATENCY, + CLIENT_METRIC_TYPE_STDEV_WRITE_LATENCY, + CLIENT_METRIC_TYPE_AVG_METADATA_LATENCY, + CLIENT_METRIC_TYPE_STDEV_METADATA_LATENCY, }; inline std::ostream &operator<<(std::ostream &os, const ClientMetricType &type) { switch(type) { @@ -60,6 +66,24 @@ inline std::ostream &operator<<(std::ostream &os, const ClientMetricType &type) case ClientMetricType::CLIENT_METRIC_TYPE_WRITE_IO_SIZES: os << "WRITE_IO_SIZES"; break; + case ClientMetricType::CLIENT_METRIC_TYPE_AVG_READ_LATENCY: + os << "AVG_READ_LATENCY"; + break; + case ClientMetricType::CLIENT_METRIC_TYPE_STDEV_READ_LATENCY: + os << "STDEV_READ_LATENCY"; + break; + case ClientMetricType::CLIENT_METRIC_TYPE_AVG_WRITE_LATENCY: + os << "AVG_WRITE_LATENCY"; + break; + case ClientMetricType::CLIENT_METRIC_TYPE_STDEV_WRITE_LATENCY: + os << "STDEV_WRITE_LATENCY"; + break; + case ClientMetricType::CLIENT_METRIC_TYPE_AVG_METADATA_LATENCY: + os << "AVG_METADATA_LATENCY"; + break; + case ClientMetricType::CLIENT_METRIC_TYPE_STDEV_METADATA_LATENCY: + os << "STDEV_METADATA_LATENCY"; + break; default: os << "(UNKNOWN:" << static_cast::type>(type) << ")"; break; @@ -128,97 +152,154 @@ struct CapInfoPayload : public ClientMetricPayloadBase { struct ReadLatencyPayload : public ClientMetricPayloadBase { utime_t lat; + utime_t mean; + uint64_t sq_sum; // sum of squares + uint64_t count; // IO count ReadLatencyPayload() : 
ClientMetricPayloadBase(ClientMetricType::CLIENT_METRIC_TYPE_READ_LATENCY) { } - ReadLatencyPayload(utime_t lat) - : ClientMetricPayloadBase(ClientMetricType::CLIENT_METRIC_TYPE_READ_LATENCY), lat(lat) { + ReadLatencyPayload(utime_t lat, utime_t mean, uint64_t sq_sum, uint64_t count) + : ClientMetricPayloadBase(ClientMetricType::CLIENT_METRIC_TYPE_READ_LATENCY), + lat(lat), + mean(mean), + sq_sum(sq_sum), + count(count) { } void encode(bufferlist &bl) const { using ceph::encode; - ENCODE_START(1, 1, bl); + ENCODE_START(2, 1, bl); encode(lat, bl); + encode(mean, bl); + encode(sq_sum, bl); + encode(count, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator &iter) { using ceph::decode; - DECODE_START(1, iter); + DECODE_START(2, iter); decode(lat, iter); + if (struct_v >= 2) { + decode(mean, iter); + decode(sq_sum, iter); + decode(count, iter); + } DECODE_FINISH(iter); } void dump(Formatter *f) const { f->dump_int("latency", lat); + f->dump_int("avg_latency", mean); + f->dump_unsigned("sq_sum", sq_sum); + f->dump_unsigned("count", count); } void print(std::ostream *out) const { - *out << "latency: " << lat; + *out << "latency: " << lat << ", avg_latency: " << mean + << ", sq_sum: " << sq_sum << ", count=" << count; } }; struct WriteLatencyPayload : public ClientMetricPayloadBase { utime_t lat; + utime_t mean; + uint64_t sq_sum; // sum of squares + uint64_t count; // IO count WriteLatencyPayload() : ClientMetricPayloadBase(ClientMetricType::CLIENT_METRIC_TYPE_WRITE_LATENCY) { } - WriteLatencyPayload(utime_t lat) - : ClientMetricPayloadBase(ClientMetricType::CLIENT_METRIC_TYPE_WRITE_LATENCY), lat(lat) { + WriteLatencyPayload(utime_t lat, utime_t mean, uint64_t sq_sum, uint64_t count) + : ClientMetricPayloadBase(ClientMetricType::CLIENT_METRIC_TYPE_WRITE_LATENCY), + lat(lat), + mean(mean), + sq_sum(sq_sum), + count(count){ } void encode(bufferlist &bl) const { using ceph::encode; - ENCODE_START(1, 1, bl); + ENCODE_START(2, 1, bl); encode(lat, bl); + 
encode(mean, bl); + encode(sq_sum, bl); + encode(count, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator &iter) { using ceph::decode; - DECODE_START(1, iter); + DECODE_START(2, iter); decode(lat, iter); + if (struct_v >= 2) { + decode(mean, iter); + decode(sq_sum, iter); + decode(count, iter); + } DECODE_FINISH(iter); } void dump(Formatter *f) const { f->dump_int("latency", lat); + f->dump_int("avg_latency", mean); + f->dump_unsigned("sq_sum", sq_sum); + f->dump_unsigned("count", count); } void print(std::ostream *out) const { - *out << "latency: " << lat; + *out << "latency: " << lat << ", avg_latency: " << mean + << ", sq_sum: " << sq_sum << ", count=" << count; } }; struct MetadataLatencyPayload : public ClientMetricPayloadBase { utime_t lat; + utime_t mean; + uint64_t sq_sum; // sum of squares + uint64_t count; // IO count MetadataLatencyPayload() - : ClientMetricPayloadBase(ClientMetricType::CLIENT_METRIC_TYPE_METADATA_LATENCY) { } - MetadataLatencyPayload(utime_t lat) - : ClientMetricPayloadBase(ClientMetricType::CLIENT_METRIC_TYPE_METADATA_LATENCY), lat(lat) { + : ClientMetricPayloadBase(ClientMetricType::CLIENT_METRIC_TYPE_METADATA_LATENCY) { } + MetadataLatencyPayload(utime_t lat, utime_t mean, uint64_t sq_sum, uint64_t count) + : ClientMetricPayloadBase(ClientMetricType::CLIENT_METRIC_TYPE_METADATA_LATENCY), + lat(lat), + mean(mean), + sq_sum(sq_sum), + count(count) { } void encode(bufferlist &bl) const { using ceph::encode; - ENCODE_START(1, 1, bl); + ENCODE_START(2, 1, bl); encode(lat, bl); + encode(mean, bl); + encode(sq_sum, bl); + encode(count, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator &iter) { using ceph::decode; - DECODE_START(1, iter); + DECODE_START(2, iter); decode(lat, iter); + if (struct_v >= 2) { + decode(mean, iter); + decode(sq_sum, iter); + decode(count, iter); + } DECODE_FINISH(iter); } void dump(Formatter *f) const { f->dump_int("latency", lat); + f->dump_int("avg_latency", mean); + 
f->dump_unsigned("sq_sum", sq_sum); + f->dump_unsigned("count", count); } void print(std::ostream *out) const { - *out << "latency: " << lat; + *out << "latency: " << lat << ", avg_latency: " << mean + << ", sq_sum: " << sq_sum << ", count=" << count; } }; diff --git a/ceph/src/include/err.h b/ceph/src/include/err.h index 2c63b6990..c188e9753 100644 --- a/ceph/src/include/err.h +++ b/ceph/src/include/err.h @@ -5,10 +5,11 @@ * adapted from linux 2.6.24 include/linux/err.h */ #define MAX_ERRNO 4095 -#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO) +#define IS_ERR_VALUE(x) ((x) >= (uintptr_t)-MAX_ERRNO) #include #include +#include /* this generates a warning in c++; caller can do the cast manually static inline void *ERR_PTR(long error) @@ -17,12 +18,12 @@ static inline void *ERR_PTR(long error) } */ -static inline long PTR_ERR(const void *ptr) +static inline intptr_t PTR_ERR(const void *ptr) { - return (uintptr_t) ptr; + return (intptr_t) ptr; } -static inline long IS_ERR(const void *ptr) +static inline bool IS_ERR(const void *ptr) { return IS_ERR_VALUE((uintptr_t)ptr); } diff --git a/ceph/src/include/interval_set.h b/ceph/src/include/interval_set.h index f1a21e5f9..dfb2a306c 100644 --- a/ceph/src/include/interval_set.h +++ b/ceph/src/include/interval_set.h @@ -106,6 +106,21 @@ class interval_set { return prev; } + // Predecrement + iterator& operator--() + { + --_iter; + return *this; + } + + // Postdecrement + iterator operator--(int) + { + iterator prev(_iter); + --_iter; + return prev; + } + friend class interval_set::const_iterator; protected: @@ -174,6 +189,21 @@ class interval_set { return prev; } + // Predecrement + iterator& operator--() + { + --_iter; + return *this; + } + + // Postdecrement + iterator operator--(int) + { + iterator prev(_iter); + --_iter; + return prev; + } + protected: typename Map::const_iterator _iter; }; diff --git a/ceph/src/kv/RocksDBStore.cc b/ceph/src/kv/RocksDBStore.cc index 070f79a60..be032b4fa 100644 --- 
a/ceph/src/kv/RocksDBStore.cc +++ b/ceph/src/kv/RocksDBStore.cc @@ -1739,6 +1739,8 @@ void RocksDBStore::RocksDBTransactionImpl::rm_range_keys(const string &prefix, const string &start, const string &end) { + ldout(db->cct, 10) << __func__ << " enter start=" << start + << " end=" << end << dendl; auto p_iter = db->cf_handles.find(prefix); if (p_iter == db->cf_handles.end()) { uint64_t cnt = db->delete_range_threshold; @@ -1750,6 +1752,8 @@ void RocksDBStore::RocksDBTransactionImpl::rm_range_keys(const string &prefix, bat.Delete(db->default_cf, combine_strings(prefix, it->key())); } if (cnt == 0) { + ldout(db->cct, 10) << __func__ << " p_iter == end(), resorting to DeleteRange" + << dendl; bat.RollbackToSavePoint(); bat.DeleteRange(db->default_cf, rocksdb::Slice(combine_strings(prefix, start)), @@ -1770,6 +1774,8 @@ void RocksDBStore::RocksDBTransactionImpl::rm_range_keys(const string &prefix, bat.Delete(cf, it->key()); } if (cnt == 0) { + ldout(db->cct, 10) << __func__ << " p_iter != end(), resorting to DeleteRange" + << dendl; bat.RollbackToSavePoint(); bat.DeleteRange(cf, rocksdb::Slice(start), rocksdb::Slice(end)); } else { @@ -1778,6 +1784,7 @@ void RocksDBStore::RocksDBTransactionImpl::rm_range_keys(const string &prefix, delete it; } } + ldout(db->cct, 10) << __func__ << " end" << dendl; } void RocksDBStore::RocksDBTransactionImpl::merge( diff --git a/ceph/src/librados/RadosClient.cc b/ceph/src/librados/RadosClient.cc index 3d4563c8b..43914a89c 100644 --- a/ceph/src/librados/RadosClient.cc +++ b/ceph/src/librados/RadosClient.cc @@ -283,7 +283,7 @@ int librados::RadosClient::connect() goto out; } - err = monclient.authenticate(conf->client_mount_timeout); + err = monclient.authenticate(std::chrono::duration(conf.get_val("client_mount_timeout")).count()); if (err) { ldout(cct, 0) << conf->name << " authentication error " << cpp_strerror(-err) << dendl; shutdown(); diff --git a/ceph/src/librados/librados_c.cc b/ceph/src/librados/librados_c.cc index 
93b29911d..792768b9d 100644 --- a/ceph/src/librados/librados_c.cc +++ b/ceph/src/librados/librados_c.cc @@ -1174,7 +1174,9 @@ extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_ioctx_destroy)(rados_ioctx_t io) { tracepoint(librados, rados_ioctx_destroy_enter, io); librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io; - ctx->put(); + if (ctx) { + ctx->put(); + } tracepoint(librados, rados_ioctx_destroy_exit); } LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_destroy); diff --git a/ceph/src/librbd/ImageWatcher.cc b/ceph/src/librbd/ImageWatcher.cc index f4fdc54b9..f5a27e0c2 100644 --- a/ceph/src/librbd/ImageWatcher.cc +++ b/ceph/src/librbd/ImageWatcher.cc @@ -572,12 +572,11 @@ template void ImageWatcher::schedule_request_lock(bool use_timer, int timer_delay) { ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); - if (m_image_ctx.exclusive_lock == nullptr) { - // exclusive lock dynamically disabled via image refresh + // see notify_request_lock() + if (m_image_ctx.exclusive_lock == nullptr || + m_image_ctx.exclusive_lock->is_lock_owner()) { return; } - ceph_assert(m_image_ctx.exclusive_lock && - !m_image_ctx.exclusive_lock->is_lock_owner()); std::shared_lock watch_locker{this->m_watch_lock}; if (this->is_registered(this->m_watch_lock)) { diff --git a/ceph/src/librbd/api/Mirror.cc b/ceph/src/librbd/api/Mirror.cc index 51716ff36..8a0e8f825 100644 --- a/ceph/src/librbd/api/Mirror.cc +++ b/ceph/src/librbd/api/Mirror.cc @@ -115,6 +115,26 @@ int remove_peer_config_key(librados::IoCtx& io_ctx, return 0; } +std::string get_mon_host(CephContext* cct) { + std::string mon_host; + if (auto mon_addrs = cct->get_mon_addrs(); + mon_addrs != nullptr && !mon_addrs->empty()) { + CachedStackStringStream css; + for (auto it = mon_addrs->begin(); it != mon_addrs->end(); ++it) { + if (it != mon_addrs->begin()) { + *css << ","; + } + *css << *it; + } + mon_host = css->str(); + } else { + ldout(cct, 20) << "falling back to mon_host in conf" << dendl; + mon_host = cct->_conf.get_val("mon_host"); + 
} + ldout(cct, 20) << "mon_host=" << mon_host << dendl; + return mon_host; +} + int create_bootstrap_user(CephContext* cct, librados::Rados& rados, std::string* peer_client_id, std::string* cephx_key) { ldout(cct, 20) << dendl; @@ -1298,8 +1318,7 @@ int Mirror::peer_bootstrap_create(librados::IoCtx& io_ctx, return r; } - std::string mon_host = cct->_conf.get_val("mon_host"); - ldout(cct, 20) << "mon_host=" << mon_host << dendl; + std::string mon_host = get_mon_host(cct); // format the token response bufferlist token_bl; @@ -1471,7 +1490,7 @@ int Mirror::peer_bootstrap_import(librados::IoCtx& io_ctx, return r; } - std::string local_mon_host = cct->_conf.get_val("mon_host"); + std::string local_mon_host = get_mon_host(cct); // create local cluster peer in remote cluster r = create_bootstrap_peer(cct, remote_io_ctx, diff --git a/ceph/src/librbd/cache/pwl/AbstractWriteLog.cc b/ceph/src/librbd/cache/pwl/AbstractWriteLog.cc index 49f4161ef..580726ddf 100644 --- a/ceph/src/librbd/cache/pwl/AbstractWriteLog.cc +++ b/ceph/src/librbd/cache/pwl/AbstractWriteLog.cc @@ -314,8 +314,7 @@ void AbstractWriteLog::log_perf() { template void AbstractWriteLog::periodic_stats() { - std::lock_guard locker(m_lock); - update_image_cache_state(); + std::unique_lock locker(m_lock); ldout(m_image_ctx.cct, 5) << "STATS: m_log_entries=" << m_log_entries.size() << ", m_dirty_log_entries=" << m_dirty_log_entries.size() << ", m_free_log_entries=" << m_free_log_entries @@ -328,6 +327,9 @@ void AbstractWriteLog::periodic_stats() { << ", m_current_sync_gen=" << m_current_sync_gen << ", m_flushed_sync_gen=" << m_flushed_sync_gen << dendl; + + update_image_cache_state(); + write_image_cache_state(locker); } template @@ -570,15 +572,15 @@ void AbstractWriteLog::pwl_init(Context *on_finish, DeferredContexts &later) } template -void AbstractWriteLog::update_image_cache_state() { +void AbstractWriteLog::write_image_cache_state(std::unique_lock& locker) { using klass = AbstractWriteLog; Context *ctx = 
util::create_context_callback< - klass, &klass::handle_update_image_cache_state>(this); - update_image_cache_state(ctx); + klass, &klass::handle_write_image_cache_state>(this); + m_cache_state->write_image_cache_state(locker, ctx); } template -void AbstractWriteLog::update_image_cache_state(Context *on_finish) { +void AbstractWriteLog::update_image_cache_state() { ldout(m_image_ctx.cct, 10) << dendl; ceph_assert(ceph_mutex_is_locked_by_me(m_lock)); @@ -593,11 +595,10 @@ void AbstractWriteLog::update_image_cache_state(Context *on_finish) { m_cache_state->hit_bytes = m_perfcounter->get(l_librbd_pwl_rd_hit_bytes); m_cache_state->miss_bytes = m_perfcounter->get(l_librbd_pwl_rd_bytes) - m_cache_state->hit_bytes; - m_cache_state->write_image_cache_state(on_finish); } template -void AbstractWriteLog::handle_update_image_cache_state(int r) { +void AbstractWriteLog::handle_write_image_cache_state(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << "r=" << r << dendl; @@ -622,8 +623,9 @@ void AbstractWriteLog::init(Context *on_finish) { Context *ctx = new LambdaContext( [this, on_finish](int r) { if (r >= 0) { - std::lock_guard locker(m_lock); - update_image_cache_state(on_finish); + std::unique_lock locker(m_lock); + update_image_cache_state(); + m_cache_state->write_image_cache_state(locker, on_finish); } else { on_finish->complete(r); } @@ -654,14 +656,15 @@ void AbstractWriteLog::shut_down(Context *on_finish) { Context *next_ctx = override_ctx(r, ctx); periodic_stats(); - std::lock_guard locker(m_lock); + std::unique_lock locker(m_lock); check_image_cache_state_clean(); m_wake_up_enabled = false; m_log_entries.clear(); m_cache_state->clean = true; m_cache_state->empty = true; remove_pool_file(); - update_image_cache_state(next_ctx); + update_image_cache_state(); + m_cache_state->write_image_cache_state(locker, next_ctx); }); ctx = new LambdaContext( [this, ctx](int r) { @@ -1305,6 +1308,7 @@ void AbstractWriteLog::complete_op_log_entries(GenericLogOperations 
&&ops, { GenericLogEntries dirty_entries; int published_reserves = 0; + bool need_update_state = false; ldout(m_image_ctx.cct, 20) << __func__ << ": completing" << dendl; for (auto &op : ops) { utime_t now = ceph_clock_now(); @@ -1327,6 +1331,7 @@ void AbstractWriteLog::complete_op_log_entries(GenericLogOperations &&ops, if (m_cache_state->clean && !this->m_dirty_log_entries.empty()) { m_cache_state->clean = false; update_image_cache_state(); + need_update_state = true; } } op->complete(result); @@ -1342,6 +1347,10 @@ void AbstractWriteLog::complete_op_log_entries(GenericLogOperations &&ops, log_entry->ram_entry.write_bytes); m_perfcounter->tinc(l_librbd_pwl_log_op_app_to_cmp_t, now - op->log_append_start_time); } + if (need_update_state) { + std::unique_lock locker(m_lock); + write_image_cache_state(locker); + } // New entries may be flushable { std::lock_guard locker(m_lock); @@ -1738,6 +1747,7 @@ void AbstractWriteLog::process_writeback_dirty_entries() { bool all_clean = false; int flushed = 0; bool has_write_entry = false; + bool need_update_state = false; ldout(cct, 20) << "Look for dirty entries" << dendl; { @@ -1760,6 +1770,7 @@ void AbstractWriteLog::process_writeback_dirty_entries() { if (!m_cache_state->clean && all_clean) { m_cache_state->clean = true; update_image_cache_state(); + need_update_state = true; } break; } @@ -1791,6 +1802,10 @@ void AbstractWriteLog::process_writeback_dirty_entries() { construct_flush_entries(entries_to_flush, post_unlock, has_write_entry); } + if (need_update_state) { + std::unique_lock locker(m_lock); + write_image_cache_state(locker); + } if (all_clean) { /* All flushing complete, drain outside lock */ @@ -2007,14 +2022,15 @@ void AbstractWriteLog::flush_dirty_entries(Context *on_finish) { bool stop_flushing; { - std::lock_guard locker(m_lock); + std::unique_lock locker(m_lock); flushing = (0 != m_flush_ops_in_flight); all_clean = m_dirty_log_entries.empty(); + stop_flushing = (m_shutting_down); if (!m_cache_state->clean 
&& all_clean && !flushing) { m_cache_state->clean = true; update_image_cache_state(); + write_image_cache_state(locker); } - stop_flushing = (m_shutting_down); } if (!flushing && (all_clean || stop_flushing)) { diff --git a/ceph/src/librbd/cache/pwl/AbstractWriteLog.h b/ceph/src/librbd/cache/pwl/AbstractWriteLog.h index 4905edde6..ffe299c37 100644 --- a/ceph/src/librbd/cache/pwl/AbstractWriteLog.h +++ b/ceph/src/librbd/cache/pwl/AbstractWriteLog.h @@ -233,8 +233,6 @@ private: void arm_periodic_stats(); void pwl_init(Context *on_finish, pwl::DeferredContexts &later); - void update_image_cache_state(Context *on_finish); - void handle_update_image_cache_state(int r); void check_image_cache_state_clean(); void flush_dirty_entries(Context *on_finish); @@ -399,6 +397,8 @@ protected: return 0; } void update_image_cache_state(void); + void write_image_cache_state(std::unique_lock& locker); + void handle_write_image_cache_state(int r); }; } // namespace pwl diff --git a/ceph/src/librbd/cache/pwl/ImageCacheState.cc b/ceph/src/librbd/cache/pwl/ImageCacheState.cc index fe6e1087d..ab941df0f 100644 --- a/ceph/src/librbd/cache/pwl/ImageCacheState.cc +++ b/ceph/src/librbd/cache/pwl/ImageCacheState.cc @@ -60,9 +60,10 @@ bool ImageCacheState::init_from_metadata(json_spirit::mValue& json_root) { } template -void ImageCacheState::write_image_cache_state(Context *on_finish) { +void ImageCacheState::write_image_cache_state(std::unique_lock& locker, + Context *on_finish) { + ceph_assert(ceph_mutex_is_locked_by_me(*locker.mutex())); stats_timestamp = ceph_clock_now(); - std::shared_lock owner_lock{m_image_ctx->owner_lock}; json_spirit::mObject o; o["present"] = present; o["empty"] = empty; @@ -82,7 +83,9 @@ void ImageCacheState::write_image_cache_state(Context *on_finish) { o["hit_bytes"] = hit_bytes; o["miss_bytes"] = miss_bytes; std::string image_state_json = json_spirit::write(o); + locker.unlock(); + std::shared_lock owner_lock{m_image_ctx->owner_lock}; ldout(m_image_ctx->cct, 20) << 
__func__ << " Store state: " << image_state_json << dendl; m_plugin_api.execute_image_metadata_set(m_image_ctx, PERSISTENT_CACHE_STATE, diff --git a/ceph/src/librbd/cache/pwl/ImageCacheState.h b/ceph/src/librbd/cache/pwl/ImageCacheState.h index c2fd4b778..5be5f73ac 100644 --- a/ceph/src/librbd/cache/pwl/ImageCacheState.h +++ b/ceph/src/librbd/cache/pwl/ImageCacheState.h @@ -63,7 +63,8 @@ public: void init_from_config(); bool init_from_metadata(json_spirit::mValue& json_root); - void write_image_cache_state(Context *on_finish); + void write_image_cache_state(std::unique_lock& locker, + Context *on_finish); void clear_image_cache_state(Context *on_finish); diff --git a/ceph/src/librbd/cache/pwl/rwl/WriteLog.cc b/ceph/src/librbd/cache/pwl/rwl/WriteLog.cc index 41bdafe5b..e922ba543 100644 --- a/ceph/src/librbd/cache/pwl/rwl/WriteLog.cc +++ b/ceph/src/librbd/cache/pwl/rwl/WriteLog.cc @@ -104,7 +104,7 @@ void WriteLog::alloc_op_log_entries(GenericLogOperations &ops) ceph_assert(ceph_mutex_is_locked_by_me(this->m_log_append_lock)); /* Allocate the (already reserved) log entries */ - std::lock_guard locker(m_lock); + std::unique_lock locker(m_lock); for (auto &operation : ops) { uint32_t entry_index = this->m_first_free_entry; @@ -120,6 +120,7 @@ void WriteLog::alloc_op_log_entries(GenericLogOperations &ops) if (m_cache_state->empty && !m_log_entries.empty()) { m_cache_state->empty = false; this->update_image_cache_state(); + this->write_image_cache_state(locker); } } @@ -547,6 +548,7 @@ bool WriteLog::retire_entries(const unsigned long int frees_per_tx) { m_perfcounter->hinc(l_librbd_pwl_retire_tx_t_hist, utime_t(tx_end - tx_start).to_nsec(), retiring_entries.size()); + bool need_update_state = false; /* Update runtime copy of first_valid, and free entries counts */ { std::lock_guard locker(m_lock); @@ -557,6 +559,7 @@ bool WriteLog::retire_entries(const unsigned long int frees_per_tx) { if (!m_cache_state->empty && m_log_entries.empty()) { m_cache_state->empty = true; 
this->update_image_cache_state(); + need_update_state = true; } for (auto &entry: retiring_entries) { if (entry->write_bytes()) { @@ -573,6 +576,10 @@ bool WriteLog::retire_entries(const unsigned long int frees_per_tx) { this->m_alloc_failed_since_retire = false; this->wake_up(); } + if (need_update_state) { + std::unique_lock locker(m_lock); + this->write_image_cache_state(locker); + } } else { ldout(cct, 20) << "Nothing to retire" << dendl; return false; diff --git a/ceph/src/librbd/cache/pwl/ssd/WriteLog.cc b/ceph/src/librbd/cache/pwl/ssd/WriteLog.cc index 2c0dc258b..753b15b69 100644 --- a/ceph/src/librbd/cache/pwl/ssd/WriteLog.cc +++ b/ceph/src/librbd/cache/pwl/ssd/WriteLog.cc @@ -531,7 +531,7 @@ void WriteLog::release_ram(std::shared_ptr log_entry) { template void WriteLog::alloc_op_log_entries(GenericLogOperations &ops) { - std::lock_guard locker(m_lock); + std::unique_lock locker(m_lock); for (auto &operation : ops) { auto &log_entry = operation->get_log_entry(); @@ -542,6 +542,7 @@ void WriteLog::alloc_op_log_entries(GenericLogOperations &ops) { if (m_cache_state->empty && !m_log_entries.empty()) { m_cache_state->empty = false; this->update_image_cache_state(); + this->write_image_cache_state(locker); } } @@ -807,6 +808,7 @@ bool WriteLog::retire_entries(const unsigned long int frees_per_tx) { allocated_bytes += entry->get_aligned_data_size(); } } + bool need_update_state = false; { std::lock_guard locker(m_lock); m_first_valid_entry = first_valid_entry; @@ -818,6 +820,7 @@ bool WriteLog::retire_entries(const unsigned long int frees_per_tx) { if (!m_cache_state->empty && m_log_entries.empty()) { m_cache_state->empty = true; this->update_image_cache_state(); + need_update_state = true; } ldout(m_image_ctx.cct, 20) @@ -832,6 +835,10 @@ bool WriteLog::retire_entries(const unsigned long int frees_per_tx) { this->m_alloc_failed_since_retire = false; this->wake_up(); } + if (need_update_state) { + std::unique_lock locker(m_lock); + 
this->write_image_cache_state(locker); + } this->dispatch_deferred_writes(); this->process_writeback_dirty_entries(); diff --git a/ceph/src/librbd/deep_copy/ImageCopyRequest.cc b/ceph/src/librbd/deep_copy/ImageCopyRequest.cc index 2338dfde9..08e959dd5 100644 --- a/ceph/src/librbd/deep_copy/ImageCopyRequest.cc +++ b/ceph/src/librbd/deep_copy/ImageCopyRequest.cc @@ -5,6 +5,7 @@ #include "ObjectCopyRequest.h" #include "common/errno.h" #include "librbd/Utils.h" +#include "librbd/asio/ContextWQ.h" #include "librbd/deep_copy/Handler.h" #include "librbd/deep_copy/Utils.h" #include "librbd/object_map/DiffRequest.h" @@ -18,6 +19,7 @@ namespace librbd { namespace deep_copy { +using librbd::util::create_async_context_callback; using librbd::util::create_context_callback; using librbd::util::unique_lock_name; @@ -145,11 +147,8 @@ void ImageCopyRequest::send_object_copies() { // attempt to schedule at least 'max_ops' initial requests where // some objects might be skipped if fast-diff notes no change - while (m_current_ops < max_ops) { - int r = send_next_object_copy(); - if (r < 0) { - break; - } + for (uint64_t i = 0; i < max_ops; i++) { + send_next_object_copy(); } complete = (m_current_ops == 0) && !m_updating_progress; @@ -161,7 +160,7 @@ void ImageCopyRequest::send_object_copies() { } template -int ImageCopyRequest::send_next_object_copy() { +void ImageCopyRequest::send_next_object_copy() { ceph_assert(ceph_mutex_is_locked(m_lock)); if (m_canceled && m_ret_val == 0) { @@ -169,13 +168,18 @@ int ImageCopyRequest::send_next_object_copy() { m_ret_val = -ECANCELED; } - if (m_ret_val < 0) { - return m_ret_val; - } else if (m_object_no >= m_end_object_no) { - return -ENODATA; + if (m_ret_val < 0 || m_object_no >= m_end_object_no) { + return; } uint64_t ono = m_object_no++; + Context *ctx = new LambdaContext( + [this, ono](int r) { + handle_object_copy(ono, r); + }); + + ldout(m_cct, 20) << "object_num=" << ono << dendl; + ++m_current_ops; uint8_t object_diff_state = 
object_map::DIFF_STATE_HOLE; if (m_object_diff_state.size() > 0) { @@ -199,13 +203,11 @@ int ImageCopyRequest::send_next_object_copy() { if (object_diff_state == object_map::DIFF_STATE_HOLE) { ldout(m_cct, 20) << "skipping non-existent object " << ono << dendl; - return 1; + create_async_context_callback(*m_src_image_ctx, ctx)->complete(0); + return; } } - ldout(m_cct, 20) << "object_num=" << ono << dendl; - ++m_current_ops; - uint32_t flags = 0; if (m_flatten) { flags |= OBJECT_COPY_REQUEST_FLAG_FLATTEN; @@ -215,15 +217,10 @@ int ImageCopyRequest::send_next_object_copy() { flags |= OBJECT_COPY_REQUEST_FLAG_EXISTS_CLEAN; } - Context *ctx = new LambdaContext( - [this, ono](int r) { - handle_object_copy(ono, r); - }); auto req = ObjectCopyRequest::create( m_src_image_ctx, m_dst_image_ctx, m_src_snap_id_start, m_dst_snap_id_start, m_snap_map, ono, flags, m_handler, ctx); req->send(); - return 0; } template @@ -258,13 +255,7 @@ void ImageCopyRequest::handle_object_copy(uint64_t object_no, int r) { } } - while (true) { - r = send_next_object_copy(); - if (r != 1) { - break; - } - } - + send_next_object_copy(); complete = (m_current_ops == 0) && !m_updating_progress; } diff --git a/ceph/src/librbd/deep_copy/ImageCopyRequest.h b/ceph/src/librbd/deep_copy/ImageCopyRequest.h index 9b7934dd3..cb8b83781 100644 --- a/ceph/src/librbd/deep_copy/ImageCopyRequest.h +++ b/ceph/src/librbd/deep_copy/ImageCopyRequest.h @@ -109,7 +109,7 @@ private: void handle_compute_diff(int r); void send_object_copies(); - int send_next_object_copy(); + void send_next_object_copy(); void handle_object_copy(uint64_t object_no, int r); void finish(int r); diff --git a/ceph/src/librbd/image/RefreshRequest.cc b/ceph/src/librbd/image/RefreshRequest.cc index 9b7138815..24159c55b 100644 --- a/ceph/src/librbd/image/RefreshRequest.cc +++ b/ceph/src/librbd/image/RefreshRequest.cc @@ -67,6 +67,7 @@ void RefreshRequest::send() { template void RefreshRequest::send_get_migration_header() { if 
(m_image_ctx.ignore_migrating) { + m_migration_spec = {}; if (m_image_ctx.old_format) { send_v1_get_snapshots(); } else { @@ -95,7 +96,7 @@ Context *RefreshRequest::handle_get_migration_header(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; - if (*result == 0) { + if (*result >= 0) { auto it = m_out_bl.cbegin(); *result = cls_client::migration_get_finish(&it, &m_migration_spec); } else if (*result == -ENOENT) { @@ -222,6 +223,7 @@ Context *RefreshRequest::handle_v1_read_header(int *result) { if (migrating) { send_get_migration_header(); } else { + m_migration_spec = {}; send_v1_get_snapshots(); } return nullptr; @@ -252,7 +254,7 @@ Context *RefreshRequest::handle_v1_get_snapshots(int *result) { std::vector snap_names; std::vector snap_sizes; - if (*result == 0) { + if (*result >= 0) { auto it = m_out_bl.cbegin(); *result = cls_client::old_snapshot_list_finish(&it, &snap_names, &snap_sizes, &m_snapc); @@ -305,15 +307,16 @@ Context *RefreshRequest::handle_v1_get_locks(int *result) { ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl; - if (*result == 0) { + if (*result >= 0) { auto it = m_out_bl.cbegin(); ClsLockType lock_type; *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers, &lock_type, &m_lock_tag); - if (*result == 0) { + if (*result >= 0) { m_exclusive_locked = (lock_type == ClsLockType::EXCLUSIVE); } } + if (*result < 0) { lderr(cct) << "failed to retrieve locks: " << cpp_strerror(*result) << dendl; @@ -405,10 +408,10 @@ Context *RefreshRequest::handle_v2_get_mutable_metadata(int *result) { } if (*result >= 0) { - ClsLockType lock_type = ClsLockType::NONE; + ClsLockType lock_type; *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers, &lock_type, &m_lock_tag); - if (*result == 0) { + if (*result >= 0) { m_exclusive_locked = (lock_type == ClsLockType::EXCLUSIVE); } } @@ -436,6 +439,8 @@ Context 
*RefreshRequest::handle_v2_get_mutable_metadata(int *result) { ldout(cct, 5) << "ignoring dynamically disabled exclusive lock" << dendl; m_features |= RBD_FEATURE_EXCLUSIVE_LOCK; m_incomplete_update = true; + } else { + m_incomplete_update = false; } if (((m_incompatible_features & RBD_FEATURE_NON_PRIMARY) != 0U) && @@ -453,6 +458,7 @@ Context *RefreshRequest::handle_v2_get_mutable_metadata(int *result) { } m_read_only = (m_read_only_flags != 0U); + m_legacy_parent = false; send_v2_get_parent(); return nullptr; } @@ -488,20 +494,25 @@ Context *RefreshRequest::handle_v2_get_parent(int *result) { auto it = m_out_bl.cbegin(); if (!m_legacy_parent) { - if (*result == 0) { + if (*result >= 0) { *result = cls_client::parent_get_finish(&it, &m_parent_md.spec); } std::optional parent_overlap; - if (*result == 0) { + if (*result >= 0) { *result = cls_client::parent_overlap_get_finish(&it, &parent_overlap); } - if (*result == 0 && parent_overlap) { - m_parent_md.overlap = *parent_overlap; - m_head_parent_overlap = true; + if (*result >= 0) { + if (parent_overlap) { + m_parent_md.overlap = *parent_overlap; + m_head_parent_overlap = true; + } else { + m_parent_md.overlap = 0; + m_head_parent_overlap = false; + } } - } else if (*result == 0) { + } else if (*result >= 0) { *result = cls_client::get_parent_finish(&it, &m_parent_md.spec, &m_parent_md.overlap); m_head_parent_overlap = true; @@ -512,7 +523,7 @@ Context *RefreshRequest::handle_v2_get_parent(int *result) { m_legacy_parent = true; send_v2_get_parent(); return nullptr; - } if (*result < 0) { + } else if (*result < 0) { lderr(cct) << "failed to retrieve parent: " << cpp_strerror(*result) << dendl; return m_on_finish; @@ -521,10 +532,10 @@ Context *RefreshRequest::handle_v2_get_parent(int *result) { if ((m_features & RBD_FEATURE_MIGRATING) != 0) { ldout(cct, 1) << "migrating feature set" << dendl; send_get_migration_header(); - return nullptr; + } else { + m_migration_spec = {}; + send_v2_get_metadata(); } - - 
send_v2_get_metadata(); return nullptr; } @@ -535,6 +546,7 @@ void RefreshRequest::send_v2_get_metadata() { auto ctx = create_context_callback< RefreshRequest, &RefreshRequest::handle_v2_get_metadata>(this); + m_metadata.clear(); auto req = GetMetadataRequest::create( m_image_ctx.md_ctx, m_image_ctx.header_oid, true, ImageCtx::METADATA_CONF_PREFIX, ImageCtx::METADATA_CONF_PREFIX, 0U, @@ -591,6 +603,7 @@ Context *RefreshRequest::handle_v2_get_pool_metadata(int *result) { template void RefreshRequest::send_v2_get_op_features() { if ((m_features & RBD_FEATURE_OPERATIONS) == 0LL) { + m_op_features = 0; send_v2_get_group(); return; } @@ -618,10 +631,12 @@ Context *RefreshRequest::handle_v2_get_op_features(int *result) { // -EOPNOTSUPP handler not required since feature bit implies OSD // supports the method - if (*result == 0) { + if (*result >= 0) { auto it = m_out_bl.cbegin(); - cls_client::op_features_get_finish(&it, &m_op_features); - } else if (*result < 0) { + *result = cls_client::op_features_get_finish(&it, &m_op_features); + } + + if (*result < 0) { lderr(cct) << "failed to retrieve op features: " << cpp_strerror(*result) << dendl; return m_on_finish; @@ -655,16 +670,20 @@ Context *RefreshRequest::handle_v2_get_group(int *result) { ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl; - if (*result == 0) { + if (*result >= 0) { auto it = m_out_bl.cbegin(); - cls_client::image_group_get_finish(&it, &m_group_spec); + *result = cls_client::image_group_get_finish(&it, &m_group_spec); } - if (*result < 0 && *result != -EOPNOTSUPP) { + + if (*result == -EOPNOTSUPP) { + m_group_spec = {}; + } else if (*result < 0) { lderr(cct) << "failed to retrieve group: " << cpp_strerror(*result) << dendl; return m_on_finish; } + m_legacy_snapshot = LEGACY_SNAPSHOT_DISABLED; send_v2_get_snapshots(); return nullptr; } @@ -754,16 +773,20 @@ Context *RefreshRequest::handle_v2_get_snapshots(int *result) { *result = cls_client::snapshot_get_finish(&it, 
&m_snap_infos[i]); } - if (*result == 0) { + if (*result >= 0) { if (m_legacy_parent) { *result = cls_client::get_parent_finish(&it, &m_snap_parents[i].spec, &m_snap_parents[i].overlap); } else { std::optional parent_overlap; *result = cls_client::parent_overlap_get_finish(&it, &parent_overlap); - if (*result == 0 && parent_overlap && m_parent_md.spec.pool_id > -1) { - m_snap_parents[i].spec = m_parent_md.spec; - m_snap_parents[i].overlap = *parent_overlap; + if (*result >= 0) { + if (parent_overlap && m_parent_md.spec.pool_id > -1) { + m_snap_parents[i].spec = m_parent_md.spec; + m_snap_parents[i].overlap = *parent_overlap; + } else { + m_snap_parents[i] = {}; + } } } } @@ -782,8 +805,8 @@ Context *RefreshRequest::handle_v2_get_snapshots(int *result) { } } - if (*result == -ENOENT) { - ldout(cct, 10) << "out-of-sync snapshot state detected" << dendl; + if (*result == -ENOENT && m_enoent_retries++ < MAX_ENOENT_RETRIES) { + ldout(cct, 10) << "out-of-sync snapshot state detected, retrying" << dendl; send_v2_get_mutable_metadata(); return nullptr; } else if (m_legacy_snapshot == LEGACY_SNAPSHOT_DISABLED && @@ -842,7 +865,14 @@ Context *RefreshRequest::handle_v2_refresh_parent(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; - if (*result < 0) { + if (*result == -ENOENT && m_enoent_retries++ < MAX_ENOENT_RETRIES) { + ldout(cct, 10) << "out-of-sync parent info detected, retrying" << dendl; + ceph_assert(m_refresh_parent != nullptr); + delete m_refresh_parent; + m_refresh_parent = nullptr; + send_v2_get_mutable_metadata(); + return nullptr; + } else if (*result < 0) { lderr(cct) << "failed to refresh parent image: " << cpp_strerror(*result) << dendl; save_result(result); diff --git a/ceph/src/librbd/image/RefreshRequest.h b/ceph/src/librbd/image/RefreshRequest.h index 6970a9b45..42f4b4669 100644 --- a/ceph/src/librbd/image/RefreshRequest.h +++ b/ceph/src/librbd/image/RefreshRequest.h @@ -27,6 
+27,8 @@ template class RefreshParentRequest; template class RefreshRequest { public: + static constexpr int MAX_ENOENT_RETRIES = 10; + static RefreshRequest *create(ImageCtxT &image_ctx, bool acquiring_lock, bool skip_open_parent, Context *on_finish) { return new RefreshRequest(image_ctx, acquiring_lock, skip_open_parent, @@ -50,37 +52,37 @@ private: * * | | migrating) * * | (v2) v * * \-----> V2_GET_MUTABLE_METADATA V1_GET_SNAPSHOTS - * * | | - * * | -EOPNOTSUPP v - * * | * * * V1_GET_LOCKS - * * | * * | - * * v v * v - * * V2_GET_PARENT - * * | | + * * * | | + * * * | -EOPNOTSUPP v + * * * | * * * V1_GET_LOCKS + * * * | * * | + * * * v v * v + * * * V2_GET_PARENT + * * * | | * * v | * * * * * * GET_MIGRATION_HEADER (skip if not | * (ENOENT) | migrating) | * v | - * V2_GET_METADATA | - * | | - * v | - * V2_GET_POOL_METADATA | - * | | - * v (skip if not enabled) | - * V2_GET_OP_FEATURES | - * | | - * v | - * V2_GET_GROUP | - * | | - * | -EOPNOTSUPP | - * | * * * * | - * | * * | - * v v * | - * V2_GET_SNAPSHOTS (skip if no snaps) | - * | | - * v | - * V2_REFRESH_PARENT (skip if no parent or | - * | refresh not needed) | + * * V2_GET_METADATA | + * * | | + * * v | + * * V2_GET_POOL_METADATA | + * * | | + * * v (skip if not enabled) | + * * V2_GET_OP_FEATURES | + * * | | + * * v | + * * V2_GET_GROUP | + * * | | + * * | -EOPNOTSUPP | + * * | * * * | + * * | * * | + * * v v * | + * * * V2_GET_SNAPSHOTS (skip if no snaps) | + * (ENOENT) | | + * * v | + * * * V2_REFRESH_PARENT (skip if no parent or | + * (ENOENT) | refresh not needed) | * v | * V2_INIT_EXCLUSIVE_LOCK (skip if lock | * | active or disabled) | @@ -144,6 +146,8 @@ private: bool m_legacy_parent = false; LegacySnapshot m_legacy_snapshot = LEGACY_SNAPSHOT_DISABLED; + int m_enoent_retries = 0; + uint8_t m_order = 0; uint64_t m_size = 0; uint64_t m_features = 0; diff --git a/ceph/src/librbd/mirror/PromoteRequest.cc b/ceph/src/librbd/mirror/PromoteRequest.cc index b7ae9366e..b119e4edc 100644 --- 
a/ceph/src/librbd/mirror/PromoteRequest.cc +++ b/ceph/src/librbd/mirror/PromoteRequest.cc @@ -59,7 +59,8 @@ void PromoteRequest::handle_get_info(int r) { finish(-EINVAL); return; } else if (m_promotion_state == PROMOTION_STATE_NON_PRIMARY && !m_force) { - lderr(cct) << "image is still primary within a remote cluster" << dendl; + lderr(cct) << "image is primary within a remote cluster or demotion is not propagated yet" + << dendl; finish(-EBUSY); return; } diff --git a/ceph/src/librbd/mirror/snapshot/CreatePrimaryRequest.cc b/ceph/src/librbd/mirror/snapshot/CreatePrimaryRequest.cc index 13f789415..54da9ad61 100644 --- a/ceph/src/librbd/mirror/snapshot/CreatePrimaryRequest.cc +++ b/ceph/src/librbd/mirror/snapshot/CreatePrimaryRequest.cc @@ -218,7 +218,7 @@ void CreatePrimaryRequest::unlink_peer() { continue; } count++; - if (count == 3) { + if (count == max_snapshots) { unlink_snap_id = snap_it.first; } if (count > max_snapshots) { diff --git a/ceph/src/mds/Beacon.cc b/ceph/src/mds/Beacon.cc index 766e4c5d2..78c92a3f1 100644 --- a/ceph/src/mds/Beacon.cc +++ b/ceph/src/mds/Beacon.cc @@ -14,6 +14,7 @@ #include "common/dout.h" +#include "common/likely.h" #include "common/HeartbeatMap.h" #include "include/stringify.h" @@ -298,6 +299,11 @@ void Beacon::notify_health(MDSRank const *mds) health.metrics.clear(); + if (unlikely(g_conf().get_val("mds_inject_health_dummy"))) { + MDSHealthMetric m(MDS_HEALTH_DUMMY, HEALTH_ERR, std::string("dummy")); + health.metrics.push_back(m); + } + // Detect presence of entries in DamageTable if (!mds->damage_table.empty()) { MDSHealthMetric m(MDS_HEALTH_DAMAGE, HEALTH_ERR, std::string( diff --git a/ceph/src/mds/CDir.cc b/ceph/src/mds/CDir.cc index b5012c340..7e18263d8 100644 --- a/ceph/src/mds/CDir.cc +++ b/ceph/src/mds/CDir.cc @@ -1474,6 +1474,20 @@ void CDir::mark_new(LogSegment *ls) mdcache->mds->queue_waiters(waiters); } +void CDir::set_fresh_fnode(fnode_const_ptr&& ptr) { + ceph_assert(inode->is_auth()); + ceph_assert(!is_projected()); 
+ ceph_assert(!state_test(STATE_COMMITTING)); + reset_fnode(std::move(ptr)); + projected_version = committing_version = committed_version = get_version(); + + if (state_test(STATE_REJOINUNDEF)) { + ceph_assert(mdcache->mds->is_rejoin()); + state_clear(STATE_REJOINUNDEF); + mdcache->opened_undef_dirfrag(this); + } +} + void CDir::mark_clean() { dout(10) << __func__ << " " << *this << " version " << get_version() << dendl; @@ -1548,16 +1562,9 @@ void CDir::fetch(MDSContext *c, std::string_view want_dn, bool ignore_authpinnab !inode->snaprealm) { dout(7) << "fetch dirfrag for unlinked directory, mark complete" << dendl; if (get_version() == 0) { - ceph_assert(inode->is_auth()); auto _fnode = allocate_fnode(); _fnode->version = 1; - reset_fnode(std::move(_fnode)); - - if (state_test(STATE_REJOINUNDEF)) { - ceph_assert(mdcache->mds->is_rejoin()); - state_clear(STATE_REJOINUNDEF); - mdcache->opened_undef_dirfrag(this); - } + set_fresh_fnode(std::move(_fnode)); } mark_complete(); @@ -2003,17 +2010,7 @@ void CDir::_omap_fetched(bufferlist& hdrbl, map& omap, // take the loaded fnode? // only if we are a fresh CDir* with no prior state. 
if (get_version() == 0) { - ceph_assert(!is_projected()); - ceph_assert(!state_test(STATE_COMMITTING)); - auto _fnode = allocate_fnode(got_fnode); - reset_fnode(std::move(_fnode)); - projected_version = committing_version = committed_version = get_version(); - - if (state_test(STATE_REJOINUNDEF)) { - ceph_assert(mdcache->mds->is_rejoin()); - state_clear(STATE_REJOINUNDEF); - mdcache->opened_undef_dirfrag(this); - } + set_fresh_fnode(allocate_fnode(got_fnode)); } list undef_inodes; diff --git a/ceph/src/mds/CDir.h b/ceph/src/mds/CDir.h index c8d3e417a..b2dcdafde 100644 --- a/ceph/src/mds/CDir.h +++ b/ceph/src/mds/CDir.h @@ -247,6 +247,7 @@ public: void reset_fnode(fnode_const_ptr&& ptr) { fnode = std::move(ptr); } + void set_fresh_fnode(fnode_const_ptr&& ptr); const fnode_const_ptr& get_fnode() const { return fnode; diff --git a/ceph/src/mds/CInode.cc b/ceph/src/mds/CInode.cc index 370fdfbd4..1231ca293 100644 --- a/ceph/src/mds/CInode.cc +++ b/ceph/src/mds/CInode.cc @@ -2037,10 +2037,11 @@ void CInode::decode_lock_inest(bufferlist::const_iterator& p) void CInode::encode_lock_ixattr(bufferlist& bl) { - ENCODE_START(1, 1, bl); + ENCODE_START(2, 1, bl); encode(get_inode()->version, bl); encode(get_inode()->ctime, bl); encode_xattrs(bl); + encode(get_inode()->xattr_version, bl); ENCODE_FINISH(bl); } @@ -2048,13 +2049,16 @@ void CInode::decode_lock_ixattr(bufferlist::const_iterator& p) { ceph_assert(!is_auth()); auto _inode = allocate_inode(*get_inode()); - DECODE_START(1, p); + DECODE_START(2, p); decode(_inode->version, p); utime_t tm; decode(tm, p); if (_inode->ctime < tm) _inode->ctime = tm; decode_xattrs(p); + if (struct_v >= 2) { + decode(_inode->xattr_version, p); + } DECODE_FINISH(p); reset_inode(std::move(_inode)); } diff --git a/ceph/src/mds/Locker.cc b/ceph/src/mds/Locker.cc index bacc79a10..a73c4225c 100644 --- a/ceph/src/mds/Locker.cc +++ b/ceph/src/mds/Locker.cc @@ -1201,7 +1201,6 @@ void Locker::eval_gather(SimpleLock *lock, bool first, bool *pneed_issue, 
MDSCon if (lock->is_dirty() && !lock->is_flushed()) { scatter_writebehind(static_cast(lock)); - mds->mdlog->flush(); return; } lock->clear_flushed(); @@ -2230,7 +2229,11 @@ Capability* Locker::issue_new_caps(CInode *in, // [auth] twiddle mode? eval(in, CEPH_CAP_LOCKS); - if (_need_flush_mdlog(in, my_want)) + int all_allowed = -1, loner_allowed = -1, xlocker_allowed = -1; + int allowed = get_allowed_caps(in, cap, all_allowed, loner_allowed, + xlocker_allowed); + + if (_need_flush_mdlog(in, my_want & ~allowed, true)) mds->mdlog->flush(); } else { @@ -2269,30 +2272,64 @@ public: } }; -int Locker::issue_caps(CInode *in, Capability *only_cap) +int Locker::get_allowed_caps(CInode *in, Capability *cap, + int &all_allowed, int &loner_allowed, + int &xlocker_allowed) { + client_t client = cap->get_client(); + // allowed caps are determined by the lock mode. - int all_allowed = in->get_caps_allowed_by_type(CAP_ANY); - int loner_allowed = in->get_caps_allowed_by_type(CAP_LONER); - int xlocker_allowed = in->get_caps_allowed_by_type(CAP_XLOCKER); + if (all_allowed == -1) + all_allowed = in->get_caps_allowed_by_type(CAP_ANY); + if (loner_allowed == -1) + loner_allowed = in->get_caps_allowed_by_type(CAP_LONER); + if (xlocker_allowed == -1) + xlocker_allowed = in->get_caps_allowed_by_type(CAP_XLOCKER); client_t loner = in->get_loner(); if (loner >= 0) { - dout(7) << "issue_caps loner client." << loner + dout(7) << "get_allowed_caps loner client." 
<< loner << " allowed=" << ccap_string(loner_allowed) << ", xlocker allowed=" << ccap_string(xlocker_allowed) << ", others allowed=" << ccap_string(all_allowed) << " on " << *in << dendl; } else { - dout(7) << "issue_caps allowed=" << ccap_string(all_allowed) + dout(7) << "get_allowed_caps allowed=" << ccap_string(all_allowed) << ", xlocker allowed=" << ccap_string(xlocker_allowed) << " on " << *in << dendl; } - ceph_assert(in->is_head()); + // do not issue _new_ bits when size|mtime is projected + int allowed; + if (loner == client) + allowed = loner_allowed; + else + allowed = all_allowed; + // add in any xlocker-only caps (for locks this client is the xlocker for) + allowed |= xlocker_allowed & in->get_xlocker_mask(client); + if (in->is_dir()) { + allowed &= ~CEPH_CAP_ANY_DIR_OPS; + if (allowed & CEPH_CAP_FILE_EXCL) + allowed |= cap->get_lock_cache_allowed(); + } + + if ((in->get_inode()->inline_data.version != CEPH_INLINE_NONE && + cap->is_noinline()) || + (!in->get_inode()->layout.pool_ns.empty() && + cap->is_nopoolns())) + allowed &= ~(CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR); + + return allowed; +} + +int Locker::issue_caps(CInode *in, Capability *only_cap) +{ // count conflicts with - int nissued = 0; + int nissued = 0; + int all_allowed = -1, loner_allowed = -1, xlocker_allowed = -1; + + ceph_assert(in->is_head()); // client caps map::iterator it; @@ -2302,28 +2339,8 @@ int Locker::issue_caps(CInode *in, Capability *only_cap) it = in->client_caps.begin(); for (; it != in->client_caps.end(); ++it) { Capability *cap = &it->second; - - // do not issue _new_ bits when size|mtime is projected - int allowed; - if (loner == it->first) - allowed = loner_allowed; - else - allowed = all_allowed; - - // add in any xlocker-only caps (for locks this client is the xlocker for) - allowed |= xlocker_allowed & in->get_xlocker_mask(it->first); - if (in->is_dir()) { - allowed &= ~CEPH_CAP_ANY_DIR_OPS; - if (allowed & CEPH_CAP_FILE_EXCL) - allowed |= 
cap->get_lock_cache_allowed(); - } - - if ((in->get_inode()->inline_data.version != CEPH_INLINE_NONE && - cap->is_noinline()) || - (!in->get_inode()->layout.pool_ns.empty() && - cap->is_nopoolns())) - allowed &= ~(CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR); - + int allowed = get_allowed_caps(in, cap, all_allowed, loner_allowed, + xlocker_allowed); int pending = cap->pending(); int wanted = cap->wanted(); @@ -2934,18 +2951,18 @@ void Locker::share_inode_max_size(CInode *in, Capability *only_cap) } } -bool Locker::_need_flush_mdlog(CInode *in, int wanted) +bool Locker::_need_flush_mdlog(CInode *in, int wanted, bool lock_state_any) { /* flush log if caps are wanted by client but corresponding lock is unstable and locked by * pending mutations. */ if (((wanted & (CEPH_CAP_FILE_RD|CEPH_CAP_FILE_WR|CEPH_CAP_FILE_SHARED|CEPH_CAP_FILE_EXCL)) && - in->filelock.is_unstable_and_locked()) || + (lock_state_any ? in->filelock.is_locked() : in->filelock.is_unstable_and_locked())) || ((wanted & (CEPH_CAP_AUTH_SHARED|CEPH_CAP_AUTH_EXCL)) && - in->authlock.is_unstable_and_locked()) || + (lock_state_any ? in->authlock.is_locked() : in->authlock.is_unstable_and_locked())) || ((wanted & (CEPH_CAP_LINK_SHARED|CEPH_CAP_LINK_EXCL)) && - in->linklock.is_unstable_and_locked()) || + (lock_state_any ? in->linklock.is_locked() : in->linklock.is_unstable_and_locked())) || ((wanted & (CEPH_CAP_XATTR_SHARED|CEPH_CAP_XATTR_EXCL)) && - in->xattrlock.is_unstable_and_locked())) + (lock_state_any ? 
in->xattrlock.is_locked() : in->xattrlock.is_unstable_and_locked()))) return true; return false; } @@ -4231,8 +4248,8 @@ void Locker::handle_client_lease(const cref_t &m) } -void Locker::issue_client_lease(CDentry *dn, MDRequestRef &mdr, int mask, - utime_t now, bufferlist &bl) +void Locker::issue_client_lease(CDentry *dn, CInode *in, MDRequestRef &mdr, utime_t now, + bufferlist &bl) { client_t client = mdr->get_client(); Session *session = mdr->session; @@ -4243,6 +4260,17 @@ void Locker::issue_client_lease(CDentry *dn, MDRequestRef &mdr, int mask, !diri->is_stray() && // do not issue dn leases in stray dir! !diri->filelock.can_lease(client) && !(diri->get_client_cap_pending(client) & (CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL))) { + int mask = 0; + CDentry::linkage_t *dnl = dn->get_linkage(client, mdr); + if (dnl->is_primary()) { + ceph_assert(dnl->get_inode() == in); + mask = CEPH_LEASE_PRIMARY_LINK; + } else { + if (dnl->is_remote()) + ceph_assert(dnl->get_remote_ino() == in->ino()); + else + ceph_assert(!in); + } // issue a dentry lease ClientLease *l = dn->add_client_lease(client, session); session->touch_lease(l); @@ -4262,7 +4290,7 @@ void Locker::issue_client_lease(CDentry *dn, MDRequestRef &mdr, int mask, } else { // null lease LeaseStat lstat; - lstat.mask = mask; + lstat.mask = 0; lstat.alternate_name = std::string(dn->alternate_name); encode_lease(bl, session->info, lstat); dout(20) << "issue_client_lease no/null lease on " << *dn << dendl; @@ -4669,7 +4697,6 @@ bool Locker::simple_sync(SimpleLock *lock, bool *need_issue) if (!gather && lock->is_dirty()) { lock->get_parent()->auth_pin(lock); scatter_writebehind(static_cast(lock)); - mds->mdlog->flush(); return false; } @@ -4828,7 +4855,6 @@ void Locker::simple_lock(SimpleLock *lock, bool *need_issue) if (!gather && lock->is_dirty()) { lock->get_parent()->auth_pin(lock); scatter_writebehind(static_cast(lock)); - mds->mdlog->flush(); return; } @@ -4970,6 +4996,7 @@ void 
Locker::scatter_writebehind(ScatterLock *lock) in->finish_scatter_gather_update_accounted(lock->get_type(), &le->metablob); mds->mdlog->submit_entry(le, new C_Locker_ScatterWB(this, lock, mut)); + mds->mdlog->flush(); } void Locker::scatter_writebehind_finish(ScatterLock *lock, MutationRef& mut) diff --git a/ceph/src/mds/Locker.h b/ceph/src/mds/Locker.h index 9cc0b80ef..3aff8db0b 100644 --- a/ceph/src/mds/Locker.h +++ b/ceph/src/mds/Locker.h @@ -164,6 +164,8 @@ public: // -- file i/o -- version_t issue_file_data_version(CInode *in); Capability* issue_new_caps(CInode *in, int mode, MDRequestRef& mdr, SnapRealm *conrealm); + int get_allowed_caps(CInode *in, Capability *cap, int &all_allowed, + int &loner_allowed, int &xlocker_allowed); int issue_caps(CInode *in, Capability *only_cap=0); void issue_caps_set(std::set& inset); void issue_truncate(CInode *in); @@ -185,7 +187,7 @@ public: // -- client leases -- void handle_client_lease(const cref_t &m); - void issue_client_lease(CDentry *dn, MDRequestRef &mdr, int mask, utime_t now, bufferlist &bl); + void issue_client_lease(CDentry *dn, CInode *in, MDRequestRef &mdr, utime_t now, bufferlist &bl); void revoke_client_leases(SimpleLock *lock); static void encode_lease(bufferlist& bl, const session_info_t& info, const LeaseStat& ls); @@ -212,7 +214,7 @@ protected: void scatter_writebehind_finish(ScatterLock *lock, MutationRef& mut); - bool _need_flush_mdlog(CInode *in, int wanted_caps); + bool _need_flush_mdlog(CInode *in, int wanted_caps, bool lock_state_any=false); void adjust_cap_wanted(Capability *cap, int wanted, int issue_seq); void handle_client_caps(const cref_t &m); void _update_cap_fields(CInode *in, int dirty, const cref_t &m, CInode::mempool_inode *pi); diff --git a/ceph/src/mds/MDCache.cc b/ceph/src/mds/MDCache.cc index 8696c6d92..387cd9b47 100644 --- a/ceph/src/mds/MDCache.cc +++ b/ceph/src/mds/MDCache.cc @@ -6324,6 +6324,11 @@ void MDCache::identify_files_to_recover() { dout(10) << "identify_files_to_recover" 
<< dendl; int count = 0; + + // Clear the recover and check queues in case the monitor sends rejoin mdsmap twice. + rejoin_recover_q.clear(); + rejoin_check_q.clear(); + for (auto &p : inode_map) { CInode *in = p.second; if (!in->is_auth()) @@ -7888,7 +7893,7 @@ bool MDCache::shutdown_pass() // (only do this once!) if (!mds->mdlog->is_capped()) { - dout(7) << "capping the log" << dendl; + dout(7) << "capping the mdlog" << dendl; mds->mdlog->cap(); } diff --git a/ceph/src/mds/MDLog.cc b/ceph/src/mds/MDLog.cc index b5fcf043d..eea60b48a 100644 --- a/ceph/src/mds/MDLog.cc +++ b/ceph/src/mds/MDLog.cc @@ -281,7 +281,7 @@ void MDLog::_submit_entry(LogEvent *le, MDSLogContextBase *c) { ceph_assert(ceph_mutex_is_locked_by_me(submit_mutex)); ceph_assert(!mds->is_any_replay()); - ceph_assert(!capped); + ceph_assert(!mds_is_shutting_down); ceph_assert(le == cur_event); cur_event = NULL; @@ -482,9 +482,9 @@ void MDLog::kick_submitter() } void MDLog::cap() -{ - dout(5) << "cap" << dendl; - capped = true; +{ + dout(5) << "mark mds is shutting down" << dendl; + mds_is_shutting_down = true; } void MDLog::shutdown() @@ -503,7 +503,7 @@ void MDLog::shutdown() mds->mds_lock.unlock(); // Because MDS::stopping is true, it's safe to drop mds_lock: nobody else // picking it up will do anything with it. 
- + submit_mutex.lock(); submit_cond.notify_all(); submit_mutex.unlock(); @@ -581,6 +581,26 @@ public: } }; +void MDLog::try_to_commit_open_file_table(uint64_t last_seq) +{ + ceph_assert(ceph_mutex_is_locked_by_me(submit_mutex)); + + if (mds_is_shutting_down) // shutting down the MDS + return; + + if (mds->mdcache->open_file_table.is_any_committing()) + return; + + // when there have dirty items, maybe there has no any new log event + if (mds->mdcache->open_file_table.is_any_dirty() || + last_seq > mds->mdcache->open_file_table.get_committed_log_seq()) { + submit_mutex.unlock(); + mds->mdcache->open_file_table.commit(new C_OFT_Committed(this, last_seq), + last_seq, CEPH_MSG_PRIO_HIGH); + submit_mutex.lock(); + } +} + void MDLog::trim(int m) { unsigned max_segments = g_conf()->mds_log_max_segments; @@ -684,17 +704,7 @@ void MDLog::trim(int m) } } - if (!capped && - !mds->mdcache->open_file_table.is_any_committing()) { - uint64_t last_seq = get_last_segment_seq(); - if (mds->mdcache->open_file_table.is_any_dirty() || - last_seq > mds->mdcache->open_file_table.get_committed_log_seq()) { - submit_mutex.unlock(); - mds->mdcache->open_file_table.commit(new C_OFT_Committed(this, last_seq), - last_seq, CEPH_MSG_PRIO_HIGH); - submit_mutex.lock(); - } - } + try_to_commit_open_file_table(get_last_segment_seq()); // discard expired segments and unlock submit_mutex _trim_expired_segments(); @@ -730,14 +740,7 @@ int MDLog::trim_all() uint64_t last_seq = 0; if (!segments.empty()) { last_seq = get_last_segment_seq(); - if (!capped && - !mds->mdcache->open_file_table.is_any_committing() && - last_seq > mds->mdcache->open_file_table.get_committing_log_seq()) { - submit_mutex.unlock(); - mds->mdcache->open_file_table.commit(new C_OFT_Committed(this, last_seq), - last_seq, CEPH_MSG_PRIO_DEFAULT); - submit_mutex.lock(); - } + try_to_commit_open_file_table(last_seq); } map::iterator p = segments.begin(); @@ -831,7 +834,7 @@ void MDLog::_trim_expired_segments() break; } - if (!capped && 
ls->seq >= oft_committed_seq) { + if (!mds_is_shutting_down && ls->seq >= oft_committed_seq) { dout(10) << "_trim_expired_segments open file table committedseq " << oft_committed_seq << " <= " << ls->seq << "/" << ls->offset << dendl; break; @@ -880,9 +883,9 @@ void MDLog::_expired(LogSegment *ls) dout(5) << "_expired segment " << ls->seq << "/" << ls->offset << ", " << ls->num_events << " events" << dendl; - if (!capped && ls == peek_current_segment()) { + if (!mds_is_shutting_down && ls == peek_current_segment()) { dout(5) << "_expired not expiring " << ls->seq << "/" << ls->offset - << ", last one and !capped" << dendl; + << ", last one and !mds_is_shutting_down" << dendl; } else { // expired. expired_segments.insert(ls); diff --git a/ceph/src/mds/MDLog.h b/ceph/src/mds/MDLog.h index b6b0be60f..6111f85a9 100644 --- a/ceph/src/mds/MDLog.h +++ b/ceph/src/mds/MDLog.h @@ -121,7 +121,7 @@ public: Journaler *get_journaler() { return journaler; } bool empty() const { return segments.empty(); } - bool is_capped() const { return capped; } + bool is_capped() const { return mds_is_shutting_down; } void cap(); void kick_submitter(); @@ -258,7 +258,7 @@ protected: int num_events = 0; // in events int unflushed = 0; - bool capped = false; + bool mds_is_shutting_down = false; // Log position which is persistent *and* for which // submit_entry wait_for_safe callbacks have already @@ -298,6 +298,8 @@ private: void _prepare_new_segment(); void _journal_segment_subtree_map(MDSContext *onsync); + void try_to_commit_open_file_table(uint64_t last_seq); + void try_expire(LogSegment *ls, int op_prio); void _maybe_expired(LogSegment *ls, int op_prio); void _expired(LogSegment *ls); diff --git a/ceph/src/mds/MDSDaemon.cc b/ceph/src/mds/MDSDaemon.cc index f1c2f6694..fef8a18e0 100644 --- a/ceph/src/mds/MDSDaemon.cc +++ b/ceph/src/mds/MDSDaemon.cc @@ -183,7 +183,9 @@ void MDSDaemon::asok_command( if (cmd_getval(cmdmap, "value", value)) { heapcmd_vec.push_back(value); } - 
ceph_heap_profiler_handle_command(heapcmd_vec, ss); + std::stringstream outss; + ceph_heap_profiler_handle_command(heapcmd_vec, outss); + outbl.append(outss); r = 0; } } else if (command == "cpu_profiler") { diff --git a/ceph/src/mds/MDSMap.cc b/ceph/src/mds/MDSMap.cc index 3a417b02a..b54ad444d 100644 --- a/ceph/src/mds/MDSMap.cc +++ b/ceph/src/mds/MDSMap.cc @@ -999,28 +999,44 @@ MDSMap::availability_t MDSMap::is_cluster_available() const bool MDSMap::state_transition_valid(DaemonState prev, DaemonState next) { - bool state_valid = true; - if (next != prev) { - if (prev == MDSMap::STATE_REPLAY) { - if (next != MDSMap::STATE_RESOLVE && next != MDSMap::STATE_RECONNECT) { - state_valid = false; - } - } else if (prev == MDSMap::STATE_REJOIN) { - if (next != MDSMap::STATE_ACTIVE && - next != MDSMap::STATE_CLIENTREPLAY && - next != MDSMap::STATE_STOPPED) { - state_valid = false; - } - } else if (prev >= MDSMap::STATE_RESOLVE && prev < MDSMap::STATE_ACTIVE) { - // Once I have entered replay, the only allowable transitions are to - // the next next along in the sequence. 
- if (next != prev + 1) { - state_valid = false; - } + if (next == prev) + return true; + if (next == MDSMap::STATE_DAMAGED) + return true; + + if (prev == MDSMap::STATE_BOOT) { + return next == MDSMap::STATE_STANDBY; + } else if (prev == MDSMap::STATE_STANDBY) { + return next == MDSMap::STATE_STANDBY_REPLAY || + next == MDSMap::STATE_REPLAY || + next == MDSMap::STATE_CREATING || + next == MDSMap::STATE_STARTING; + } else if (prev == MDSMap::STATE_CREATING || prev == MDSMap::STATE_STARTING) { + return next == MDSMap::STATE_ACTIVE; + } else if (prev == MDSMap::STATE_STANDBY_REPLAY) { + return next == MDSMap::STATE_REPLAY; + } else if (prev == MDSMap::STATE_REPLAY) { + return next == MDSMap::STATE_RESOLVE || + next == MDSMap::STATE_RECONNECT; + } else if (prev >= MDSMap::STATE_RESOLVE && prev < MDSMap::STATE_ACTIVE) { + // Once I have entered replay, the only allowable transitions are to + // the next next along in the sequence. + // Except... + if (prev == MDSMap::STATE_REJOIN && + (next == MDSMap::STATE_ACTIVE || // No need to do client replay + next == MDSMap::STATE_STOPPED)) { // no subtrees + return true; } + return next == prev + 1; + } else if (prev == MDSMap::STATE_ACTIVE) { + return next == MDSMap::STATE_STOPPING; + } else if (prev == MDSMap::STATE_STOPPING) { + return next == MDSMap::STATE_STOPPED; + } else { + derr << __func__ << ": Unknown prev state " + << ceph_mds_state_name(prev) << "(" << prev << ")" << dendl; + return false; } - - return state_valid; } bool MDSMap::check_health(mds_rank_t standby_daemon_count) diff --git a/ceph/src/mds/MDSMap.h b/ceph/src/mds/MDSMap.h index b4f514b94..3c630ce9c 100644 --- a/ceph/src/mds/MDSMap.h +++ b/ceph/src/mds/MDSMap.h @@ -56,9 +56,8 @@ class MDSMap { public: /* These states are the union of the set of possible states of an MDS daemon, * and the set of possible states of an MDS rank. 
See - * doc/cephfs/mds-states.rst for state descriptions, - * doc/cephfs/mds-state-diagram.svg for a visual state diagram, and - * doc/cephfs/mds-state-diagram.dot to update mds-state-diagram.svg. + * doc/cephfs/mds-states.rst for state descriptions and a visual state diagram, and + * doc/cephfs/mds-state-diagram.dot to update the diagram. */ typedef enum { // States of an MDS daemon not currently holding a rank @@ -66,10 +65,10 @@ public: STATE_NULL = CEPH_MDS_STATE_NULL, // null value for fns returning this type. STATE_BOOT = CEPH_MDS_STATE_BOOT, // up, boot announcement. destiny unknown. STATE_STANDBY = CEPH_MDS_STATE_STANDBY, // up, idle. waiting for assignment by monitor. - STATE_STANDBY_REPLAY = CEPH_MDS_STATE_STANDBY_REPLAY, // up, replaying active node, ready to take over. // States of an MDS rank, and of any MDS daemon holding that rank // ============================================================== + STATE_STANDBY_REPLAY = CEPH_MDS_STATE_STANDBY_REPLAY, // up, replaying active node, ready to take over and not serving clients. Note: Up to two MDS hold the rank being replayed. STATE_STOPPED = CEPH_MDS_STATE_STOPPED, // down, once existed, but no subtrees. empty log. may not be held by a daemon. STATE_CREATING = CEPH_MDS_STATE_CREATING, // up, creating MDS instance (new journal, idalloc..). 
diff --git a/ceph/src/mds/MDSPerfMetricTypes.h b/ceph/src/mds/MDSPerfMetricTypes.h index 6bf64e918..78b838c89 100644 --- a/ceph/src/mds/MDSPerfMetricTypes.h +++ b/ceph/src/mds/MDSPerfMetricTypes.h @@ -39,66 +39,102 @@ struct CapHitMetric { struct ReadLatencyMetric { utime_t lat; + utime_t mean; + uint64_t sq_sum; + uint64_t count; bool updated = false; DENC(ReadLatencyMetric, v, p) { - DENC_START(2, 1, p); + DENC_START(3, 1, p); denc(v.lat, p); if (struct_v >= 2) denc(v.updated, p); + if (struct_v >= 3) { + denc(v.mean, p); + denc(v.sq_sum, p); + denc(v.count, p); + } DENC_FINISH(p); } void dump(Formatter *f) const { f->dump_object("read_latency", lat); + f->dump_object("avg_read_alatency", mean); + f->dump_unsigned("sq_sum", sq_sum); + f->dump_unsigned("count", count); } friend std::ostream& operator<<(std::ostream& os, const ReadLatencyMetric &metric) { - os << "{latency=" << metric.lat << "}"; + os << "{latency=" << metric.lat << ", avg_latency=" << metric.mean + << ", sq_sum=" << metric.sq_sum << ", count=" << metric.count << "}"; return os; } }; struct WriteLatencyMetric { utime_t lat; + utime_t mean; + uint64_t sq_sum; + uint64_t count; bool updated = false; DENC(WriteLatencyMetric, v, p) { - DENC_START(2, 1, p); + DENC_START(3, 1, p); denc(v.lat, p); if (struct_v >= 2) denc(v.updated, p); + if (struct_v >= 3) { + denc(v.mean, p); + denc(v.sq_sum, p); + denc(v.count, p); + } DENC_FINISH(p); } void dump(Formatter *f) const { f->dump_object("write_latency", lat); + f->dump_object("avg_write_alatency", mean); + f->dump_unsigned("sq_sum", sq_sum); + f->dump_unsigned("count", count); } friend std::ostream& operator<<(std::ostream& os, const WriteLatencyMetric &metric) { - os << "{latency=" << metric.lat << "}"; + os << "{latency=" << metric.lat << ", avg_latency=" << metric.mean + << ", sq_sum=" << metric.sq_sum << ", count=" << metric.count << "}"; return os; } }; struct MetadataLatencyMetric { utime_t lat; + utime_t mean; + uint64_t sq_sum; + uint64_t count; 
bool updated = false; DENC(MetadataLatencyMetric, v, p) { - DENC_START(2, 1, p); + DENC_START(3, 1, p); denc(v.lat, p); if (struct_v >= 2) denc(v.updated, p); + if (struct_v >= 3) { + denc(v.mean, p); + denc(v.sq_sum, p); + denc(v.count, p); + } DENC_FINISH(p); } void dump(Formatter *f) const { f->dump_object("metadata_latency", lat); + f->dump_object("avg_metadata_alatency", mean); + f->dump_unsigned("sq_sum", sq_sum); + f->dump_unsigned("count", count); } friend std::ostream& operator<<(std::ostream& os, const MetadataLatencyMetric &metric) { - os << "{latency=" << metric.lat << "}"; + os << "{latency=" << metric.lat << ", avg_latency=" << metric.mean + << ", sq_sum=" << metric.sq_sum << ", count=" << metric.count << "}"; return os; } }; diff --git a/ceph/src/mds/MDSRank.cc b/ceph/src/mds/MDSRank.cc index 1199a6539..014ee0631 100644 --- a/ceph/src/mds/MDSRank.cc +++ b/ceph/src/mds/MDSRank.cc @@ -2238,8 +2238,15 @@ void MDSRankDispatcher::handle_mds_map( // I am only to be passed MDSMaps in which I hold a rank ceph_assert(whoami != MDS_RANK_NONE); - MDSMap::DaemonState oldstate = state; mds_gid_t mds_gid = mds_gid_t(monc->get_global_id()); + MDSMap::DaemonState oldstate = oldmap.get_state_gid(mds_gid); + if (oldstate == MDSMap::STATE_NULL) { + // monitor may skip sending me the STANDBY map (e.g. if paxos_propose_interval is high) + // Assuming I have passed STANDBY state if I got a rank in the first map. + oldstate = MDSMap::STATE_STANDBY; + } + // I should not miss map update + ceph_assert(state == oldstate); state = mdsmap->get_state_gid(mds_gid); if (state != oldstate) { last_state = oldstate; @@ -3506,7 +3513,7 @@ void MDSRankDispatcher::handle_osd_map() // reconnect state will journal blocklisted clients (journal // is opened for writing in `replay_done` before moving to // up:resolve). 
- if (!is_replay()) { + if (!is_any_replay()) { std::set newly_blocklisted; objecter->consume_blocklist_events(&newly_blocklisted); auto epoch = objecter->with_osdmap([](const OSDMap &o){return o.get_epoch();}); diff --git a/ceph/src/mds/MDSRank.h b/ceph/src/mds/MDSRank.h index c08970710..d0e01f229 100644 --- a/ceph/src/mds/MDSRank.h +++ b/ceph/src/mds/MDSRank.h @@ -413,7 +413,7 @@ class MDSRank { // The last different state I held before current MDSMap::DaemonState last_state = MDSMap::STATE_BOOT; // The state assigned to me by the MDSMap - MDSMap::DaemonState state = MDSMap::STATE_BOOT; + MDSMap::DaemonState state = MDSMap::STATE_STANDBY; bool cluster_degraded = false; diff --git a/ceph/src/mds/MetricAggregator.cc b/ceph/src/mds/MetricAggregator.cc index 046e79269..6487084fb 100644 --- a/ceph/src/mds/MetricAggregator.cc +++ b/ceph/src/mds/MetricAggregator.cc @@ -168,6 +168,42 @@ void MetricAggregator::refresh_metrics_for_rank(const entity_inst_t &client, c->second = metrics.write_io_sizes_metric.total_size; } break; + case MDSPerformanceCounterType::AVG_READ_LATENCY_METRIC: + if (metrics.read_latency_metric.updated) { + c->first = metrics.read_latency_metric.mean.tv.tv_sec; + c->second = metrics.read_latency_metric.mean.tv.tv_nsec; + } + break; + case MDSPerformanceCounterType::STDEV_READ_LATENCY_METRIC: + if (metrics.read_latency_metric.updated) { + c->first = metrics.read_latency_metric.sq_sum; + c->second = metrics.read_latency_metric.count; + } + break; + case MDSPerformanceCounterType::AVG_WRITE_LATENCY_METRIC: + if (metrics.write_latency_metric.updated) { + c->first = metrics.write_latency_metric.mean.tv.tv_sec; + c->second = metrics.write_latency_metric.mean.tv.tv_nsec; + } + break; + case MDSPerformanceCounterType::STDEV_WRITE_LATENCY_METRIC: + if (metrics.write_latency_metric.updated) { + c->first = metrics.write_latency_metric.sq_sum; + c->second = metrics.write_latency_metric.count; + } + break; + case 
MDSPerformanceCounterType::AVG_METADATA_LATENCY_METRIC: + if (metrics.metadata_latency_metric.updated) { + c->first = metrics.metadata_latency_metric.mean.tv.tv_sec; + c->second = metrics.metadata_latency_metric.mean.tv.tv_nsec; + } + break; + case MDSPerformanceCounterType::STDEV_METADATA_LATENCY_METRIC: + if (metrics.metadata_latency_metric.updated) { + c->first = metrics.metadata_latency_metric.sq_sum; + c->second = metrics.metadata_latency_metric.count; + } + break; default: ceph_abort_msg("unknown counter type"); } diff --git a/ceph/src/mds/MetricsHandler.cc b/ceph/src/mds/MetricsHandler.cc index 3fcaaaec1..b28b06b7a 100644 --- a/ceph/src/mds/MetricsHandler.cc +++ b/ceph/src/mds/MetricsHandler.cc @@ -166,7 +166,9 @@ void MetricsHandler::handle_payload(Session *session, const CapInfoPayload &payl void MetricsHandler::handle_payload(Session *session, const ReadLatencyPayload &payload) { dout(20) << ": type=" << payload.get_type() - << ", session=" << session << ", latency=" << payload.lat << dendl; + << ", session=" << session << ", latency=" << payload.lat + << ", avg=" << payload.mean << ", sq_sum=" << payload.sq_sum + << ", count=" << payload.count << dendl; auto it = client_metrics_map.find(session->info.inst); if (it == client_metrics_map.end()) { @@ -176,12 +178,17 @@ void MetricsHandler::handle_payload(Session *session, const ReadLatencyPayload & auto &metrics = it->second.second; metrics.update_type = UPDATE_TYPE_REFRESH; metrics.read_latency_metric.lat = payload.lat; + metrics.read_latency_metric.mean = payload.mean; + metrics.read_latency_metric.sq_sum = payload.sq_sum; + metrics.read_latency_metric.count = payload.count; metrics.read_latency_metric.updated = true; } void MetricsHandler::handle_payload(Session *session, const WriteLatencyPayload &payload) { dout(20) << ": type=" << payload.get_type() - << ", session=" << session << ", latency=" << payload.lat << dendl; + << ", session=" << session << ", latency=" << payload.lat + << ", avg=" << 
payload.mean << ", sq_sum=" << payload.sq_sum + << ", count=" << payload.count << dendl; auto it = client_metrics_map.find(session->info.inst); if (it == client_metrics_map.end()) { @@ -191,12 +198,17 @@ void MetricsHandler::handle_payload(Session *session, const WriteLatencyPayload auto &metrics = it->second.second; metrics.update_type = UPDATE_TYPE_REFRESH; metrics.write_latency_metric.lat = payload.lat; + metrics.write_latency_metric.mean = payload.mean; + metrics.write_latency_metric.sq_sum = payload.sq_sum; + metrics.write_latency_metric.count = payload.count; metrics.write_latency_metric.updated = true; } void MetricsHandler::handle_payload(Session *session, const MetadataLatencyPayload &payload) { dout(20) << ": type=" << payload.get_type() - << ", session=" << session << ", latency=" << payload.lat << dendl; + << ", session=" << session << ", latency=" << payload.lat + << ", avg=" << payload.mean << ", sq_sum=" << payload.sq_sum + << ", count=" << payload.count << dendl; auto it = client_metrics_map.find(session->info.inst); if (it == client_metrics_map.end()) { @@ -206,6 +218,9 @@ void MetricsHandler::handle_payload(Session *session, const MetadataLatencyPaylo auto &metrics = it->second.second; metrics.update_type = UPDATE_TYPE_REFRESH; metrics.metadata_latency_metric.lat = payload.lat; + metrics.metadata_latency_metric.mean = payload.mean; + metrics.metadata_latency_metric.sq_sum = payload.sq_sum; + metrics.metadata_latency_metric.count = payload.count; metrics.metadata_latency_metric.updated = true; } diff --git a/ceph/src/mds/OpenFileTable.cc b/ceph/src/mds/OpenFileTable.cc index 1d52e4b99..784baa56c 100644 --- a/ceph/src/mds/OpenFileTable.cc +++ b/ceph/src/mds/OpenFileTable.cc @@ -271,13 +271,14 @@ public: void OpenFileTable::_commit_finish(int r, uint64_t log_seq, MDSContext *fin) { - dout(10) << __func__ << " log_seq " << log_seq << dendl; + dout(10) << __func__ << " log_seq " << log_seq << " committed_log_seq " << committed_log_seq + << " 
committing_log_seq " << committing_log_seq << dendl; if (r < 0) { mds->handle_write_error(r); return; } - ceph_assert(log_seq <= committing_log_seq); + ceph_assert(log_seq == committing_log_seq); ceph_assert(log_seq >= committed_log_seq); committed_log_seq = log_seq; num_pending_commit--; @@ -336,7 +337,8 @@ void OpenFileTable::_journal_finish(int r, uint64_t log_seq, MDSContext *c, void OpenFileTable::commit(MDSContext *c, uint64_t log_seq, int op_prio) { - dout(10) << __func__ << " log_seq " << log_seq << dendl; + dout(10) << __func__ << " log_seq " << log_seq << " committing_log_seq:" + << committing_log_seq << dendl; ceph_assert(num_pending_commit == 0); num_pending_commit++; @@ -1059,6 +1061,12 @@ void OpenFileTable::_prefetch_dirfrags() CInode *diri = mdcache->get_inode(ino); if (!diri) continue; + + if (!diri->is_dir()) { + dout(10) << " " << *diri << " is not dir" << dendl; + continue; + } + if (diri->state_test(CInode::STATE_REJOINUNDEF)) continue; diff --git a/ceph/src/mds/OpenFileTable.h b/ceph/src/mds/OpenFileTable.h index d3934f753..1f91c2020 100644 --- a/ceph/src/mds/OpenFileTable.h +++ b/ceph/src/mds/OpenFileTable.h @@ -42,7 +42,6 @@ public: void commit(MDSContext *c, uint64_t log_seq, int op_prio); uint64_t get_committed_log_seq() const { return committed_log_seq; } - uint64_t get_committing_log_seq() const { return committing_log_seq; } bool is_any_committing() const { return num_pending_commit > 0; } void load(MDSContext *c); diff --git a/ceph/src/mds/Server.cc b/ceph/src/mds/Server.cc index b6e4e960b..b01eb92b3 100644 --- a/ceph/src/mds/Server.cc +++ b/ceph/src/mds/Server.cc @@ -2312,20 +2312,7 @@ void Server::set_trace_dist(const ref_t &reply, dout(20) << "set_trace_dist added dir " << *dir << dendl; encode(dn->get_name(), bl); - - int lease_mask = 0; - CDentry::linkage_t *dnl = dn->get_linkage(mdr->get_client(), mdr); - if (dnl->is_primary()) { - ceph_assert(dnl->get_inode() == in); - lease_mask = CEPH_LEASE_PRIMARY_LINK; - } else { - if 
(dnl->is_remote()) - ceph_assert(dnl->get_remote_ino() == in->ino()); - else - ceph_assert(!in); - } - mds->locker->issue_client_lease(dn, mdr, lease_mask, now, bl); - dout(20) << "set_trace_dist added dn " << snapid << " " << *dn << dendl; + mds->locker->issue_client_lease(dn, in, mdr, now, bl); } else reply->head.is_dentry = 0; @@ -3352,18 +3339,20 @@ CInode* Server::prepare_new_inode(MDRequestRef& mdr, CDir *dir, inodeno_t useino _inode->truncate_seq = 1; /* starting with 1, 0 is kept for no-truncation logic */ CInode *diri = dir->get_inode(); + auto pip = diri->get_projected_inode(); - dout(10) << oct << " dir mode 0" << diri->get_inode()->mode << " new mode 0" << mode << dec << dendl; + dout(10) << oct << " dir mode 0" << pip->mode << " new mode 0" << mode << dec << dendl; - if (diri->get_inode()->mode & S_ISGID) { + if (pip->mode & S_ISGID) { dout(10) << " dir is sticky" << dendl; - _inode->gid = diri->get_inode()->gid; + _inode->gid = pip->gid; if (S_ISDIR(mode)) { - dout(10) << " new dir also sticky" << dendl; + dout(10) << " new dir also sticky" << dendl; _inode->mode |= S_ISGID; } - } else + } else { _inode->gid = mdr->client_request->get_caller_gid(); + } _inode->uid = mdr->client_request->get_caller_uid(); @@ -3432,15 +3421,68 @@ void Server::apply_allocated_inos(MDRequestRef& mdr, Session *session) } } +struct C_MDS_TryOpenInode : public ServerContext { + MDRequestRef mdr; + inodeno_t ino; + C_MDS_TryOpenInode(Server *s, MDRequestRef& r, inodeno_t i) : + ServerContext(s), mdr(r), ino(i) {} + void finish(int r) override { + server->_try_open_ino(mdr, r, ino); + } +}; + +void Server::_try_open_ino(MDRequestRef& mdr, int r, inodeno_t ino) +{ + dout(10) << "_try_open_ino " << mdr.get() << " ino " << ino << " r=" << r << dendl; + + // `r` is a rank if >=0, else an error code + if (r >= 0) { + mds_rank_t dest_rank(r); + if (dest_rank == mds->get_nodeid()) + dispatch_client_request(mdr); + else + mdcache->request_forward(mdr, dest_rank); + return; + } + + // 
give up + if (r == -CEPHFS_ENOENT || r == -CEPHFS_ENODATA) + r = -CEPHFS_ESTALE; + respond_to_request(mdr, r); +} + class C_MDS_TryFindInode : public ServerContext { MDRequestRef mdr; + MDCache *mdcache; + inodeno_t ino; public: - C_MDS_TryFindInode(Server *s, MDRequestRef& r) : ServerContext(s), mdr(r) {} + C_MDS_TryFindInode(Server *s, MDRequestRef& r, MDCache *m, inodeno_t i) : + ServerContext(s), mdr(r), mdcache(m), ino(i) {} void finish(int r) override { - if (r == -CEPHFS_ESTALE) // :( find_ino_peers failed - server->respond_to_request(mdr, r); - else + if (r == -CEPHFS_ESTALE) { // :( find_ino_peers failed + /* + * There has one case that when the MDS crashes and the + * openfiletable journal couldn't be flushed and then + * the replacing MDS is possibly won't load some already + * opened CInodes into the MDCache. And if the clients + * will retry some requests after reconnected, the MDS + * will return -ESTALE after failing to find the ino in + * all active peers. + * + * As a workaround users can run `ls -R ${mountpoint}` + * to list all the sub-files or sub-direcotries from the + * mountpoint. + * + * We need try to open the ino and try it again. 
+ */ + CInode *in = mdcache->get_inode(ino); + if (in && in->state_test(CInode::STATE_PURGING)) + server->respond_to_request(mdr, r); + else + mdcache->open_ino(ino, (int64_t)-1, new C_MDS_TryOpenInode(server, mdr, ino)); + } else { server->dispatch_client_request(mdr); + } } }; @@ -3480,8 +3522,8 @@ CInode* Server::rdlock_path_pin_ref(MDRequestRef& mdr, respond_to_request(mdr, r); } else if (r == -CEPHFS_ESTALE) { dout(10) << "FAIL on CEPHFS_ESTALE but attempting recovery" << dendl; - MDSContext *c = new C_MDS_TryFindInode(this, mdr); - mdcache->find_ino_peers(refpath.get_ino(), c); + inodeno_t ino = refpath.get_ino(); + mdcache->find_ino_peers(ino, new C_MDS_TryFindInode(this, mdr, mdcache, ino)); } else { dout(10) << "FAIL on error " << r << dendl; respond_to_request(mdr, r); @@ -3566,7 +3608,8 @@ CDentry* Server::rdlock_path_xlock_dentry(MDRequestRef& mdr, if (r < 0) { if (r == -CEPHFS_ESTALE) { dout(10) << "FAIL on CEPHFS_ESTALE but attempting recovery" << dendl; - mdcache->find_ino_peers(refpath.get_ino(), new C_MDS_TryFindInode(this, mdr)); + inodeno_t ino = refpath.get_ino(); + mdcache->find_ino_peers(ino, new C_MDS_TryFindInode(this, mdr, mdcache, ino)); return nullptr; } respond_to_request(mdr, r); @@ -3651,7 +3694,8 @@ Server::rdlock_two_paths_xlock_destdn(MDRequestRef& mdr, bool xlock_srcdn) if (r != 0) { if (r == -CEPHFS_ESTALE) { dout(10) << "CEPHFS_ESTALE on path, attempting recovery" << dendl; - mdcache->find_ino_peers(refpath.get_ino(), new C_MDS_TryFindInode(this, mdr)); + inodeno_t ino = refpath.get_ino(); + mdcache->find_ino_peers(ino, new C_MDS_TryFindInode(this, mdr, mdcache, ino)); } else if (r < 0) { respond_to_request(mdr, r); } @@ -3663,7 +3707,8 @@ Server::rdlock_two_paths_xlock_destdn(MDRequestRef& mdr, bool xlock_srcdn) if (r != 0) { if (r == -CEPHFS_ESTALE) { dout(10) << "CEPHFS_ESTALE on path2, attempting recovery" << dendl; - mdcache->find_ino_peers(refpath2.get_ino(), new C_MDS_TryFindInode(this, mdr)); + inodeno_t ino = 
refpath2.get_ino(); + mdcache->find_ino_peers(ino, new C_MDS_TryFindInode(this, mdr, mdcache, ino)); } else if (r < 0) { respond_to_request(mdr, r); } @@ -4758,8 +4803,7 @@ void Server::handle_client_readdir(MDRequestRef& mdr) // dentry dout(12) << "including dn " << *dn << dendl; encode(dn->get_name(), dnbl); - int lease_mask = dnl->is_primary() ? CEPH_LEASE_PRIMARY_LINK : 0; - mds->locker->issue_client_lease(dn, mdr, lease_mask, now, dnbl); + mds->locker->issue_client_lease(dn, in, mdr, now, dnbl); // inode dout(12) << "including inode " << *in << dendl; @@ -6885,7 +6929,8 @@ void Server::handle_client_link(MDRequestRef& mdr) targeti = mdcache->get_inode(req->get_filepath2().get_ino()); if (!targeti) { dout(10) << "CEPHFS_ESTALE on path2, attempting recovery" << dendl; - mdcache->find_ino_peers(req->get_filepath2().get_ino(), new C_MDS_TryFindInode(this, mdr)); + inodeno_t ino = req->get_filepath2().get_ino(); + mdcache->find_ino_peers(ino, new C_MDS_TryFindInode(this, mdr, mdcache, ino)); return; } mdr->pin(targeti); diff --git a/ceph/src/mds/Server.h b/ceph/src/mds/Server.h index b16106ef5..33054bd06 100644 --- a/ceph/src/mds/Server.h +++ b/ceph/src/mds/Server.h @@ -187,6 +187,7 @@ public: void journal_allocated_inos(MDRequestRef& mdr, EMetaBlob *blob); void apply_allocated_inos(MDRequestRef& mdr, Session *session); + void _try_open_ino(MDRequestRef& mdr, int r, inodeno_t ino); CInode* rdlock_path_pin_ref(MDRequestRef& mdr, bool want_auth, bool no_want_auth=false); CDentry* rdlock_path_xlock_dentry(MDRequestRef& mdr, bool create, diff --git a/ceph/src/mds/SimpleLock.h b/ceph/src/mds/SimpleLock.h index 5c625b6af..725c4488c 100644 --- a/ceph/src/mds/SimpleLock.h +++ b/ceph/src/mds/SimpleLock.h @@ -256,8 +256,9 @@ public: return get_sm()->states[state].next == 0; } bool is_unstable_and_locked() const { - if (is_stable()) - return false; + return (!is_stable() && is_locked()); + } + bool is_locked() const { return is_rdlocked() || is_wrlocked() || is_xlocked(); } 
int get_next_state() { diff --git a/ceph/src/mds/cephfs_features.h b/ceph/src/mds/cephfs_features.h index e934914ba..85a636e9b 100644 --- a/ceph/src/mds/cephfs_features.h +++ b/ceph/src/mds/cephfs_features.h @@ -73,6 +73,12 @@ namespace ceph { CLIENT_METRIC_TYPE_OPENED_INODES, \ CLIENT_METRIC_TYPE_READ_IO_SIZES, \ CLIENT_METRIC_TYPE_WRITE_IO_SIZES, \ + CLIENT_METRIC_TYPE_AVG_READ_LATENCY, \ + CLIENT_METRIC_TYPE_STDEV_READ_LATENCY, \ + CLIENT_METRIC_TYPE_AVG_WRITE_LATENCY, \ + CLIENT_METRIC_TYPE_STDEV_WRITE_LATENCY, \ + CLIENT_METRIC_TYPE_AVG_METADATA_LATENCY, \ + CLIENT_METRIC_TYPE_STDEV_METADATA_LATENCY, \ } #define CEPHFS_FEATURES_MDS_SUPPORTED CEPHFS_FEATURES_ALL diff --git a/ceph/src/messages/MMDSBeacon.h b/ceph/src/messages/MMDSBeacon.h index 82a310d1e..f2fa150be 100644 --- a/ceph/src/messages/MMDSBeacon.h +++ b/ceph/src/messages/MMDSBeacon.h @@ -45,6 +45,7 @@ enum mds_metric_t { MDS_HEALTH_SLOW_REQUEST, MDS_HEALTH_CACHE_OVERSIZED, MDS_HEALTH_SLOW_METADATA_IO, + MDS_HEALTH_DUMMY, // not a real health warning, for testing }; inline const char *mds_metric_name(mds_metric_t m) @@ -62,6 +63,7 @@ inline const char *mds_metric_name(mds_metric_t m) case MDS_HEALTH_SLOW_REQUEST: return "MDS_SLOW_REQUEST"; case MDS_HEALTH_CACHE_OVERSIZED: return "MDS_CACHE_OVERSIZED"; case MDS_HEALTH_SLOW_METADATA_IO: return "MDS_SLOW_METADATA_IO"; + case MDS_HEALTH_DUMMY: return "MDS_DUMMY"; default: return "???"; } diff --git a/ceph/src/messages/MMgrUpdate.h b/ceph/src/messages/MMgrUpdate.h new file mode 100644 index 000000000..5e1f27b5f --- /dev/null +++ b/ceph/src/messages/MMgrUpdate.h @@ -0,0 +1,84 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2022 Prashant D + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. 
See file COPYING. + */ + + +#ifndef CEPH_MMGRUPDATE_H_ +#define CEPH_MMGRUPDATE_H_ + +#include "msg/Message.h" + +class MMgrUpdate : public Message { +private: + static constexpr int HEAD_VERSION = 2; + static constexpr int COMPAT_VERSION = 1; + +public: + + std::string daemon_name; + std::string service_name; // optional; otherwise infer from entity type + + std::map daemon_metadata; + std::map daemon_status; + + bool need_metadata_update = false; + + void decode_payload() override + { + using ceph::decode; + auto p = payload.cbegin(); + decode(daemon_name, p); + if (header.version >= 2) { + decode(service_name, p); + decode(need_metadata_update, p); + if (need_metadata_update) { + decode(daemon_metadata, p); + decode(daemon_status, p); + } + } + } + + void encode_payload(uint64_t features) override { + using ceph::encode; + encode(daemon_name, payload); + encode(service_name, payload); + encode(need_metadata_update, payload); + if (need_metadata_update) { + encode(daemon_metadata, payload); + encode(daemon_status, payload); + } + } + + std::string_view get_type_name() const override { return "mgrupdate"; } + void print(std::ostream& out) const override { + out << get_type_name() << "("; + if (service_name.length()) { + out << service_name; + } else { + out << ceph_entity_type_name(get_source().type()); + } + out << "." << daemon_name; + out << ")"; + } + +private: + MMgrUpdate() + : Message{MSG_MGR_UPDATE, HEAD_VERSION, COMPAT_VERSION} + {} + using RefCountedObject::put; + using RefCountedObject::get; + template + friend boost::intrusive_ptr ceph::make_message(Args&&... 
args); +}; + +#endif + diff --git a/ceph/src/mgr/BaseMgrModule.cc b/ceph/src/mgr/BaseMgrModule.cc index 2e894b031..ca441d5e5 100644 --- a/ceph/src/mgr/BaseMgrModule.cc +++ b/ceph/src/mgr/BaseMgrModule.cc @@ -1104,6 +1104,12 @@ ceph_add_mds_perf_query(BaseMgrModule *self, PyObject *args) {"opened_inodes", MDSPerformanceCounterType::OPENED_INODES_METRIC}, {"read_io_sizes", MDSPerformanceCounterType::READ_IO_SIZES_METRIC}, {"write_io_sizes", MDSPerformanceCounterType::WRITE_IO_SIZES_METRIC}, + {"avg_read_latency", MDSPerformanceCounterType::AVG_READ_LATENCY_METRIC}, + {"stdev_read_latency", MDSPerformanceCounterType::STDEV_READ_LATENCY_METRIC}, + {"avg_write_latency", MDSPerformanceCounterType::AVG_WRITE_LATENCY_METRIC}, + {"stdev_write_latency", MDSPerformanceCounterType::STDEV_WRITE_LATENCY_METRIC}, + {"avg_metadata_latency", MDSPerformanceCounterType::AVG_METADATA_LATENCY_METRIC}, + {"stdev_metadata_latency", MDSPerformanceCounterType::STDEV_METADATA_LATENCY_METRIC}, }; PyObject *py_query = nullptr; diff --git a/ceph/src/mgr/CMakeLists.txt b/ceph/src/mgr/CMakeLists.txt index a53d1d8c2..b11c40c42 100644 --- a/ceph/src/mgr/CMakeLists.txt +++ b/ceph/src/mgr/CMakeLists.txt @@ -34,6 +34,7 @@ if(WITH_MGR) mgr_commands.cc $) add_executable(ceph-mgr ${mgr_srcs}) + target_compile_definitions(ceph-mgr PRIVATE PY_SSIZE_T_CLEAN) if(WITH_LIBCEPHSQLITE) target_link_libraries(ceph-mgr cephsqlite SQLite3::SQLite3) endif() diff --git a/ceph/src/mgr/DaemonServer.cc b/ceph/src/mgr/DaemonServer.cc index fd493a8d9..1b4744cf6 100644 --- a/ceph/src/mgr/DaemonServer.cc +++ b/ceph/src/mgr/DaemonServer.cc @@ -27,6 +27,7 @@ #include "mon/MonCommand.h" #include "messages/MMgrOpen.h" +#include "messages/MMgrUpdate.h" #include "messages/MMgrClose.h" #include "messages/MMgrConfigure.h" #include "messages/MMonMgrReport.h" @@ -267,6 +268,8 @@ bool DaemonServer::ms_dispatch2(const ref_t& m) return handle_report(ref_cast(m)); case MSG_MGR_OPEN: return handle_open(ref_cast(m)); + case MSG_MGR_UPDATE: 
+ return handle_update(ref_cast(m)); case MSG_MGR_CLOSE: return handle_close(ref_cast(m)); case MSG_COMMAND: @@ -531,6 +534,49 @@ bool DaemonServer::handle_open(const ref_t& m) return true; } +bool DaemonServer::handle_update(const ref_t& m) +{ + DaemonKey key; + if (!m->service_name.empty()) { + key.type = m->service_name; + } else { + key.type = ceph_entity_type_name(m->get_connection()->get_peer_type()); + } + key.name = m->daemon_name; + + dout(10) << "from " << m->get_connection() << " " << key << dendl; + + if (m->get_connection()->get_peer_type() == entity_name_t::TYPE_CLIENT && + m->service_name.empty()) { + // Clients should not be sending us update request + dout(10) << "rejecting update request from non-daemon client " << m->daemon_name + << dendl; + clog->warn() << "rejecting report from non-daemon client " << m->daemon_name + << " at " << m->get_connection()->get_peer_addrs(); + m->get_connection()->mark_down(); + return true; + } + + + { + std::unique_lock locker(lock); + + DaemonStatePtr daemon; + // Look up the DaemonState + if (daemon_state.exists(key)) { + dout(20) << "updating existing DaemonState for " << key << dendl; + + daemon = daemon_state.get(key); + if (m->need_metadata_update && + !m->daemon_metadata.empty()) { + daemon_state.update_metadata(daemon, m->daemon_metadata); + } + } + } + + return true; +} + bool DaemonServer::handle_close(const ref_t& m) { std::lock_guard l(lock); @@ -2780,7 +2826,7 @@ void DaemonServer::adjust_pgs() dout(10) << "pool " << i.first << " pg_num " << p.get_pg_num() << " - pgp_num " << p.get_pgp_num() - << " gap > max_pg_num_change " << max_jump + << " gap >= max_pg_num_change " << max_jump << " - must scale pgp_num first" << dendl; } else { @@ -2937,13 +2983,19 @@ void DaemonServer::got_service_map() if (pending_service_map.epoch == 0) { // we just started up dout(10) << "got initial map e" << service_map.epoch << dendl; + ceph_assert(pending_service_map_dirty == 0); + pending_service_map = service_map; + 
pending_service_map.epoch = service_map.epoch + 1; + } else if (pending_service_map.epoch <= service_map.epoch) { + // we just started up but got one more not our own map + dout(10) << "got newer initial map e" << service_map.epoch << dendl; + ceph_assert(pending_service_map_dirty == 0); pending_service_map = service_map; pending_service_map.epoch = service_map.epoch + 1; } else { - // we we already active and therefore must have persisted it, + // we already active and therefore must have persisted it, // which means ours is the same or newer. dout(10) << "got updated map e" << service_map.epoch << dendl; - ceph_assert(pending_service_map.epoch > service_map.epoch); } }); diff --git a/ceph/src/mgr/DaemonServer.h b/ceph/src/mgr/DaemonServer.h index d719a96a7..ff9835680 100644 --- a/ceph/src/mgr/DaemonServer.h +++ b/ceph/src/mgr/DaemonServer.h @@ -36,6 +36,7 @@ class MMgrReport; class MMgrOpen; +class MMgrUpdate; class MMgrClose; class MMonMgrReport; class MCommand; @@ -275,6 +276,7 @@ public: void fetch_missing_metadata(const DaemonKey& key, const entity_addr_t& addr); bool handle_open(const ceph::ref_t& m); + bool handle_update(const ceph::ref_t& m); bool handle_close(const ceph::ref_t& m); bool handle_report(const ceph::ref_t& m); bool handle_command(const ceph::ref_t& m); diff --git a/ceph/src/mgr/MDSPerfMetricTypes.cc b/ceph/src/mgr/MDSPerfMetricTypes.cc index 5568cbe5d..a16003774 100644 --- a/ceph/src/mgr/MDSPerfMetricTypes.cc +++ b/ceph/src/mgr/MDSPerfMetricTypes.cc @@ -35,6 +35,12 @@ void MDSPerformanceCounterDescriptor::pack_counter( case MDSPerformanceCounterType::OPENED_INODES_METRIC: case MDSPerformanceCounterType::READ_IO_SIZES_METRIC: case MDSPerformanceCounterType::WRITE_IO_SIZES_METRIC: + case MDSPerformanceCounterType::AVG_READ_LATENCY_METRIC: + case MDSPerformanceCounterType::STDEV_READ_LATENCY_METRIC: + case MDSPerformanceCounterType::AVG_WRITE_LATENCY_METRIC: + case MDSPerformanceCounterType::STDEV_WRITE_LATENCY_METRIC: + case 
MDSPerformanceCounterType::AVG_METADATA_LATENCY_METRIC: + case MDSPerformanceCounterType::STDEV_METADATA_LATENCY_METRIC: break; default: ceph_abort_msg("unknown counter type"); @@ -57,6 +63,12 @@ void MDSPerformanceCounterDescriptor::unpack_counter( case MDSPerformanceCounterType::OPENED_INODES_METRIC: case MDSPerformanceCounterType::READ_IO_SIZES_METRIC: case MDSPerformanceCounterType::WRITE_IO_SIZES_METRIC: + case MDSPerformanceCounterType::AVG_READ_LATENCY_METRIC: + case MDSPerformanceCounterType::STDEV_READ_LATENCY_METRIC: + case MDSPerformanceCounterType::AVG_WRITE_LATENCY_METRIC: + case MDSPerformanceCounterType::STDEV_WRITE_LATENCY_METRIC: + case MDSPerformanceCounterType::AVG_METADATA_LATENCY_METRIC: + case MDSPerformanceCounterType::STDEV_METADATA_LATENCY_METRIC: break; default: ceph_abort_msg("unknown counter type"); @@ -95,6 +107,24 @@ std::ostream& operator<<(std::ostream &os, const MDSPerformanceCounterDescriptor case MDSPerformanceCounterType::WRITE_IO_SIZES_METRIC: os << "write_io_sizes_metric"; break; + case MDSPerformanceCounterType::AVG_READ_LATENCY_METRIC: + os << "avg_read_latency"; + break; + case MDSPerformanceCounterType::STDEV_READ_LATENCY_METRIC: + os << "stdev_read_latency"; + break; + case MDSPerformanceCounterType::AVG_WRITE_LATENCY_METRIC: + os << "avg_write_latency"; + break; + case MDSPerformanceCounterType::STDEV_WRITE_LATENCY_METRIC: + os << "stdev_write_latency"; + break; + case MDSPerformanceCounterType::AVG_METADATA_LATENCY_METRIC: + os << "avg_metadata_latency"; + break; + case MDSPerformanceCounterType::STDEV_METADATA_LATENCY_METRIC: + os << "stdev_metadata_latency"; + break; } return os; diff --git a/ceph/src/mgr/MDSPerfMetricTypes.h b/ceph/src/mgr/MDSPerfMetricTypes.h index a965e5fa7..aa35b8cab 100644 --- a/ceph/src/mgr/MDSPerfMetricTypes.h +++ b/ceph/src/mgr/MDSPerfMetricTypes.h @@ -126,6 +126,12 @@ enum class MDSPerformanceCounterType : uint8_t { OPENED_INODES_METRIC = 7, READ_IO_SIZES_METRIC = 8, WRITE_IO_SIZES_METRIC = 9, 
+ AVG_READ_LATENCY_METRIC = 10, + STDEV_READ_LATENCY_METRIC = 11, + AVG_WRITE_LATENCY_METRIC = 12, + STDEV_WRITE_LATENCY_METRIC = 13, + AVG_METADATA_LATENCY_METRIC = 14, + STDEV_METADATA_LATENCY_METRIC = 15, }; struct MDSPerformanceCounterDescriptor { @@ -143,6 +149,12 @@ struct MDSPerformanceCounterDescriptor { case MDSPerformanceCounterType::OPENED_INODES_METRIC: case MDSPerformanceCounterType::READ_IO_SIZES_METRIC: case MDSPerformanceCounterType::WRITE_IO_SIZES_METRIC: + case MDSPerformanceCounterType::AVG_READ_LATENCY_METRIC: + case MDSPerformanceCounterType::STDEV_READ_LATENCY_METRIC: + case MDSPerformanceCounterType::AVG_WRITE_LATENCY_METRIC: + case MDSPerformanceCounterType::STDEV_WRITE_LATENCY_METRIC: + case MDSPerformanceCounterType::AVG_METADATA_LATENCY_METRIC: + case MDSPerformanceCounterType::STDEV_METADATA_LATENCY_METRIC: return true; default: return false; diff --git a/ceph/src/mgr/MgrClient.cc b/ceph/src/mgr/MgrClient.cc index 66ac5901e..6253d2670 100644 --- a/ceph/src/mgr/MgrClient.cc +++ b/ceph/src/mgr/MgrClient.cc @@ -21,6 +21,7 @@ #include "messages/MMgrMap.h" #include "messages/MMgrReport.h" #include "messages/MMgrOpen.h" +#include "messages/MMgrUpdate.h" #include "messages/MMgrClose.h" #include "messages/MMgrConfigure.h" #include "messages/MCommand.h" @@ -245,6 +246,24 @@ void MgrClient::_send_open() } } +void MgrClient::_send_update() +{ + if (session && session->con) { + auto update = make_message(); + if (!service_name.empty()) { + update->service_name = service_name; + update->daemon_name = daemon_name; + } else { + update->daemon_name = cct->_conf->name.get_id(); + } + if (need_metadata_update) { + update->daemon_metadata = daemon_metadata; + } + update->need_metadata_update = need_metadata_update; + session->con->send_message2(update); + } +} + bool MgrClient::handle_mgr_map(ref_t m) { ceph_assert(ceph_mutex_is_locked_by_me(lock)); @@ -566,6 +585,30 @@ bool MgrClient::handle_command_reply( return true; } +int 
MgrClient::update_daemon_metadata( + const std::string& service, + const std::string& name, + const std::map& metadata) +{ + std::lock_guard l(lock); + if (service_daemon) { + return -EEXIST; + } + ldout(cct,1) << service << "." << name << " metadata " << metadata << dendl; + service_name = service; + daemon_name = name; + daemon_metadata = metadata; + daemon_dirty_status = true; + + if (need_metadata_update && + !daemon_metadata.empty()) { + _send_update(); + need_metadata_update = false; + } + + return 0; +} + int MgrClient::service_daemon_register( const std::string& service, const std::string& name, diff --git a/ceph/src/mgr/MgrClient.h b/ceph/src/mgr/MgrClient.h index f09ba91b7..a48ae163e 100644 --- a/ceph/src/mgr/MgrClient.h +++ b/ceph/src/mgr/MgrClient.h @@ -94,6 +94,7 @@ protected: bool service_daemon = false; bool daemon_dirty_status = false; bool task_dirty_status = false; + bool need_metadata_update = true; std::string service_name, daemon_name; std::map daemon_metadata; std::map daemon_status; @@ -102,6 +103,7 @@ protected: void reconnect(); void _send_open(); + void _send_update(); // In pre-luminous clusters, the ceph-mgr service is absent or optional, // so we must not block in start_command waiting for it. 
@@ -157,6 +159,10 @@ public: ceph::buffer::list *outbl, std::string *outs, Context *onfinish); + int update_daemon_metadata( + const std::string& service, + const std::string& name, + const std::map& metadata); int service_daemon_register( const std::string& service, const std::string& name, diff --git a/ceph/src/mon/ConfigMonitor.cc b/ceph/src/mon/ConfigMonitor.cc index 39aa9f9b1..471aebf6d 100644 --- a/ceph/src/mon/ConfigMonitor.cc +++ b/ceph/src/mon/ConfigMonitor.cc @@ -191,6 +191,7 @@ bool ConfigMonitor::preprocess_command(MonOpRequestRef op) stringstream ss; string name; cmd_getval(cmdmap, "key", name); + name = ConfFile::normalize_key_name(name); const Option *opt = g_conf().find_option(name); if (!opt) { opt = mon.mgrmon()->find_module_option(name); @@ -321,11 +322,13 @@ bool ConfigMonitor::preprocess_command(MonOpRequestRef op) &src); if (cmd_getval(cmdmap, "key", name)) { + name = ConfFile::normalize_key_name(name); const Option *opt = g_conf().find_option(name); if (!opt) { opt = mon.mgrmon()->find_module_option(name); } if (!opt) { + ss << "unrecognized key '" << name << "'"; err = -ENOENT; goto reply; } @@ -535,7 +538,8 @@ bool ConfigMonitor::prepare_command(MonOpRequestRef op) cmd_getval(cmdmap, "name", name); cmd_getval(cmdmap, "value", value); cmd_getval(cmdmap, "force", force); - + name = ConfFile::normalize_key_name(name); + if (prefix == "config set" && !force) { const Option *opt = g_conf().find_option(name); if (!opt) { diff --git a/ceph/src/mon/Elector.cc b/ceph/src/mon/Elector.cc index 79b227a9c..42be292d4 100644 --- a/ceph/src/mon/Elector.cc +++ b/ceph/src/mon/Elector.cc @@ -713,8 +713,10 @@ void Elector::notify_rank_removed(int rank_removed) peer_tracker.notify_rank_removed(rank_removed); /* we have to clean up the pinging state, which is annoying because it's not indexed anywhere (and adding indexing - would also be annoying). So what we do is start with the - remoed rank and examine the state of the surrounding ranks. 
+ would also be annoying). + In the case where we are removing any rank that is not the + higest, we start with the removed rank and examine the state + of the surrounding ranks. Everybody who remains with larger rank gets a new rank one lower than before, and we have to figure out the remaining scheduled ping contexts. So, starting one past with the removed rank, we: @@ -725,35 +727,46 @@ void Elector::notify_rank_removed(int rank_removed) * * start pinging it if we're not already * check if the next rank is in the same pinging set, and delete * ourselves if not. + In the case where we are removing the highest rank, + we erase the removed rank from all sets. */ - for (unsigned i = rank_removed + 1; i <= paxos_size() ; ++i) { - if (live_pinging.count(i)) { - dead_pinging.erase(i-1); - if (!live_pinging.count(i-1)) { - begin_peer_ping(i-1); + if (rank_removed < paxos_size()) { + for (unsigned i = rank_removed + 1; i <= paxos_size() ; ++i) { + if (live_pinging.count(i)) { + dead_pinging.erase(i-1); + if (!live_pinging.count(i-1)) { + begin_peer_ping(i-1); + } + if (!live_pinging.count(i+1)) { + live_pinging.erase(i); + } } - if (!live_pinging.count(i+1)) { - live_pinging.erase(i); + else if (dead_pinging.count(i)) { + live_pinging.erase(i-1); + if (!dead_pinging.count(i-1)) { + begin_dead_ping(i-1); + } + if (!dead_pinging.count(i+1)) { + dead_pinging.erase(i); + } + } else { + // we aren't pinging rank i at all + if (i-1 == (unsigned)rank_removed) { + // so we special case to make sure we + // actually nuke the removed rank + dead_pinging.erase(rank_removed); + live_pinging.erase(rank_removed); + } } - } - else if (dead_pinging.count(i)) { - live_pinging.erase(i-1); - if (!dead_pinging.count(i-1)) { - begin_dead_ping(i-1); - } - if (!dead_pinging.count(i+1)) { - dead_pinging.erase(i); - } - } else { - // we aren't pinging rank i at all - if (i-1 == (unsigned)rank_removed) { - // so we special case to make sure we - // actually nuke the removed rank - 
dead_pinging.erase(rank_removed); - live_pinging.erase(rank_removed); - } - } - } + } + } else { + if (live_pinging.count(rank_removed)) { + live_pinging.erase(rank_removed); + } + if (dead_pinging.count(rank_removed)) { + dead_pinging.erase(rank_removed); + } + } } void Elector::notify_strategy_maybe_changed(int strategy) diff --git a/ceph/src/mon/LogMonitor.cc b/ceph/src/mon/LogMonitor.cc index eb489e5df..80e069d59 100644 --- a/ceph/src/mon/LogMonitor.cc +++ b/ceph/src/mon/LogMonitor.cc @@ -415,7 +415,7 @@ void LogMonitor::log_external(const LogEntry& le) } if (fd >= 0) { - fmt::format_to(file_log_buffer, "{}\n", le); + fmt::format_to(std::back_inserter(file_log_buffer), "{}\n", le); int err = safe_write(fd, file_log_buffer.data(), file_log_buffer.size()); file_log_buffer.clear(); if (err < 0) { diff --git a/ceph/src/mon/MDSMonitor.cc b/ceph/src/mon/MDSMonitor.cc index 5b397cb59..638612df8 100644 --- a/ceph/src/mon/MDSMonitor.cc +++ b/ceph/src/mon/MDSMonitor.cc @@ -248,6 +248,9 @@ void MDSMonitor::encode_pending(MonitorDBStore::TransactionRef t) health.decode(bl_i); } for (const auto &metric : health.metrics) { + if (metric.type == MDS_HEALTH_DUMMY) { + continue; + } const auto rank = info.rank; health_check_t *check = &new_checks.get_or_add( mds_metric_name(metric.type), @@ -593,10 +596,16 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op) std::set new_types; for (const auto &i : new_health) { + if (i.type == MDS_HEALTH_DUMMY) { + continue; + } new_types.insert(i.type); } for (const auto &new_metric: new_health) { + if (new_metric.type == MDS_HEALTH_DUMMY) { + continue; + } if (old_types.count(new_metric.type) == 0) { dout(10) << "MDS health message (" << m->get_orig_source() << "): " << new_metric.sev << " " << new_metric.message << dendl; @@ -691,26 +700,27 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op) return true; } - // legal state change? 
- if ((info.state == MDSMap::STATE_STANDBY && state > 0) || - (info.state == MDSMap::STATE_STANDBY_REPLAY && state > 0 && state != MDSMap::STATE_DAMAGED)) { - /* N.B.: standby-replay can indicate the rank is damaged due to failure to replay */ - dout(10) << "mds_beacon mds can't activate itself (" << ceph_mds_state_name(info.state) - << " -> " << ceph_mds_state_name(state) << ")" << dendl; + if (state == MDSMap::STATE_DNE) { + dout(1) << __func__ << ": DNE from " << info << dendl; goto evict; - } else if ((state == MDSMap::STATE_STANDBY || state == MDSMap::STATE_STANDBY_REPLAY) - && info.rank != MDS_RANK_NONE) - { - dout(4) << "mds_beacon MDS can't go back into standby after taking rank: " - "held rank " << info.rank << " while requesting state " - << ceph_mds_state_name(state) << dendl; + } + + // legal state change? + if ((info.state == MDSMap::STATE_STANDBY && state != info.state) || + (info.state == MDSMap::STATE_STANDBY_REPLAY && state != info.state && state != MDSMap::STATE_DAMAGED)) { + // Standby daemons should never modify their own state. + // Except that standby-replay can indicate the rank is damaged due to failure to replay. + // Reject any attempts to do so. 
+ derr << "standby " << gid << " attempted to change state to " + << ceph_mds_state_name(state) << ", rejecting" << dendl; goto evict; - } else if (info.state == MDSMap::STATE_STOPPING && - state != MDSMap::STATE_STOPPING && - state != MDSMap::STATE_STOPPED) { - // we can't transition to any other states from STOPPING - dout(0) << "got beacon for MDS in STATE_STOPPING, ignoring requested state change" - << dendl; + } else if (info.state != MDSMap::STATE_STANDBY && state != info.state && + !MDSMap::state_transition_valid(info.state, state)) { + // Validate state transitions for daemons that hold a rank + derr << "daemon " << gid << " (rank " << info.rank << ") " + << "reported invalid state transition " + << ceph_mds_state_name(info.state) << " -> " + << ceph_mds_state_name(state) << dendl; goto evict; } @@ -791,23 +801,6 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op) last_beacon.erase(rankgid); /* MDS expects beacon reply back */ - } else if (state == MDSMap::STATE_DNE) { - dout(1) << __func__ << ": DNE from " << info << dendl; - goto evict; - } else if (info.state == MDSMap::STATE_STANDBY && state != info.state) { - // Standby daemons should never modify their own - // state. Reject any attempts to do so. 
- derr << "standby " << gid << " attempted to change state to " - << ceph_mds_state_name(state) << ", rejecting" << dendl; - goto evict; - } else if (info.state != MDSMap::STATE_STANDBY && state != info.state && - !MDSMap::state_transition_valid(info.state, state)) { - // Validate state transitions for daemons that hold a rank - derr << "daemon " << gid << " (rank " << info.rank << ") " - << "reported invalid state transition " - << ceph_mds_state_name(info.state) << " -> " - << ceph_mds_state_name(state) << dendl; - goto evict; } else { if (info.state != MDSMap::STATE_ACTIVE && state == MDSMap::STATE_ACTIVE) { const auto &fscid = pending.mds_roles.at(gid); diff --git a/ceph/src/mon/MonCap.cc b/ceph/src/mon/MonCap.cc index e1dc37239..2f2378f62 100644 --- a/ceph/src/mon/MonCap.cc +++ b/ceph/src/mon/MonCap.cc @@ -213,6 +213,9 @@ void MonCapGrant::expand_profile(const EntityName& name) const profile_grants.push_back(MonCapGrant("auth rm")); // tell commands (this is a bit of a kludge) profile_grants.push_back(MonCapGrant("smart")); + // allow the Telemetry module to gather heap and mempool metrics + profile_grants.push_back(MonCapGrant("heap")); + profile_grants.push_back(MonCapGrant("dump_mempools")); } if (profile == "osd" || profile == "mds" || profile == "mon" || profile == "mgr") { diff --git a/ceph/src/mon/MonClient.cc b/ceph/src/mon/MonClient.cc index 9ffa7d367..38ad26f61 100644 --- a/ceph/src/mon/MonClient.cc +++ b/ceph/src/mon/MonClient.cc @@ -154,7 +154,7 @@ int MonClient::get_monmap_and_config() if (r < 0) { return r; } - r = authenticate(cct->_conf->client_mount_timeout); + r = authenticate(std::chrono::duration(cct->_conf.get_val("client_mount_timeout")).count()); if (r == -ETIMEDOUT) { shutdown(); continue; diff --git a/ceph/src/mon/MonClient.h b/ceph/src/mon/MonClient.h index 19aa047c2..de6bba574 100644 --- a/ceph/src/mon/MonClient.h +++ b/ceph/src/mon/MonClient.h @@ -158,7 +158,7 @@ struct MonClientPinger : public Dispatcher, int wait_for_reply(double 
timeout = 0.0) { std::unique_lock locker{lock}; if (timeout <= 0) { - timeout = cct->_conf->client_mount_timeout; + timeout = std::chrono::duration(cct->_conf.get_val("client_mount_timeout")).count(); } done = false; if (ping_recvd_cond.wait_for(locker, diff --git a/ceph/src/mon/Monitor.cc b/ceph/src/mon/Monitor.cc index ac6d6632c..f756c1f07 100644 --- a/ceph/src/mon/Monitor.cc +++ b/ceph/src/mon/Monitor.cc @@ -2966,6 +2966,19 @@ void Monitor::log_health( } } +void Monitor::update_pending_metadata() +{ + Metadata metadata; + collect_metadata(&metadata); + size_t version_size = mon_metadata[rank]["ceph_version_short"].size(); + const std::string current_version = mon_metadata[rank]["ceph_version_short"]; + const std::string pending_version = metadata["ceph_version_short"]; + + if (current_version.compare(0, version_size, pending_version) < 0) { + mgr_client.update_daemon_metadata("mon", name, metadata); + } +} + void Monitor::get_cluster_status(stringstream &ss, Formatter *f, MonSession *session) { @@ -3425,7 +3438,15 @@ void Monitor::handle_command(MonOpRequestRef op) // validate user's permissions for requested command map param_str_map; - _generate_command_map(cmdmap, param_str_map); + + // Catch bad_cmd_get exception if _generate_command_map() throws it + try { + _generate_command_map(cmdmap, param_str_map); + } + catch(bad_cmd_get& e) { + reply_command(op, -EINVAL, e.what(), 0); + } + if (!_allowed_command(session, service, prefix, cmdmap, param_str_map, mon_cmd)) { dout(1) << __func__ << " access denied" << dendl; diff --git a/ceph/src/mon/Monitor.h b/ceph/src/mon/Monitor.h index 454daf63c..575fef0a9 100644 --- a/ceph/src/mon/Monitor.h +++ b/ceph/src/mon/Monitor.h @@ -796,6 +796,8 @@ public: const health_check_map_t& previous, MonitorDBStore::TransactionRef t); + void update_pending_metadata(); + protected: class HealthCheckLogStatus { diff --git a/ceph/src/mon/MonmapMonitor.cc b/ceph/src/mon/MonmapMonitor.cc index f9792ed57..6c5a9ce57 100644 --- 
a/ceph/src/mon/MonmapMonitor.cc +++ b/ceph/src/mon/MonmapMonitor.cc @@ -248,6 +248,8 @@ void MonmapMonitor::on_active() apply_mon_features(mon.get_quorum_mon_features(), mon.quorum_min_mon_release); + + mon.update_pending_metadata(); } bool MonmapMonitor::preprocess_query(MonOpRequestRef op) diff --git a/ceph/src/mon/OSDMonitor.cc b/ceph/src/mon/OSDMonitor.cc index 0d63ba8b5..5d76862e0 100644 --- a/ceph/src/mon/OSDMonitor.cc +++ b/ceph/src/mon/OSDMonitor.cc @@ -12600,6 +12600,14 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, return false; } + // make sure kvmon is writeable. + if (!mon.kvmon()->is_writeable()) { + dout(10) << __func__ << " waiting for kv mon to be writeable for " + << "osd new" << dendl; + mon.kvmon()->wait_for_writeable(op, new C_RetryMessage(this, op)); + return false; + } + map param_map; bufferlist bl = m->get_data(); diff --git a/ceph/src/mrun b/ceph/src/mrun index ba6f8cece..4bcd4d8cc 100755 --- a/ceph/src/mrun +++ b/ceph/src/mrun @@ -1,4 +1,4 @@ -#!/bin/sh +#!/usr/bin/env bash [ $# -lt 2 ] && echo "usage: $0 [params...]" && exit 1 diff --git a/ceph/src/msg/Message.cc b/ceph/src/msg/Message.cc index 6c57d355b..266eb7676 100644 --- a/ceph/src/msg/Message.cc +++ b/ceph/src/msg/Message.cc @@ -190,6 +190,7 @@ #include "messages/MMgrDigest.h" #include "messages/MMgrReport.h" #include "messages/MMgrOpen.h" +#include "messages/MMgrUpdate.h" #include "messages/MMgrClose.h" #include "messages/MMgrConfigure.h" #include "messages/MMonMgrReport.h" @@ -893,6 +894,10 @@ Message *decode_message(CephContext *cct, m = make_message(); break; + case MSG_MGR_UPDATE: + m = make_message(); + break; + case MSG_MGR_CLOSE: m = make_message(); break; diff --git a/ceph/src/msg/Message.h b/ceph/src/msg/Message.h index 362ae5ec6..a7aff1e27 100644 --- a/ceph/src/msg/Message.h +++ b/ceph/src/msg/Message.h @@ -235,6 +235,9 @@ #define MSG_MGR_COMMAND 0x709 #define MSG_MGR_COMMAND_REPLY 0x70a +// *** ceph-mgr <-> MON daemons *** +#define MSG_MGR_UPDATE 0x70b + 
// ====================================================== // abstract Message class diff --git a/ceph/src/msg/MessageRef.h b/ceph/src/msg/MessageRef.h index cd5b2dd74..ef7a56a44 100644 --- a/ceph/src/msg/MessageRef.h +++ b/ceph/src/msg/MessageRef.h @@ -101,6 +101,7 @@ class MMgrConfigure; class MMgrDigest; class MMgrMap; class MMgrOpen; +class MMgrUpdate; class MMgrReport; class MMonCommandAck; class MMonCommand; diff --git a/ceph/src/msg/async/ProtocolV1.cc b/ceph/src/msg/async/ProtocolV1.cc index 43363371b..1bc661a55 100644 --- a/ceph/src/msg/async/ProtocolV1.cc +++ b/ceph/src/msg/async/ProtocolV1.cc @@ -662,7 +662,7 @@ CtPtr ProtocolV1::throttle_message() { << "/" << connection->policy.throttler_messages->get_max() << dendl; if (!connection->policy.throttler_messages->get_or_fail()) { - ldout(cct, 10) << __func__ << " wants 1 message from policy throttle " + ldout(cct, 1) << __func__ << " wants 1 message from policy throttle " << connection->policy.throttler_messages->get_current() << "/" << connection->policy.throttler_messages->get_max() << " failed, just wait." 
<< dendl; @@ -693,7 +693,7 @@ CtPtr ProtocolV1::throttle_bytes() { << connection->policy.throttler_bytes->get_current() << "/" << connection->policy.throttler_bytes->get_max() << dendl; if (!connection->policy.throttler_bytes->get_or_fail(cur_msg_size)) { - ldout(cct, 10) << __func__ << " wants " << cur_msg_size + ldout(cct, 1) << __func__ << " wants " << cur_msg_size << " bytes from policy throttler " << connection->policy.throttler_bytes->get_current() << "/" << connection->policy.throttler_bytes->get_max() @@ -720,7 +720,7 @@ CtPtr ProtocolV1::throttle_dispatch_queue() { if (cur_msg_size) { if (!connection->dispatch_queue->dispatch_throttler.get_or_fail( cur_msg_size)) { - ldout(cct, 10) + ldout(cct, 1) << __func__ << " wants " << cur_msg_size << " bytes from dispatch throttle " << connection->dispatch_queue->dispatch_throttler.get_current() << "/" diff --git a/ceph/src/msg/async/ProtocolV2.cc b/ceph/src/msg/async/ProtocolV2.cc index a176fc2c8..d63621438 100644 --- a/ceph/src/msg/async/ProtocolV2.cc +++ b/ceph/src/msg/async/ProtocolV2.cc @@ -1530,7 +1530,7 @@ CtPtr ProtocolV2::throttle_message() { << "/" << connection->policy.throttler_messages->get_max() << dendl; if (!connection->policy.throttler_messages->get_or_fail()) { - ldout(cct, 10) << __func__ << " wants 1 message from policy throttle " + ldout(cct, 1) << __func__ << " wants 1 message from policy throttle " << connection->policy.throttler_messages->get_current() << "/" << connection->policy.throttler_messages->get_max() << " failed, just wait." 
<< dendl; @@ -1560,7 +1560,7 @@ CtPtr ProtocolV2::throttle_bytes() { << connection->policy.throttler_bytes->get_current() << "/" << connection->policy.throttler_bytes->get_max() << dendl; if (!connection->policy.throttler_bytes->get_or_fail(cur_msg_size)) { - ldout(cct, 10) << __func__ << " wants " << cur_msg_size + ldout(cct, 1) << __func__ << " wants " << cur_msg_size << " bytes from policy throttler " << connection->policy.throttler_bytes->get_current() << "/" << connection->policy.throttler_bytes->get_max() @@ -1588,7 +1588,7 @@ CtPtr ProtocolV2::throttle_dispatch_queue() { if (cur_msg_size) { if (!connection->dispatch_queue->dispatch_throttler.get_or_fail( cur_msg_size)) { - ldout(cct, 10) + ldout(cct, 1) << __func__ << " wants " << cur_msg_size << " bytes from dispatch throttle " << connection->dispatch_queue->dispatch_throttler.get_current() << "/" @@ -2596,7 +2596,7 @@ CtPtr ProtocolV2::handle_reconnect(ceph::bufferlist &payload) CtPtr ProtocolV2::handle_existing_connection(const AsyncConnectionRef& existing) { ldout(cct, 20) << __func__ << " existing=" << existing << dendl; - std::lock_guard l(existing->lock); + std::unique_lock l(existing->lock); ProtocolV2 *exproto = dynamic_cast(existing->protocol.get()); if (!exproto) { @@ -2607,6 +2607,7 @@ CtPtr ProtocolV2::handle_existing_connection(const AsyncConnectionRef& existing) if (exproto->state == CLOSED) { ldout(cct, 1) << __func__ << " existing " << existing << " already closed." 
<< dendl; + l.unlock(); return send_server_ident(); } @@ -2636,6 +2637,7 @@ CtPtr ProtocolV2::handle_existing_connection(const AsyncConnectionRef& existing) << dendl; existing->protocol->stop(); existing->dispatch_queue->queue_reset(existing.get()); + l.unlock(); return send_server_ident(); } @@ -2714,14 +2716,11 @@ CtPtr ProtocolV2::reuse_connection(const AsyncConnectionRef& existing, exproto->pre_auth.enabled = false; if (!reconnecting) { - exproto->peer_supported_features = peer_supported_features; - exproto->tx_frame_asm.set_is_rev1(tx_frame_asm.get_is_rev1()); - exproto->rx_frame_asm.set_is_rev1(rx_frame_asm.get_is_rev1()); - exproto->client_cookie = client_cookie; exproto->peer_name = peer_name; exproto->connection_features = connection_features; existing->set_features(connection_features); + exproto->peer_supported_features = peer_supported_features; } exproto->peer_global_seq = peer_global_seq; @@ -2764,6 +2763,9 @@ CtPtr ProtocolV2::reuse_connection(const AsyncConnectionRef& existing, new_worker, new_center, exproto, + reconnecting=reconnecting, + tx_is_rev1=tx_frame_asm.get_is_rev1(), + rx_is_rev1=rx_frame_asm.get_is_rev1(), temp_stream_handlers=std::move(temp_stream_handlers), temp_compression_handlers=std::move(temp_compression_handlers) ](ConnectedSocket &cs) mutable { @@ -2781,6 +2783,10 @@ CtPtr ProtocolV2::reuse_connection(const AsyncConnectionRef& existing, existing->open_write = false; exproto->session_stream_handlers = std::move(temp_stream_handlers); exproto->session_compression_handlers = std::move(temp_compression_handlers); + if (!reconnecting) { + exproto->tx_frame_asm.set_is_rev1(tx_is_rev1); + exproto->rx_frame_asm.set_is_rev1(rx_is_rev1); + } existing->write_lock.unlock(); if (exproto->state == NONE) { existing->shutdown_socket(); diff --git a/ceph/src/msg/msg_types.h b/ceph/src/msg/msg_types.h index 6e0ee1261..a33545d91 100644 --- a/ceph/src/msg/msg_types.h +++ b/ceph/src/msg/msg_types.h @@ -26,6 +26,11 @@ #define MAX_PORT_NUMBER 65535 
+#ifdef _WIN32 +// ceph_sockaddr_storage matches the Linux format. +#define AF_INET6_LINUX 10 +#endif + namespace ceph { class Formatter; } @@ -161,6 +166,14 @@ static inline void encode(const sockaddr_storage& a, ceph::buffer::list& bl) { (unsigned char*)(&ss + 1) - dst); ::memcpy(dst, src, copy_size); encode(ss, bl); +#elif defined(_WIN32) + ceph_sockaddr_storage ss{}; + ::memcpy(&ss, &a, std::min(sizeof(ss), sizeof(a))); + // The Windows AF_INET6 definition doesn't match the Linux one. + if (a.ss_family == AF_INET6) { + ss.ss_family = AF_INET6_LINUX; + } + encode(ss, bl); #else ceph_sockaddr_storage ss; ::memset(&ss, '\0', sizeof(ss)); @@ -186,6 +199,13 @@ static inline void decode(sockaddr_storage& a, auto const copy_size = std::min((unsigned char*)(&ss + 1) - src, (unsigned char*)(&a + 1) - dst); ::memcpy(dst, src, copy_size); +#elif defined(_WIN32) + ceph_sockaddr_storage ss{}; + decode(ss, bl); + ::memcpy(&a, &ss, std::min(sizeof(ss), sizeof(a))); + if (a.ss_family == AF_INET6_LINUX) { + a.ss_family = AF_INET6; + } #else ceph_sockaddr_storage ss{}; decode(ss, bl); @@ -470,7 +490,11 @@ struct entity_addr_t { encode(elen, bl); if (elen) { uint16_t ss_family = u.sa.sa_family; - +#if defined(_WIN32) + if (ss_family == AF_INET6) { + ss_family = AF_INET6_LINUX; + } +#endif encode(ss_family, bl); elen -= sizeof(u.sa.sa_family); bl.append(u.sa.sa_data, elen); @@ -501,6 +525,11 @@ struct entity_addr_t { throw ceph::buffer::malformed_input("elen smaller than family len"); } decode(ss_family, bl); +#if defined(_WIN32) + if (ss_family == AF_INET6_LINUX) { + ss_family = AF_INET6; + } +#endif u.sa.sa_family = ss_family; elen -= sizeof(ss_family); if (elen > get_sockaddr_len() - sizeof(u.sa.sa_family)) { diff --git a/ceph/src/mypy-constrains.txt b/ceph/src/mypy-constrains.txt index 8851ed501..9eb774c18 100644 --- a/ceph/src/mypy-constrains.txt +++ b/ceph/src/mypy-constrains.txt @@ -12,6 +12,7 @@ types-PyYAML==5.4.0 # src/pybind types-backports==0.1.2 
+types-pkg_resources==0.1.3 # qa/ types-boto==0.1.0 diff --git a/ceph/src/neorados/RADOSImpl.cc b/ceph/src/neorados/RADOSImpl.cc index bddd62c2f..6c9c210a8 100644 --- a/ceph/src/neorados/RADOSImpl.cc +++ b/ceph/src/neorados/RADOSImpl.cc @@ -59,7 +59,7 @@ RADOS::RADOS(boost::asio::io_context& ioctx, if (err) { throw boost::system::system_error(ceph::to_error_code(err)); } - err = monclient.authenticate(cct->_conf->client_mount_timeout); + err = monclient.authenticate(std::chrono::duration(cct->_conf.get_val("client_mount_timeout")).count()); if (err) { throw boost::system::system_error(ceph::to_error_code(err)); } diff --git a/ceph/src/os/bluestore/BlueStore.cc b/ceph/src/os/bluestore/BlueStore.cc index 6c9e737b4..3239a8258 100644 --- a/ceph/src/os/bluestore/BlueStore.cc +++ b/ceph/src/os/bluestore/BlueStore.cc @@ -13918,6 +13918,10 @@ int BlueStore::_deferred_replay() dout(10) << __func__ << " start" << dendl; int count = 0; int r = 0; + interval_set bluefs_extents; + if (bluefs) { + bluefs->get_block_extents(bluefs_layout.shared_bdev, &bluefs_extents); + } CollectionRef ch = _get_collection(coll_t::meta()); bool fake_ch = false; if (!ch) { @@ -13943,10 +13947,15 @@ int BlueStore::_deferred_replay() r = -EIO; goto out; } - TransContext *txc = _txc_create(ch.get(), osr, nullptr); - txc->deferred_txn = deferred_txn; - txc->set_state(TransContext::STATE_KV_DONE); - _txc_state_proc(txc); + bool has_some = _eliminate_outdated_deferred(deferred_txn, bluefs_extents); + if (has_some) { + TransContext *txc = _txc_create(ch.get(), osr, nullptr); + txc->deferred_txn = deferred_txn; + txc->set_state(TransContext::STATE_KV_DONE); + _txc_state_proc(txc); + } else { + delete deferred_txn; + } } out: dout(20) << __func__ << " draining osr" << dendl; @@ -13959,6 +13968,76 @@ int BlueStore::_deferred_replay() return r; } +bool BlueStore::_eliminate_outdated_deferred(bluestore_deferred_transaction_t* deferred_txn, + interval_set& bluefs_extents) +{ + bool has_some = false; + dout(30) 
<< __func__ << " bluefs_extents: " << std::hex << bluefs_extents << std::dec << dendl; + auto it = deferred_txn->ops.begin(); + while (it != deferred_txn->ops.end()) { + // We process a pair of _data_/_extents_ (here: it->data/it->extents) + // by eliminating _extents_ that belong to bluefs, removing relevant parts of _data_ + // example: + // +------------+---------------+---------------+---------------+ + // | data | aaaaaaaabbbbb | bbbbcccccdddd | ddddeeeeeefff | + // | extent | 40000 - 44000 | 50000 - 58000 | 58000 - 60000 | + // | in bluefs? | no | yes | no | + // +------------+---------------+---------------+---------------+ + // result: + // +------------+---------------+---------------+ + // | data | aaaaaaaabbbbb | ddddeeeeeefff | + // | extent | 40000 - 44000 | 58000 - 60000 | + // +------------+---------------+---------------+ + PExtentVector new_extents; + ceph::buffer::list new_data; + uint32_t data_offset = 0; // this tracks location of extent 'e' inside it->data + dout(30) << __func__ << " input extents: " << it->extents << dendl; + for (auto& e: it->extents) { + interval_set region; + region.insert(e.offset, e.length); + + auto mi = bluefs_extents.lower_bound(e.offset); + if (mi != bluefs_extents.begin()) { + --mi; + if (mi.get_end() <= e.offset) { + ++mi; + } + } + while (mi != bluefs_extents.end() && mi.get_start() < e.offset + e.length) { + // The interval_set does not like (asserts) when we erase interval that does not exist. + // Hence we do we implement (region-mi) by ((region+mi)-mi). 
+ region.union_insert(mi.get_start(), mi.get_len()); + region.erase(mi.get_start(), mi.get_len()); + ++mi; + } + // 'region' is now a subset of e, without parts used by bluefs + // we trim coresponding parts from it->data (actally constructing new_data / new_extents) + for (auto ki = region.begin(); ki != region.end(); ki++) { + ceph::buffer::list chunk; + // A chunk from it->data; data_offset is a an offset where 'e' was located; + // 'ki.get_start() - e.offset' is an offset of ki inside 'e'. + chunk.substr_of(it->data, data_offset + (ki.get_start() - e.offset), ki.get_len()); + new_data.claim_append(chunk); + new_extents.emplace_back(bluestore_pextent_t(ki.get_start(), ki.get_len())); + } + data_offset += e.length; + } + dout(30) << __func__ << " output extents: " << new_extents << dendl; + if (it->data.length() != new_data.length()) { + dout(10) << __func__ << " trimmed deferred extents: " << it->extents << "->" << new_extents << dendl; + } + if (new_extents.size() == 0) { + it = deferred_txn->ops.erase(it); + } else { + has_some = true; + std::swap(it->extents, new_extents); + std::swap(it->data, new_data); + ++it; + } + } + return has_some; +} + // --------------------------- // transactions diff --git a/ceph/src/os/bluestore/BlueStore.h b/ceph/src/os/bluestore/BlueStore.h index 10fdd0f72..5f1b84d91 100644 --- a/ceph/src/os/bluestore/BlueStore.h +++ b/ceph/src/os/bluestore/BlueStore.h @@ -2684,6 +2684,8 @@ private: void _deferred_submit_unlock(OpSequencer *osr); void _deferred_aio_finish(OpSequencer *osr); int _deferred_replay(); + bool _eliminate_outdated_deferred(bluestore_deferred_transaction_t* deferred_txn, + interval_set& bluefs_extents); public: using mempool_dynamic_bitset = diff --git a/ceph/src/os/bluestore/bluestore_types.cc b/ceph/src/os/bluestore/bluestore_types.cc index 121f7ccd3..b62f6e2a3 100644 --- a/ceph/src/os/bluestore/bluestore_types.cc +++ b/ceph/src/os/bluestore/bluestore_types.cc @@ -366,11 +366,12 @@ ostream& operator<<(ostream& out, 
const bluestore_extent_ref_map_t& m) bluestore_blob_use_tracker_t::bluestore_blob_use_tracker_t( const bluestore_blob_use_tracker_t& tracker) : au_size{tracker.au_size}, - num_au{tracker.num_au}, + num_au(0), + alloc_au(0), bytes_per_au{nullptr} { - if (num_au > 0) { - allocate(); + if (tracker.num_au > 0) { + allocate(tracker.num_au); std::copy(tracker.bytes_per_au, tracker.bytes_per_au + num_au, bytes_per_au); } else { total_bytes = tracker.total_bytes; @@ -385,9 +386,8 @@ bluestore_blob_use_tracker_t::operator=(const bluestore_blob_use_tracker_t& rhs) } clear(); au_size = rhs.au_size; - num_au = rhs.num_au; if (rhs.num_au > 0) { - allocate(); + allocate( rhs.num_au); std::copy(rhs.bytes_per_au, rhs.bytes_per_au + num_au, bytes_per_au); } else { total_bytes = rhs.total_bytes; @@ -395,19 +395,31 @@ bluestore_blob_use_tracker_t::operator=(const bluestore_blob_use_tracker_t& rhs) return *this; } -void bluestore_blob_use_tracker_t::allocate() +void bluestore_blob_use_tracker_t::allocate(uint32_t au_count) { - ceph_assert(num_au != 0); - bytes_per_au = new uint32_t[num_au]; + ceph_assert(au_count != 0); + ceph_assert(num_au == 0); + ceph_assert(alloc_au == 0); + num_au = alloc_au = au_count; + bytes_per_au = new uint32_t[alloc_au]; mempool::get_pool( mempool::pool_index_t(mempool::mempool_bluestore_cache_other)). - adjust_count(1, sizeof(uint32_t) * num_au); + adjust_count(alloc_au, sizeof(uint32_t) * alloc_au); for (uint32_t i = 0; i < num_au; ++i) { bytes_per_au[i] = 0; } } +void bluestore_blob_use_tracker_t::release(uint32_t au_count, uint32_t* ptr) { + if (au_count) { + delete[] ptr; + mempool::get_pool( + mempool::pool_index_t(mempool::mempool_bluestore_cache_other)). 
+ adjust_count(-(int32_t)au_count, -(int32_t)(sizeof(uint32_t) * au_count)); + } +} + void bluestore_blob_use_tracker_t::init( uint32_t full_length, uint32_t _au_size) { ceph_assert(!au_size || is_empty()); @@ -417,8 +429,7 @@ void bluestore_blob_use_tracker_t::init( uint32_t _num_au = round_up_to(full_length, _au_size) / _au_size; au_size = _au_size; if ( _num_au > 1 ) { - num_au = _num_au; - allocate(); + allocate(_num_au); } } diff --git a/ceph/src/os/bluestore/bluestore_types.h b/ceph/src/os/bluestore/bluestore_types.h index ed82991a5..e92a75552 100644 --- a/ceph/src/os/bluestore/bluestore_types.h +++ b/ceph/src/os/bluestore/bluestore_types.h @@ -247,10 +247,11 @@ struct bluestore_blob_use_tracker_t { // 1) Struct isn't packed hence it's padded. And even if it's packed see 2) // 2) Mem manager has its own granularity, most probably >= 8 bytes // - uint32_t au_size; // Allocation (=tracking) unit size, - // == 0 if uninitialized - uint32_t num_au; // Amount of allocation units tracked - // == 0 if single unit or the whole blob is tracked + uint32_t au_size; // Allocation (=tracking) unit size, + // == 0 if uninitialized + uint32_t num_au; // Amount of allocation units tracked + // == 0 if single unit or the whole blob is tracked + uint32_t alloc_au; // Amount of allocation units allocated union { uint32_t* bytes_per_au; @@ -258,7 +259,7 @@ struct bluestore_blob_use_tracker_t { }; bluestore_blob_use_tracker_t() - : au_size(0), num_au(0), bytes_per_au(nullptr) { + : au_size(0), num_au(0), alloc_au(0), bytes_per_au(nullptr) { } bluestore_blob_use_tracker_t(const bluestore_blob_use_tracker_t& tracker); bluestore_blob_use_tracker_t& operator=(const bluestore_blob_use_tracker_t& rhs); @@ -267,15 +268,11 @@ struct bluestore_blob_use_tracker_t { } void clear() { - if (num_au != 0) { - delete[] bytes_per_au; - mempool::get_pool( - mempool::pool_index_t(mempool::mempool_bluestore_cache_other)). 
- adjust_count(-1, -sizeof(uint32_t) * num_au); - } + release(alloc_au, bytes_per_au); + num_au = 0; + alloc_au = 0; bytes_per_au = 0; au_size = 0; - num_au = 0; } uint32_t get_referenced_bytes() const { @@ -311,7 +308,6 @@ struct bluestore_blob_use_tracker_t { ceph_assert(_num_au <= num_au); if (_num_au) { num_au = _num_au; // bytes_per_au array is left unmodified - } else { clear(); } @@ -337,15 +333,17 @@ struct bluestore_blob_use_tracker_t { if (_num_au > num_au) { auto old_bytes = bytes_per_au; auto old_num_au = num_au; - num_au = _num_au; - allocate(); + auto old_alloc_au = alloc_au; + alloc_au = num_au = 0; // to bypass an assertion in allocate() + bytes_per_au = nullptr; + allocate(_num_au); for (size_t i = 0; i < old_num_au; i++) { bytes_per_au[i] = old_bytes[i]; } for (size_t i = old_num_au; i < num_au; i++) { bytes_per_au[i] = 0; } - delete[] old_bytes; + release(old_alloc_au, old_bytes); } } } @@ -410,12 +408,14 @@ struct bluestore_blob_use_tracker_t { clear(); denc_varint(au_size, p); if (au_size) { - denc_varint(num_au, p); - if (!num_au) { + uint32_t _num_au; + denc_varint(_num_au, p); + if (!_num_au) { + num_au = 0; denc_varint(total_bytes, p); } else { - allocate(); - for (size_t i = 0; i < num_au; ++i) { + allocate(_num_au); + for (size_t i = 0; i < _num_au; ++i) { denc_varint(bytes_per_au[i], p); } } @@ -425,7 +425,8 @@ struct bluestore_blob_use_tracker_t { void dump(ceph::Formatter *f) const; static void generate_test_instances(std::list& o); private: - void allocate(); + void allocate(uint32_t _num_au); + void release(uint32_t _num_au, uint32_t* ptr); }; WRITE_CLASS_DENC(bluestore_blob_use_tracker_t) std::ostream& operator<<(std::ostream& out, const bluestore_blob_use_tracker_t& rm); diff --git a/ceph/src/osd/OSD.cc b/ceph/src/osd/OSD.cc index 141326d45..a21b04a68 100644 --- a/ceph/src/osd/OSD.cc +++ b/ceph/src/osd/OSD.cc @@ -1991,7 +1991,10 @@ void OSDService::_queue_for_recovery( // Commands shared between OSD's console and admin console: 
namespace ceph::osd_cmds { -int heap(CephContext& cct, const cmdmap_t& cmdmap, Formatter& f, std::ostream& os); +int heap(CephContext& cct, + const cmdmap_t& cmdmap, + std::ostream& outos, + std::ostream& erros); } // namespace ceph::osd_cmds @@ -2896,7 +2899,9 @@ will start to track new ops received afterwards."; } else if (prefix == "heap") { - ret = ceph::osd_cmds::heap(*cct, cmdmap, *f, ss); + std::stringstream outss; + ret = ceph::osd_cmds::heap(*cct, cmdmap, outss, ss); + outbl.append(outss); } else if (prefix == "debug dump_missing") { @@ -10877,7 +10882,7 @@ void OSD::ShardedOpWQ::_process(uint32_t thread_index, heartbeat_handle_d *hb) if (is_smallest_thread_index) { sdata->shard_lock.unlock(); handle_oncommits(oncommits); - return; + sdata->shard_lock.lock(); } std::unique_lock wait_lock{sdata->sdata_wait_lock}; auto future_time = ceph::real_clock::from_double(*when_ready); @@ -10893,6 +10898,11 @@ void OSD::ShardedOpWQ::_process(uint32_t thread_index, heartbeat_handle_d *hb) // Reapply default wq timeouts osd->cct->get_heartbeat_map()->reset_timeout(hb, timeout_interval, suicide_interval); + // Populate the oncommits list if there were any additions + // to the context_queue while we were waiting + if (is_smallest_thread_index) { + sdata->context_queue.move_to(oncommits); + } } } // while @@ -11217,17 +11227,19 @@ void OSD::ShardedOpWQ::stop_for_fast_shutdown() namespace ceph::osd_cmds { -int heap(CephContext& cct, const cmdmap_t& cmdmap, Formatter& f, - std::ostream& os) +int heap(CephContext& cct, + const cmdmap_t& cmdmap, + std::ostream& outos, + std::ostream& erros) { if (!ceph_using_tcmalloc()) { - os << "could not issue heap profiler command -- not using tcmalloc!"; + erros << "could not issue heap profiler command -- not using tcmalloc!"; return -EOPNOTSUPP; } string cmd; if (!cmd_getval(cmdmap, "heapcmd", cmd)) { - os << "unable to get value for command \"" << cmd << "\""; + erros << "unable to get value for command \"" << cmd << "\""; return 
-EINVAL; } @@ -11239,7 +11251,7 @@ int heap(CephContext& cct, const cmdmap_t& cmdmap, Formatter& f, cmd_vec.push_back(val); } - ceph_heap_profiler_handle_command(cmd_vec, os); + ceph_heap_profiler_handle_command(cmd_vec, outos); return 0; } diff --git a/ceph/src/osd/PG.cc b/ceph/src/osd/PG.cc index d77cef0d7..9b0eb491c 100644 --- a/ceph/src/osd/PG.cc +++ b/ceph/src/osd/PG.cc @@ -1326,6 +1326,7 @@ unsigned int PG::scrub_requeue_priority(Scrub::scrub_prio_t with_priority, unsig */ Scrub::schedule_result_t PG::sched_scrub() { + using Scrub::schedule_result_t; dout(15) << __func__ << " pg(" << info.pgid << (is_active() ? ") " : ") ") << (is_clean() ? " " : " ") << dendl; @@ -1333,11 +1334,19 @@ Scrub::schedule_result_t PG::sched_scrub() ceph_assert(m_scrubber); if (is_scrub_queued_or_active()) { - return Scrub::schedule_result_t::already_started; + return schedule_result_t::already_started; } if (!is_primary() || !is_active() || !is_clean()) { - return Scrub::schedule_result_t::bad_pg_state; + return schedule_result_t::bad_pg_state; + } + + if (state_test(PG_STATE_SNAPTRIM) || state_test(PG_STATE_SNAPTRIM_WAIT)) { + // note that the trimmer checks scrub status when setting 'snaptrim_wait' + // (on the transition from NotTrimming to Trimming/WaitReservation), + // i.e. some time before setting 'snaptrim'. + dout(10) << __func__ << ": cannot scrub while snap-trimming" << dendl; + return schedule_result_t::bad_pg_state; } // analyse the combination of the requested scrub flags, the osd/pool configuration @@ -1349,14 +1358,14 @@ Scrub::schedule_result_t PG::sched_scrub() // (due to configuration or priority issues) // The reason was already reported by the callee. dout(10) << __func__ << ": failed to initiate a scrub" << dendl; - return Scrub::schedule_result_t::preconditions; + return schedule_result_t::preconditions; } // try to reserve the local OSD resources. If failing: no harm. We will // be retried by the OSD later on. 
if (!m_scrubber->reserve_local()) { dout(10) << __func__ << ": failed to reserve locally" << dendl; - return Scrub::schedule_result_t::no_local_resources; + return schedule_result_t::no_local_resources; } // can commit to the updated flags now, as nothing will stop the scrub @@ -1371,7 +1380,7 @@ Scrub::schedule_result_t PG::sched_scrub() dout(10) << __func__ << ": queueing" << dendl; osd->queue_for_scrub(this, Scrub::scrub_prio_t::low_priority); - return Scrub::schedule_result_t::scrub_initiated; + return schedule_result_t::scrub_initiated; } double PG::next_deepscrub_interval() const diff --git a/ceph/src/osd/PGLog.cc b/ceph/src/osd/PGLog.cc index d7ab12c01..ad6762b29 100644 --- a/ceph/src/osd/PGLog.cc +++ b/ceph/src/osd/PGLog.cc @@ -60,6 +60,7 @@ void PGLog::IndexedLog::trim( set* trimmed_dups, eversion_t *write_from_dups) { + lgeneric_subdout(cct, osd, 10) << "IndexedLog::trim s=" << s << dendl; ceph_assert(s <= can_rollback_to); if (complete_to != log.end()) lgeneric_subdout(cct, osd, 20) << " complete_to " << complete_to->version << dendl; @@ -131,10 +132,18 @@ void PGLog::IndexedLog::trim( } } - while (!dups.empty()) { + // we can hit an inflated `dups` b/c of https://tracker.ceph.com/issues/53729 + // the idea is to slowly trim them over a prolonged period of time and mix + // omap deletes with writes (if we're here, a new log entry got added) to + // neither: 1) blow size of single Transaction nor 2) generate-n-accumulate + // large amount of tombstones in BlueStore's RocksDB. + // if trimming immediately is a must, then the ceph-objectstore-tool is + // the way to go. 
+ const size_t max_dups = cct->_conf->osd_pg_log_dups_tracked; + for (size_t max_dups_to_trim = cct->_conf->osd_pg_log_trim_max; + max_dups_to_trim > 0 && dups.size() > max_dups; + max_dups_to_trim--) { const auto& e = *dups.begin(); - if (e.version.version >= earliest_dup_version) - break; lgeneric_subdout(cct, osd, 20) << "trim dup " << e << dendl; if (trimmed_dups) trimmed_dups->insert(e.get_key_name()); @@ -145,6 +154,10 @@ void PGLog::IndexedLog::trim( // raise tail? if (tail < s) tail = s; + lgeneric_subdout(cct, osd, 20) << "IndexedLog::trim after trim" + << " dups.size()=" << dups.size() + << " tail=" << tail + << " s=" << s << dendl; } ostream& PGLog::IndexedLog::print(ostream& out) const @@ -506,6 +519,9 @@ void PGLog::merge_log(pg_info_t &oinfo, pg_log_t&& olog, pg_shard_t fromosd, // returns true if any changes were made to log.dups bool PGLog::merge_log_dups(const pg_log_t& olog) { + dout(5) << __func__ + << " log.dups.size()=" << log.dups.size() + << "olog.dups.size()=" << olog.dups.size() << dendl; bool changed = false; if (!olog.dups.empty()) { @@ -584,6 +600,10 @@ bool PGLog::merge_log_dups(const pg_log_t& olog) { } } + dout(5) << "end of " << __func__ << " changed=" << changed + << " log.dups.size()=" << log.dups.size() + << " olog.dups.size()=" << olog.dups.size() << dendl; + return changed; } @@ -641,7 +661,8 @@ void PGLog::write_log_and_missing( dirty_from_dups, write_from_dups, &may_include_deletes_in_missing_dirty, - (pg_log_debug ? &log_keys_debug : nullptr)); + (pg_log_debug ? 
&log_keys_debug : nullptr), + this); undirty(); } else { dout(10) << "log is not dirty" << dendl; @@ -655,14 +676,15 @@ void PGLog::write_log_and_missing_wo_missing( pg_log_t &log, const coll_t& coll, const ghobject_t &log_oid, map &divergent_priors, - bool require_rollback + bool require_rollback, + const DoutPrefixProvider *dpp ) { _write_log_and_missing_wo_missing( t, km, log, coll, log_oid, divergent_priors, eversion_t::max(), eversion_t(), eversion_t(), true, true, require_rollback, - eversion_t::max(), eversion_t(), eversion_t(), nullptr); + eversion_t::max(), eversion_t(), eversion_t(), nullptr, dpp); } // static @@ -674,7 +696,8 @@ void PGLog::write_log_and_missing( const ghobject_t &log_oid, const pg_missing_tracker_t &missing, bool require_rollback, - bool *may_include_deletes_in_missing_dirty) + bool *may_include_deletes_in_missing_dirty, + const DoutPrefixProvider *dpp) { _write_log_and_missing( t, km, log, coll, log_oid, @@ -688,7 +711,7 @@ void PGLog::write_log_and_missing( eversion_t::max(), eversion_t(), eversion_t(), - may_include_deletes_in_missing_dirty, nullptr); + may_include_deletes_in_missing_dirty, nullptr, dpp); } // static @@ -707,10 +730,14 @@ void PGLog::_write_log_and_missing_wo_missing( eversion_t dirty_to_dups, eversion_t dirty_from_dups, eversion_t write_from_dups, - set *log_keys_debug + set *log_keys_debug, + const DoutPrefixProvider *dpp ) { - // dout(10) << "write_log_and_missing, clearing up to " << dirty_to << dendl; + ldpp_dout(dpp, 10) << "_write_log_and_missing_wo_missing, clearing up to " << dirty_to + << " dirty_to_dups=" << dirty_to_dups + << " dirty_from_dups=" << dirty_from_dups + << " write_from_dups=" << write_from_dups << dendl; if (touch_log) t.touch(coll, log_oid); if (dirty_to != eversion_t()) { @@ -761,6 +788,8 @@ void PGLog::_write_log_and_missing_wo_missing( if (dirty_to_dups != eversion_t()) { pg_log_dup_t min, dirty_to_dup; dirty_to_dup.version = dirty_to_dups; + ldpp_dout(dpp, 10) << __func__ << " remove 
dups min=" << min.get_key_name() + << " to dirty_to_dup=" << dirty_to_dup.get_key_name() << dendl; t.omap_rmkeyrange( coll, log_oid, min.get_key_name(), dirty_to_dup.get_key_name()); @@ -769,11 +798,16 @@ void PGLog::_write_log_and_missing_wo_missing( pg_log_dup_t max, dirty_from_dup; max.version = eversion_t::max(); dirty_from_dup.version = dirty_from_dups; + ldpp_dout(dpp, 10) << __func__ << " remove dups dirty_from_dup=" + << dirty_from_dup.get_key_name() + << " to max=" << max.get_key_name() << dendl; t.omap_rmkeyrange( coll, log_oid, dirty_from_dup.get_key_name(), max.get_key_name()); } + ldpp_dout(dpp, 10) << __func__ << " going to encode log.dups.size()=" + << log.dups.size() << dendl; for (const auto& entry : log.dups) { if (entry.version > dirty_to_dups) break; @@ -781,7 +815,8 @@ void PGLog::_write_log_and_missing_wo_missing( encode(entry, bl); (*km)[entry.get_key_name()] = std::move(bl); } - + ldpp_dout(dpp, 10) << __func__ << " 1st round encoded log.dups.size()=" + << log.dups.size() << dendl; for (auto p = log.dups.rbegin(); p != log.dups.rend() && (p->version >= dirty_from_dups || p->version >= write_from_dups) && @@ -791,9 +826,12 @@ void PGLog::_write_log_and_missing_wo_missing( encode(*p, bl); (*km)[p->get_key_name()] = std::move(bl); } + ldpp_dout(dpp, 10) << __func__ << " 2st round encoded log.dups.size()=" + << log.dups.size() << dendl; if (dirty_divergent_priors) { - //dout(10) << "write_log_and_missing: writing divergent_priors" << dendl; + ldpp_dout(dpp, 10) << "write_log_and_missing: writing divergent_priors" + << dendl; encode(divergent_priors, (*km)["divergent_priors"]); } if (require_rollback) { @@ -804,6 +842,7 @@ void PGLog::_write_log_and_missing_wo_missing( log.get_rollback_info_trimmed_to(), (*km)["rollback_info_trimmed_to"]); } + ldpp_dout(dpp, 10) << "end of " << __func__ << dendl; } // static @@ -825,8 +864,14 @@ void PGLog::_write_log_and_missing( eversion_t dirty_from_dups, eversion_t write_from_dups, bool 
*may_include_deletes_in_missing_dirty, // in/out param - set *log_keys_debug + set *log_keys_debug, + const DoutPrefixProvider *dpp ) { + ldpp_dout(dpp, 10) << __func__ << " clearing up to " << dirty_to + << " dirty_to_dups=" << dirty_to_dups + << " dirty_from_dups=" << dirty_from_dups + << " write_from_dups=" << write_from_dups + << " trimmed_dups.size()=" << trimmed_dups.size() << dendl; set to_remove; to_remove.swap(trimmed_dups); for (auto& t : trimmed) { @@ -849,7 +894,8 @@ void PGLog::_write_log_and_missing( clear_up_to(log_keys_debug, dirty_to.get_key_name()); } if (dirty_to != eversion_t::max() && dirty_from != eversion_t::max()) { - // dout(10) << "write_log_and_missing, clearing from " << dirty_from << dendl; + ldpp_dout(dpp, 10) << "write_log_and_missing, clearing from " + << dirty_from << dendl; t.omap_rmkeyrange( coll, log_oid, dirty_from.get_key_name(), eversion_t::max().get_key_name()); @@ -890,6 +936,8 @@ void PGLog::_write_log_and_missing( if (dirty_to_dups != eversion_t()) { pg_log_dup_t min, dirty_to_dup; dirty_to_dup.version = dirty_to_dups; + ldpp_dout(dpp, 10) << __func__ << " remove dups min=" << min.get_key_name() + << " to dirty_to_dup=" << dirty_to_dup.get_key_name() << dendl; t.omap_rmkeyrange( coll, log_oid, min.get_key_name(), dirty_to_dup.get_key_name()); @@ -898,11 +946,16 @@ void PGLog::_write_log_and_missing( pg_log_dup_t max, dirty_from_dup; max.version = eversion_t::max(); dirty_from_dup.version = dirty_from_dups; + ldpp_dout(dpp, 10) << __func__ << " remove dups dirty_from_dup=" + << dirty_from_dup.get_key_name() + << " to max=" << max.get_key_name() << dendl; t.omap_rmkeyrange( coll, log_oid, dirty_from_dup.get_key_name(), max.get_key_name()); } + ldpp_dout(dpp, 10) << __func__ << " going to encode log.dups.size()=" + << log.dups.size() << dendl; for (const auto& entry : log.dups) { if (entry.version > dirty_to_dups) break; @@ -910,6 +963,8 @@ void PGLog::_write_log_and_missing( encode(entry, bl); (*km)[entry.get_key_name()] = 
std::move(bl); } + ldpp_dout(dpp, 10) << __func__ << " 1st round encoded log.dups.size()=" + << log.dups.size() << dendl; for (auto p = log.dups.rbegin(); p != log.dups.rend() && @@ -920,9 +975,12 @@ void PGLog::_write_log_and_missing( encode(*p, bl); (*km)[p->get_key_name()] = std::move(bl); } + ldpp_dout(dpp, 10) << __func__ << " 2st round encoded log.dups.size()=" + << log.dups.size() << dendl; if (clear_divergent_priors) { - //dout(10) << "write_log_and_missing: writing divergent_priors" << dendl; + ldpp_dout(dpp, 10) << "write_log_and_missing: writing divergent_priors" + << dendl; to_remove.insert("divergent_priors"); } // since we encode individual missing items instead of a whole @@ -952,6 +1010,7 @@ void PGLog::_write_log_and_missing( if (!to_remove.empty()) t.omap_rmkeys(coll, log_oid, to_remove); + ldpp_dout(dpp, 10) << "end of " << __func__ << dendl; } void PGLog::rebuild_missing_set_with_deletes( diff --git a/ceph/src/osd/PGLog.h b/ceph/src/osd/PGLog.h index 25c88217c..5fe78e784 100644 --- a/ceph/src/osd/PGLog.h +++ b/ceph/src/osd/PGLog.h @@ -1334,7 +1334,8 @@ public: pg_log_t &log, const coll_t& coll, const ghobject_t &log_oid, std::map &divergent_priors, - bool require_rollback); + bool require_rollback, + const DoutPrefixProvider *dpp = nullptr); static void write_log_and_missing( ObjectStore::Transaction& t, @@ -1344,7 +1345,8 @@ public: const ghobject_t &log_oid, const pg_missing_tracker_t &missing, bool require_rollback, - bool *rebuilt_missing_set_with_deletes); + bool *rebuilt_missing_set_with_deletes, + const DoutPrefixProvider *dpp = nullptr); static void _write_log_and_missing_wo_missing( ObjectStore::Transaction& t, @@ -1361,7 +1363,8 @@ public: eversion_t dirty_to_dups, eversion_t dirty_from_dups, eversion_t write_from_dups, - std::set *log_keys_debug + std::set *log_keys_debug, + const DoutPrefixProvider *dpp = nullptr ); static void _write_log_and_missing( @@ -1382,7 +1385,8 @@ public: eversion_t dirty_from_dups, eversion_t 
write_from_dups, bool *may_include_deletes_in_missing_dirty, - std::set *log_keys_debug + std::set *log_keys_debug, + const DoutPrefixProvider *dpp = nullptr ); void read_log_and_missing( @@ -1395,7 +1399,7 @@ public: bool debug_verify_stored_missing = false ) { return read_log_and_missing( - store, ch, pgmeta_oid, info, + cct, store, ch, pgmeta_oid, info, log, missing, oss, tolerate_divergent_missing_log, &clear_divergent_priors, @@ -1406,6 +1410,7 @@ public: template static void read_log_and_missing( + CephContext *cct, ObjectStore *store, ObjectStore::CollectionHandle &ch, ghobject_t pgmeta_oid, @@ -1419,8 +1424,9 @@ public: std::set *log_keys_debug = nullptr, bool debug_verify_stored_missing = false ) { - ldpp_dout(dpp, 20) << "read_log_and_missing coll " << ch->cid + ldpp_dout(dpp, 10) << "read_log_and_missing coll " << ch->cid << " " << pgmeta_oid << dendl; + size_t total_dups = 0; // legacy? struct stat st; @@ -1438,6 +1444,7 @@ public: missing.may_include_deletes = false; std::list entries; std::list dups; + const auto NUM_DUPS_WARN_THRESHOLD = 2*cct->_conf->osd_pg_log_dups_tracked; if (p) { using ceph::decode; for (p->seek_to_first(); p->valid() ; p->next()) { @@ -1469,11 +1476,20 @@ public: } missing.add(oid, std::move(item)); } else if (p->key().substr(0, 4) == std::string("dup_")) { + ++total_dups; pg_log_dup_t dup; decode(dup, bp); if (!dups.empty()) { ceph_assert(dups.back().version < dup.version); } + if (dups.size() == NUM_DUPS_WARN_THRESHOLD) { + ldpp_dout(dpp, 0) << "read_log_and_missing WARN num of dups exceeded " + << NUM_DUPS_WARN_THRESHOLD << "." + << " You can be hit by THE DUPS BUG" + << " https://tracker.ceph.com/issues/53729." 
+ << " Consider ceph-objectstore-tool --op trim-pg-log-dups" + << dendl; + } dups.push_back(dup); } else { pg_log_entry_t e; @@ -1653,7 +1669,9 @@ public: (*clear_divergent_priors) = false; missing.flush(); } - ldpp_dout(dpp, 10) << "read_log_and_missing done" << dendl; + ldpp_dout(dpp, 10) << "read_log_and_missing done coll " << ch->cid + << " total_dups=" << total_dups + << " log.dups.size()=" << log.dups.size() << dendl; } // static read_log_and_missing #ifdef WITH_SEASTAR diff --git a/ceph/src/osd/PeeringState.cc b/ceph/src/osd/PeeringState.cc index 68b8d2225..19abf2021 100644 --- a/ceph/src/osd/PeeringState.cc +++ b/ceph/src/osd/PeeringState.cc @@ -1213,14 +1213,14 @@ void PeeringState::proc_lease_ack(int from, const pg_lease_ack_t& a) was_min = true; } acting_readable_until_ub[i] = a.readable_until_ub; - break; } + break; } } if (was_min) { auto old_ru = readable_until; recalc_readable_until(); - if (now < old_ru) { + if (now >= old_ru) { pl->recheck_readable(); } } diff --git a/ceph/src/osd/PrimaryLogPG.cc b/ceph/src/osd/PrimaryLogPG.cc index f7b15a5f8..c86f40bb3 100644 --- a/ceph/src/osd/PrimaryLogPG.cc +++ b/ceph/src/osd/PrimaryLogPG.cc @@ -7114,7 +7114,11 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) goto fail; } pg_t raw_pg; - get_osdmap()->object_locator_to_pg(target_name, target_oloc, raw_pg); + result = get_osdmap()->object_locator_to_pg(target_name, target_oloc, raw_pg); + if (result < 0) { + dout(5) << " pool information is invalid: " << result << dendl; + break; + } hobject_t target(target_name, target_oloc.key, target_snapid, raw_pg.ps(), raw_pg.pool(), target_oloc.nspace); @@ -7263,7 +7267,11 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) pg_t raw_pg; chunk_info_t chunk_info; - get_osdmap()->object_locator_to_pg(tgt_name, tgt_oloc, raw_pg); + result = get_osdmap()->object_locator_to_pg(tgt_name, tgt_oloc, raw_pg); + if (result < 0) { + dout(5) << " pool information is invalid: " << result << dendl; + break; + } 
hobject_t target(tgt_name, tgt_oloc.key, snapid_t(), raw_pg.ps(), raw_pg.pool(), tgt_oloc.nspace); @@ -10589,7 +10597,10 @@ int PrimaryLogPG::do_cdc(const object_info_t& oi, for (auto p : cdc_chunks) { bufferlist chunk; chunk.substr_of(bl, p.first, p.second); - hobject_t target = get_fpoid_from_chunk(oi.soid, chunk); + auto [ret, target] = get_fpoid_from_chunk(oi.soid, chunk); + if (ret < 0) { + return ret; + } chunks[p.first] = std::move(chunk); chunk_map[p.first] = chunk_info_t(0, p.second, target); total_length += p.second; @@ -10597,11 +10608,12 @@ int PrimaryLogPG::do_cdc(const object_info_t& oi, return total_length; } -hobject_t PrimaryLogPG::get_fpoid_from_chunk(const hobject_t soid, bufferlist& chunk) +std::pair PrimaryLogPG::get_fpoid_from_chunk( + const hobject_t soid, bufferlist& chunk) { pg_pool_t::fingerprint_t fp_algo = pool.info.get_fingerprint_type(); if (fp_algo == pg_pool_t::TYPE_FINGERPRINT_NONE) { - return hobject_t(); + return make_pair(-EINVAL, hobject_t()); } object_t fp_oid = [&fp_algo, &chunk]() -> string { switch (fp_algo) { @@ -10622,11 +10634,14 @@ hobject_t PrimaryLogPG::get_fpoid_from_chunk(const hobject_t soid, bufferlist& c oloc.pool = pool.info.get_dedup_tier(); // check if dedup_tier isn't set ceph_assert(oloc.pool > 0); - get_osdmap()->object_locator_to_pg(fp_oid, oloc, raw_pg); + int ret = get_osdmap()->object_locator_to_pg(fp_oid, oloc, raw_pg); + if (ret < 0) { + return make_pair(ret, hobject_t()); + } hobject_t target(fp_oid, oloc.key, snapid_t(), raw_pg.ps(), raw_pg.pool(), oloc.nspace); - return target; + return make_pair(0, target); } int PrimaryLogPG::finish_set_dedup(hobject_t oid, int r, ceph_tid_t tid, uint64_t offset) diff --git a/ceph/src/osd/PrimaryLogPG.h b/ceph/src/osd/PrimaryLogPG.h index b92c46bf4..4a54e6ebe 100644 --- a/ceph/src/osd/PrimaryLogPG.h +++ b/ceph/src/osd/PrimaryLogPG.h @@ -1458,7 +1458,7 @@ protected: int do_cdc(const object_info_t& oi, std::map& chunk_map, std::map& chunks); int 
start_dedup(OpRequestRef op, ObjectContextRef obc); - hobject_t get_fpoid_from_chunk(const hobject_t soid, bufferlist& chunk); + std::pair get_fpoid_from_chunk(const hobject_t soid, bufferlist& chunk); int finish_set_dedup(hobject_t oid, int r, ceph_tid_t tid, uint64_t offset); int finish_set_manifest_refcount(hobject_t oid, int r, ceph_tid_t tid, uint64_t offset); diff --git a/ceph/src/osd/SnapMapper.cc b/ceph/src/osd/SnapMapper.cc index 07c6f57c7..804213b1f 100644 --- a/ceph/src/osd/SnapMapper.cc +++ b/ceph/src/osd/SnapMapper.cc @@ -660,6 +660,38 @@ bool SnapMapper::is_legacy_mapping(const string &to_test) LEGACY_MAPPING_PREFIX; } +/* Octopus modified the SnapMapper key format from + * + * __ + * + * to + * + * ___ + * + * We can't reconstruct the new key format just from the value since the + * Mapping object contains an hobject rather than a ghobject. Instead, + * we exploit the fact that the new format is identical starting at . + * + * Note that the original version of this conversion introduced in 94ebe0ea + * had a crucial bug which essentially destroyed legacy keys by mapping + * them to + * + * __ + * + * without the object-unique suffix. 
+ * See https://tracker.ceph.com/issues/56147 + */ +std::string SnapMapper::convert_legacy_key( + const std::string& old_key, + const bufferlist& value) +{ + auto old = from_raw(make_pair(old_key, value)); + std::string object_suffix = old_key.substr( + SnapMapper::LEGACY_MAPPING_PREFIX.length()); + return SnapMapper::MAPPING_PREFIX + std::to_string(old.second.pool) + + "_" + object_suffix; +} + int SnapMapper::convert_legacy( CephContext *cct, ObjectStore *store, @@ -681,13 +713,9 @@ int SnapMapper::convert_legacy( while (iter->valid()) { bool valid = SnapMapper::is_legacy_mapping(iter->key()); if (valid) { - SnapMapper::Mapping m; - bufferlist bl(iter->value()); - auto bp = bl.cbegin(); - decode(m, bp); to_set.emplace( - SnapMapper::get_prefix(m.hoid.pool, m.snap), - bl); + convert_legacy_key(iter->key(), iter->value()), + iter->value()); ++n; iter->next(); } diff --git a/ceph/src/osd/SnapMapper.h b/ceph/src/osd/SnapMapper.h index f8c2aff1b..90b0c7c8d 100644 --- a/ceph/src/osd/SnapMapper.h +++ b/ceph/src/osd/SnapMapper.h @@ -100,6 +100,7 @@ public: * particular snap will group under up to 8 prefixes. */ class SnapMapper { + friend class MapperVerifier; public: CephContext* cct; struct object_snaps { @@ -174,6 +175,10 @@ public: void run(); }; + static std::string convert_legacy_key( + const std::string& old_key, + const bufferlist& value); + static int convert_legacy( CephContext *cct, ObjectStore *store, diff --git a/ceph/src/osd/osd_types.cc b/ceph/src/osd/osd_types.cc index e794d78a2..3a9259140 100644 --- a/ceph/src/osd/osd_types.cc +++ b/ceph/src/osd/osd_types.cc @@ -5219,7 +5219,8 @@ static void _handle_dups(CephContext* cct, pg_log_t &target, const pg_log_t &oth { auto earliest_dup_version = target.head.version < maxdups ? 
0u : target.head.version - maxdups + 1; - lgeneric_subdout(cct, osd, 20) << "copy_up_to/copy_after earliest_dup_version " << earliest_dup_version << dendl; + lgeneric_subdout(cct, osd, 20) << __func__ << " earliest_dup_version " + << earliest_dup_version << dendl; for (auto d = other.dups.cbegin(); d != other.dups.cend(); ++d) { if (d->version.version >= earliest_dup_version) { @@ -5249,7 +5250,9 @@ void pg_log_t::copy_after(CephContext* cct, const pg_log_t &other, eversion_t v) can_rollback_to = other.can_rollback_to; head = other.head; tail = other.tail; - lgeneric_subdout(cct, osd, 20) << __func__ << " v " << v << dendl; + lgeneric_subdout(cct, osd, 20) << __func__ << " v " << v + << " dups.size()=" << dups.size() + << " other.dups.size()=" << other.dups.size() << dendl; for (auto i = other.log.crbegin(); i != other.log.crend(); ++i) { ceph_assert(i->version > other.tail); if (i->version <= v) { @@ -5261,6 +5264,9 @@ void pg_log_t::copy_after(CephContext* cct, const pg_log_t &other, eversion_t v) log.push_front(*i); } _handle_dups(cct, *this, other, cct->_conf->osd_pg_log_dups_tracked); + lgeneric_subdout(cct, osd, 20) << __func__ << " END v " << v + << " dups.size()=" << dups.size() + << " other.dups.size()=" << other.dups.size() << dendl; } void pg_log_t::copy_up_to(CephContext* cct, const pg_log_t &other, int max) @@ -5269,7 +5275,9 @@ void pg_log_t::copy_up_to(CephContext* cct, const pg_log_t &other, int max) int n = 0; head = other.head; tail = other.tail; - lgeneric_subdout(cct, osd, 20) << __func__ << " max " << max << dendl; + lgeneric_subdout(cct, osd, 20) << __func__ << " max " << max + << " dups.size()=" << dups.size() + << " other.dups.size()=" << other.dups.size() << dendl; for (auto i = other.log.crbegin(); i != other.log.crend(); ++i) { ceph_assert(i->version > other.tail); if (n++ >= max) { @@ -5280,6 +5288,9 @@ void pg_log_t::copy_up_to(CephContext* cct, const pg_log_t &other, int max) log.push_front(*i); } _handle_dups(cct, *this, other, 
cct->_conf->osd_pg_log_dups_tracked); + lgeneric_subdout(cct, osd, 20) << __func__ << " END max " << max + << " dups.size()=" << dups.size() + << " other.dups.size()=" << other.dups.size() << dendl; } ostream& pg_log_t::print(ostream& out) const diff --git a/ceph/src/osd/osd_types_fmt.h b/ceph/src/osd/osd_types_fmt.h index deac85c5a..23c0e8a3b 100644 --- a/ceph/src/osd/osd_types_fmt.h +++ b/ceph/src/osd/osd_types_fmt.h @@ -104,3 +104,14 @@ struct fmt::formatter { return fmt::format_to(ctx.out(), ")"); } }; + +template <> +struct fmt::formatter { + constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); } + + template + auto format(const pg_t& pg, FormatContext& ctx) + { + return fmt::format_to(ctx.out(), "{}.{:x}", pg.pool(), pg.m_seed); + } +}; diff --git a/ceph/src/osd/scheduler/mClockScheduler.cc b/ceph/src/osd/scheduler/mClockScheduler.cc index 848712713..0f2abf87b 100644 --- a/ceph/src/osd/scheduler/mClockScheduler.cc +++ b/ceph/src/osd/scheduler/mClockScheduler.cc @@ -345,33 +345,33 @@ void mClockScheduler::set_profile_config() static_cast(op_scheduler_class::background_best_effort)]; // Set external client params - cct->_conf.set_val("osd_mclock_scheduler_client_res", + cct->_conf.set_val_default("osd_mclock_scheduler_client_res", std::to_string(client.res)); - cct->_conf.set_val("osd_mclock_scheduler_client_wgt", + cct->_conf.set_val_default("osd_mclock_scheduler_client_wgt", std::to_string(client.wgt)); - cct->_conf.set_val("osd_mclock_scheduler_client_lim", + cct->_conf.set_val_default("osd_mclock_scheduler_client_lim", std::to_string(client.lim)); dout(10) << __func__ << " client QoS params: " << "[" << client.res << "," << client.wgt << "," << client.lim << "]" << dendl; // Set background recovery client params - cct->_conf.set_val("osd_mclock_scheduler_background_recovery_res", + cct->_conf.set_val_default("osd_mclock_scheduler_background_recovery_res", std::to_string(rec.res)); - 
cct->_conf.set_val("osd_mclock_scheduler_background_recovery_wgt", + cct->_conf.set_val_default("osd_mclock_scheduler_background_recovery_wgt", std::to_string(rec.wgt)); - cct->_conf.set_val("osd_mclock_scheduler_background_recovery_lim", + cct->_conf.set_val_default("osd_mclock_scheduler_background_recovery_lim", std::to_string(rec.lim)); dout(10) << __func__ << " Recovery QoS params: " << "[" << rec.res << "," << rec.wgt << "," << rec.lim << "]" << dendl; // Set background best effort client params - cct->_conf.set_val("osd_mclock_scheduler_background_best_effort_res", + cct->_conf.set_val_default("osd_mclock_scheduler_background_best_effort_res", std::to_string(best_effort.res)); - cct->_conf.set_val("osd_mclock_scheduler_background_best_effort_wgt", + cct->_conf.set_val_default("osd_mclock_scheduler_background_best_effort_wgt", std::to_string(best_effort.wgt)); - cct->_conf.set_val("osd_mclock_scheduler_background_best_effort_lim", + cct->_conf.set_val_default("osd_mclock_scheduler_background_best_effort_lim", std::to_string(best_effort.lim)); dout(10) << __func__ << " Best effort QoS params: " << "[" << best_effort.res << "," << best_effort.wgt << "," << best_effort.lim diff --git a/ceph/src/osd/scrubber/pg_scrubber.cc b/ceph/src/osd/scrubber/pg_scrubber.cc index 25e4a83d9..220d2933c 100644 --- a/ceph/src/osd/scrubber/pg_scrubber.cc +++ b/ceph/src/osd/scrubber/pg_scrubber.cc @@ -127,7 +127,7 @@ bool PgScrubber::verify_against_abort(epoch_t epoch_to_verify) << " vs last-aborted: " << m_last_aborted << dendl; // if we were not aware of the abort before - kill the scrub. 
- if (epoch_to_verify > m_last_aborted) { + if (epoch_to_verify >= m_last_aborted) { scrub_clear_state(); m_last_aborted = std::max(epoch_to_verify, m_epoch_start); } @@ -146,9 +146,7 @@ bool PgScrubber::should_abort() const dout(10) << "nodeep_scrub set, aborting" << dendl; return true; } - } - - if (get_osdmap()->test_flag(CEPH_OSDMAP_NOSCRUB) || + } else if (get_osdmap()->test_flag(CEPH_OSDMAP_NOSCRUB) || m_pg->pool.info.has_flag(pg_pool_t::FLAG_NOSCRUB)) { dout(10) << "noscrub set, aborting" << dendl; return true; @@ -1619,8 +1617,8 @@ void PgScrubber::handle_scrub_reserve_grant(OpRequestRef op, pg_shard_t from) if (m_reservations.has_value()) { m_reservations->handle_reserve_grant(op, from); } else { - derr << __func__ << ": received unsolicited reservation grant from osd " << from - << " (" << op << ")" << dendl; + dout(20) << __func__ << ": late/unsolicited reservation grant from osd " + << from << " (" << op << ")" << dendl; } } @@ -2130,6 +2128,10 @@ PgScrubber::PgScrubber(PG* pg) void PgScrubber::set_scrub_begin_time() { scrub_begin_stamp = ceph_clock_now(); + m_osds->clog->debug() << fmt::format( + "{} {} starts", + m_pg->info.pgid.pgid, + m_mode_desc); } void PgScrubber::set_scrub_duration() diff --git a/ceph/src/pybind/cephfs/cephfs.pyx b/ceph/src/pybind/cephfs/cephfs.pyx index 96125ba2e..71a6a3f04 100644 --- a/ceph/src/pybind/cephfs/cephfs.pyx +++ b/ceph/src/pybind/cephfs/cephfs.pyx @@ -22,8 +22,11 @@ import os import time from typing import Any, Dict, Optional -AT_NO_ATTR_SYNC = 0x4000 -AT_SYMLINK_NOFOLLOW = 0x100 +AT_SYMLINK_NOFOLLOW = 0x0100 +AT_STATX_SYNC_TYPE = 0x6000 +AT_STATX_SYNC_AS_STAT = 0x0000 +AT_STATX_FORCE_SYNC = 0x2000 +AT_STATX_DONT_SYNC = 0x4000 cdef int AT_SYMLINK_NOFOLLOW_CDEF = AT_SYMLINK_NOFOLLOW CEPH_STATX_BASIC_STATS = 0x7ff cdef int CEPH_STATX_BASIC_STATS_CDEF = CEPH_STATX_BASIC_STATS @@ -177,7 +180,8 @@ class NotDirectory(OSError): class DiskQuotaExceeded(OSError): pass - +class PermissionDenied(OSError): + pass cdef 
errno_to_exception = { CEPHFS_EPERM : PermissionError, @@ -193,6 +197,7 @@ cdef errno_to_exception = { CEPHFS_ENOTEMPTY : ObjectNotEmpty, CEPHFS_ENOTDIR : NotDirectory, CEPHFS_EDQUOT : DiskQuotaExceeded, + CEPHFS_EACCES : PermissionDenied, } @@ -938,7 +943,7 @@ cdef class LibCephFS(object): with nogil: ret = ceph_opendir(self.cluster, _path, &handle); if ret < 0: - raise make_ex(ret, "opendir failed") + raise make_ex(ret, "opendir failed at {}".format(path.decode('utf-8'))) d = DirResult() d.lib = self d.handle = handle @@ -1941,7 +1946,7 @@ cdef class LibCephFS(object): :param path: the file or directory to get the statistics of. :param mask: want bitfield of CEPH_STATX_* flags showing designed attributes. - :param flag: bitfield that can be used to set AT_* modifier flags (only AT_NO_ATTR_SYNC and AT_SYMLINK_NOFOLLOW) + :param flag: bitfield that can be used to set AT_* modifier flags (AT_STATX_SYNC_AS_STAT, AT_STATX_FORCE_SYNC, AT_STATX_DONT_SYNC and AT_SYMLINK_NOFOLLOW) """ self.require_state("mounted") diff --git a/ceph/src/pybind/mgr/cephadm/inventory.py b/ceph/src/pybind/mgr/cephadm/inventory.py index dbdedd477..04385a4fa 100644 --- a/ceph/src/pybind/mgr/cephadm/inventory.py +++ b/ceph/src/pybind/mgr/cephadm/inventory.py @@ -1,15 +1,17 @@ import datetime +import enum from copy import copy import ipaddress import json import logging +import math import socket from typing import TYPE_CHECKING, Dict, List, Iterator, Optional, Any, Tuple, Set, Mapping, cast, \ NamedTuple, Type import orchestrator from ceph.deployment import inventory -from ceph.deployment.service_spec import ServiceSpec, PlacementSpec +from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, TunedProfileSpec from ceph.utils import str_to_datetime, datetime_to_str, datetime_now from orchestrator import OrchestratorError, HostSpec, OrchestratorEvent, service_to_daemon_types from cephadm.services.cephadmservice import CephadmDaemonDeploySpec @@ -28,6 +30,12 @@ SPEC_STORE_PREFIX = 
"spec." AGENT_CACHE_PREFIX = 'agent.' +class HostCacheStatus(enum.Enum): + stray = 'stray' + host = 'host' + devices = 'devices' + + class Inventory: """ The inventory stores a HostSpec for all hosts persistently. @@ -397,6 +405,80 @@ class ClientKeyringStore(): self.save() +class TunedProfileStore(): + """ + Store for out tuned profile information + """ + + def __init__(self, mgr: "CephadmOrchestrator") -> None: + self.mgr: CephadmOrchestrator = mgr + self.mgr = mgr + self.profiles: Dict[str, TunedProfileSpec] = {} + + def __contains__(self, profile: str) -> bool: + return profile in self.profiles + + def load(self) -> None: + c = self.mgr.get_store('tuned_profiles') or b'{}' + j = json.loads(c) + for k, v in j.items(): + self.profiles[k] = TunedProfileSpec.from_json(v) + self.profiles[k]._last_updated = datetime_to_str(datetime_now()) + + def exists(self, profile_name: str) -> bool: + return profile_name in self.profiles + + def save(self) -> None: + profiles_json = {k: v.to_json() for k, v in self.profiles.items()} + self.mgr.set_store('tuned_profiles', json.dumps(profiles_json)) + + def add_setting(self, profile: str, setting: str, value: str) -> None: + if profile in self.profiles: + self.profiles[profile].settings[setting] = value + self.profiles[profile]._last_updated = datetime_to_str(datetime_now()) + self.save() + else: + logger.error( + f'Attempted to set setting "{setting}" for nonexistent os tuning profile "{profile}"') + + def rm_setting(self, profile: str, setting: str) -> None: + if profile in self.profiles: + if setting in self.profiles[profile].settings: + self.profiles[profile].settings.pop(setting, '') + self.profiles[profile]._last_updated = datetime_to_str(datetime_now()) + self.save() + else: + logger.error( + f'Attemped to remove nonexistent setting "{setting}" from os tuning profile "{profile}"') + else: + logger.error( + f'Attempted to remove setting "{setting}" from nonexistent os tuning profile "{profile}"') + + def add_profile(self, 
spec: TunedProfileSpec) -> None: + spec._last_updated = datetime_to_str(datetime_now()) + self.profiles[spec.profile_name] = spec + self.save() + + def rm_profile(self, profile: str) -> None: + if profile in self.profiles: + self.profiles.pop(profile, TunedProfileSpec('')) + else: + logger.error(f'Attempted to remove nonexistent os tuning profile "{profile}"') + self.save() + + def last_updated(self, profile: str) -> Optional[datetime.datetime]: + if profile not in self.profiles or not self.profiles[profile]._last_updated: + return None + return str_to_datetime(self.profiles[profile]._last_updated) + + def set_last_updated(self, profile: str, new_datetime: datetime.datetime) -> None: + if profile in self.profiles: + self.profiles[profile]._last_updated = datetime_to_str(new_datetime) + + def list_profiles(self) -> List[TunedProfileSpec]: + return [p for p in self.profiles.values()] + + class HostCache(): """ HostCache stores different things: @@ -443,6 +525,7 @@ class HostCache(): self.last_network_update = {} # type: Dict[str, datetime.datetime] self.last_device_update = {} # type: Dict[str, datetime.datetime] self.last_device_change = {} # type: Dict[str, datetime.datetime] + self.last_tuned_profile_update = {} # type: Dict[str, datetime.datetime] self.daemon_refresh_queue = [] # type: List[str] self.device_refresh_queue = [] # type: List[str] self.network_refresh_queue = [] # type: List[str] @@ -463,7 +546,9 @@ class HostCache(): # type: () -> None for k, v in self.mgr.get_store_prefix(HOST_CACHE_PREFIX).items(): host = k[len(HOST_CACHE_PREFIX):] - if host not in self.mgr.inventory: + if self._get_host_cache_entry_status(host) != HostCacheStatus.host: + if self._get_host_cache_entry_status(host) == HostCacheStatus.devices: + continue self.mgr.log.warning('removing stray HostCache host record %s' % ( host)) self.mgr.set_store(k, None) @@ -482,14 +567,16 @@ class HostCache(): self.daemons[host] = {} self.osdspec_previews[host] = [] self.osdspec_last_applied[host] 
= {} - self.devices[host] = [] self.networks[host] = {} self.daemon_config_deps[host] = {} for name, d in j.get('daemons', {}).items(): self.daemons[host][name] = \ orchestrator.DaemonDescription.from_json(d) + self.devices[host] = [] + # still want to check old device location for upgrade scenarios for d in j.get('devices', []): self.devices[host].append(inventory.Device.from_json(d)) + self.devices[host] += self.load_host_devices(host) self.networks[host] = j.get('networks_and_interfaces', {}) self.osdspec_previews[host] = j.get('osdspec_previews', {}) self.last_client_files[host] = j.get('last_client_files', {}) @@ -503,6 +590,9 @@ class HostCache(): } if 'last_host_check' in j: self.last_host_check[host] = str_to_datetime(j['last_host_check']) + if 'last_tuned_profile_update' in j: + self.last_tuned_profile_update[host] = str_to_datetime( + j['last_tuned_profile_update']) self.registry_login_queue.add(host) self.scheduled_daemon_actions[host] = j.get('scheduled_daemon_actions', {}) self.metadata_up_to_date[host] = j.get('metadata_up_to_date', False) @@ -517,6 +607,23 @@ class HostCache(): host, e)) pass + def _get_host_cache_entry_status(self, host: str) -> HostCacheStatus: + # return whether a host cache entry in the config-key + # store is for a host, a set of devices or is stray. + # for a host, the entry name will match a hostname in our + # inventory. For devices, it will be formatted + # .devices. where is + # in out inventory. If neither case applies, it is stray + if host in self.mgr.inventory: + return HostCacheStatus.host + try: + # try stripping off the ".devices." 
and see if we get + # a host name that matches our inventory + actual_host = '.'.join(host.split('.')[:-2]) + return HostCacheStatus.devices if actual_host in self.mgr.inventory else HostCacheStatus.stray + except Exception: + return HostCacheStatus.stray + def update_host_daemons(self, host, dm): # type: (str, Dict[str, orchestrator.DaemonDescription]) -> None self.daemons[host] = dm @@ -668,12 +775,11 @@ class HostCache(): j['last_network_update'] = datetime_to_str(self.last_network_update[host]) if host in self.last_device_change: j['last_device_change'] = datetime_to_str(self.last_device_change[host]) + if host in self.last_tuned_profile_update: + j['last_tuned_profile_update'] = datetime_to_str(self.last_tuned_profile_update[host]) if host in self.daemons: for name, dd in self.daemons[host].items(): j['daemons'][name] = dd.to_json() - if host in self.devices: - for d in self.devices[host]: - j['devices'].append(d.to_json()) if host in self.networks: j['networks_and_interfaces'] = self.networks[host] if host in self.daemon_config_deps: @@ -697,9 +803,71 @@ class HostCache(): j['scheduled_daemon_actions'] = self.scheduled_daemon_actions[host] if host in self.metadata_up_to_date: j['metadata_up_to_date'] = self.metadata_up_to_date[host] + if host in self.devices: + self.save_host_devices(host) self.mgr.set_store(HOST_CACHE_PREFIX + host, json.dumps(j)) + def save_host_devices(self, host: str) -> None: + if host not in self.devices or not self.devices[host]: + logger.debug(f'Host {host} has no devices to save') + return + + devs: List[Dict[str, Any]] = [] + for d in self.devices[host]: + devs.append(d.to_json()) + + def byte_len(s: str) -> int: + return len(s.encode('utf-8')) + + dev_cache_counter: int = 0 + cache_size: int = self.mgr.get_foreign_ceph_option('mon', 'mon_config_key_max_entry_size') + if cache_size is not None and cache_size != 0 and byte_len(json.dumps(devs)) > cache_size - 1024: + # no guarantee all device entries take up the same amount of space 
+ # splitting it up so there's one more entry than we need should be fairly + # safe and save a lot of extra logic checking sizes + cache_entries_needed = math.ceil(byte_len(json.dumps(devs)) / cache_size) + 1 + dev_sublist_size = math.ceil(len(devs) / cache_entries_needed) + dev_lists: List[List[Dict[str, Any]]] = [devs[i:i + dev_sublist_size] + for i in range(0, len(devs), dev_sublist_size)] + for dev_list in dev_lists: + dev_dict: Dict[str, Any] = {'devices': dev_list} + if dev_cache_counter == 0: + dev_dict.update({'entries': len(dev_lists)}) + self.mgr.set_store(HOST_CACHE_PREFIX + host + '.devices.' + + str(dev_cache_counter), json.dumps(dev_dict)) + dev_cache_counter += 1 + else: + self.mgr.set_store(HOST_CACHE_PREFIX + host + '.devices.' + + str(dev_cache_counter), json.dumps({'devices': devs, 'entries': 1})) + + def load_host_devices(self, host: str) -> List[inventory.Device]: + dev_cache_counter: int = 0 + devs: List[Dict[str, Any]] = [] + dev_entries: int = 0 + try: + # number of entries for the host's devices should be in + # the "entries" field of the first entry + dev_entries = json.loads(self.mgr.get_store( + HOST_CACHE_PREFIX + host + '.devices.0')).get('entries') + except Exception: + logger.debug(f'No device entries found for host {host}') + for i in range(dev_entries): + try: + new_devs = json.loads(self.mgr.get_store( + HOST_CACHE_PREFIX + host + '.devices.' + str(i))).get('devices', []) + if len(new_devs) > 0: + # verify list contains actual device objects by trying to load one from json + inventory.Device.from_json(new_devs[0]) + # if we didn't throw an Exception on above line, we can add the devices + devs = devs + new_devs + dev_cache_counter += 1 + except Exception as e: + logger.error(('Hit exception trying to load devices from ' + + f'{HOST_CACHE_PREFIX + host + ".devices." 
+ str(dev_cache_counter)} in key store: {e}')) + return [] + return [inventory.Device.from_json(d) for d in devs] + def rm_host(self, host): # type: (str) -> None if host in self.daemons: @@ -728,6 +896,8 @@ class HostCache(): del self.last_network_update[host] if host in self.last_device_change: del self.last_device_change[host] + if host in self.last_tuned_profile_update: + del self.last_tuned_profile_update[host] if host in self.daemon_config_deps: del self.daemon_config_deps[host] if host in self.scheduled_daemon_actions: @@ -769,6 +939,15 @@ class HostCache(): h for h in self.mgr.inventory.all_specs() if '_no_schedule' not in h.labels ] + def get_draining_hosts(self) -> List[HostSpec]: + """ + Returns all hosts that have _no_schedule label and therefore should have + no daemons placed on them, but are potentially still reachable + """ + return [ + h for h in self.mgr.inventory.all_specs() if '_no_schedule' in h.labels + ] + def get_unreachable_hosts(self) -> List[HostSpec]: """ Return all hosts that are offline or in maintenance mode. @@ -911,6 +1090,24 @@ class HostCache(): return True return False + def host_needs_tuned_profile_update(self, host: str, profile: str) -> bool: + if host in self.mgr.offline_hosts: + logger.debug(f'Host "{host}" marked as offline. Cannot apply tuned profile') + return False + if profile not in self.mgr.tuned_profiles: + logger.debug( + f'Cannot apply tuned profile {profile} on host {host}. Profile does not exist') + return False + if host not in self.last_tuned_profile_update: + return True + last_profile_update = self.mgr.tuned_profiles.last_updated(profile) + if last_profile_update is None: + self.mgr.tuned_profiles.set_last_updated(profile, datetime_now()) + return True + if self.last_tuned_profile_update[host] < last_profile_update: + return True + return False + def host_had_daemon_refresh(self, host: str) -> bool: """ ... at least once. 
diff --git a/ceph/src/pybind/mgr/cephadm/migrations.py b/ceph/src/pybind/mgr/cephadm/migrations.py index 672a895bc..69f39cb91 100644 --- a/ceph/src/pybind/mgr/cephadm/migrations.py +++ b/ceph/src/pybind/mgr/cephadm/migrations.py @@ -112,6 +112,7 @@ class Migrations: spec=spec, hosts=self.mgr.inventory.all_specs(), unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), + draining_hosts=self.mgr.cache.get_draining_hosts(), daemons=existing_daemons, ).place() diff --git a/ceph/src/pybind/mgr/cephadm/module.py b/ceph/src/pybind/mgr/cephadm/module.py index 766ea7726..b2a48801c 100644 --- a/ceph/src/pybind/mgr/cephadm/module.py +++ b/ceph/src/pybind/mgr/cephadm/module.py @@ -25,7 +25,8 @@ from ceph.deployment import inventory from ceph.deployment.drive_group import DriveGroupSpec from ceph.deployment.service_spec import \ ServiceSpec, PlacementSpec, \ - HostPlacementSpec, IngressSpec + HostPlacementSpec, IngressSpec, \ + TunedProfileSpec from ceph.utils import str_to_datetime, datetime_to_str, datetime_now from cephadm.serve import CephadmServe from cephadm.services.cephadmservice import CephadmDaemonDeploySpec @@ -56,13 +57,14 @@ from .services.monitoring import GrafanaService, AlertmanagerService, Prometheus NodeExporterService, SNMPGatewayService, LokiService, PromtailService from .schedule import HostAssignment from .inventory import Inventory, SpecStore, HostCache, AgentCache, EventStore, \ - ClientKeyringStore, ClientKeyringSpec + ClientKeyringStore, ClientKeyringSpec, TunedProfileStore from .upgrade import CephadmUpgrade from .template import TemplateMgr from .utils import CEPH_IMAGE_TYPES, RESCHEDULE_FROM_OFFLINE_HOSTS_TYPES, forall_hosts, \ cephadmNoImage, CEPH_UPGRADE_ORDER from .configchecks import CephadmConfigChecks from .offline_watcher import OfflineHostWatcher +from .tuned_profiles import TunedProfileUtils try: import asyncssh @@ -100,8 +102,8 @@ DEFAULT_LOKI_IMAGE = 'docker.io/grafana/loki:2.4.0' DEFAULT_PROMTAIL_IMAGE = 
'docker.io/grafana/promtail:2.4.0' DEFAULT_ALERT_MANAGER_IMAGE = 'quay.io/prometheus/alertmanager:v0.23.0' DEFAULT_GRAFANA_IMAGE = 'quay.io/ceph/ceph-grafana:8.3.5' -DEFAULT_HAPROXY_IMAGE = 'docker.io/library/haproxy:2.3' -DEFAULT_KEEPALIVED_IMAGE = 'docker.io/arcts/keepalived' +DEFAULT_HAPROXY_IMAGE = 'quay.io/ceph/haproxy:2.3' +DEFAULT_KEEPALIVED_IMAGE = 'quay.io/ceph/keepalived:2.1.5' DEFAULT_SNMP_GATEWAY_IMAGE = 'docker.io/maxwo/snmp-notifier:v1.2.1' # ------------------------------------------------------------------------------ @@ -505,6 +507,11 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, self.keys = ClientKeyringStore(self) self.keys.load() + self.tuned_profiles = TunedProfileStore(self) + self.tuned_profiles.load() + + self.tuned_profile_utils = TunedProfileUtils(self) + # ensure the host lists are in sync for h in self.inventory.keys(): if h not in self.cache.daemons: @@ -1756,6 +1763,34 @@ Then run the following: return f"Ceph cluster {self._cluster_fsid} on {hostname} has exited maintenance mode" + @handle_orch_error + @host_exists() + def rescan_host(self, hostname: str) -> str: + """Use cephadm to issue a disk rescan on each HBA + + Some HBAs and external enclosures don't automatically register + device insertion with the kernel, so for these scenarios we need + to manually rescan + + :param hostname: (str) host name + """ + self.log.info(f'disk rescan request sent to host "{hostname}"') + _out, _err, _code = self.wait_async(CephadmServe(self)._run_cephadm(hostname, cephadmNoImage, "disk-rescan", + [], + no_fsid=True, + error_ok=True)) + if not _err: + raise OrchestratorError('Unexpected response from cephadm disk-rescan call') + + msg = _err[0].split('\n')[-1] + log_msg = f'disk rescan: {msg}' + if msg.upper().startswith('OK'): + self.log.info(log_msg) + else: + self.log.warning(log_msg) + + return f'{msg}' + def get_minimal_ceph_conf(self) -> str: _, config, _ = self.check_mon_command({ "prefix": "config generate-minimal-conf", 
@@ -1890,6 +1925,9 @@ Then run the following: @handle_orch_error def service_action(self, action: str, service_name: str) -> List[str]: + if service_name not in self.spec_store.all_specs.keys(): + raise OrchestratorError(f'Invalid service name "{service_name}".' + + ' View currently running services using "ceph orch ls"') dds: List[DaemonDescription] = self.cache.get_daemons_by_service(service_name) if not dds: raise OrchestratorError(f'No daemons exist under service name "{service_name}".' @@ -1918,6 +1956,10 @@ Then run the following: if daemon_spec.daemon_type != 'osd': daemon_spec = self.cephadm_services[daemon_type_to_service( daemon_spec.daemon_type)].prepare_create(daemon_spec) + else: + # for OSDs, we still need to update config, just not carry out the full + # prepare_create function + daemon_spec.final_config, daemon_spec.deps = self.osd_service.generate_config(daemon_spec) return self.wait_async(CephadmServe(self)._create_daemon(daemon_spec, reconfig=(action == 'reconfig'))) actions = { @@ -2322,8 +2364,9 @@ Then run the following: else: need = { 'prometheus': ['mgr', 'alertmanager', 'node-exporter', 'ingress'], - 'grafana': ['prometheus'], + 'grafana': ['prometheus', 'loki'], 'alertmanager': ['mgr', 'alertmanager', 'snmp-gateway'], + 'promtail': ['loki'], } for dep_type in need.get(daemon_type, []): for dd in self.cache.get_daemons_by_type(dep_type): @@ -2435,6 +2478,49 @@ Then run the following: return self._apply_service_spec(cast(ServiceSpec, spec)) + @handle_orch_error + def apply_tuned_profiles(self, specs: List[TunedProfileSpec], no_overwrite: bool = False) -> str: + outs = [] + for spec in specs: + if no_overwrite and self.tuned_profiles.exists(spec.profile_name): + outs.append(f"Tuned profile '{spec.profile_name}' already exists (--no-overwrite was passed)") + else: + self.tuned_profiles.add_profile(spec) + outs.append(f'Saved tuned profile {spec.profile_name}') + self._kick_serve_loop() + return '\n'.join(outs) + + @handle_orch_error + def 
rm_tuned_profile(self, profile_name: str) -> str: + if profile_name not in self.tuned_profiles: + raise OrchestratorError( + f'Tuned profile {profile_name} does not exist. Nothing to remove.') + self.tuned_profiles.rm_profile(profile_name) + self._kick_serve_loop() + return f'Removed tuned profile {profile_name}' + + @handle_orch_error + def tuned_profile_ls(self) -> List[TunedProfileSpec]: + return self.tuned_profiles.list_profiles() + + @handle_orch_error + def tuned_profile_add_setting(self, profile_name: str, setting: str, value: str) -> str: + if profile_name not in self.tuned_profiles: + raise OrchestratorError( + f'Tuned profile {profile_name} does not exist. Cannot add setting.') + self.tuned_profiles.add_setting(profile_name, setting, value) + self._kick_serve_loop() + return f'Added setting {setting} with value {value} to tuned profile {profile_name}' + + @handle_orch_error + def tuned_profile_rm_setting(self, profile_name: str, setting: str) -> str: + if profile_name not in self.tuned_profiles: + raise OrchestratorError( + f'Tuned profile {profile_name} does not exist. 
Cannot remove setting.') + self.tuned_profiles.rm_setting(profile_name, setting) + self._kick_serve_loop() + return f'Removed setting {setting} from tuned profile {profile_name}' + def set_health_warning(self, name: str, summary: str, count: int, detail: List[str]) -> None: self.health_checks[name] = { 'severity': 'warning', @@ -2460,6 +2546,7 @@ Then run the following: spec=spec, hosts=self.cache.get_schedulable_hosts(), unreachable_hosts=self.cache.get_unreachable_hosts(), + draining_hosts=self.cache.get_draining_hosts(), networks=self.cache.networks, daemons=self.cache.get_daemons_by_service(spec.service_name()), allow_colo=svc.allow_colo(), @@ -2538,6 +2625,7 @@ Then run the following: spec=spec, hosts=self.inventory.all_specs(), # All hosts, even those without daemon refresh unreachable_hosts=self.cache.get_unreachable_hosts(), + draining_hosts=self.cache.get_draining_hosts(), networks=self.cache.networks, daemons=self.cache.get_daemons_by_service(spec.service_name()), allow_colo=self.cephadm_services[spec.service_type].allow_colo(), @@ -2770,7 +2858,10 @@ Then run the following: # trigger the serve loop to initiate the removal self._kick_serve_loop() - return "Scheduled OSD(s) for removal" + warning_zap = "" if zap else ("\nVG/LV for the OSDs won't be zapped (--zap wasn't passed).\n" + "Run the `ceph-volume lvm zap` command with `--destroy`" + " against the VG/LV if you want them to be destroyed.") + return f"Scheduled OSD(s) for removal.{warning_zap}" @handle_orch_error def stop_remove_osds(self, osd_ids: List[str]) -> str: diff --git a/ceph/src/pybind/mgr/cephadm/registry.py b/ceph/src/pybind/mgr/cephadm/registry.py index 7b293a4c1..31e5fb23e 100644 --- a/ceph/src/pybind/mgr/cephadm/registry.py +++ b/ceph/src/pybind/mgr/cephadm/registry.py @@ -39,7 +39,11 @@ class Registry: headers = {'Accept': 'application/json'} url = f'https://{self.api_domain}/v2/{image}/tags/list' while True: - r = requests.get(url, headers=headers) + try: + r = requests.get(url, 
headers=headers) + except requests.exceptions.ConnectionError as e: + msg = f"Cannot get tags from url '{url}': {e}" + raise ValueError(msg) from e if r.status_code == 401: if 'Authorization' in headers: raise ValueError('failed authentication') diff --git a/ceph/src/pybind/mgr/cephadm/schedule.py b/ceph/src/pybind/mgr/cephadm/schedule.py index 612c55804..692b4282e 100644 --- a/ceph/src/pybind/mgr/cephadm/schedule.py +++ b/ceph/src/pybind/mgr/cephadm/schedule.py @@ -1,3 +1,4 @@ +import ipaddress import hashlib import logging import random @@ -143,6 +144,7 @@ class HostAssignment(object): spec, # type: ServiceSpec hosts: List[orchestrator.HostSpec], unreachable_hosts: List[orchestrator.HostSpec], + draining_hosts: List[orchestrator.HostSpec], daemons: List[orchestrator.DaemonDescription], networks: Dict[str, Dict[str, Dict[str, List[str]]]] = {}, filter_new_host=None, # type: Optional[Callable[[str],bool]] @@ -156,6 +158,7 @@ class HostAssignment(object): self.primary_daemon_type = primary_daemon_type or spec.service_type self.hosts: List[orchestrator.HostSpec] = hosts self.unreachable_hosts: List[orchestrator.HostSpec] = unreachable_hosts + self.draining_hosts: List[orchestrator.HostSpec] = draining_hosts self.filter_new_host = filter_new_host self.service_name = spec.service_name() self.daemons = daemons @@ -189,7 +192,8 @@ class HostAssignment(object): if self.spec.placement.hosts: explicit_hostnames = {h.hostname for h in self.spec.placement.hosts} - unknown_hosts = explicit_hostnames.difference(set(self.get_hostnames())) + known_hosts = self.get_hostnames() + [h.hostname for h in self.draining_hosts] + unknown_hosts = explicit_hostnames.difference(set(known_hosts)) if unknown_hosts: raise OrchestratorValidationError( f'Cannot place {self.spec.one_line_str()} on {", ".join(sorted(unknown_hosts))}: Unknown hosts') @@ -353,19 +357,19 @@ class HostAssignment(object): for i in range(len(to_add)): to_add[i] = to_add[i].assign_rank_generation(ranks[i], self.rank_map) 
- # If we don't have the list of candidates is definitive. - if count is None: - final = existing_slots + to_add - logger.debug('Provided hosts: %s' % final) - return self.place_per_host_daemons(final, to_add, to_remove) - - logger.debug('Combine hosts with existing daemons %s + new hosts %s' % ( - existing, to_add)) + logger.debug('Combine hosts with existing daemons %s + new hosts %s' % (existing, to_add)) return self.place_per_host_daemons(existing_slots + to_add, to_add, to_remove) def find_ip_on_host(self, hostname: str, subnets: List[str]) -> Optional[str]: for subnet in subnets: ips: List[str] = [] + # following is to allow loopback interfaces for both ipv4 and ipv6. Since we + # only have the subnet (and no IP) we assume default loopback IP address. + if ipaddress.ip_network(subnet).is_loopback: + if ipaddress.ip_network(subnet).version == 4: + ips.append('127.0.0.1') + else: + ips.append('::1') for iface, iface_ips in self.networks.get(hostname, {}).get(subnet, {}).items(): ips.extend(iface_ips) if ips: @@ -378,7 +382,7 @@ class HostAssignment(object): DaemonPlacement(daemon_type=self.primary_daemon_type, hostname=h.hostname, network=h.network, name=h.name, ports=self.ports_start) - for h in self.spec.placement.hosts + for h in self.spec.placement.hosts if h.hostname not in [dh.hostname for dh in self.draining_hosts] ] elif self.spec.placement.label: ls = [ @@ -429,15 +433,17 @@ class HostAssignment(object): if len(old) > len(ls): logger.debug('Filtered %s down to %s' % (old, ls)) - # shuffle for pseudo random selection - # gen seed off of self.spec to make shuffling deterministic + # now that we have the list of nodes candidates based on the configured + # placement, let's shuffle the list for node pseudo-random selection. For this, + # we generate a seed from the service name and we use to shuffle the candidates. + # This makes shuffling deterministic for the same service name. 
seed = int( hashlib.sha1(self.spec.service_name().encode('utf-8')).hexdigest(), 16 - ) % (2 ** 32) + ) % (2 ** 32) # truncate result to 32 bits final = sorted(ls) random.Random(seed).shuffle(final) - return ls + return final def remove_non_maintenance_unreachable_candidates(self, candidates: List[DaemonPlacement]) -> List[DaemonPlacement]: in_maintenance: Dict[str, bool] = {} diff --git a/ceph/src/pybind/mgr/cephadm/serve.py b/ceph/src/pybind/mgr/cephadm/serve.py index 207f3c6ba..6ca94393e 100644 --- a/ceph/src/pybind/mgr/cephadm/serve.py +++ b/ceph/src/pybind/mgr/cephadm/serve.py @@ -1,3 +1,4 @@ +import ipaddress import hashlib import json import logging @@ -162,7 +163,7 @@ class CephadmServe: ) ret, out, err = self.mgr.mon_command({ 'prefix': 'config set', - 'who': f'osd/host:{host}', + 'who': f'osd/host:{host.split(".")[0]}', 'name': 'osd_memory_target', 'value': str(val), }) @@ -171,11 +172,15 @@ class CephadmServe: f'Unable to set osd_memory_target on {host} to {val}: {err}' ) else: - self.mgr.check_mon_command({ - 'prefix': 'config rm', - 'who': f'osd/host:{host}', - 'name': 'osd_memory_target', - }) + # if osd memory autotuning is off, we don't want to remove these config + # options as users may be using them. Since there is no way to set autotuning + # on/off at a host level, best we can do is check if it is globally on. 
+ if self.mgr.get_foreign_ceph_option('osd', 'osd_memory_target_autotune'): + self.mgr.check_mon_command({ + 'prefix': 'config rm', + 'who': f'osd/host:{host.split(".")[0]}', + 'name': 'osd_memory_target', + }) self.mgr.cache.update_autotune(host) def _refresh_hosts_and_daemons(self) -> None: @@ -515,6 +520,7 @@ class CephadmServe: len(self.mgr.apply_spec_fails), warnings) self.mgr.update_watched_hosts() + self.mgr.tuned_profile_utils._write_all_tuned_profiles() return r def _apply_service_config(self, spec: ServiceSpec) -> None: @@ -603,14 +609,19 @@ class CephadmServe: def matches_network(host): # type: (str) -> bool - # make sure we have 1 or more IPs for any of those networks on that - # host - for network in public_networks: - if len(self.mgr.cache.networks[host].get(network, [])) > 0: - return True + # make sure the host has at least one network that belongs to some configured public network(s) + for pn in public_networks: + public_network = ipaddress.ip_network(pn) + for hn in self.mgr.cache.networks[host]: + host_network = ipaddress.ip_network(hn) + if host_network.overlaps(public_network): + return True + + host_networks = ','.join(self.mgr.cache.networks[host]) + pub_networks = ','.join(public_networks) self.log.info( - f"Filtered out host {host}: does not belong to mon public_network" - f" ({','.join(public_networks)})" + f"Filtered out host {host}: does not belong to mon public_network(s): " + f" {pub_networks}, host network(s): {host_networks}" ) return False @@ -622,6 +633,7 @@ class CephadmServe: hosts=self.mgr.cache.get_non_draining_hosts() if spec.service_name( ) == 'agent' else self.mgr.cache.get_schedulable_hosts(), unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), + draining_hosts=self.mgr.cache.get_draining_hosts(), daemons=daemons, networks=self.mgr.cache.networks, filter_new_host=( @@ -1000,6 +1012,7 @@ class CephadmServe: spec=ServiceSpec('mon', placement=pspec), hosts=self.mgr.cache.get_schedulable_hosts(), 
unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), + draining_hosts=self.mgr.cache.get_draining_hosts(), daemons=[], networks=self.mgr.cache.networks, ) @@ -1030,6 +1043,7 @@ class CephadmServe: spec=ServiceSpec('mon', placement=ks.placement), hosts=self.mgr.cache.get_schedulable_hosts(), unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), + draining_hosts=self.mgr.cache.get_draining_hosts(), daemons=[], networks=self.mgr.cache.networks, ) @@ -1052,6 +1066,8 @@ class CephadmServe: client_files: Dict[str, Dict[str, Tuple[int, int, int, bytes, str]]], host: str) -> None: updated_files = False + if host in self.mgr.offline_hosts: + return old_files = self.mgr.cache.get_host_client_files(host).copy() for path, m in client_files.get(host, {}).items(): mode, uid, gid, content, digest = m @@ -1065,6 +1081,8 @@ class CephadmServe: self.mgr.cache.update_client_file(host, path, digest, mode, uid, gid) updated_files = True for path in old_files.keys(): + if path == '/etc/ceph/ceph.conf': + continue self.log.info(f'Removing {host}:{path}') cmd = ['rm', '-f', path] self.mgr.ssh.check_execute_command(host, cmd) @@ -1125,6 +1143,12 @@ class CephadmServe: except AttributeError: eca = None + if daemon_spec.service_name in self.mgr.spec_store: + configs = self.mgr.spec_store[daemon_spec.service_name].spec.custom_configs + if configs is not None: + daemon_spec.final_config.update( + {'custom_config_files': [c.to_json() for c in configs]}) + if self.mgr.cache.host_needs_registry_login(daemon_spec.host) and self.mgr.registry_url: await self._registry_login(daemon_spec.host, json.loads(str(self.mgr.get_store('registry_credentials')))) diff --git a/ceph/src/pybind/mgr/cephadm/services/cephadmservice.py b/ceph/src/pybind/mgr/cephadm/services/cephadmservice.py index 8abb0e63a..8028b27c6 100644 --- a/ceph/src/pybind/mgr/cephadm/services/cephadmservice.py +++ b/ceph/src/pybind/mgr/cephadm/services/cephadmservice.py @@ -40,7 +40,8 @@ class CephadmDaemonDeploySpec: ports: 
Optional[List[int]] = None, rank: Optional[int] = None, rank_generation: Optional[int] = None, - extra_container_args: Optional[List[str]] = None): + extra_container_args: Optional[List[str]] = None, + ): """ A data struction to encapsulate `cephadm deploy ... """ @@ -178,10 +179,6 @@ class CephadmService(metaclass=ABCMeta): rank: Optional[int] = None, rank_generation: Optional[int] = None, ) -> CephadmDaemonDeploySpec: - try: - eca = spec.extra_container_args - except AttributeError: - eca = None return CephadmDaemonDeploySpec( host=host, daemon_id=daemon_id, @@ -192,7 +189,8 @@ class CephadmService(metaclass=ABCMeta): ip=ip, rank=rank, rank_generation=rank_generation, - extra_container_args=eca, + extra_container_args=spec.extra_container_args if hasattr( + spec, 'extra_container_args') else None, ) def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: diff --git a/ceph/src/pybind/mgr/cephadm/services/ingress.py b/ceph/src/pybind/mgr/cephadm/services/ingress.py index cd5936ec6..75276535a 100644 --- a/ceph/src/pybind/mgr/cephadm/services/ingress.py +++ b/ceph/src/pybind/mgr/cephadm/services/ingress.py @@ -15,6 +15,7 @@ logger = logging.getLogger(__name__) class IngressService(CephService): TYPE = 'ingress' + MAX_KEEPALIVED_PASS_LEN = 8 def primary_daemon_type(self) -> str: return 'haproxy' @@ -77,7 +78,8 @@ class IngressService(CephService): password = self.mgr.get_store(pw_key) if password is None: if not spec.monitor_password: - password = ''.join(random.choice(string.ascii_lowercase) for _ in range(20)) + password = ''.join(random.choice(string.ascii_lowercase) + for _ in range(self.MAX_KEEPALIVED_PASS_LEN)) self.mgr.set_store(pw_key, password) else: if spec.monitor_password: @@ -98,7 +100,7 @@ class IngressService(CephService): for rank in range(num_ranks): if rank in by_rank: d = by_rank[rank] - assert(d.ports) + assert d.ports servers.append({ 'name': f"{spec.backend_service}.{rank}", 'ip': d.ip or 
resolve_ip(self.mgr.inventory.get_addr(str(d.hostname))), @@ -129,7 +131,7 @@ class IngressService(CephService): 'servers': servers, 'user': spec.monitor_user or 'admin', 'password': password, - 'ip': str(spec.virtual_ip).split('/')[0] or daemon_spec.ip or '*', + 'ip': "*" if spec.virtual_ips_list else str(spec.virtual_ip).split('/')[0] or daemon_spec.ip or '*', 'frontend_port': daemon_spec.ports[0] if daemon_spec.ports else spec.frontend_port, 'monitor_port': daemon_spec.ports[1] if daemon_spec.ports else spec.monitor_port, } @@ -176,7 +178,8 @@ class IngressService(CephService): password = self.mgr.get_store(pw_key) if password is None: if not spec.keepalived_password: - password = ''.join(random.choice(string.ascii_lowercase) for _ in range(20)) + password = ''.join(random.choice(string.ascii_lowercase) + for _ in range(self.MAX_KEEPALIVED_PASS_LEN)) self.mgr.set_store(pw_key, password) else: if spec.keepalived_password: @@ -196,15 +199,23 @@ class IngressService(CephService): hosts = sorted(list(set([host] + [str(d.hostname) for d in daemons]))) # interface - bare_ip = str(spec.virtual_ip).split('/')[0] + bare_ips = [] + if spec.virtual_ip: + bare_ips.append(str(spec.virtual_ip).split('/')[0]) + elif spec.virtual_ips_list: + bare_ips = [str(vip).split('/')[0] for vip in spec.virtual_ips_list] interface = None - for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items(): - if ifaces and ipaddress.ip_address(bare_ip) in ipaddress.ip_network(subnet): - interface = list(ifaces.keys())[0] - logger.info( - f'{bare_ip} is in {subnet} on {host} interface {interface}' - ) - break + for bare_ip in bare_ips: + for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items(): + if ifaces and ipaddress.ip_address(bare_ip) in ipaddress.ip_network(subnet): + interface = list(ifaces.keys())[0] + logger.info( + f'{bare_ip} is in {subnet} on {host} interface {interface}' + ) + break + else: # nobreak + continue + break # try to find interface by matching 
spec.virtual_interface_networks if not interface and spec.virtual_interface_networks: for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items(): @@ -230,10 +241,33 @@ class IngressService(CephService): script = f'/usr/bin/curl {build_url(scheme="http", host=d.ip or "localhost", port=port)}/health' assert script - # set state. first host in placement is master all others backups - state = 'BACKUP' - if hosts[0] == host: - state = 'MASTER' + states = [] + priorities = [] + virtual_ips = [] + + # Set state and priority. Have one master for each VIP. Or at least the first one as master if only one VIP. + if spec.virtual_ip: + virtual_ips.append(spec.virtual_ip) + if hosts[0] == host: + states.append('MASTER') + priorities.append(100) + else: + states.append('BACKUP') + priorities.append(90) + + elif spec.virtual_ips_list: + virtual_ips = spec.virtual_ips_list + if len(virtual_ips) > len(hosts): + raise OrchestratorError( + "Number of virtual IPs for ingress is greater than number of available hosts" + ) + for x in range(len(virtual_ips)): + if hosts[x] == host: + states.append('MASTER') + priorities.append(100) + else: + states.append('BACKUP') + priorities.append(90) # remove host, daemon is being deployed on from hosts list for # other_ips in conf file and converter to ips @@ -248,7 +282,9 @@ class IngressService(CephService): 'script': script, 'password': password, 'interface': interface, - 'state': state, + 'virtual_ips': virtual_ips, + 'states': states, + 'priorities': priorities, 'other_ips': other_ips, 'host_ip': resolve_ip(self.mgr.inventory.get_addr(host)), } diff --git a/ceph/src/pybind/mgr/cephadm/services/monitoring.py b/ceph/src/pybind/mgr/cephadm/services/monitoring.py index 0bd388f46..f99c79e79 100644 --- a/ceph/src/pybind/mgr/cephadm/services/monitoring.py +++ b/ceph/src/pybind/mgr/cephadm/services/monitoring.py @@ -1,6 +1,8 @@ import errno +import ipaddress import logging import os +import socket from typing import List, Any, Tuple, Dict, 
Optional, cast from urllib.parse import urlparse @@ -37,19 +39,23 @@ class GrafanaService(CephadmService): deps.append(dd.name()) - daemons = self.mgr.cache.get_daemons_by_service('mgr') + daemons = self.mgr.cache.get_daemons_by_service('loki') loki_host = '' - assert daemons is not None - if daemons != []: - assert daemons[0].hostname is not None - addr = daemons[0].ip if daemons[0].ip else self._inventory_get_fqdn(daemons[0].hostname) - loki_host = build_url(scheme='http', host=addr, port=3100) + for i, dd in enumerate(daemons): + assert dd.hostname is not None + if i == 0: + addr = dd.ip if dd.ip else self._inventory_get_fqdn(dd.hostname) + loki_host = build_url(scheme='http', host=addr, port=3100) + + deps.append(dd.name()) grafana_data_sources = self.mgr.template.render( 'services/grafana/ceph-dashboard.yml.j2', {'hosts': prom_services, 'loki_host': loki_host}) - cert = self.mgr.get_store('grafana_crt') - pkey = self.mgr.get_store('grafana_key') + cert_path = f'{daemon_spec.host}/grafana_crt' + key_path = f'{daemon_spec.host}/grafana_key' + cert = self.mgr.get_store(cert_path) + pkey = self.mgr.get_store(key_path) if cert and pkey: try: verify_tls(cert, pkey) @@ -57,9 +63,9 @@ class GrafanaService(CephadmService): logger.warning('Provided grafana TLS certificates invalid: %s', str(e)) cert, pkey = None, None if not (cert and pkey): - cert, pkey = create_self_signed_cert('Ceph', 'cephadm') - self.mgr.set_store('grafana_crt', cert) - self.mgr.set_store('grafana_key', pkey) + cert, pkey = create_self_signed_cert('Ceph', daemon_spec.host) + self.mgr.set_store(cert_path, cert) + self.mgr.set_store(key_path, pkey) if 'dashboard' in self.mgr.get('mgr_map')['modules']: self.mgr.check_mon_command({ 'prefix': 'dashboard set-grafana-api-ssl-verify', @@ -75,6 +81,10 @@ class GrafanaService(CephadmService): 'http_addr': daemon_spec.ip if daemon_spec.ip else '' }) + if 'dashboard' in self.mgr.get('mgr_map')['modules'] and spec.initial_admin_password: + 
self.mgr.check_mon_command( + {'prefix': 'dashboard set-grafana-api-password'}, inbuf=spec.initial_admin_password) + config_file = { 'files': { "grafana.ini": grafana_ini, @@ -106,6 +116,17 @@ class GrafanaService(CephadmService): service_url ) + def pre_remove(self, daemon: DaemonDescription) -> None: + """ + Called before grafana daemon is removed. + """ + if daemon.hostname is not None: + # delete cert/key entires for this grafana daemon + cert_path = f'{daemon.hostname}/grafana_crt' + key_path = f'{daemon.hostname}/grafana_key' + self.mgr.set_store(cert_path, None) + self.mgr.set_store(key_path, None) + def ok_to_stop(self, daemon_ids: List[str], force: bool = False, @@ -148,8 +169,19 @@ class AlertmanagerService(CephadmService): proto = None # http: or https: url = mgr_map.get('services', {}).get('dashboard', None) if url: - dashboard_urls.append(url.rstrip('/')) - p_result = urlparse(url) + p_result = urlparse(url.rstrip('/')) + hostname = socket.getfqdn(p_result.hostname) + + try: + ip = ipaddress.ip_address(hostname) + except ValueError: + pass + else: + if ip.version == 6: + hostname = f'[{hostname}]' + + dashboard_urls.append( + f'{p_result.scheme}://{hostname}:{p_result.port}{p_result.path}') proto = p_result.scheme port = p_result.port # scan all mgrs to generate deps and to get standbys too. @@ -345,6 +377,23 @@ class PrometheusService(CephadmService): alerts = f.read() r['files']['/etc/prometheus/alerting/ceph_alerts.yml'] = alerts + # Include custom alerts if present in key value store. This enables the + # users to add custom alerts. Write the file in any case, so that if the + # content of the key value store changed, that file is overwritten + # (emptied in case they value has been removed from the key value + # store). This prevents the necessity to adapt `cephadm` binary to + # remove the file. + # + # Don't use the template engine for it as + # + # 1. the alerts are always static and + # 2. 
they are a template themselves for the Go template engine, which + # use curly braces and escaping that is cumbersome and unnecessary + # for the user. + # + r['files']['/etc/prometheus/alerting/custom_alerts.yml'] = \ + self.mgr.get_store('services/prometheus/alerting/custom_alerts.yml', '') + return r, sorted(deps) def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription: @@ -432,14 +481,18 @@ class PromtailService(CephadmService): def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: assert self.TYPE == daemon_spec.daemon_type deps: List[str] = [] - hostnames: List[str] = [] - for dd in self.mgr.cache.get_daemons_by_service('mgr'): + + daemons = self.mgr.cache.get_daemons_by_service('loki') + loki_host = '' + for i, dd in enumerate(daemons): assert dd.hostname is not None - addr = self.mgr.inventory.get_addr(dd.hostname) - hostnames.append(addr) + if i == 0: + loki_host = dd.ip if dd.ip else self._inventory_get_fqdn(dd.hostname) + + deps.append(dd.name()) + context = { - 'hostnames': hostnames, - 'client_hostname': hostnames[0], + 'client_hostname': loki_host, } yml = self.mgr.template.render('services/promtail.yml.j2', context) diff --git a/ceph/src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2 b/ceph/src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2 index 4caeebf51..f560c9756 100644 --- a/ceph/src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2 +++ b/ceph/src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2 @@ -7,11 +7,12 @@ vrrp_script check_backend { fall 2 } -vrrp_instance VI_0 { - state {{ state }} - priority 100 +{% for x in range(virtual_ips|length) %} +vrrp_instance VI_{{ x }} { + state {{ states[x] }} + priority {{ priorities[x] }} interface {{ interface }} - virtual_router_id 51 + virtual_router_id {{ 50 + x }} advert_int 1 authentication { auth_type PASS @@ -24,9 +25,10 @@ vrrp_instance VI_0 { {% endfor 
%} } virtual_ipaddress { - {{ spec.virtual_ip }} dev {{ interface }} + {{ virtual_ips[x] }} dev {{ interface }} } track_script { check_backend } } +{% endfor %} diff --git a/ceph/src/pybind/mgr/cephadm/templates/services/promtail.yml.j2 b/ceph/src/pybind/mgr/cephadm/templates/services/promtail.yml.j2 index f500f5d22..5ce7a3103 100644 --- a/ceph/src/pybind/mgr/cephadm/templates/services/promtail.yml.j2 +++ b/ceph/src/pybind/mgr/cephadm/templates/services/promtail.yml.j2 @@ -12,10 +12,6 @@ clients: scrape_configs: - job_name: system static_configs: - - targets: -{% for url in hostnames %} - - {{ url }} -{% endfor %} - labels: + - labels: job: Cluster Logs - __path__: /var/log/ceph/**/*.log + __path__: /var/log/ceph/**/*.log \ No newline at end of file diff --git a/ceph/src/pybind/mgr/cephadm/tests/fixtures.py b/ceph/src/pybind/mgr/cephadm/tests/fixtures.py index 7a4ac0d87..0567f7f7e 100644 --- a/ceph/src/pybind/mgr/cephadm/tests/fixtures.py +++ b/ceph/src/pybind/mgr/cephadm/tests/fixtures.py @@ -99,7 +99,8 @@ def with_cephadm_module(module_options=None, store=None): mock.patch("cephadm.agent.CephadmAgentHelpers._apply_agent", return_value=False), \ mock.patch("cephadm.agent.CephadmAgentHelpers._agent_down", return_value=False), \ mock.patch('cephadm.agent.CherryPyThread.run'), \ - mock.patch('cephadm.offline_watcher.OfflineHostWatcher.run'): + mock.patch('cephadm.offline_watcher.OfflineHostWatcher.run'), \ + mock.patch('cephadm.tuned_profiles.TunedProfileUtils._remove_stray_tuned_profiles'): m = CephadmOrchestrator.__new__(CephadmOrchestrator) if module_options is not None: diff --git a/ceph/src/pybind/mgr/cephadm/tests/test_cephadm.py b/ceph/src/pybind/mgr/cephadm/tests/test_cephadm.py index d8eb76b43..354ee338a 100644 --- a/ceph/src/pybind/mgr/cephadm/tests/test_cephadm.py +++ b/ceph/src/pybind/mgr/cephadm/tests/test_cephadm.py @@ -7,6 +7,7 @@ import pytest from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection from cephadm.serve import 
CephadmServe +from cephadm.inventory import HostCacheStatus from cephadm.services.osd import OSD, OSDRemovalQueue, OsdIdClaims try: @@ -15,7 +16,8 @@ except ImportError: pass from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \ - NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec, CustomContainerSpec, MDSSpec + NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec, CustomContainerSpec, MDSSpec, \ + CustomConfig from ceph.deployment.drive_selection.selector import DriveSelection from ceph.deployment.inventory import Devices, Device from ceph.utils import datetime_to_str, datetime_now @@ -474,6 +476,37 @@ class TestCephadm(object): image='', ) + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + def test_custom_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + test_cert = ['-----BEGIN PRIVATE KEY-----', + 'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg', + 'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=', + '-----END PRIVATE KEY-----', + '-----BEGIN CERTIFICATE-----', + 'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg', + 'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=', + '-----END CERTIFICATE-----'] + configs = [ + CustomConfig(content='something something something', + mount_path='/etc/test.conf'), + CustomConfig(content='\n'.join(test_cert), mount_path='/usr/share/grafana/thing.crt') + ] + conf_outs = [json.dumps(c.to_json()) for c in configs] + stdin_str = '{' + \ + f'"config": "", "keyring": "", "custom_config_files": [{conf_outs[0]}, {conf_outs[1]}]' + '}' + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, ServiceSpec(service_type='crash', custom_configs=configs), CephadmOrchestrator.apply_crash): + _run_cephadm.assert_called_with( + 'test', 'crash.test', 'deploy', [ + '--name', 'crash.test', + '--meta-json', '{"service_name": "crash", "ports": [], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, 
"extra_container_args": null}', + '--config-json', '-', + ], + stdin=stdin_str, + image='', + ) + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator): with with_host(cephadm_module, 'test'): @@ -1153,7 +1186,9 @@ class TestCephadm(object): @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option") @mock.patch("cephadm.serve.CephadmServe._run_cephadm") - def test_invalid_config_option_health_warning(self, _run_cephadm, get_foreign_ceph_option, cephadm_module: CephadmOrchestrator): + @mock.patch("cephadm.module.HostCache.save_host_devices") + def test_invalid_config_option_health_warning(self, _save_devs, _run_cephadm, get_foreign_ceph_option, cephadm_module: CephadmOrchestrator): + _save_devs.return_value = None _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) with with_host(cephadm_module, 'test'): ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1) @@ -1167,6 +1202,144 @@ class TestCephadm(object): assert 'Ignoring invalid mgr config option test' in cephadm_module.health_checks[ 'CEPHADM_INVALID_CONFIG_OPTION']['detail'] + @mock.patch("cephadm.module.CephadmOrchestrator.get_foreign_ceph_option") + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") + @mock.patch("cephadm.module.CephadmOrchestrator.set_store") + def test_save_devices(self, _set_store, _run_cephadm, _get_foreign_ceph_option, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + entry_size = 65536 # default 64k size + _get_foreign_ceph_option.return_value = entry_size + + class FakeDev(): + def __init__(self, c: str = 'a'): + # using 1015 here makes the serialized string exactly 1024 bytes if c is one char + self.content = {c: c * 1015} + + def to_json(self): + return self.content + + def from_json(self, stuff): + return json.loads(stuff) + + def byte_len(s): + return len(s.encode('utf-8')) + + with with_host(cephadm_module, 
'test'): + fake_devices = [FakeDev()] * 100 # should be ~100k + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2 + cephadm_module.cache.update_host_devices('test', fake_devices) + cephadm_module.cache.save_host_devices('test') + expected_calls = [ + mock.call('host.test.devices.0', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 34], 'entries': 3})), + mock.call('host.test.devices.1', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 34]})), + mock.call('host.test.devices.2', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 32]})), + ] + _set_store.assert_has_calls(expected_calls) + + fake_devices = [FakeDev()] * 300 # should be ~300k + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size * 4 + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 5 + cephadm_module.cache.update_host_devices('test', fake_devices) + cephadm_module.cache.save_host_devices('test') + expected_calls = [ + mock.call('host.test.devices.0', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50], 'entries': 6})), + mock.call('host.test.devices.1', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50]})), + mock.call('host.test.devices.2', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50]})), + mock.call('host.test.devices.3', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50]})), + mock.call('host.test.devices.4', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50]})), + mock.call('host.test.devices.5', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 50]})), + ] + _set_store.assert_has_calls(expected_calls) + + fake_devices = [FakeDev()] * 62 # should be ~62k, just under cache size + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size + cephadm_module.cache.update_host_devices('test', 
fake_devices) + cephadm_module.cache.save_host_devices('test') + expected_calls = [ + mock.call('host.test.devices.0', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 62], 'entries': 1})), + ] + _set_store.assert_has_calls(expected_calls) + + # should be ~64k but just over so it requires more entries + fake_devices = [FakeDev()] * 64 + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2 + cephadm_module.cache.update_host_devices('test', fake_devices) + cephadm_module.cache.save_host_devices('test') + expected_calls = [ + mock.call('host.test.devices.0', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 22], 'entries': 3})), + mock.call('host.test.devices.1', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 22]})), + mock.call('host.test.devices.2', json.dumps( + {'devices': [d.to_json() for d in [FakeDev()] * 20]})), + ] + _set_store.assert_has_calls(expected_calls) + + # test for actual content being correct using differing devices + entry_size = 3072 + _get_foreign_ceph_option.return_value = entry_size + fake_devices = [FakeDev('a'), FakeDev('b'), FakeDev('c'), FakeDev('d'), FakeDev('e')] + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) > entry_size + assert byte_len(json.dumps([d.to_json() for d in fake_devices])) < entry_size * 2 + cephadm_module.cache.update_host_devices('test', fake_devices) + cephadm_module.cache.save_host_devices('test') + expected_calls = [ + mock.call('host.test.devices.0', json.dumps( + {'devices': [d.to_json() for d in [FakeDev('a'), FakeDev('b')]], 'entries': 3})), + mock.call('host.test.devices.1', json.dumps( + {'devices': [d.to_json() for d in [FakeDev('c'), FakeDev('d')]]})), + mock.call('host.test.devices.2', json.dumps( + {'devices': [d.to_json() for d in [FakeDev('e')]]})), + ] + _set_store.assert_has_calls(expected_calls) + + 
@mock.patch("cephadm.module.CephadmOrchestrator.get_store") + def test_load_devices(self, _get_store, cephadm_module: CephadmOrchestrator): + def _fake_store(key): + if key == 'host.test.devices.0': + return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 9], 'entries': 3}) + elif key == 'host.test.devices.1': + return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 7]}) + elif key == 'host.test.devices.2': + return json.dumps({'devices': [d.to_json() for d in [Device('/path')] * 4]}) + else: + raise Exception(f'Get store with unexpected value {key}') + + _get_store.side_effect = _fake_store + devs = cephadm_module.cache.load_host_devices('test') + assert devs == [Device('/path')] * 20 + + @mock.patch("cephadm.module.Inventory.__contains__") + def test_check_stray_host_cache_entry(self, _contains, cephadm_module: CephadmOrchestrator): + def _fake_inv(key): + if key in ['host1', 'node02', 'host.something.com']: + return True + return False + + _contains.side_effect = _fake_inv + assert cephadm_module.cache._get_host_cache_entry_status('host1') == HostCacheStatus.host + assert cephadm_module.cache._get_host_cache_entry_status( + 'host.something.com') == HostCacheStatus.host + assert cephadm_module.cache._get_host_cache_entry_status( + 'node02.devices.37') == HostCacheStatus.devices + assert cephadm_module.cache._get_host_cache_entry_status( + 'host.something.com.devices.0') == HostCacheStatus.devices + assert cephadm_module.cache._get_host_cache_entry_status('hostXXX') == HostCacheStatus.stray + assert cephadm_module.cache._get_host_cache_entry_status( + 'host.nothing.com') == HostCacheStatus.stray + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) @mock.patch("cephadm.services.nfs.NFSService.run_grace_tool", mock.MagicMock()) @mock.patch("cephadm.services.nfs.NFSService.purge", mock.MagicMock()) diff --git a/ceph/src/pybind/mgr/cephadm/tests/test_scheduling.py 
b/ceph/src/pybind/mgr/cephadm/tests/test_scheduling.py index c70ef9fb5..fcdee838b 100644 --- a/ceph/src/pybind/mgr/cephadm/tests/test_scheduling.py +++ b/ceph/src/pybind/mgr/cephadm/tests/test_scheduling.py @@ -133,6 +133,7 @@ def run_scheduler_test(results, mk_spec, hosts, daemons, key_elems): spec=spec, hosts=hosts, unreachable_hosts=[], + draining_hosts=[], daemons=daemons, ).place() if isinstance(host_res, list): @@ -149,6 +150,7 @@ def run_scheduler_test(results, mk_spec, hosts, daemons, key_elems): spec=spec, hosts=hosts, unreachable_hosts=[], + draining_hosts=[], daemons=daemons ).place() @@ -157,39 +159,6 @@ def run_scheduler_test(results, mk_spec, hosts, daemons, key_elems): assert_res(e) -# * first match from the top wins -# * where e=[], *=any -# -# + list of known hosts available for scheduling (host_key) -# | + hosts used for explict placement (explicit_key) -# | | + count -# | | | + section (host, label, pattern) -# | | | | + expected result -# | | | | | -test_explicit_scheduler_results = [ - (k("* * 0 *"), error(SpecValidationError, 'num/count must be >= 1')), - (k("* e N l"), error(OrchestratorValidationError, 'Cannot place : No matching hosts for label mylabel')), - (k("* e N p"), error(OrchestratorValidationError, 'Cannot place : No matching hosts')), - (k("* e N h"), error(OrchestratorValidationError, 'placement spec is empty: no hosts, no label, no pattern, no count')), - (k("* e * *"), none), - (k("1 12 * h"), error(OrchestratorValidationError, "Cannot place on 2: Unknown hosts")), - (k("1 123 * h"), error(OrchestratorValidationError, "Cannot place on 2, 3: Unknown hosts")), - (k("1 * * *"), exactly('1')), - (k("12 1 * *"), exactly('1')), - (k("12 12 1 *"), one_of('1', '2')), - (k("12 12 * *"), exactly('1', '2')), - (k("12 123 * h"), error(OrchestratorValidationError, "Cannot place on 3: Unknown hosts")), - (k("12 123 1 *"), one_of('1', '2', '3')), - (k("12 123 * *"), two_of('1', '2', '3')), - (k("123 1 * *"), exactly('1')), - (k("123 12 1 *"), 
one_of('1', '2')), - (k("123 12 * *"), exactly('1', '2')), - (k("123 123 1 *"), one_of('1', '2', '3')), - (k("123 123 2 *"), two_of('1', '2', '3')), - (k("123 123 * *"), exactly('1', '2', '3')), -] - - @pytest.mark.parametrize("dp,n,result", [ # noqa: E128 ( @@ -240,6 +209,39 @@ def test_daemon_placement_match(dp, dd, result): assert dp.matches_daemon(dd) == result +# * first match from the top wins +# * where e=[], *=any +# +# + list of known hosts available for scheduling (host_key) +# | + hosts used for explict placement (explicit_key) +# | | + count +# | | | + section (host, label, pattern) +# | | | | + expected result +# | | | | | +test_explicit_scheduler_results = [ + (k("* * 0 *"), error(SpecValidationError, 'num/count must be >= 1')), + (k("* e N l"), error(OrchestratorValidationError, 'Cannot place : No matching hosts for label mylabel')), + (k("* e N p"), error(OrchestratorValidationError, 'Cannot place : No matching hosts')), + (k("* e N h"), error(OrchestratorValidationError, 'placement spec is empty: no hosts, no label, no pattern, no count')), + (k("* e * *"), none), + (k("1 12 * h"), error(OrchestratorValidationError, "Cannot place on 2: Unknown hosts")), + (k("1 123 * h"), error(OrchestratorValidationError, "Cannot place on 2, 3: Unknown hosts")), + (k("1 * * *"), exactly('1')), + (k("12 1 * *"), exactly('1')), + (k("12 12 1 *"), one_of('1', '2')), + (k("12 12 * *"), exactly('1', '2')), + (k("12 123 * h"), error(OrchestratorValidationError, "Cannot place on 3: Unknown hosts")), + (k("12 123 1 *"), one_of('1', '2', '3')), + (k("12 123 * *"), two_of('1', '2', '3')), + (k("123 1 * *"), exactly('1')), + (k("123 12 1 *"), one_of('1', '2')), + (k("123 12 * *"), exactly('1', '2')), + (k("123 123 1 *"), one_of('1', '2', '3')), + (k("123 123 2 *"), two_of('1', '2', '3')), + (k("123 123 * *"), exactly('1', '2', '3')), +] + + @pytest.mark.parametrize("spec_section_key,spec_section", [ # noqa: E128 ('h', 'hosts'), @@ -655,8 +657,8 @@ class 
NodeAssignmentTest(NamedTuple): [], {}, {0: {0: None}, 1: {0: None}, 2: {0: None}}, - ['nfs:host1(rank=0.0)', 'nfs:host2(rank=1.0)', 'nfs:host3(rank=2.0)'], - ['nfs:host1(rank=0.0)', 'nfs:host2(rank=1.0)', 'nfs:host3(rank=2.0)'], + ['nfs:host3(rank=0.0)', 'nfs:host2(rank=1.0)', 'nfs:host1(rank=2.0)'], + ['nfs:host3(rank=0.0)', 'nfs:host2(rank=1.0)', 'nfs:host1(rank=2.0)'], [] ), # 21: ranked, exist @@ -669,8 +671,8 @@ class NodeAssignmentTest(NamedTuple): ], {0: {1: '0.1'}}, {0: {1: '0.1'}, 1: {0: None}, 2: {0: None}}, - ['nfs:host1(rank=0.1)', 'nfs:host2(rank=1.0)', 'nfs:host3(rank=2.0)'], - ['nfs:host2(rank=1.0)', 'nfs:host3(rank=2.0)'], + ['nfs:host1(rank=0.1)', 'nfs:host3(rank=1.0)', 'nfs:host2(rank=2.0)'], + ['nfs:host3(rank=1.0)', 'nfs:host2(rank=2.0)'], [] ), # ranked, exist, different ranks @@ -778,8 +780,8 @@ class NodeAssignmentTest(NamedTuple): ], {0: {2: '0.2'}, 1: {2: '1.2', 3: '1.3'}}, {0: {2: '0.2'}, 1: {2: '1.2', 3: '1.3', 4: None}}, - ['nfs:host1(rank=0.2)', 'nfs:host2(rank=1.4)'], - ['nfs:host2(rank=1.4)'], + ['nfs:host1(rank=0.2)', 'nfs:host3(rank=1.4)'], + ['nfs:host3(rank=1.4)'], ['nfs.1.2'] ), # ranked, not enough hosts @@ -841,6 +843,7 @@ def test_node_assignment(service_type, placement, hosts, daemons, rank_map, post spec=spec, hosts=[HostSpec(h, labels=['foo']) for h in hosts], unreachable_hosts=[], + draining_hosts=[], daemons=daemons, allow_colo=allow_colo, rank_map=rank_map, @@ -871,6 +874,79 @@ def test_node_assignment(service_type, placement, hosts, daemons, rank_map, post assert sorted([d.name() for d in to_remove]) == sorted(expected_remove) +class NodeAssignmentTest5(NamedTuple): + service_type: str + placement: PlacementSpec + available_hosts: List[str] + candidates_hosts: List[str] + + +@pytest.mark.parametrize("service_type, placement, available_hosts, expected_candidates", + [ # noqa: E128 + NodeAssignmentTest5( + 'alertmanager', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 
'host3 host1 host4 host2'.split(), + ), + NodeAssignmentTest5( + 'prometheus', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host3 host2 host4 host1'.split(), + ), + NodeAssignmentTest5( + 'grafana', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host1 host2 host4 host3'.split(), + ), + NodeAssignmentTest5( + 'mgr', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host4 host2 host1 host3'.split(), + ), + NodeAssignmentTest5( + 'mon', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host1 host3 host4 host2'.split(), + ), + NodeAssignmentTest5( + 'rgw', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host1 host3 host2 host4'.split(), + ), + NodeAssignmentTest5( + 'cephfs-mirror', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + 'host4 host3 host1 host2'.split(), + ), + ]) +def test_node_assignment_random_shuffle(service_type, placement, available_hosts, expected_candidates): + spec = None + service_id = None + allow_colo = False + spec = ServiceSpec(service_type=service_type, + service_id=service_id, + placement=placement) + + candidates = HostAssignment( + spec=spec, + hosts=[HostSpec(h, labels=['foo']) for h in available_hosts], + unreachable_hosts=[], + draining_hosts=[], + daemons=[], + allow_colo=allow_colo, + ).get_candidates() + + candidates_hosts = [h.hostname for h in candidates] + assert candidates_hosts == expected_candidates + + class NodeAssignmentTest2(NamedTuple): service_type: str placement: PlacementSpec @@ -947,6 +1023,7 @@ def test_node_assignment2(service_type, placement, hosts, spec=ServiceSpec(service_type, placement=placement), hosts=[HostSpec(h, labels=['foo']) for h in hosts], unreachable_hosts=[], + draining_hosts=[], daemons=daemons, ).place() 
assert len(hosts) == expected_len @@ -981,6 +1058,7 @@ def test_node_assignment3(service_type, placement, hosts, spec=ServiceSpec(service_type, placement=placement), hosts=[HostSpec(h) for h in hosts], unreachable_hosts=[], + draining_hosts=[], daemons=daemons, ).place() assert len(hosts) == expected_len @@ -1078,6 +1156,7 @@ def test_node_assignment4(spec, networks, daemons, spec=spec, hosts=[HostSpec(h, labels=['foo']) for h in networks.keys()], unreachable_hosts=[], + draining_hosts=[], daemons=daemons, allow_colo=True, networks=networks, @@ -1164,6 +1243,7 @@ def test_bad_specs(service_type, placement, hosts, daemons, expected): spec=ServiceSpec(service_type, placement=placement), hosts=[HostSpec(h) for h in hosts], unreachable_hosts=[], + draining_hosts=[], daemons=daemons, ).place() assert str(e.value) == expected @@ -1340,6 +1420,7 @@ def test_active_assignment(service_type, placement, hosts, daemons, expected, ex spec=spec, hosts=[HostSpec(h) for h in hosts], unreachable_hosts=[], + draining_hosts=[], daemons=daemons, ).place() assert sorted([h.hostname for h in hosts]) in expected @@ -1437,6 +1518,7 @@ def test_unreachable_host(service_type, placement, hosts, unreachable_hosts, dae spec=spec, hosts=[HostSpec(h) for h in hosts], unreachable_hosts=[HostSpec(h) for h in unreachable_hosts], + draining_hosts=[], daemons=daemons, ).place() assert sorted([h.hostname for h in to_add]) in expected_add @@ -1513,6 +1595,76 @@ def test_remove_from_offline(service_type, placement, hosts, maintenance_hosts, spec=spec, hosts=host_specs, unreachable_hosts=[h for h in host_specs if h.status], + draining_hosts=[], + daemons=daemons, + ).place() + assert sorted([h.hostname for h in to_add]) in expected_add + assert sorted([h.name() for h in to_remove]) in expected_remove + + +class DrainExplicitPlacementTest(NamedTuple): + service_type: str + placement: PlacementSpec + hosts: List[str] + maintenance_hosts: List[str] + offline_hosts: List[str] + draining_hosts: List[str] + 
daemons: List[DaemonDescription] + expected_add: List[List[str]] + expected_remove: List[List[str]] + + +@pytest.mark.parametrize("service_type,placement,hosts,maintenance_hosts,offline_hosts,draining_hosts,daemons,expected_add,expected_remove", + [ + DrainExplicitPlacementTest( + 'crash', + PlacementSpec(hosts='host1 host2 host3'.split()), + 'host1 host2 host3 host4'.split(), + [], + [], + ['host3'], + [ + DaemonDescription('crash', 'host1', 'host1'), + DaemonDescription('crash', 'host2', 'host2'), + DaemonDescription('crash', 'host3', 'host3'), + ], + [[]], + [['crash.host3']], + ), + DrainExplicitPlacementTest( + 'crash', + PlacementSpec(hosts='host1 host2 host3 host4'.split()), + 'host1 host2 host3 host4'.split(), + [], + [], + ['host1', 'host4'], + [ + DaemonDescription('crash', 'host1', 'host1'), + DaemonDescription('crash', 'host3', 'host3'), + ], + [['host2']], + [['crash.host1']], + ), + ]) +def test_drain_from_explict_placement(service_type, placement, hosts, maintenance_hosts, offline_hosts, draining_hosts, daemons, expected_add, expected_remove): + + spec = ServiceSpec(service_type=service_type, + service_id='test', + placement=placement) + + host_specs = [HostSpec(h) for h in hosts] + draining_host_specs = [HostSpec(h) for h in draining_hosts] + for h in host_specs: + if h.hostname in offline_hosts: + h.status = 'offline' + if h.hostname in maintenance_hosts: + h.status = 'maintenance' + + hosts, to_add, to_remove = HostAssignment( + spec=spec, + hosts=host_specs, + unreachable_hosts=[h for h in host_specs if h.status], + draining_hosts=draining_host_specs, daemons=daemons, ).place() assert sorted([h.hostname for h in to_add]) in expected_add diff --git a/ceph/src/pybind/mgr/cephadm/tests/test_services.py b/ceph/src/pybind/mgr/cephadm/tests/test_services.py index e59e95c7b..98dcc850f 100644 --- a/ceph/src/pybind/mgr/cephadm/tests/test_services.py +++ b/ceph/src/pybind/mgr/cephadm/tests/test_services.py @@ -238,40 +238,116 @@ class TestISCSIService: 
class TestMonitoring: + def _get_config(self, url: str) -> str: + return f""" + # This file is generated by cephadm. + # See https://prometheus.io/docs/alerting/configuration/ for documentation. + + global: + resolve_timeout: 5m + http_config: + tls_config: + insecure_skip_verify: true + + route: + receiver: 'default' + routes: + - group_by: ['alertname'] + group_wait: 10s + group_interval: 10s + repeat_interval: 1h + receiver: 'ceph-dashboard' + + receivers: + - name: 'default' + webhook_configs: + - name: 'ceph-dashboard' + webhook_configs: + - url: '{url}/api/prometheus_receiver' + """ + @patch("cephadm.serve.CephadmServe._run_cephadm") - def test_alertmanager_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + @patch("mgr_module.MgrModule.get") + def test_alertmanager_config(self, mock_get, _run_cephadm, + cephadm_module: CephadmOrchestrator): _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + mock_get.return_value = {"services": {"dashboard": "http://[::1]:8080"}} with with_host(cephadm_module, 'test'): with with_service(cephadm_module, AlertManagerSpec()): + y = dedent(self._get_config('http://localhost:8080')).lstrip() + _run_cephadm.assert_called_with( + 'test', + 'alertmanager.test', + 'deploy', + [ + '--name', 'alertmanager.test', + '--meta-json', '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}', + '--config-json', '-', '--tcp-ports', '9093 9094' + ], + stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}), + image='') - y = dedent(""" - # This file is generated by cephadm. - # See https://prometheus.io/docs/alerting/configuration/ for documentation. 
+ @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("mgr_module.MgrModule.get") + def test_alertmanager_config_v6(self, mock_get, _run_cephadm, + cephadm_module: CephadmOrchestrator): + dashboard_url = "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080" + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + mock_get.return_value = {"services": {"dashboard": dashboard_url}} - global: - resolve_timeout: 5m - http_config: - tls_config: - insecure_skip_verify: true - - route: - receiver: 'default' - routes: - - group_by: ['alertname'] - group_wait: 10s - group_interval: 10s - repeat_interval: 1h - receiver: 'ceph-dashboard' - - receivers: - - name: 'default' - webhook_configs: - - name: 'ceph-dashboard' - webhook_configs: - - url: 'http://[::1]:8080/api/prometheus_receiver' - """).lstrip() + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, AlertManagerSpec()): + y = dedent(self._get_config(dashboard_url)).lstrip() + _run_cephadm.assert_called_with( + 'test', + 'alertmanager.test', + 'deploy', + [ + '--name', 'alertmanager.test', + '--meta-json', + '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}', + '--config-json', '-', '--tcp-ports', '9093 9094' + ], + stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}), + image='') + + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("mgr_module.MgrModule.get") + @patch("socket.getfqdn") + def test_alertmanager_config_v6_fqdn(self, mock_getfqdn, mock_get, _run_cephadm, + cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + mock_getfqdn.return_value = "mgr.test.fqdn" + mock_get.return_value = {"services": { + "dashboard": "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080"}} + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, AlertManagerSpec()): + y = 
dedent(self._get_config("http://mgr.test.fqdn:8080")).lstrip() + _run_cephadm.assert_called_with( + 'test', + 'alertmanager.test', + 'deploy', + [ + '--name', 'alertmanager.test', + '--meta-json', + '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}', + '--config-json', '-', '--tcp-ports', '9093 9094' + ], + stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}), + image='') + + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("mgr_module.MgrModule.get") + def test_alertmanager_config_v4(self, mock_get, _run_cephadm, cephadm_module: CephadmOrchestrator): + dashboard_url = "http://192.168.0.123:8080" + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + mock_get.return_value = {"services": {"dashboard": dashboard_url}} + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, AlertManagerSpec()): + y = dedent(self._get_config(dashboard_url)).lstrip() _run_cephadm.assert_called_with( 'test', 'alertmanager.test', @@ -282,8 +358,32 @@ class TestMonitoring: '--config-json', '-', '--tcp-ports', '9093 9094' ], stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}), - image='')\ + image='') + + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("mgr_module.MgrModule.get") + @patch("socket.getfqdn") + def test_alertmanager_config_v4_fqdn(self, mock_getfqdn, mock_get, _run_cephadm, + cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + mock_getfqdn.return_value = "mgr.test.fqdn" + mock_get.return_value = {"services": {"dashboard": "http://192.168.0.123:8080"}} + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, AlertManagerSpec()): + y = dedent(self._get_config("http://mgr.test.fqdn:8080")).lstrip() + _run_cephadm.assert_called_with( + 'test', + 'alertmanager.test', + 'deploy', + [ + '--name', 'alertmanager.test', + 
'--meta-json', + '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}', + '--config-json', '-', '--tcp-ports', '9093 9094' + ], + stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}), + image='') @patch("cephadm.serve.CephadmServe._run_cephadm") def test_prometheus_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): @@ -326,7 +426,8 @@ class TestMonitoring: '--config-json', '-', '--tcp-ports', '9095' ], - stdin=json.dumps({"files": {"prometheus.yml": y}}), + stdin=json.dumps({"files": {"prometheus.yml": y, + "/etc/prometheus/alerting/custom_alerts.yml": ""}}), image='') @patch("cephadm.serve.CephadmServe._run_cephadm") @@ -398,14 +499,12 @@ class TestMonitoring: filename: /tmp/positions.yaml clients: - - url: http://1::4:3100/loki/api/v1/push + - url: http://:3100/loki/api/v1/push scrape_configs: - job_name: system static_configs: - - targets: - - 1::4 - labels: + - labels: job: Cluster Logs __path__: /var/log/ceph/**/*.log""").lstrip() @@ -430,8 +529,8 @@ class TestMonitoring: _run_cephadm.side_effect = async_side_effect(("{}", "", 0)) with with_host(cephadm_module, "test"): - cephadm_module.set_store("grafana_crt", "c") - cephadm_module.set_store("grafana_key", "k") + cephadm_module.set_store("test/grafana_crt", "c") + cephadm_module.set_store("test/grafana_key", "k") with with_service( cephadm_module, MonitoringSpec("prometheus") ) as _, with_service(cephadm_module, ServiceSpec("mgr")) as _, with_service( @@ -483,7 +582,7 @@ class TestMonitoring: type: 'loki' access: 'proxy' orgId: 2 - url: 'http://[1::4]:3100' + url: '' basicAuth: false isDefault: true editable: false""").lstrip(), @@ -551,7 +650,7 @@ class TestMonitoring: " type: 'loki'\n" " access: 'proxy'\n" ' orgId: 2\n' - " url: 'http://[1::4]:3100'\n" + " url: ''\n" ' basicAuth: false\n' ' isDefault: true\n' ' editable: false', @@ -827,7 +926,7 @@ class TestIngressService: 
'state MASTER\n ' 'priority 100\n ' 'interface if0\n ' - 'virtual_router_id 51\n ' + 'virtual_router_id 50\n ' 'advert_int 1\n ' 'authentication {\n ' 'auth_type PASS\n ' @@ -841,7 +940,7 @@ class TestIngressService: '}\n ' 'track_script {\n ' 'check_backend\n }\n' - '}' + '}\n' } } @@ -905,6 +1004,130 @@ class TestIngressService: assert haproxy_generated_conf[0] == haproxy_expected_conf + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_ingress_config_multi_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test'): + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.4/32'] + } + }) + + # Check the ingress with multiple VIPs + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), + rgw_frontend_type='beast') + + ispec = IngressSpec(service_type='ingress', + service_id='test', + backend_service='rgw.foo', + frontend_port=8089, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_interface_networks=['1.2.3.0/24'], + virtual_ips_list=["1.2.3.4/32"]) + with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _: + # generate the keepalived conf based on the specified spec + # Test with only 1 IP on the list, as it will fail with more VIPS but only one host. 
+ keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name())) + + keepalived_expected_conf = { + 'files': + { + 'keepalived.conf': + '# This file is generated by cephadm.\n' + 'vrrp_script check_backend {\n ' + 'script "/usr/bin/curl http://localhost:8999/health"\n ' + 'weight -20\n ' + 'interval 2\n ' + 'rise 2\n ' + 'fall 2\n}\n\n' + 'vrrp_instance VI_0 {\n ' + 'state MASTER\n ' + 'priority 100\n ' + 'interface if0\n ' + 'virtual_router_id 50\n ' + 'advert_int 1\n ' + 'authentication {\n ' + 'auth_type PASS\n ' + 'auth_pass 12345\n ' + '}\n ' + 'unicast_src_ip 1::4\n ' + 'unicast_peer {\n ' + '}\n ' + 'virtual_ipaddress {\n ' + '1.2.3.4/32 dev if0\n ' + '}\n ' + 'track_script {\n ' + 'check_backend\n }\n' + '}\n' + } + } + + # check keepalived config + assert keepalived_generated_conf[0] == keepalived_expected_conf + + # generate the haproxy conf based on the specified spec + haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name())) + + haproxy_expected_conf = { + 'files': + { + 'haproxy.cfg': + '# This file is generated by cephadm.' 
+ '\nglobal\n log ' + '127.0.0.1 local2\n ' + 'chroot /var/lib/haproxy\n ' + 'pidfile /var/lib/haproxy/haproxy.pid\n ' + 'maxconn 8000\n ' + 'daemon\n ' + 'stats socket /var/lib/haproxy/stats\n' + '\ndefaults\n ' + 'mode http\n ' + 'log global\n ' + 'option httplog\n ' + 'option dontlognull\n ' + 'option http-server-close\n ' + 'option forwardfor except 127.0.0.0/8\n ' + 'option redispatch\n ' + 'retries 3\n ' + 'timeout queue 20s\n ' + 'timeout connect 5s\n ' + 'timeout http-request 1s\n ' + 'timeout http-keep-alive 5s\n ' + 'timeout client 1s\n ' + 'timeout server 1s\n ' + 'timeout check 5s\n ' + 'maxconn 8000\n' + '\nfrontend stats\n ' + 'mode http\n ' + 'bind *:8999\n ' + 'bind localhost:8999\n ' + 'stats enable\n ' + 'stats uri /stats\n ' + 'stats refresh 10s\n ' + 'stats auth admin:12345\n ' + 'http-request use-service prometheus-exporter if { path /metrics }\n ' + 'monitor-uri /health\n' + '\nfrontend frontend\n ' + 'bind *:8089\n ' + 'default_backend backend\n\n' + 'backend backend\n ' + 'option forwardfor\n ' + 'balance static-rr\n ' + 'option httpchk HEAD / HTTP/1.0\n ' + 'server ' + + haproxy_generated_conf[1][0] + ' 1::4:80 check weight 100\n' + } + } + + assert haproxy_generated_conf[0] == haproxy_expected_conf + class TestCephFsMirror: @patch("cephadm.serve.CephadmServe._run_cephadm") diff --git a/ceph/src/pybind/mgr/cephadm/tests/test_tuned_profiles.py b/ceph/src/pybind/mgr/cephadm/tests/test_tuned_profiles.py new file mode 100644 index 000000000..1521b0110 --- /dev/null +++ b/ceph/src/pybind/mgr/cephadm/tests/test_tuned_profiles.py @@ -0,0 +1,220 @@ +import pytest +import json +from tests import mock +from cephadm.tuned_profiles import TunedProfileUtils, SYSCTL_DIR +from cephadm.inventory import TunedProfileStore +from ceph.utils import datetime_now +from ceph.deployment.service_spec import TunedProfileSpec, PlacementSpec +from cephadm.ssh import SSHManager +from orchestrator import HostSpec + +from typing import List, Dict + + +class 
SaveError(Exception): + pass + + +class FakeCache: + def __init__(self, + hosts, + schedulable_hosts, + unreachable_hosts): + self.hosts = hosts + self.unreachable_hosts = [HostSpec(h) for h in unreachable_hosts] + self.schedulable_hosts = [HostSpec(h) for h in schedulable_hosts] + self.last_tuned_profile_update = {} + + def get_hosts(self): + return self.hosts + + def get_schedulable_hosts(self): + return self.schedulable_hosts + + def get_unreachable_hosts(self): + return self.unreachable_hosts + + def get_draining_hosts(self): + return [] + + @property + def networks(self): + return {h: {'a': {'b': ['c']}} for h in self.hosts} + + def host_needs_tuned_profile_update(self, host, profile_name): + return profile_name == 'p2' + + +class FakeMgr: + def __init__(self, + hosts: List[str], + schedulable_hosts: List[str], + unreachable_hosts: List[str], + profiles: Dict[str, TunedProfileSpec]): + self.cache = FakeCache(hosts, schedulable_hosts, unreachable_hosts) + self.tuned_profiles = TunedProfileStore(self) + self.tuned_profiles.profiles = profiles + self.ssh = SSHManager(self) + self.offline_hosts = [] + + def set_store(self, what: str, value: str): + raise SaveError(f'{what}: {value}') + + def get_store(self, what: str): + if what == 'tuned_profiles': + return json.dumps({'x': TunedProfileSpec('x', + PlacementSpec(hosts=['x']), + {'x': 'x'}).to_json(), + 'y': TunedProfileSpec('y', + PlacementSpec(hosts=['y']), + {'y': 'y'}).to_json()}) + return '' + + +class TestTunedProfiles: + tspec1 = TunedProfileSpec('p1', + PlacementSpec(hosts=['a', 'b', 'c']), + {'setting1': 'value1', + 'setting2': 'value2', + 'setting with space': 'value with space'}) + tspec2 = TunedProfileSpec('p2', + PlacementSpec(hosts=['a', 'c']), + {'something': 'something_else', + 'high': '5'}) + tspec3 = TunedProfileSpec('p3', + PlacementSpec(hosts=['c']), + {'wow': 'wow2', + 'setting with space': 'value with space', + 'down': 'low'}) + + def profiles_to_calls(self, tp: TunedProfileUtils, profiles: 
List[TunedProfileSpec]) -> List[Dict[str, str]]: + # this function takes a list of tuned profiles and returns a mapping from + # profile names to the string that will be written to the actual config file on the host. + res = [] + for p in profiles: + p_str = tp._profile_to_str(p) + res.append({p.profile_name: p_str}) + return res + + @mock.patch("cephadm.tuned_profiles.TunedProfileUtils._remove_stray_tuned_profiles") + @mock.patch("cephadm.tuned_profiles.TunedProfileUtils._write_tuned_profiles") + def test_write_all_tuned_profiles(self, _write_profiles, _rm_profiles): + profiles = {'p1': self.tspec1, 'p2': self.tspec2, 'p3': self.tspec3} + mgr = FakeMgr(['a', 'b', 'c'], + ['a', 'b', 'c'], + [], + profiles) + tp = TunedProfileUtils(mgr) + tp._write_all_tuned_profiles() + # need to check that _write_tuned_profiles is correctly called with the + # profiles that match the tuned profile placements and with the correct + # strings that should be generated from the settings the profiles have. + # the _profiles_to_calls helper allows us to generated the input we + # should check against + calls = [ + mock.call('a', self.profiles_to_calls(tp, [self.tspec1, self.tspec2])), + mock.call('b', self.profiles_to_calls(tp, [self.tspec1])), + mock.call('c', self.profiles_to_calls(tp, [self.tspec1, self.tspec2, self.tspec3])) + ] + _write_profiles.assert_has_calls(calls, any_order=True) + + @mock.patch('cephadm.ssh.SSHManager.check_execute_command') + def test_rm_stray_tuned_profiles(self, _check_execute_command): + profiles = {'p1': self.tspec1, 'p2': self.tspec2, 'p3': self.tspec3} + # for this test, going to use host "a" and put 4 cephadm generated + # profiles "p1" "p2", "p3" and "who" only two of which should be there ("p1", "p2") + # as well as a file not generated by cephadm. Only the "p3" and "who" + # profiles should be removed from the host. 
This should total to 4 + # calls to check_execute_command, 1 "ls", 2 "rm", and 1 "sysctl --system" + _check_execute_command.return_value = '\n'.join(['p1-cephadm-tuned-profile.conf', + 'p2-cephadm-tuned-profile.conf', + 'p3-cephadm-tuned-profile.conf', + 'who-cephadm-tuned-profile.conf', + 'dont-touch-me']) + mgr = FakeMgr(['a', 'b', 'c'], + ['a', 'b', 'c'], + [], + profiles) + tp = TunedProfileUtils(mgr) + tp._remove_stray_tuned_profiles('a', self.profiles_to_calls(tp, [self.tspec1, self.tspec2])) + calls = [ + mock.call('a', ['ls', SYSCTL_DIR]), + mock.call('a', ['rm', '-f', f'{SYSCTL_DIR}/p3-cephadm-tuned-profile.conf']), + mock.call('a', ['rm', '-f', f'{SYSCTL_DIR}/who-cephadm-tuned-profile.conf']), + mock.call('a', ['sysctl', '--system']) + ] + _check_execute_command.assert_has_calls(calls, any_order=True) + + @mock.patch('cephadm.ssh.SSHManager.check_execute_command') + @mock.patch('cephadm.ssh.SSHManager.write_remote_file') + def test_write_tuned_profiles(self, _write_remote_file, _check_execute_command): + profiles = {'p1': self.tspec1, 'p2': self.tspec2, 'p3': self.tspec3} + # for this test we will use host "a" and have it so host_needs_tuned_profile_update + # returns True for p2 and False for p1 (see FakeCache class). 
So we should see + # 2 ssh calls, one to write p2, one to run sysctl --system + _check_execute_command.return_value = 'success' + _write_remote_file.return_value = 'success' + mgr = FakeMgr(['a', 'b', 'c'], + ['a', 'b', 'c'], + [], + profiles) + tp = TunedProfileUtils(mgr) + tp._write_tuned_profiles('a', self.profiles_to_calls(tp, [self.tspec1, self.tspec2])) + _check_execute_command.assert_called_with('a', ['sysctl', '--system']) + _write_remote_file.assert_called_with( + 'a', f'{SYSCTL_DIR}/p2-cephadm-tuned-profile.conf', tp._profile_to_str(self.tspec2).encode('utf-8')) + + def test_store(self): + mgr = FakeMgr(['a', 'b', 'c'], + ['a', 'b', 'c'], + [], + {}) + tps = TunedProfileStore(mgr) + save_str_p1 = 'tuned_profiles: ' + json.dumps({'p1': self.tspec1.to_json()}) + tspec1_updated = self.tspec1.copy() + tspec1_updated.settings.update({'new-setting': 'new-value'}) + save_str_p1_updated = 'tuned_profiles: ' + json.dumps({'p1': tspec1_updated.to_json()}) + save_str_p1_updated_p2 = 'tuned_profiles: ' + \ + json.dumps({'p1': tspec1_updated.to_json(), 'p2': self.tspec2.to_json()}) + tspec2_updated = self.tspec2.copy() + tspec2_updated.settings.pop('something') + save_str_p1_updated_p2_updated = 'tuned_profiles: ' + \ + json.dumps({'p1': tspec1_updated.to_json(), 'p2': tspec2_updated.to_json()}) + save_str_p2_updated = 'tuned_profiles: ' + json.dumps({'p2': tspec2_updated.to_json()}) + with pytest.raises(SaveError) as e: + tps.add_profile(self.tspec1) + assert str(e.value) == save_str_p1 + assert 'p1' in tps + with pytest.raises(SaveError) as e: + tps.add_setting('p1', 'new-setting', 'new-value') + assert str(e.value) == save_str_p1_updated + assert 'new-setting' in tps.list_profiles()[0].settings + with pytest.raises(SaveError) as e: + tps.add_profile(self.tspec2) + assert str(e.value) == save_str_p1_updated_p2 + assert 'p2' in tps + assert 'something' in tps.list_profiles()[1].settings + with pytest.raises(SaveError) as e: + tps.rm_setting('p2', 'something') + 
assert 'something' not in tps.list_profiles()[1].settings + assert str(e.value) == save_str_p1_updated_p2_updated + with pytest.raises(SaveError) as e: + tps.rm_profile('p1') + assert str(e.value) == save_str_p2_updated + assert 'p1' not in tps + assert 'p2' in tps + assert len(tps.list_profiles()) == 1 + assert tps.list_profiles()[0].profile_name == 'p2' + + cur_last_updated = tps.last_updated('p2') + new_last_updated = datetime_now() + assert cur_last_updated != new_last_updated + tps.set_last_updated('p2', new_last_updated) + assert tps.last_updated('p2') == new_last_updated + + # check FakeMgr get_store func to see what is expected to be found in Key Store here + tps.load() + assert 'x' in tps + assert 'y' in tps + assert [p for p in tps.list_profiles() if p.profile_name == 'x'][0].settings == {'x': 'x'} + assert [p for p in tps.list_profiles() if p.profile_name == 'y'][0].settings == {'y': 'y'} diff --git a/ceph/src/pybind/mgr/cephadm/tuned_profiles.py b/ceph/src/pybind/mgr/cephadm/tuned_profiles.py new file mode 100644 index 000000000..3f6179568 --- /dev/null +++ b/ceph/src/pybind/mgr/cephadm/tuned_profiles.py @@ -0,0 +1,80 @@ +import logging +from typing import Dict, List, TYPE_CHECKING +from ceph.utils import datetime_now +from .schedule import HostAssignment +from ceph.deployment.service_spec import ServiceSpec, TunedProfileSpec + +if TYPE_CHECKING: + from cephadm.module import CephadmOrchestrator + +logger = logging.getLogger(__name__) + +SYSCTL_DIR = '/etc/sysctl.d' + + +class TunedProfileUtils(): + def __init__(self, mgr: "CephadmOrchestrator") -> None: + self.mgr = mgr + + def _profile_to_str(self, p: TunedProfileSpec) -> str: + p_str = f'# created by cephadm\n# tuned profile "{p.profile_name}"\n\n' + for k, v in p.settings.items(): + p_str += f'{k} = {v}\n' + return p_str + + def _write_all_tuned_profiles(self) -> None: + host_profile_mapping: Dict[str, List[Dict[str, str]]] = {} + for host in self.mgr.cache.get_hosts(): + host_profile_mapping[host] = 
[] + + for profile in self.mgr.tuned_profiles.list_profiles(): + p_str = self._profile_to_str(profile) + ha = HostAssignment( + spec=ServiceSpec( + 'crash', placement=profile.placement), + hosts=self.mgr.cache.get_schedulable_hosts(), + unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), + draining_hosts=self.mgr.cache.get_draining_hosts(), + daemons=[], + networks=self.mgr.cache.networks, + ) + all_slots, _, _ = ha.place() + for host in {s.hostname for s in all_slots}: + host_profile_mapping[host].append({profile.profile_name: p_str}) + + for host, profiles in host_profile_mapping.items(): + self._remove_stray_tuned_profiles(host, profiles) + self._write_tuned_profiles(host, profiles) + + def _remove_stray_tuned_profiles(self, host: str, profiles: List[Dict[str, str]]) -> None: + if host in self.mgr.offline_hosts: + return + cmd = ['ls', SYSCTL_DIR] + found_files = self.mgr.ssh.check_execute_command(host, cmd).split('\n') + found_files = [s.strip() for s in found_files] + updated = False + for file in found_files: + if '-cephadm-tuned-profile.conf' not in file: + continue + if not any(file.split('-')[0] in p.keys() for p in profiles): + logger.info(f'Removing stray tuned profile file {file}') + cmd = ['rm', '-f', f'{SYSCTL_DIR}/{file}'] + self.mgr.ssh.check_execute_command(host, cmd) + updated = True + if updated: + self.mgr.ssh.check_execute_command(host, ['sysctl', '--system']) + + def _write_tuned_profiles(self, host: str, profiles: List[Dict[str, str]]) -> None: + if host in self.mgr.offline_hosts: + return + updated = False + for p in profiles: + for profile_name, content in p.items(): + if self.mgr.cache.host_needs_tuned_profile_update(host, profile_name): + logger.info(f'Writing tuned profile {profile_name} to host {host}') + profile_filename: str = f'{SYSCTL_DIR}/{profile_name}-cephadm-tuned-profile.conf' + self.mgr.ssh.write_remote_file(host, profile_filename, content.encode('utf-8')) + updated = True + if updated: + 
self.mgr.ssh.check_execute_command(host, ['sysctl', '--system']) + self.mgr.cache.last_tuned_profile_update[host] = datetime_now() diff --git a/ceph/src/pybind/mgr/cephadm/upgrade.py b/ceph/src/pybind/mgr/cephadm/upgrade.py index d41b9286e..c2cc0aff9 100644 --- a/ceph/src/pybind/mgr/cephadm/upgrade.py +++ b/ceph/src/pybind/mgr/cephadm/upgrade.py @@ -147,6 +147,7 @@ class CephadmUpgrade: r.target_image = self.target_image r.in_progress = True r.progress, r.services_complete = self._get_upgrade_info() + r.is_paused = self.upgrade_state.paused if self.upgrade_state.daemon_types is not None: which_str = f'Upgrading daemons of type(s) {",".join(self.upgrade_state.daemon_types)}' @@ -267,7 +268,11 @@ class CephadmUpgrade: "registry": reg_name, "bare_image": bare_image, } - ls = reg.get_tags(bare_image) + + try: + ls = reg.get_tags(bare_image) + except ValueError as e: + raise OrchestratorError(f'{e}') if not tags: for t in ls: if t[0] != 'v': @@ -451,6 +456,7 @@ class CephadmUpgrade: if not self.upgrade_state.paused: return 'Upgrade to %s not paused' % self.target_image self.upgrade_state.paused = False + self.upgrade_state.error = '' self.mgr.log.info('Upgrade: Resumed upgrade to %s' % self.target_image) self._save_upgrade_state() self.mgr.event.set() diff --git a/ceph/src/pybind/mgr/dashboard/__init__.py b/ceph/src/pybind/mgr/dashboard/__init__.py index 653474c63..d2eab9751 100644 --- a/ceph/src/pybind/mgr/dashboard/__init__.py +++ b/ceph/src/pybind/mgr/dashboard/__init__.py @@ -48,5 +48,13 @@ else: os.path.dirname(__file__), 'frontend/dist')) + import rbd + + # Api tests do not mock rbd as opposed to dashboard unit tests. Both + # use UNITTEST env variable. 
+ if isinstance(rbd, mock.Mock): + rbd.RBD_MIRROR_IMAGE_MODE_JOURNAL = 0 + rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT = 1 + # DO NOT REMOVE: required for ceph-mgr to load a module from .module import Module, StandbyModule # noqa: F401 diff --git a/ceph/src/pybind/mgr/dashboard/cherrypy_backports.py b/ceph/src/pybind/mgr/dashboard/cherrypy_backports.py index 9be4c9ba9..8871004fe 100644 --- a/ceph/src/pybind/mgr/dashboard/cherrypy_backports.py +++ b/ceph/src/pybind/mgr/dashboard/cherrypy_backports.py @@ -32,7 +32,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ -from distutils.version import StrictVersion +from pkg_resources import parse_version # The SSL code in CherryPy 3.5.0 is buggy. It was fixed long ago, # but 3.5.0 is still shipping in major linux distributions @@ -42,7 +42,7 @@ from distutils.version import StrictVersion def patch_http_connection_init(v): # It was fixed in 3.7.0. Exact lower bound version is probably earlier, # but 3.5.0 is what this monkey patch is tested on. - if StrictVersion("3.5.0") <= v < StrictVersion("3.7.0"): + if parse_version("3.5.0") <= v < parse_version("3.7.0"): from cherrypy.wsgiserver.wsgiserver2 import CP_fileobject, HTTPConnection def fixed_init(hc_self, server, sock, makefile=CP_fileobject): @@ -63,7 +63,7 @@ def patch_http_connection_init(v): def skip_wait_for_occupied_port(v): # the issue was fixed in 3.2.3. it's present in 3.2.2 (current version on # centos:7) and back to at least 3.0.0. 
- if StrictVersion("3.1.2") <= v < StrictVersion("3.2.3"): + if parse_version("3.1.2") <= v < parse_version("3.2.3"): # https://github.com/cherrypy/cherrypy/issues/1100 from cherrypy.process import servers servers.wait_for_occupied_port = lambda host, port: None @@ -71,7 +71,7 @@ def skip_wait_for_occupied_port(v): # cherrypy.wsgiserver was extracted wsgiserver into cheroot in cherrypy v9.0.0 def patch_builtin_ssl_wrap(v, new_wrap): - if v < StrictVersion("9.0.0"): + if v < parse_version("9.0.0"): from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter as builtin_ssl else: from cheroot.ssl.builtin import BuiltinSSLAdapter as builtin_ssl # type: ignore @@ -81,7 +81,7 @@ def patch_builtin_ssl_wrap(v, new_wrap): def accept_exceptions_from_builtin_ssl(v): # the fix was included by cheroot v5.2.0, which was included by cherrypy # 10.2.0. - if v < StrictVersion("10.2.0"): + if v < parse_version("10.2.0"): # see https://github.com/cherrypy/cheroot/pull/4 import ssl @@ -117,11 +117,11 @@ def accept_socket_error_0(v): # see https://github.com/cherrypy/cherrypy/issues/1618 try: import cheroot - cheroot_version = cheroot.__version__ + cheroot_version = parse_version(cheroot.__version__) except ImportError: pass - if v < StrictVersion("9.0.0") or cheroot_version < StrictVersion("6.5.5"): + if v < parse_version("9.0.0") or cheroot_version < parse_version("6.5.5"): generic_socket_error = OSError def accept_socket_error_0(func): @@ -157,7 +157,7 @@ def patch_request_unique_id(v): Monkey-patching is preferred over alternatives as inheritance, as it'd break type checks (cherrypy/lib/cgtools.py: `isinstance(obj, _cprequest.Request)`) """ - if v < StrictVersion('11.1.0'): + if v < parse_version('11.1.0'): import uuid from functools import update_wrapper @@ -191,8 +191,9 @@ def patch_request_unique_id(v): def patch_cherrypy(v): - patch_http_connection_init(v) - skip_wait_for_occupied_port(v) - accept_exceptions_from_builtin_ssl(v) - accept_socket_error_0(v) - 
patch_request_unique_id(v) + ver = parse_version(v) + patch_http_connection_init(ver) + skip_wait_for_occupied_port(ver) + accept_exceptions_from_builtin_ssl(ver) + accept_socket_error_0(ver) + patch_request_unique_id(ver) diff --git a/ceph/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh b/ceph/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh index a0e997587..5a3fa4349 100755 --- a/ceph/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh +++ b/ceph/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh @@ -11,10 +11,13 @@ mkdir -p /etc/ceph mon_ip=$(ifconfig eth0 | grep 'inet ' | awk '{ print $2}') bootstrap_extra_options='--allow-fqdn-hostname --dashboard-password-noupdate' -bootstrap_extra_options_not_expanded='--skip-monitoring-stack' -{% if expanded_cluster is not defined %} - bootstrap_extra_options+=" ${bootstrap_extra_options_not_expanded}" -{% endif %} + +# commenting the below lines. Uncomment it when any extra options are +# needed for the bootstrap. +# bootstrap_extra_options_not_expanded='' +# {% if expanded_cluster is not defined %} +# bootstrap_extra_options+=" ${bootstrap_extra_options_not_expanded}" +# {% endif %} cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --shared_ceph_folder /mnt/{{ ceph_dev_folder }} ${bootstrap_extra_options} diff --git a/ceph/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh b/ceph/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh index e10929716..063b544f4 100755 --- a/ceph/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh +++ b/ceph/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh @@ -24,7 +24,7 @@ export CYPRESS_BASE_URL CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD cypress_run () { local specs="$1" local timeout="$2" - local override_config="ignoreTestFiles=*.po.ts,retries=0,testFiles=${specs}" + local override_config="ignoreTestFiles=*.po.ts,retries=0,testFiles=${specs},chromeWebSecurity=false" if [[ -n "$timeout" ]]; then 
override_config="${override_config},defaultCommandTimeout=${timeout}" fi @@ -38,4 +38,13 @@ cypress_run () { cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend -cypress_run "orchestrator/workflow/*-spec.ts" +# check if the prometheus daemon is running +# before starting the e2e tests + +PROMETHEUS_RUNNING_COUNT=$(kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch ls --service_name=prometheus --format=json"' | jq -r '.[] | .status.running') +while [[ $PROMETHEUS_RUNNING_COUNT -lt 1 ]]; do + PROMETHEUS_RUNNING_COUNT=$(kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch ls --service_name=prometheus --format=json"' | jq -r '.[] | .status.running') +done + +cypress_run ["orchestrator/workflow/*.feature, orchestrator/workflow/*-spec.ts"] +cypress_run "orchestrator/grafana/*.feature" diff --git a/ceph/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh b/ceph/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh index 74ebb1c5f..26fbd8a7c 100755 --- a/ceph/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh +++ b/ceph/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh @@ -5,31 +5,36 @@ set -eEx cleanup() { set +x if [[ -n "$JENKINS_HOME" ]]; then - printf "\n\nStarting cleanup...\n\n" + echo "Starting cleanup..." kcli delete plan -y ceph || true kcli delete network ceph-dashboard -y docker container prune -f - printf "\n\nCleanup completed.\n\n" + echo "Cleanup completed." fi } on_error() { set +x if [ "$1" != "0" ]; then - printf "\n\nERROR $1 thrown on line $2\n\n" - printf "\n\nCollecting info...\n\n" - printf "\n\nDisplaying MGR logs:\n\n" - kcli ssh -u root -- ceph-node-00 'cephadm logs -n \$(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1) -- --no-tail --no-pager' - for vm_id in 0 1 2 + echo "ERROR $1 thrown on line $2" + echo + echo "Collecting info..." 
+ echo + echo "Saving MGR logs:" + echo + mkdir -p ${CEPH_DEV_FOLDER}/logs + kcli ssh -u root -- ceph-node-00 'cephadm logs -n \$(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1) -- --no-tail --no-pager' > ${CEPH_DEV_FOLDER}/logs/mgr.cephadm.log + for vm_id in {0..3} do local vm="ceph-node-0${vm_id}" - printf "\n\nDisplaying journalctl from VM ${vm}:\n\n" - kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' || true - printf "\n\nEnd of journalctl from VM ${vm}\n\n" - printf "\n\nDisplaying container logs:\n\n" - kcli ssh -u root -- ${vm} 'podman logs --names --since 30s \$(podman ps -aq)' || true + echo "Saving journalctl from VM ${vm}:" + echo + kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' > ${CEPH_DEV_FOLDER}/logs/journal.ceph-node-0${vm_id}.log || true + echo "Saving container logs:" + echo + kcli ssh -u root -- ${vm} 'podman logs --names --since 30s \$(podman ps -aq)' > ${CEPH_DEV_FOLDER}/logs/container.ceph-node-0${vm_id}.log || true done - printf "\n\nTEST FAILED.\n\n" + echo "TEST FAILED." fi } diff --git a/ceph/src/pybind/mgr/dashboard/constraints.txt b/ceph/src/pybind/mgr/dashboard/constraints.txt index 75af03232..00e1aad01 100644 --- a/ceph/src/pybind/mgr/dashboard/constraints.txt +++ b/ceph/src/pybind/mgr/dashboard/constraints.txt @@ -1,5 +1,5 @@ CherryPy==13.1.0 -more-itertools==4.1.0 +more-itertools==8.14.0 PyJWT==2.0.1 bcrypt==3.1.4 python3-saml==1.4.1 diff --git a/ceph/src/pybind/mgr/dashboard/controllers/home.py b/ceph/src/pybind/mgr/dashboard/controllers/home.py index b79b53ca8..f911cf388 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/home.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/home.py @@ -14,7 +14,6 @@ import cherrypy from cherrypy.lib.static import serve_file from .. import mgr -from ..services.custom_banner import get_login_banner_mgr from . 
import BaseController, Endpoint, Proxy, Router, UIRouter logger = logging.getLogger("controllers.home") @@ -146,4 +145,4 @@ class LangsController(BaseController, LanguageMixin): class LoginController(BaseController): @Endpoint('GET', 'custom_banner') def __call__(self): - return get_login_banner_mgr() + return mgr.get_store('custom_login_banner') diff --git a/ceph/src/pybind/mgr/dashboard/controllers/nfs.py b/ceph/src/pybind/mgr/dashboard/controllers/nfs.py index d14b54028..3c177511c 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/nfs.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/nfs.py @@ -81,31 +81,8 @@ def NfsTask(name, metadata, wait_for): # noqa: N802 return composed_decorator -@APIRouter('/nfs-ganesha', Scope.NFS_GANESHA) -@APIDoc("NFS-Ganesha Cluster Management API", "NFS-Ganesha") -class NFSGanesha(RESTController): - - @EndpointDoc("Status of NFS-Ganesha management feature", - responses={200: { - 'available': (bool, "Is API available?"), - 'message': (str, "Error message") - }}) - @Endpoint() - @ReadPermission - def status(self): - status = {'available': True, 'message': None} - try: - mgr.remote('nfs', 'cluster_ls') - except (ImportError, RuntimeError) as error: - logger.exception(error) - status['available'] = False - status['message'] = str(error) # type: ignore - - return status - - @APIRouter('/nfs-ganesha/cluster', Scope.NFS_GANESHA) -@APIDoc(group="NFS-Ganesha") +@APIDoc("NFS-Ganesha Cluster Management API", "NFS-Ganesha") class NFSGaneshaCluster(RESTController): @ReadPermission @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL) @@ -285,3 +262,16 @@ class NFSGaneshaUi(BaseController): @ReadPermission def filesystems(self): return CephFS.list_filesystems() + + @Endpoint() + @ReadPermission + def status(self): + status = {'available': True, 'message': None} + try: + mgr.remote('nfs', 'cluster_ls') + except (ImportError, RuntimeError) as error: + logger.exception(error) + status['available'] = False + status['message'] = str(error) # 
type: ignore + + return status diff --git a/ceph/src/pybind/mgr/dashboard/controllers/orchestrator.py b/ceph/src/pybind/mgr/dashboard/controllers/orchestrator.py index b93b850e0..51d0a459d 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/orchestrator.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/orchestrator.py @@ -4,7 +4,7 @@ from functools import wraps from ..exceptions import DashboardException from ..services.orchestrator import OrchClient -from . import APIDoc, APIRouter, Endpoint, EndpointDoc, ReadPermission, RESTController +from . import APIDoc, Endpoint, EndpointDoc, ReadPermission, RESTController, UIRouter STATUS_SCHEMA = { "available": (bool, "Orchestrator status"), @@ -35,7 +35,7 @@ def raise_if_no_orchestrator(features=None): return inner -@APIRouter('/orchestrator') +@UIRouter('/orchestrator') @APIDoc("Orchestrator Management API", "Orchestrator") class Orchestrator(RESTController): diff --git a/ceph/src/pybind/mgr/dashboard/controllers/osd.py b/ceph/src/pybind/mgr/dashboard/controllers/osd.py index ceebb5acd..9c903ed67 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/osd.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/osd.py @@ -14,10 +14,11 @@ from ..security import Scope from ..services.ceph_service import CephService, SendCommandError from ..services.exception import handle_orchestrator_error, handle_send_command_error from ..services.orchestrator import OrchClient, OrchFeature +from ..services.osd import HostStorageSummary, OsdDeploymentOptions from ..tools import str_to_bool from . 
import APIDoc, APIRouter, CreatePermission, DeletePermission, Endpoint, \ - EndpointDoc, ReadPermission, RESTController, Task, UpdatePermission, \ - allow_empty_body + EndpointDoc, ReadPermission, RESTController, Task, UIRouter, \ + UpdatePermission, allow_empty_body from ._version import APIVersion from .orchestrator import raise_if_no_orchestrator @@ -47,6 +48,72 @@ EXPORT_INDIV_FLAGS_GET_SCHEMA = { } +class DeploymentOptions: + def __init__(self): + self.options = { + OsdDeploymentOptions.COST_CAPACITY: + HostStorageSummary(OsdDeploymentOptions.COST_CAPACITY, + title='Cost/Capacity-optimized', + desc='All the available HDDs are selected'), + OsdDeploymentOptions.THROUGHPUT: + HostStorageSummary(OsdDeploymentOptions.THROUGHPUT, + title='Throughput-optimized', + desc="HDDs/SSDs are selected for data" + "devices and SSDs/NVMes for DB/WAL devices"), + OsdDeploymentOptions.IOPS: + HostStorageSummary(OsdDeploymentOptions.IOPS, + title='IOPS-optimized', + desc='All the available NVMes are selected'), + } + self.recommended_option = None + + def as_dict(self): + return { + 'options': {k: v.as_dict() for k, v in self.options.items()}, + 'recommended_option': self.recommended_option + } + + +predefined_drive_groups = { + OsdDeploymentOptions.COST_CAPACITY: { + 'service_type': 'osd', + 'service_id': 'cost_capacity', + 'placement': { + 'host_pattern': '*' + }, + 'data_devices': { + 'rotational': 1 + }, + 'encrypted': False + }, + OsdDeploymentOptions.THROUGHPUT: { + 'service_type': 'osd', + 'service_id': 'throughput_optimized', + 'placement': { + 'host_pattern': '*' + }, + 'data_devices': { + 'rotational': 1 + }, + 'db_devices': { + 'rotational': 0 + }, + 'encrypted': False + }, + OsdDeploymentOptions.IOPS: { + 'service_type': 'osd', + 'service_id': 'iops_optimized', + 'placement': { + 'host_pattern': '*' + }, + 'data_devices': { + 'rotational': 0 + }, + 'encrypted': False + }, +} + + def osd_task(name, metadata, wait_for=2.0): return Task("osd/{}".format(name), metadata, 
wait_for) @@ -291,6 +358,18 @@ class Osd(RESTController): id=int(svc_id), weight=float(weight)) + def _create_predefined_drive_group(self, data): + orch = OrchClient.instance() + option = OsdDeploymentOptions(data[0]['option']) + if option in list(OsdDeploymentOptions): + try: + predefined_drive_groups[ + option]['encrypted'] = data[0]['encrypted'] + orch.osds.create([DriveGroupSpec.from_json( + predefined_drive_groups[option])]) + except (ValueError, TypeError, DriveGroupValidationError) as e: + raise DashboardException(e, component='osd') + def _create_bare(self, data): """Create a OSD container that has no associated device. @@ -330,6 +409,8 @@ class Osd(RESTController): return self._create_bare(data) if method == 'drive_groups': return self._create_with_drive_groups(data) + if method == 'predefined': + return self._create_predefined_drive_group(data) raise DashboardException( component='osd', http_status_code=400, msg='Unknown method: {}'.format(method)) @@ -405,6 +486,44 @@ class Osd(RESTController): return CephService.send_command('mon', 'device ls-by-daemon', who='osd.{}'.format(svc_id)) +@UIRouter('/osd', Scope.OSD) +@APIDoc("Dashboard UI helper function; not part of the public API", "OsdUI") +class OsdUi(Osd): + @Endpoint('GET') + @ReadPermission + @raise_if_no_orchestrator([OrchFeature.DAEMON_LIST]) + @handle_orchestrator_error('host') + def deployment_options(self): + orch = OrchClient.instance() + hdds = 0 + ssds = 0 + nvmes = 0 + res = DeploymentOptions() + + for inventory_host in orch.inventory.list(hosts=None, refresh=True): + for device in inventory_host.devices.devices: + if device.available: + if device.human_readable_type == 'hdd': + hdds += 1 + # SSDs and NVMe are both counted as 'ssd' + # so differentiating nvme using its path + elif '/dev/nvme' in device.path: + nvmes += 1 + else: + ssds += 1 + + if hdds: + res.options[OsdDeploymentOptions.COST_CAPACITY].available = True + res.recommended_option = OsdDeploymentOptions.COST_CAPACITY + if hdds 
and ssds: + res.options[OsdDeploymentOptions.THROUGHPUT].available = True + res.recommended_option = OsdDeploymentOptions.THROUGHPUT + if nvmes: + res.options[OsdDeploymentOptions.IOPS].available = True + + return res.as_dict() + + @APIRouter('/osd/flags', Scope.OSD) @APIDoc(group='OSD') class OsdFlagsController(RESTController): diff --git a/ceph/src/pybind/mgr/dashboard/controllers/pool.py b/ceph/src/pybind/mgr/dashboard/controllers/pool.py index 386d58440..f3aa6d0a4 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/pool.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/pool.py @@ -343,3 +343,9 @@ class PoolUi(Pool): "used_profiles": used_profiles, 'nodes': mgr.get('osd_map_tree')['nodes'] } + + +class RBDPool(Pool): + def create(self, pool='rbd-mirror'): # pylint: disable=arguments-differ + super().create(pool, pg_num=1, pool_type='replicated', + rule_name='replicated_rule', application_metadata=['rbd']) diff --git a/ceph/src/pybind/mgr/dashboard/controllers/prometheus.py b/ceph/src/pybind/mgr/dashboard/controllers/prometheus.py index f1dae7f1a..af90f0d60 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/prometheus.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/prometheus.py @@ -51,7 +51,12 @@ class PrometheusRESTController(RESTController): "Could not reach {}'s API on {}".format(api_name, base_url), http_status_code=404, component='prometheus') - content = json.loads(response.content) + try: + content = json.loads(response.content, strict=False) + except json.JSONDecodeError as e: + raise DashboardException( + "Error parsing Prometheus Alertmanager response: {}".format(e.msg), + component='prometheus') if content['status'] == 'success': if 'data' in content: return content['data'] diff --git a/ceph/src/pybind/mgr/dashboard/controllers/rbd.py b/ceph/src/pybind/mgr/dashboard/controllers/rbd.py index 252ddfc61..c20ee8ca2 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/rbd.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/rbd.py @@ -7,6 +7,7 
@@ import math from datetime import datetime from functools import partial +import cherrypy import rbd from .. import mgr @@ -14,17 +15,19 @@ from ..exceptions import DashboardException from ..security import Scope from ..services.ceph_service import CephService from ..services.exception import handle_rados_error, handle_rbd_error, serialize_dashboard_exception -from ..services.rbd import RbdConfiguration, RbdService, RbdSnapshotService, \ - format_bitmask, format_features, parse_image_spec, rbd_call, \ +from ..services.rbd import MIRROR_IMAGE_MODE, RbdConfiguration, \ + RbdMirroringService, RbdService, RbdSnapshotService, format_bitmask, \ + format_features, get_image_spec, parse_image_spec, rbd_call, \ rbd_image_call from ..tools import ViewCache, str_to_bool -from . import APIDoc, APIRouter, CreatePermission, DeletePermission, \ - EndpointDoc, RESTController, Task, UpdatePermission, allow_empty_body +from . import APIDoc, APIRouter, BaseController, CreatePermission, \ + DeletePermission, Endpoint, EndpointDoc, ReadPermission, RESTController, \ + Task, UIRouter, UpdatePermission, allow_empty_body +from ._version import APIVersion logger = logging.getLogger(__name__) RBD_SCHEMA = ([{ - "status": (int, 'Status of the image'), "value": ([str], ''), "pool_name": (str, 'pool name') }]) @@ -76,31 +79,41 @@ class Rbd(RESTController): ALLOW_DISABLE_FEATURES = {"exclusive-lock", "object-map", "fast-diff", "deep-flatten", "journaling"} - def _rbd_list(self, pool_name=None): + DEFAULT_LIMIT = 5 + + def _rbd_list(self, pool_name=None, offset=0, limit=DEFAULT_LIMIT, search='', sort=''): if pool_name: pools = [pool_name] else: pools = [p['pool_name'] for p in CephService.get_pool_list('rbd')] - result = [] - for pool in pools: - # pylint: disable=unbalanced-tuple-unpacking - status, value = RbdService.rbd_pool_list(pool) - for i, image in enumerate(value): - value[i]['configuration'] = RbdConfiguration( - pool, image['namespace'], image['name']).list() - 
result.append({'status': status, 'value': value, 'pool_name': pool}) - return result + images, num_total_images = RbdService.rbd_pool_list( + pools, offset=offset, limit=limit, search=search, sort=sort) + cherrypy.response.headers['X-Total-Count'] = num_total_images + pool_result = {} + for i, image in enumerate(images): + pool = image['pool_name'] + if pool not in pool_result: + pool_result[pool] = {'value': [], 'pool_name': image['pool_name']} + pool_result[pool]['value'].append(image) + + images[i]['configuration'] = RbdConfiguration( + pool, image['namespace'], image['name']).list() + return list(pool_result.values()) @handle_rbd_error() @handle_rados_error('pool') @EndpointDoc("Display Rbd Images", parameters={ 'pool_name': (str, 'Pool Name'), + 'limit': (int, 'limit'), + 'offset': (int, 'offset'), }, responses={200: RBD_SCHEMA}) - def list(self, pool_name=None): - return self._rbd_list(pool_name) + @RESTController.MethodMap(version=APIVersion(2, 0)) # type: ignore + def list(self, pool_name=None, offset: int = 0, limit: int = DEFAULT_LIMIT, + search: str = '', sort: str = ''): + return self._rbd_list(pool_name, offset=offset, limit=limit, search=search, sort=sort) @handle_rbd_error() @handle_rados_error('pool') @@ -109,8 +122,9 @@ class Rbd(RESTController): @RbdTask('create', {'pool_name': '{pool_name}', 'namespace': '{namespace}', 'image_name': '{name}'}, 2.0) - def create(self, name, pool_name, size, namespace=None, obj_size=None, features=None, - stripe_unit=None, stripe_count=None, data_pool=None, configuration=None): + def create(self, name, pool_name, size, namespace=None, schedule_interval='', + obj_size=None, features=None, stripe_unit=None, stripe_count=None, + data_pool=None, configuration=None, mirror_mode=None): size = int(size) @@ -132,6 +146,13 @@ class Rbd(RESTController): image_name=name).set_configuration(configuration) rbd_call(pool_name, namespace, _create) + if mirror_mode: + RbdMirroringService.enable_image(name, pool_name, namespace, + 
MIRROR_IMAGE_MODE[mirror_mode]) + + if schedule_interval: + image_spec = get_image_spec(pool_name, namespace, name) + RbdMirroringService.snapshot_schedule_add(image_spec, schedule_interval) @RbdTask('delete', ['{image_spec}'], 2.0) def delete(self, image_spec): @@ -146,7 +167,11 @@ class Rbd(RESTController): return rbd_call(pool_name, namespace, rbd_inst.remove, image_name) @RbdTask('edit', ['{image_spec}', '{name}'], 4.0) - def set(self, image_spec, name=None, size=None, features=None, configuration=None): + def set(self, image_spec, name=None, size=None, features=None, + configuration=None, enable_mirror=None, primary=None, + resync=False, mirror_mode=None, schedule_interval='', + remove_scheduling=False): + pool_name, namespace, image_name = parse_image_spec(image_spec) def _edit(ioctx, image): @@ -182,6 +207,32 @@ class Rbd(RESTController): RbdConfiguration(pool_ioctx=ioctx, image_name=image_name).set_configuration( configuration) + mirror_image_info = image.mirror_image_get_info() + if enable_mirror and mirror_image_info['state'] == rbd.RBD_MIRROR_IMAGE_DISABLED: + RbdMirroringService.enable_image( + image_name, pool_name, namespace, + MIRROR_IMAGE_MODE[mirror_mode]) + elif (enable_mirror is False + and mirror_image_info['state'] == rbd.RBD_MIRROR_IMAGE_ENABLED): + RbdMirroringService.disable_image( + image_name, pool_name, namespace) + + if primary and not mirror_image_info['primary']: + RbdMirroringService.promote_image( + image_name, pool_name, namespace) + elif primary is False and mirror_image_info['primary']: + RbdMirroringService.demote_image( + image_name, pool_name, namespace) + + if resync: + RbdMirroringService.resync_image(image_name, pool_name, namespace) + + if schedule_interval: + RbdMirroringService.snapshot_schedule_add(image_spec, schedule_interval) + + if remove_scheduling: + RbdMirroringService.snapshot_schedule_remove(image_spec) + return rbd_image_call(pool_name, namespace, image_name, _edit) @RbdTask('copy', @@ -263,6 +314,20 @@ class 
Rbd(RESTController): return rbd_call(pool_name, namespace, rbd_inst.trash_move, image_name, delay) +@UIRouter('/block/rbd') +class RbdStatus(BaseController): + @EndpointDoc("Display RBD Image feature status") + @Endpoint() + @ReadPermission + def status(self): + status = {'available': True, 'message': None} + if not CephService.get_pool_list('rbd'): + status['available'] = False + status['message'] = 'No RBD pools in the cluster. Please create a pool '\ + 'with the "rbd" application label.' # type: ignore + return status + + @APIRouter('/block/image/{image_spec}/snap', Scope.RBD_IMAGE) @APIDoc("RBD Snapshot Management API", "RbdSnapshot") class RbdSnapshot(RESTController): @@ -275,7 +340,13 @@ class RbdSnapshot(RESTController): pool_name, namespace, image_name = parse_image_spec(image_spec) def _create_snapshot(ioctx, img, snapshot_name): - img.create_snap(snapshot_name) + mirror_info = img.mirror_image_get_info() + mirror_mode = img.mirror_image_get_mode() + if (mirror_info['state'] == rbd.RBD_MIRROR_IMAGE_ENABLED + and mirror_mode == rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT): + img.mirror_image_create_snapshot() + else: + img.create_snap(snapshot_name) return rbd_image_call(pool_name, namespace, image_name, _create_snapshot, snapshot_name) @@ -467,7 +538,7 @@ class RbdNamespace(RESTController): def delete(self, pool_name, namespace): with mgr.rados.open_ioctx(pool_name) as ioctx: # pylint: disable=unbalanced-tuple-unpacking - _, images = RbdService.rbd_pool_list(pool_name, namespace) + images, _ = RbdService.rbd_pool_list([pool_name], namespace=namespace) if images: raise DashboardException( msg='Namespace contains images which must be deleted first', @@ -481,7 +552,7 @@ class RbdNamespace(RESTController): namespaces = self.rbd_inst.namespace_list(ioctx) for namespace in namespaces: # pylint: disable=unbalanced-tuple-unpacking - _, images = RbdService.rbd_pool_list(pool_name, namespace) + images, _ = RbdService.rbd_pool_list([pool_name], namespace=namespace) 
result.append({ 'namespace': namespace, 'num_images': len(images) if images else 0 diff --git a/ceph/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py b/ceph/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py index f5bda8c89..17ef0b88b 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py @@ -4,19 +4,23 @@ import json import logging import re from functools import partial -from typing import no_type_check +from typing import NamedTuple, Optional, no_type_check import cherrypy import rbd from .. import mgr +from ..controllers.pool import RBDPool +from ..controllers.service import Service from ..security import Scope from ..services.ceph_service import CephService from ..services.exception import handle_rados_error, handle_rbd_error, serialize_dashboard_exception +from ..services.orchestrator import OrchClient from ..services.rbd import rbd_call from ..tools import ViewCache -from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \ - ReadPermission, RESTController, Task, UpdatePermission, allow_empty_body +from . 
import APIDoc, APIRouter, BaseController, CreatePermission, Endpoint, \ + EndpointDoc, ReadPermission, RESTController, Task, UIRouter, \ + UpdatePermission, allow_empty_body logger = logging.getLogger('controllers.rbd_mirror') @@ -199,6 +203,13 @@ def get_daemons_and_pools(): # pylint: disable=R0915 } +class ReplayingData(NamedTuple): + bytes_per_second: Optional[int] = None + seconds_until_synced: Optional[int] = None + syncing_percent: Optional[float] = None + entries_behind_primary: Optional[int] = None + + @ViewCache() @no_type_check def _get_pool_datum(pool_name): @@ -228,15 +239,17 @@ def _get_pool_datum(pool_name): 'state': 'Error' }, rbd.MIRROR_IMAGE_STATUS_STATE_SYNCING: { - 'health': 'syncing' + 'health': 'syncing', + 'state_color': 'success', + 'state': 'Syncing' }, rbd.MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY: { - 'health': 'ok', + 'health': 'syncing', 'state_color': 'success', 'state': 'Starting' }, rbd.MIRROR_IMAGE_STATUS_STATE_REPLAYING: { - 'health': 'ok', + 'health': 'syncing', 'state_color': 'success', 'state': 'Replaying' }, @@ -248,8 +261,9 @@ def _get_pool_datum(pool_name): rbd.MIRROR_IMAGE_STATUS_STATE_STOPPED: { 'health': 'ok', 'state_color': 'info', - 'state': 'Primary' + 'state': 'Stopped' } + } rbdctx = rbd.RBD() @@ -271,6 +285,29 @@ def _get_pool_datum(pool_name): return data +def _update_syncing_image_data(mirror_image, image): + if mirror_image['state'] == 'Replaying': + p = re.compile("replaying, ({.*})") + replaying_data = p.findall(mirror_image['description']) + assert len(replaying_data) == 1 + replaying_data = json.loads(replaying_data[0]) + if 'replay_state' in replaying_data and replaying_data['replay_state'] == 'idle': + image.update({ + 'state_color': 'info', + 'state': 'Idle' + }) + for field in ReplayingData._fields: + try: + image[field] = replaying_data[field] + except KeyError: + pass + else: + p = re.compile("bootstrapping, IMAGE_COPY/COPY_OBJECT (.*)%") + image.update({ + 'progress': 
(p.findall(mirror_image['description']) or [0])[0] + }) + + @ViewCache() def _get_content_data(): # pylint: disable=R0914 pool_names = [pool['pool_name'] for pool in CephService.get_pool_list('rbd') @@ -296,26 +333,21 @@ def _get_content_data(): # pylint: disable=R0914 for mirror_image in mirror_images: image = { 'pool_name': pool_name, - 'name': mirror_image['name'] + 'name': mirror_image['name'], + 'state_color': mirror_image['state_color'], + 'state': mirror_image['state'] } if mirror_image['health'] == 'ok': image.update({ - 'state_color': mirror_image['state_color'], - 'state': mirror_image['state'], 'description': mirror_image['description'] }) image_ready.append(image) elif mirror_image['health'] == 'syncing': - p = re.compile("bootstrapping, IMAGE_COPY/COPY_OBJECT (.*)%") - image.update({ - 'progress': (p.findall(mirror_image['description']) or [0])[0] - }) + _update_syncing_image_data(mirror_image, image) image_syncing.append(image) else: image.update({ - 'state_color': mirror_image['state_color'], - 'state': mirror_image['state'], 'description': mirror_image['description'] }) image_error.append(image) @@ -574,3 +606,41 @@ class RbdMirroringPoolPeer(RESTController): rbd.RBD().mirror_peer_set_attributes(ioctx, peer_uuid, attributes) _reset_view_cache() + + +@UIRouter('/block/mirroring', Scope.RBD_MIRRORING) +class RbdMirroringStatus(BaseController): + @EndpointDoc('Display RBD Mirroring Status') + @Endpoint() + @ReadPermission + def status(self): + status = {'available': True, 'message': None} + orch_status = OrchClient.instance().status() + + # if the orch is not available we can't create the service + # using dashboard. 
+ if not orch_status['available']: + return status + if not CephService.get_service_list('rbd-mirror') or not CephService.get_pool_list('rbd'): + status['available'] = False + status['message'] = 'RBD mirroring is not configured' # type: ignore + return status + + @Endpoint('POST') + @EndpointDoc('Configure RBD Mirroring') + @CreatePermission + def configure(self): + rbd_pool = RBDPool() + service = Service() + + service_spec = { + 'service_type': 'rbd-mirror', + 'placement': {}, + 'unmanaged': False + } + + if not CephService.get_service_list('rbd-mirror'): + service.create(service_spec, 'rbd-mirror') + + if not CephService.get_pool_list('rbd'): + rbd_pool.create() diff --git a/ceph/src/pybind/mgr/dashboard/controllers/rgw.py b/ceph/src/pybind/mgr/dashboard/controllers/rgw.py index 713294d8f..f42b91a0e 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/rgw.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/rgw.py @@ -13,7 +13,7 @@ from ..services.ceph_service import CephService from ..services.rgw_client import NoRgwDaemonsException, RgwClient from ..tools import json_str_to_object, str_to_bool from . 
import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \ - ReadPermission, RESTController, allow_empty_body + ReadPermission, RESTController, UIRouter, allow_empty_body from ._version import APIVersion try: @@ -41,7 +41,7 @@ RGW_USER_SCHEMA = { } -@APIRouter('/rgw', Scope.RGW) +@UIRouter('/rgw', Scope.RGW) @APIDoc("RGW Management API", "Rgw") class Rgw(BaseController): @Endpoint() diff --git a/ceph/src/pybind/mgr/dashboard/controllers/service.py b/ceph/src/pybind/mgr/dashboard/controllers/service.py index d3ba882a1..afe684302 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/service.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/service.py @@ -3,12 +3,11 @@ from typing import Dict, List, Optional import cherrypy from ceph.deployment.service_spec import ServiceSpec -from ..exceptions import DashboardException from ..security import Scope -from ..services.exception import handle_orchestrator_error +from ..services.exception import handle_custom_error, handle_orchestrator_error from ..services.orchestrator import OrchClient, OrchFeature from . import APIDoc, APIRouter, CreatePermission, DeletePermission, Endpoint, \ - ReadPermission, RESTController, Task + ReadPermission, RESTController, Task, UpdatePermission from .orchestrator import raise_if_no_orchestrator @@ -50,6 +49,7 @@ class Service(RESTController): return [d.to_dict() for d in daemons] @CreatePermission + @handle_custom_error('service', exceptions=(ValueError, TypeError)) @raise_if_no_orchestrator([OrchFeature.SERVICE_CREATE]) @handle_orchestrator_error('service') @service_task('create', {'service_name': '{service_name}'}) @@ -59,11 +59,22 @@ class Service(RESTController): :param service_name: The service name, e.g. 'alertmanager'. 
:return: None """ - try: - orch = OrchClient.instance() - orch.services.apply(service_spec) - except (ValueError, TypeError) as e: - raise DashboardException(e, component='service') + + OrchClient.instance().services.apply(service_spec, no_overwrite=True) + + @UpdatePermission + @handle_custom_error('service', exceptions=(ValueError, TypeError)) + @raise_if_no_orchestrator([OrchFeature.SERVICE_CREATE]) + @handle_orchestrator_error('service') + @service_task('edit', {'service_name': '{service_name}'}) + def set(self, service_spec: Dict, service_name: str): # pylint: disable=W0613 + """ + :param service_spec: The service specification as JSON. + :param service_name: The service name, e.g. 'alertmanager'. + :return: None + """ + + OrchClient.instance().services.apply(service_spec, no_overwrite=False) @DeletePermission @raise_if_no_orchestrator([OrchFeature.SERVICE_DELETE]) diff --git a/ceph/src/pybind/mgr/dashboard/frontend/.gherkin-lintrc b/ceph/src/pybind/mgr/dashboard/frontend/.gherkin-lintrc new file mode 100644 index 000000000..706b93bea --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/.gherkin-lintrc @@ -0,0 +1,33 @@ +{ + "no-files-without-scenarios" : "on", + "no-unnamed-features": "on", + "no-unnamed-scenarios": "on", + "no-dupe-scenario-names": ["on", "in-feature"], + "no-dupe-feature-names": "on", + "no-partially-commented-tag-lines": "on", + "indentation": ["on", { + "Feature": 0, + "Background": 4, + "Scenario": 4, + "Step": 8, + "Examples": 8, + "example": 12 + }], + "no-trailing-spaces": "on", + "new-line-at-eof": ["on", "yes"], + "no-multiple-empty-lines": "on", + "no-empty-file": "on", + "no-scenario-outlines-without-examples": "on", + "name-length": "off", + "no-restricted-tags": ["on", {"tags": ["@watch", "@wip"]}], + "use-and": "off", + "no-duplicate-tags": "on", + "no-superfluous-tags": "on", + "no-homogenous-tags": "on", + "one-space-between-tags": "on", + "no-unused-variables": "on", + "no-background-only-scenario": "off", + 
"no-empty-background": "on", + "no-examples-in-scenarios": "on", + "scenario-size": ["on", { "steps-length": {"Background": 3, "Scenario": 15}}] +} diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/fixtures/block-rbd-status.json b/ceph/src/pybind/mgr/dashboard/frontend/cypress/fixtures/block-rbd-status.json new file mode 100644 index 000000000..1d6f30b9a --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/fixtures/block-rbd-status.json @@ -0,0 +1 @@ +{ "available": false, "message": "No RBD pools in the cluster. Please create a pool with the \"rbd\" application label." } \ No newline at end of file diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/block/mirroring.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/block/mirroring.e2e-spec.ts index 120956579..dfba73b27 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/block/mirroring.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/block/mirroring.e2e-spec.ts @@ -20,9 +20,9 @@ describe('Mirroring page', () => { }); it('should show text for all tabs', () => { - mirroring.getTabText(0).should('eq', 'Issues'); - mirroring.getTabText(1).should('eq', 'Syncing'); - mirroring.getTabText(2).should('eq', 'Ready'); + mirroring.getTabText(0).should('eq', 'Issues (0)'); + mirroring.getTabText(1).should('eq', 'Syncing (0)'); + mirroring.getTabText(2).should('eq', 'Ready (0)'); }); describe('checks that edit mode functionality shows in the pools table', () => { diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/create-cluster.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/create-cluster.po.ts index 819afd559..300eddbcc 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/create-cluster.po.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/create-cluster.po.ts @@ -1,7 +1,7 @@ -import { HostsPageHelper } from 
'cypress/integration/cluster/hosts.po'; -import { ServicesPageHelper } from 'cypress/integration/cluster/services.po'; -import { PageHelper } from 'cypress/integration/page-helper.po'; -import { NotificationSidebarPageHelper } from 'cypress/integration/ui/notification.po'; +import { PageHelper } from '../page-helper.po'; +import { NotificationSidebarPageHelper } from '../ui/notification.po'; +import { HostsPageHelper } from './hosts.po'; +import { ServicesPageHelper } from './services.po'; const pages = { index: { url: '#/expand-cluster', id: 'cd-create-cluster' } diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/hosts.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/hosts.po.ts index b741749f7..b769c880a 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/hosts.po.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/hosts.po.ts @@ -158,14 +158,26 @@ export class HostsPageHelper extends PageHelper { this.clickActionButton('start-drain'); this.checkLabelExists(hostname, ['_no_schedule'], true); + // unselect it to avoid colliding with any other selection + // in different steps + this.getTableCell(this.columnIndex.hostname, hostname).click(); + this.clickTab('cd-host-details', hostname, 'Daemons'); cy.get('cd-host-details').within(() => { cy.wait(20000); this.expectTableCount('total', 0); }); + } - // unselect it to avoid colliding with any other selection - // in different steps - this.getTableCell(this.columnIndex.hostname, hostname).click(); + checkServiceInstancesExist(hostname: string, instances: string[]) { + this.getTableCell(this.columnIndex.hostname, hostname) + .parent() + .find(`datatable-body-cell:nth-child(${this.columnIndex.services}) .badge`) + .should(($ele) => { + const serviceInstances = $ele.toArray().map((v) => v.innerText); + for (const instance of instances) { + expect(serviceInstances).to.include(instance); + } + }); } } diff --git 
a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/logs.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/logs.e2e-spec.ts index 9868b89ae..ecc3cc1cd 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/logs.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/logs.e2e-spec.ts @@ -27,8 +27,8 @@ describe('Logs page', () => { logs.expectBreadcrumbText('Logs'); }); - it('should show two tabs', () => { - logs.getTabsCount().should('eq', 2); + it('should show three tabs', () => { + logs.getTabsCount().should('eq', 3); }); it('should show cluster logs tab at first', () => { @@ -38,6 +38,10 @@ describe('Logs page', () => { it('should show audit logs as a second tab', () => { logs.getTabText(1).should('eq', 'Audit Logs'); }); + + it('should show daemon logs as a third tab', () => { + logs.getTabText(2).should('eq', 'Daemon Logs'); + }); }); describe('audit logs respond to pool creation and deletion test', () => { diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/mgr-modules.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/mgr-modules.po.ts index 8faaff09a..04d2eee46 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/mgr-modules.po.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/mgr-modules.po.ts @@ -33,10 +33,12 @@ export class ManagerModulesPageHelper extends PageHelper { // Clears the editable fields for (const input of inputs) { - const id = `#${input.id}`; - cy.get(id).clear(); if (input.oldValue) { - cy.get(id).type(input.oldValue); + const id = `#${input.id}`; + cy.get(id).clear(); + if (input.oldValue) { + cy.get(id).type(input.oldValue); + } } } @@ -44,10 +46,12 @@ export class ManagerModulesPageHelper extends PageHelper { cy.contains('button', 'Update').click(); this.getExpandCollapseElement(name).should('be.visible').click(); for (const input of 
inputs) { - cy.get('.datatable-body') - .eq(1) - .should('contain', input.id) - .and('not.contain', input.newValue); + if (input.oldValue) { + cy.get('.datatable-body') + .eq(1) + .should('contain', input.id) + .and('not.contain', input.newValue); + } } } } diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/osds.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/osds.po.ts index d388a3c5b..cd812f474 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/osds.po.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/osds.po.ts @@ -14,6 +14,7 @@ export class OSDsPageHelper extends PageHelper { }; create(deviceType: 'hdd' | 'ssd', hostname?: string, expandCluster = false) { + cy.get('[aria-label="toggle advanced mode"]').click(); // Click Primary devices Add button cy.get('cd-osd-devices-selection-groups[name="Primary"]').as('primaryGroups'); cy.get('@primaryGroups').find('button').click(); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/services.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/services.po.ts index a7959c7b3..481d6bc9b 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/services.po.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/services.po.ts @@ -41,17 +41,21 @@ export class ServicesPageHelper extends PageHelper { exist?: boolean, count = '1', snmpVersion?: string, - snmpPrivProtocol?: boolean + snmpPrivProtocol?: boolean, + unmanaged = false ) { cy.get(`${this.pages.create.id}`).within(() => { this.selectServiceType(serviceType); switch (serviceType) { case 'rgw': cy.get('#service_id').type('foo'); - cy.get('#count').type(count); + unmanaged ? 
cy.get('label[for=unmanaged]').click() : cy.get('#count').type(count); break; case 'ingress': + if (unmanaged) { + cy.get('label[for=unmanaged]').click(); + } this.selectOption('backend_service', 'rgw.foo'); cy.get('#service_id').should('have.value', 'rgw.foo'); cy.get('#virtual_ip').type('192.168.100.1/24'); @@ -61,7 +65,7 @@ export class ServicesPageHelper extends PageHelper { case 'nfs': cy.get('#service_id').type('testnfs'); - cy.get('#count').type(count); + unmanaged ? cy.get('label[for=unmanaged]').click() : cy.get('#count').type(count); break; case 'snmp-gateway': @@ -85,7 +89,7 @@ export class ServicesPageHelper extends PageHelper { default: cy.get('#service_id').type('test'); - cy.get('#count').type(count); + unmanaged ? cy.get('label[for=unmanaged]').click() : cy.get('#count').type(count); break; } if (serviceType === 'snmp-gateway') { @@ -159,6 +163,18 @@ export class ServicesPageHelper extends PageHelper { }); } + isUnmanaged(serviceName: string, unmanaged: boolean) { + this.getTableCell(this.columnIndex.service_name, serviceName) + .parent() + .find(`datatable-body-cell:nth-child(${this.columnIndex.placement})`) + .should(($ele) => { + const placement = $ele.text().split(';'); + unmanaged + ? 
expect(placement).to.include('unmanaged') + : expect(placement).to.not.include('unmanaged'); + }); + } + deleteService(serviceName: string) { const getRow = this.getTableCell.bind(this, this.columnIndex.service_name); getRow(serviceName).click(); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/01-global.feature.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/01-global.feature.po.ts new file mode 100644 index 000000000..575d4013b --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/01-global.feature.po.ts @@ -0,0 +1,188 @@ +import { And, Given, Then, When } from 'cypress-cucumber-preprocessor/steps'; + +import { UrlsCollection } from './urls.po'; + +const urlsCollection = new UrlsCollection(); + +Given('I am logged in', () => { + cy.login(); + Cypress.Cookies.preserveOnce('token'); +}); + +Given('I am on the {string} page', (page: string) => { + cy.visit(urlsCollection.pages[page].url); + cy.get(urlsCollection.pages[page].id).should('exist'); +}); + +Then('I should be on the {string} page', (page: string) => { + cy.get(urlsCollection.pages[page].id).should('exist'); +}); + +And('I should see a button to {string}', (button: string) => { + cy.get(`[aria-label="${button}"]`).should('be.visible'); +}); + +When('I click on {string} button', (button: string) => { + cy.get(`[aria-label="${button}"]`).first().click(); +}); + +// When you are clicking on an action in the table actions dropdown button +When('I click on {string} button from the table actions', (button: string) => { + cy.get('.table-actions button.dropdown-toggle').first().click(); + cy.get(`[aria-label="${button}"]`).first().click(); +}); + +And('select options {string}', (labels: string) => { + if (labels) { + cy.get('a[data-testid=select-menu-edit]').click(); + for (const label of labels.split(', ')) { + cy.get('.popover-body div.select-menu-item-content').contains(label).click(); + } + } +}); + +And('{string} option 
{string}', (action: string, labels: string) => { + if (labels) { + if (action === 'add') { + cy.get('cd-modal').find('.select-menu-edit').click(); + for (const label of labels.split(', ')) { + cy.get('.popover-body input').type(`${label}{enter}`); + } + } else { + for (const label of labels.split(', ')) { + cy.contains('cd-modal .badge', new RegExp(`^${label}$`)) + .find('.badge-remove') + .click(); + } + } + } +}); + +/** + * Fills in the given field using the value provided + * @param field ID of the field that needs to be filled out. + * @param value Value that should be filled in the field. + */ +And('enter {string} {string}', (field: string, value: string) => { + cy.get('cd-modal').within(() => { + cy.get(`input[id=${field}]`).type(value); + }); +}); + +And('I click on submit button', () => { + cy.get('[data-cy=submitBtn]').click(); +}); + +/** + * Selects any row on the datatable if it matches the given name + */ +When('I select a row {string}', (row: string) => { + cy.get('cd-table .search input').first().clear().type(row); + cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).click(); +}); + +Then('I should see the modal', () => { + cy.get('cd-modal').should('exist'); +}); + +Then('I should not see the modal', () => { + cy.get('cd-modal').should('not.exist'); +}); + +/** + * Some modals have an additional confirmation to be provided + * by ticking the 'Are you sure?' box. 
+ */ +Then('I check the tick box in modal', () => { + cy.get('cd-modal .custom-control-label').click(); +}); + +And('I confirm to {string}', (action: string) => { + cy.contains('cd-modal button', action).click(); + cy.get('cd-modal').should('not.exist'); +}); + +Then('I should see an error in {string} field', (field: string) => { + cy.get('cd-modal').within(() => { + cy.get(`input[id=${field}]`).should('have.class', 'ng-invalid'); + }); +}); + +Then('I should see a row with {string}', (row: string) => { + cy.get('cd-table .search input').first().clear().type(row); + cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).should( + 'exist' + ); +}); + +Then('I should not see a row with {string}', (row: string) => { + cy.get('cd-table .search input').first().clear().type(row); + cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).should( + 'not.exist' + ); +}); + +Then('I should see rows with following entries', (entries) => { + entries.hashes().forEach((entry: any) => { + cy.get('cd-table .search input').first().clear().type(entry.hostname); + cy.contains( + `datatable-body-row datatable-body-cell .datatable-body-cell-label`, + entry.hostname + ).should('exist'); + }); +}); + +And('I should see row {string} have {string}', (row: string, options: string) => { + if (options) { + cy.get('cd-table .search input').first().clear().type(row); + for (const option of options.split(',')) { + cy.contains( + `datatable-body-row datatable-body-cell .datatable-body-cell-label .badge`, + option + ).should('exist'); + } + } +}); + +And('I should see row {string} does not have {string}', (row: string, options: string) => { + if (options) { + cy.get('cd-table .search input').first().clear().type(row); + for (const option of options.split(',')) { + cy.contains( + `datatable-body-row datatable-body-cell .datatable-body-cell-label .badge`, + option + ).should('not.exist'); + } + } +}); + +And('I go to the {string} tab', 
(names: string) => { + for (const name of names.split(', ')) { + cy.contains('.nav.nav-tabs li', name).click(); + } +}); + +And('select {string} {string}', (selectionName: string, option: string) => { + cy.get(`select[name=${selectionName}]`).select(option); + cy.get(`select[name=${selectionName}] option:checked`).contains(option); +}); + +When('I expand the row {string}', (row: string) => { + cy.contains('.datatable-body-row', row).first().find('.tc_expand-collapse').click(); +}); + +And('I should see row {string} have {string} on this tab', (row: string, options: string) => { + if (options) { + cy.get('cd-table').should('exist'); + cy.get('datatable-scroller, .empty-row'); + cy.get('.datatable-row-detail').within(() => { + cy.get('cd-table .search input').first().clear().type(row); + for (const option of options.split(',')) { + cy.contains( + `datatable-body-row datatable-body-cell .datatable-body-cell-label span`, + option + ).should('exist'); + } + }); + } +}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/create-cluster/create-cluster.feature.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/create-cluster/create-cluster.feature.po.ts new file mode 100644 index 000000000..d18c34855 --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/create-cluster/create-cluster.feature.po.ts @@ -0,0 +1,12 @@ +import { Given, Then } from 'cypress-cucumber-preprocessor/steps'; + +Given('I am on the {string} section', (page: string) => { + cy.get('cd-wizard').within(() => { + cy.get('.nav-link').should('contain.text', page).first().click(); + cy.get('.nav-link.active').should('contain.text', page); + }); +}); + +Then('I should see a message {string}', () => { + cy.get('cd-create-cluster').should('contain.text', 'Please expand your cluster first'); +}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/grafana.feature.po.ts 
b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/grafana.feature.po.ts new file mode 100644 index 000000000..7366f8bab --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/grafana.feature.po.ts @@ -0,0 +1,86 @@ +import { e2e } from '@grafana/e2e'; +import { Then, When } from 'cypress-cucumber-preprocessor/steps'; +import 'cypress-iframe'; + +function getIframe() { + cy.frameLoaded('#iframe'); + return cy.iframe(); +} + +Then('I should see the grafana panel {string}', (panels: string) => { + getIframe().within(() => { + for (const panel of panels.split(', ')) { + cy.get('.grafana-app') + .wait(100) + .within(() => { + e2e.components.Panels.Panel.title(panel).should('be.visible'); + }); + } + }); +}); + +When('I view the grafana panel {string}', (panels: string) => { + getIframe().within(() => { + for (const panel of panels.split(', ')) { + cy.get('.grafana-app') + .wait(100) + .within(() => { + e2e.components.Panels.Panel.title(panel).should('be.visible').click(); + e2e.components.Panels.Panel.headerItems('View').should('be.visible').click(); + }); + } + }); +}); + +Then('I should not see {string} in the panel {string}', (value: string, panels: string) => { + getIframe().within(() => { + for (const panel of panels.split(', ')) { + cy.get('.grafana-app') + .wait(100) + .within(() => { + cy.get(`[aria-label="${panel} panel"]`) + .should('be.visible') + .within(() => { + cy.get('span').first().should('not.have.text', value); + }); + }); + } + }); +}); + +Then( + 'I should see the legends {string} in the graph {string}', + (legends: string, panels: string) => { + getIframe().within(() => { + for (const panel of panels.split(', ')) { + cy.get('.grafana-app') + .wait(100) + .within(() => { + cy.get(`[aria-label="${panel} panel"]`) + .should('be.visible') + .within(() => { + for (const legend of legends.split(', ')) { + cy.get('a').contains(legend); + } + }); + }); + } + }); + } +); + +Then('I should not see No Data in 
the graph {string}', (panels: string) => { + getIframe().within(() => { + for (const panel of panels.split(', ')) { + cy.get('.grafana-app') + .wait(100) + .within(() => { + cy.get(`[aria-label="${panel} panel"]`) + .should('be.visible') + .within(() => { + cy.get('div.datapoints-warning').should('not.exist'); + }); + }); + } + }); +}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/urls.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/urls.po.ts new file mode 100644 index 000000000..286355085 --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/common/urls.po.ts @@ -0,0 +1,44 @@ +import { PageHelper } from '../page-helper.po'; + +export class UrlsCollection extends PageHelper { + pages = { + // Cluster expansion + welcome: { url: '#/expand-cluster', id: 'cd-create-cluster' }, + + // Landing page + dashboard: { url: '#/dashboard', id: 'cd-dashboard' }, + + // Hosts + hosts: { url: '#/hosts', id: 'cd-hosts' }, + 'add hosts': { url: '#/hosts/(modal:add)', id: 'cd-host-form' }, + + // Services + services: { url: '#/services', id: 'cd-services' }, + 'create services': { url: '#/services/(modal:create)', id: 'cd-service-form' }, + + // Physical Disks + 'physical disks': { url: '#/inventory', id: 'cd-inventory' }, + + // Monitors + monitors: { url: '#/monitor', id: 'cd-monitor' }, + + // OSDs + osds: { url: '#/osd', id: 'cd-osd-list' }, + 'create osds': { url: '#/osd/create', id: 'cd-osd-form' }, + + // Configuration + configuration: { url: '#/configuration', id: 'cd-configuration' }, + + // Crush Map + 'crush map': { url: '#/crush-map', id: 'cd-crushmap' }, + + // Mgr modules + 'mgr-modules': { url: '#/mgr-modules', id: 'cd-mgr-module-list' }, + + // Logs + logs: { url: '#/logs', id: 'cd-logs' }, + + // RGW Daemons + 'rgw daemons': { url: '#/rgw/daemon', id: 'cd-rgw-daemon-list' } + }; +} diff --git 
a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/grafana/grafana.feature b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/grafana/grafana.feature new file mode 100644 index 000000000..62476ad25 --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/grafana/grafana.feature @@ -0,0 +1,63 @@ +Feature: Grafana panels + + Go to some of the grafana performance section and check if + panels are populated without any issues + + Background: Log in + Given I am logged in + + Scenario Outline: Hosts Overall Performance + Given I am on the "hosts" page + When I go to the "Overall Performance" tab + Then I should see the grafana panel "" + When I view the grafana panel "" + Then I should not see "No Data" in the panel "" + + Examples: + | panel | + | OSD Hosts | + | AVG CPU Busy | + | AVG RAM Utilization | + | Physical IOPS | + | AVG Disk Utilization | + | Network Load | + | CPU Busy - Top 10 Hosts | + | Network Load - Top 10 Hosts | + + Scenario Outline: RGW Daemon Overall Performance + Given I am on the "rgw daemons" page + When I go to the "Overall Performance" tab + Then I should see the grafana panel "" + When I view the grafana panel "" + Then I should not see No Data in the graph "" + And I should see the legends "" in the graph "" + + Examples: + | panel | legends | + | Total Requests/sec by RGW Instance | foo.ceph-node-00, foo.ceph-node-01, foo.ceph-node-02 | + | GET Latencies by RGW Instance | foo.ceph-node-00, foo.ceph-node-01, foo.ceph-node-02 | + | Bandwidth by RGW Instance | foo.ceph-node-00, foo.ceph-node-01, foo.ceph-node-02 | + | PUT Latencies by RGW Instance | foo.ceph-node-00, foo.ceph-node-01, foo.ceph-node-02 | + | Average GET/PUT Latencies | GET AVG, PUT AVG | + | Bandwidth Consumed by Type | GETs, PUTs | + + Scenario Outline: RGW per Daemon Performance + Given I am on the "rgw daemons" page + When I expand the row "" + And I go to the "Performance Details" tab + Then I 
should see the grafana panel "" + When I view the grafana panel "" + Then I should not see No Data in the graph "" + And I should see the legends "" in the graph "" + + Examples: + | name | panel | + | foo.ceph-node-00 | Bandwidth by HTTP Operation | + | foo.ceph-node-00 | HTTP Request Breakdown | + | foo.ceph-node-00 | Workload Breakdown | + | foo.ceph-node-01 | Bandwidth by HTTP Operation | + | foo.ceph-node-01 | HTTP Request Breakdown | + | foo.ceph-node-01 | Workload Breakdown | + | foo.ceph-node-02 | Bandwidth by HTTP Operation | + | foo.ceph-node-02 | HTTP Request Breakdown | + | foo.ceph-node-02 | Workload Breakdown | diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome-page.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome-page.e2e-spec.ts deleted file mode 100644 index 02ebb184b..000000000 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome-page.e2e-spec.ts +++ /dev/null @@ -1,19 +0,0 @@ -import { CreateClusterWizardHelper } from 'cypress/integration/cluster/create-cluster.po'; - -describe('Create cluster page', () => { - const createCluster = new CreateClusterWizardHelper(); - - beforeEach(() => { - cy.login(); - Cypress.Cookies.preserveOnce('token'); - createCluster.navigateTo(); - }); - - it('should open the wizard when Expand Cluster is clicked', () => { - createCluster.createCluster(); - }); - - it('should skip to dashboard landing page when Skip is clicked', () => { - createCluster.doSkip(); - }); -}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome.feature b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome.feature new file mode 100644 index 000000000..6ba2fc4fc --- /dev/null +++ 
b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-create-cluster-welcome.feature @@ -0,0 +1,26 @@ +Feature: Cluster expansion welcome screen + + Go to the welcome screen and decide whether + to proceed to wizard or skips to landing page + + Background: Login + Given I am logged in + + Scenario: Cluster expansion welcome screen + Given I am on the "welcome" page + And I should see a button to "Expand Cluster" + And I should see a button to "Skip" + And I should see a message "Please expand your cluster first" + + Scenario: Go to the Cluster expansion wizard + Given I am on the "welcome" page + And I should see a button to "Expand Cluster" + When I click on "Expand Cluster" button + Then I am on the "Add Hosts" section + + Scenario: Skips the process and go to the landing page + Given I am on the "welcome" page + And I should see a button to "Skip" + When I click on "Skip" button + And I confirm to "Continue" + Then I should be on the "dashboard" page diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.e2e-spec.ts deleted file mode 100644 index a8d4b8768..000000000 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.e2e-spec.ts +++ /dev/null @@ -1,66 +0,0 @@ -import { - CreateClusterHostPageHelper, - CreateClusterWizardHelper -} from 'cypress/integration/cluster/create-cluster.po'; - -describe('Create cluster add host page', () => { - const createCluster = new CreateClusterWizardHelper(); - const createClusterHostPage = new CreateClusterHostPageHelper(); - const hostnames = ['ceph-node-00', 'ceph-node-01', 'ceph-node-02', 'ceph-node-[01-03]']; - const addHost = (hostname: string, exist?: boolean, pattern?: boolean, labels: string[] = []) => { - 
cy.get('button[data-testid=table-action-button]').click(); - createClusterHostPage.add(hostname, exist, false, labels); - if (!pattern) { - createClusterHostPage.checkExist(hostname, true); - } - }; - - beforeEach(() => { - cy.login(); - Cypress.Cookies.preserveOnce('token'); - createCluster.navigateTo(); - createCluster.createCluster(); - }); - - it('should check if title contains Add Hosts', () => { - cy.get('.nav-link').should('contain.text', 'Add Hosts'); - - cy.get('.title').should('contain.text', 'Add Hosts'); - }); - - it('should check existing host and add new hosts', () => { - createClusterHostPage.checkExist(hostnames[0], true); - - addHost(hostnames[1], false); - addHost(hostnames[2], false); - createClusterHostPage.remove(hostnames[1]); - createClusterHostPage.remove(hostnames[2]); - addHost(hostnames[3], false, true); - }); - - it('should remove a host', () => { - createClusterHostPage.remove(hostnames[1]); - }); - - it('should add a host with some predefined labels and verify it', () => { - const labels = ['mon', 'mgr', 'rgw', 'osd']; - addHost(hostnames[1], false, false, labels); - createClusterHostPage.checkLabelExists(hostnames[1], labels, true); - }); - - it('should verify "_no_schedule" label is added', () => { - createClusterHostPage.checkLabelExists(hostnames[1], ['_no_schedule'], true); - createClusterHostPage.checkLabelExists(hostnames[2], ['_no_schedule'], true); - }); - - it('should not add an existing host', () => { - cy.get('.btn.btn-accent').first().click({ force: true }); - createClusterHostPage.add(hostnames[0], true); - }); - - it('should edit host labels', () => { - const labels = ['foo', 'bar']; - createClusterHostPage.editLabels(hostnames[0], labels, true); - createClusterHostPage.editLabels(hostnames[0], labels, false); - }); -}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.feature 
b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.feature new file mode 100644 index 000000000..93c10833d --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/02-create-cluster-add-host.feature @@ -0,0 +1,74 @@ +Feature: Cluster expansion host addition + + Add some hosts and perform some host related actions like editing the labels + and removing the hosts from the cluster and verify all of the actions are performed + as expected + + Background: Cluster expansion wizard + Given I am logged in + And I am on the "welcome" page + And I click on "Expand Cluster" button + + Scenario Outline: Add hosts + Given I am on the "Add Hosts" section + When I click on "Add" button + And enter "hostname" "" + And select options "" + And I click on submit button + Then I should see a row with "" + And I should see row "" have "" + + Examples: + | hostname | labels | + | ceph-node-01 | mon, mgr | + | ceph-node-02 || + + Scenario Outline: Remove hosts + Given I am on the "Add Hosts" section + And I should see a row with "" + When I select a row "" + And I click on "Remove" button from the table actions + Then I should see the modal + And I check the tick box in modal + And I click on "Remove Host" button + Then I should not see the modal + And I should not see a row with "" + + Examples: + | hostname | + | ceph-node-01 | + | ceph-node-02 | + + Scenario: Add hosts using pattern 'ceph-node-[01-02]' + Given I am on the "Add Hosts" section + When I click on "Add" button + And enter "hostname" "ceph-node-[01-02]" + And I click on submit button + Then I should see rows with following entries + | hostname | + | ceph-node-01 | + | ceph-node-02 | + + Scenario: Add exisiting host and verify it failed + Given I am on the "Add Hosts" section + And I should see a row with "ceph-node-00" + When I click on "Add" button + And enter "hostname" "ceph-node-00" + Then I should see an error in 
"hostname" field + + Scenario Outline: Add and remove labels on host + Given I am on the "Add Hosts" section + When I select a row "" + And I click on "Edit" button from the table actions + And "add" option "" + And I click on submit button + Then I should see row "" have "" + When I select a row "" + And I click on "Edit" button from the table actions + And "remove" option "" + And I click on submit button + Then I should see row "" does not have "" + + Examples: + | hostname | labels | + | ceph-node-01 | foo | diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/03-create-cluster-create-services.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/03-create-cluster-create-services.e2e-spec.ts index e386a170a..7668cafcf 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/03-create-cluster-create-services.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/03-create-cluster-create-services.e2e-spec.ts @@ -1,14 +1,16 @@ +/* tslint:disable*/ import { CreateClusterServicePageHelper, CreateClusterWizardHelper -} from 'cypress/integration/cluster/create-cluster.po'; +} from '../../cluster/create-cluster.po'; +/* tslint:enable*/ describe('Create cluster create services page', () => { const createCluster = new CreateClusterWizardHelper(); const createClusterServicePage = new CreateClusterServicePageHelper(); const createService = (serviceType: string, serviceName: string, count = '1') => { - cy.get('button[data-testid=table-action-button]').click(); + cy.get('[aria-label=Create]').first().click(); createClusterServicePage.addService(serviceType, false, count); createClusterServicePage.checkExist(serviceName, true); }; diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/04-create-cluster-create-osds.e2e-spec.ts 
b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/04-create-cluster-create-osds.e2e-spec.ts index 12a2ac8b5..a82be9855 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/04-create-cluster-create-osds.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/04-create-cluster-create-osds.e2e-spec.ts @@ -1,5 +1,7 @@ -import { CreateClusterWizardHelper } from 'cypress/integration/cluster/create-cluster.po'; -import { OSDsPageHelper } from 'cypress/integration/cluster/osds.po'; +/* tslint:disable*/ +import { CreateClusterWizardHelper } from '../../cluster/create-cluster.po'; +import { OSDsPageHelper } from '../../cluster/osds.po'; +/* tslint:enable*/ const osds = new OSDsPageHelper(); @@ -20,7 +22,7 @@ describe('Create cluster create osds page', () => { describe('when Orchestrator is available', () => { it('should create OSDs', () => { - const hostnames = ['ceph-node-00', 'ceph-node-02', 'ceph-node-03']; + const hostnames = ['ceph-node-00', 'ceph-node-01', 'ceph-node-02']; for (const hostname of hostnames) { osds.create('hdd', hostname, true); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/05-create-cluster-review.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/05-create-cluster-review.e2e-spec.ts index ba6e78dde..f93ad7a97 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/05-create-cluster-review.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/05-create-cluster-review.e2e-spec.ts @@ -1,7 +1,9 @@ +/* tslint:disable*/ import { CreateClusterHostPageHelper, CreateClusterWizardHelper -} from 'cypress/integration/cluster/create-cluster.po'; +} from '../../cluster/create-cluster.po'; +/* tslint:enable*/ describe('Create Cluster Review page', () => { const createCluster = new 
CreateClusterWizardHelper(); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/06-cluster-check.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/06-cluster-check.e2e-spec.ts index 8a0d7ccf4..589cbaa90 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/06-cluster-check.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/06-cluster-check.e2e-spec.ts @@ -1,11 +1,15 @@ -import { CreateClusterWizardHelper } from 'cypress/integration/cluster/create-cluster.po'; -import { HostsPageHelper } from 'cypress/integration/cluster/hosts.po'; -import { ServicesPageHelper } from 'cypress/integration/cluster/services.po'; +/* tslint:disable*/ +import { Input, ManagerModulesPageHelper } from '../../cluster/mgr-modules.po'; +import { CreateClusterWizardHelper } from '../../cluster/create-cluster.po'; +import { HostsPageHelper } from '../../cluster/hosts.po'; +import { ServicesPageHelper } from '../../cluster/services.po'; +/* tslint:enable*/ describe('when cluster creation is completed', () => { const createCluster = new CreateClusterWizardHelper(); const services = new ServicesPageHelper(); const hosts = new HostsPageHelper(); + const mgrmodules = new ManagerModulesPageHelper(); const hostnames = ['ceph-node-00', 'ceph-node-01', 'ceph-node-02', 'ceph-node-03']; @@ -28,6 +32,41 @@ describe('when cluster creation is completed', () => { hosts.navigateTo(); }); + it('should check if monitoring stacks are running on the root host', () => { + const monitoringStack = ['alertmanager', 'grafana', 'node-exporter', 'prometheus']; + hosts.clickTab('cd-host-details', 'ceph-node-00', 'Daemons'); + for (const daemon of monitoringStack) { + cy.get('cd-host-details').within(() => { + services.checkServiceStatus(daemon); + }); + } + }); + + // avoid creating node-exporter on the newly added host + // to favour the host 
draining process + it('should reduce the count for node-exporter', () => { + services.editService('node-exporter', '3'); + }); + + // grafana ip address is set to the fqdn by default. + // kcli is not working with that, so setting the IP manually. + it('should change ip address of grafana', { retries: 2 }, () => { + const dashboardArr: Input[] = [ + { + id: 'GRAFANA_API_URL', + newValue: 'https://192.168.100.100:3000', + oldValue: '' + } + ]; + mgrmodules.editMgrModule('dashboard', dashboardArr); + }); + + it('should add one more host', () => { + hosts.navigateTo('add'); + hosts.add(hostnames[3]); + hosts.checkExist(hostnames[3], true); + }); + it('should have removed "_no_schedule" label', () => { for (const hostname of hostnames) { hosts.checkLabelExists(hostname, ['_no_schedule'], false); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/07-osds.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/07-osds.e2e-spec.ts index 90db14668..a0a1dd032 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/07-osds.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/07-osds.e2e-spec.ts @@ -1,4 +1,6 @@ -import { OSDsPageHelper } from 'cypress/integration/cluster/osds.po'; +/* tslint:disable*/ +import { OSDsPageHelper } from '../../cluster/osds.po'; +/* tslint:enable*/ describe('OSDs page', () => { const osds = new OSDsPageHelper(); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/08-hosts.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/08-hosts.e2e-spec.ts index 0abead174..374ecdb0c 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/08-hosts.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/08-hosts.e2e-spec.ts @@ -1,5 +1,7 
@@ -import { HostsPageHelper } from 'cypress/integration/cluster/hosts.po'; -import { ServicesPageHelper } from 'cypress/integration/cluster/services.po'; +/* tslint:disable*/ +import { HostsPageHelper } from '../../cluster/hosts.po'; +import { ServicesPageHelper } from '../../cluster/services.po'; +/* tslint:enable*/ describe('Host Page', () => { const hosts = new HostsPageHelper(); @@ -29,15 +31,19 @@ describe('Host Page', () => { } }); - it('should force maintenance and exit', { retries: 2 }, () => { - hosts.maintenance(hostnames[1], true, true); + it('should force maintenance and exit', () => { + hosts.maintenance(hostnames[3], true, true); }); it('should drain, remove and add the host back', () => { - hosts.drain(hostnames[1]); - hosts.remove(hostnames[1]); + hosts.drain(hostnames[3]); + hosts.remove(hostnames[3]); hosts.navigateTo('add'); - hosts.add(hostnames[1]); - hosts.checkExist(hostnames[1], true); + hosts.add(hostnames[3]); + hosts.checkExist(hostnames[3], true); + }); + + it('should show the exact count of daemons', () => { + hosts.checkServiceInstancesExist(hostnames[0], ['mgr: 1', 'prometheus: 1']); }); }); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/09-services.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/09-services.e2e-spec.ts index 4349b1ecf..ed9ffb989 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/09-services.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/09-services.e2e-spec.ts @@ -1,4 +1,6 @@ -import { ServicesPageHelper } from 'cypress/integration/cluster/services.po'; +/* tslint:disable*/ +import { ServicesPageHelper } from '../../cluster/services.po'; +/* tslint:enable*/ describe('Services page', () => { const services = new ServicesPageHelper(); @@ -101,4 +103,12 @@ describe('Services page', () => { services.deleteService('snmp-gateway'); }); 
+ + it('should create ingress as unmanaged', () => { + services.navigateTo('create'); + services.addService('ingress', false, undefined, undefined, undefined, true); + services.checkExist('ingress.rgw.foo', true); + services.isUnmanaged('ingress.rgw.foo', true); + services.deleteService('ingress.rgw.foo'); + }); }); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/10-nfs-exports.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/10-nfs-exports.e2e-spec.ts index 2d9207529..f4b5499f0 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/10-nfs-exports.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/10-nfs-exports.e2e-spec.ts @@ -1,6 +1,8 @@ -import { ServicesPageHelper } from 'cypress/integration/cluster/services.po'; -import { NFSPageHelper } from 'cypress/integration/orchestrator/workflow/nfs/nfs-export.po'; -import { BucketsPageHelper } from 'cypress/integration/rgw/buckets.po'; +/* tslint:disable*/ +import { ServicesPageHelper } from '../../cluster/services.po'; +import { NFSPageHelper } from '../../orchestrator/workflow/nfs/nfs-export.po'; +import { BucketsPageHelper } from '../../rgw/buckets.po'; +/* tslint:enable*/ describe('nfsExport page', () => { const nfsExport = new NFSPageHelper(); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/nfs/nfs-export.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/nfs/nfs-export.po.ts index 03b3bb18d..c700ef058 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/nfs/nfs-export.po.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/nfs/nfs-export.po.ts @@ -1,4 +1,6 @@ -import { PageHelper } from 'cypress/integration/page-helper.po'; +/* tslint:disable*/ +import { PageHelper } from 
'../../../page-helper.po'; +/* tslint:enable*/ const pages = { index: { url: '#/nfs', id: 'cd-nfs-list' }, diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/ui/api-docs.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/ui/api-docs.e2e-spec.ts index 53c7c3ca8..52994859e 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/ui/api-docs.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/ui/api-docs.e2e-spec.ts @@ -1,4 +1,4 @@ -import { ApiDocsPageHelper } from 'cypress/integration/ui/api-docs.po'; +import { ApiDocsPageHelper } from '../ui/api-docs.po'; describe('Api Docs Page', () => { const apiDocs = new ApiDocsPageHelper(); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/ui/navigation.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/ui/navigation.po.ts index a3673284c..a7ecf3af0 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/ui/navigation.po.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/ui/navigation.po.ts @@ -35,7 +35,7 @@ export class NavigationPageHelper extends PageHelper { { menu: 'Block', submenus: [ - { menu: 'Images', component: 'cd-rbd-list' }, + { menu: 'Images', component: 'cd-error' }, { menu: 'Mirroring', component: 'cd-mirroring' }, { menu: 'iSCSI', component: 'cd-iscsi' } ] @@ -52,9 +52,10 @@ export class NavigationPageHelper extends PageHelper { } checkNavigations(navs: any) { - // The nfs-ganesha and RGW status requests are mocked to ensure that this method runs in time - cy.intercept('/api/nfs-ganesha/status', { fixture: 'nfs-ganesha-status.json' }); - cy.intercept('/api/rgw/status', { fixture: 'rgw-status.json' }); + // The nfs-ganesha, RGW, and block/rbd status requests are mocked to ensure that this method runs in time + cy.intercept('/ui-api/nfs-ganesha/status', { fixture: 'nfs-ganesha-status.json' }); + cy.intercept('/ui-api/rgw/status', { fixture: 'rgw-status.json' 
}); + cy.intercept('/ui-api/block/rbd/status', { fixture: 'block-rbd-status.json' }); navs.forEach((nav: any) => { cy.contains('.simplebar-content li.nav-item a', nav.menu).click(); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/plugins/index.js b/ceph/src/pybind/mgr/dashboard/frontend/cypress/plugins/index.js index ce29463bb..d9294002b 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/plugins/index.js +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/plugins/index.js @@ -1,10 +1,26 @@ + +const browserify = require('@cypress/browserify-preprocessor'); +const cucumber = require('cypress-cucumber-preprocessor').default; module.exports = (on, _config) => { + const options = { + ...browserify.defaultOptions, + typescript: require.resolve("typescript"), + }; + + on('file:preprocessor', cucumber(options)); on('before:browser:launch', (browser, launchOptions) => { if (browser.name === 'chrome' && browser.isHeadless) { launchOptions.args.push('--disable-gpu'); return launchOptions; } }); + + on('task', { + log({ message, optional }) { + optional ? console.log(message, optional) : console.log(message); + return null; + }, + }); }; require('@applitools/eyes-cypress')(module); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/support/commands.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/support/commands.ts index 8c9b9ea96..6ff17f9d6 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/support/commands.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/support/commands.ts @@ -2,14 +2,20 @@ declare global { namespace Cypress { interface Chainable { login(): void; + logToConsole(message: string, optional?: any): void; text(): Chainable; } } } - -import { CdHelperClass } from '~/app/shared/classes/cd-helper.class'; -import { Permissions } from '~/app/shared/models/permissions'; - +// Disabling tslint rule since cypress-cucumber has +// issues with absolute import paths. 
+// This can be removed when +// https://github.com/cypress-io/cypress-browserify-preprocessor/issues/53 +// is fixed. +/* tslint:disable*/ +import { CdHelperClass } from '../../src/app/shared/classes/cd-helper.class'; +import { Permissions } from '../../src/app/shared/models/permissions'; +/* tslint:enable*/ let auth: any; const fillAuth = () => { @@ -44,6 +50,10 @@ Cypress.Commands.add('login', () => { }); // @ts-ignore -Cypress.Commands.add('text', { prevSubject: true }, (subject) => { +Cypress.Commands.add('text', { prevSubject: true }, (subject: any) => { return subject.text(); }); + +Cypress.Commands.add('logToConsole', (message: string, optional?: any) => { + cy.task('log', { message: `(${new Date().toISOString()}) ${message}`, optional }); +}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/281.7c1918629ff8b413cc76.js b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/281.7c1918629ff8b413cc76.js new file mode 100644 index 000000000..ae5e1b628 --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/281.7c1918629ff8b413cc76.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[281],{59281:(ri,Oe,r)=>{r.r(Oe),r.d(Oe,{RgwModule:()=>e_,RoutedRgwModule:()=>ai});var f=r(12057),a=r(24751),w=r(6283),M=r(38549),Ue=r(37496),A=r(79512),O_=r(44466),U_=r(66265),W_=r(23815),u=r.n(W_),Y=r(35758),Me=r(95152),We=r(33394),Ze=r(64762),$e=r(58497),me=r(25917),he=r(19773),Z_=r(96736),$_=r(5304),ge=r(20523),h_=r(93523),e=r(74788);let O=class{constructor(o,_){this.http=o,this.rgwDaemonService=_,this.url="api/rgw/user"}list(){return this.enumerate().pipe((0,he.zg)(o=>o.length>0?(0,Y.D)(o.map(_=>this.get(_))):(0,me.of)([])))}enumerate(){return this.rgwDaemonService.request(o=>this.http.get(this.url,{params:o}))}enumerateEmail(){return this.rgwDaemonService.request(o=>this.http.get(`${this.url}/get_emails`,{params:o}))}get(o){return 
this.rgwDaemonService.request(_=>this.http.get(`${this.url}/${o}`,{params:_}))}getQuota(o){return this.rgwDaemonService.request(_=>this.http.get(`${this.url}/${o}/quota`,{params:_}))}create(o){return this.rgwDaemonService.request(_=>(u().keys(o).forEach(n=>{_=_.append(n,o[n])}),this.http.post(this.url,null,{params:_})))}update(o,_){return this.rgwDaemonService.request(n=>(u().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.put(`${this.url}/${o}`,null,{params:n})))}updateQuota(o,_){return this.rgwDaemonService.request(n=>(u().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.put(`${this.url}/${o}/quota`,null,{params:n})))}delete(o){return this.rgwDaemonService.request(_=>this.http.delete(`${this.url}/${o}`,{params:_}))}createSubuser(o,_){return this.rgwDaemonService.request(n=>(u().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.post(`${this.url}/${o}/subuser`,null,{params:n})))}deleteSubuser(o,_){return this.rgwDaemonService.request(n=>this.http.delete(`${this.url}/${o}/subuser/${_}`,{params:n}))}addCapability(o,_,n){return this.rgwDaemonService.request(i=>(i=(i=i.append("type",_)).append("perm",n),this.http.post(`${this.url}/${o}/capability`,null,{params:i})))}deleteCapability(o,_,n){return this.rgwDaemonService.request(i=>(i=(i=i.append("type",_)).append("perm",n),this.http.delete(`${this.url}/${o}/capability`,{params:i})))}addS3Key(o,_){return this.rgwDaemonService.request(n=>(n=n.append("key_type","s3"),u().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.post(`${this.url}/${o}/key`,null,{params:n})))}deleteS3Key(o,_){return this.rgwDaemonService.request(n=>(n=(n=n.append("key_type","s3")).append("access_key",_),this.http.delete(`${this.url}/${o}/key`,{params:n})))}exists(o){return this.get(o).pipe((0,Z_.h)(!0),(0,$_.K)(_=>(u().isFunction(_.preventDefault)&&_.preventDefault(),(0,me.of)(!1))))}emailExists(o){return o=decodeURIComponent(o),this.enumerateEmail().pipe((0,he.zg)(_=>{const 
n=u().indexOf(_,o);return(0,me.of)(-1!==n)}))}};O.\u0275fac=function(o){return new(o||O)(e.LFG($e.eN),e.LFG(ge.b))},O.\u0275prov=e.Yz7({token:O,factory:O.\u0275fac,providedIn:"root"}),O=(0,Ze.gn)([h_.o,(0,Ze.w6)("design:paramtypes",[$e.eN,ge.b])],O);var D=r(65862),Ae=r(18001),Ie=r(93614),m=r(77205),ve=r(97161),k=(()=>{return(t=k||(k={})).ENABLED="Enabled",t.DISABLED="Disabled",k;var t})(),B=(()=>{return(t=B||(B={})).ENABLED="Enabled",t.SUSPENDED="Suspended",B;var t})(),J=r(62862),Fe=r(63622),V=r(41582),H=r(56310),q=r(87925),X=r(94276),j=r(82945),I_=r(18372),ee=r(30839),K=r(10545);function v_(t,o){1&t&&(e.TgZ(0,"div",9),e.TgZ(1,"label",35),e.SDv(2,36),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",37),e.qZA(),e.qZA())}function F_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,39),e.qZA())}function L_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,40),e.qZA())}function D_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,41),e.qZA())}function y_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,42),e.qZA())}function x_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,43),e.qZA())}function q_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,44),e.qZA())}function w_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,45),e.qZA())}function k_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,46),e.qZA())}function B_(t,o){1&t&&(e.TgZ(0,"option",47),e.SDv(1,48),e.qZA()),2&t&&e.Q6J("ngValue",null)}function H_(t,o){1&t&&(e.TgZ(0,"option",47),e.SDv(1,49),e.qZA()),2&t&&e.Q6J("ngValue",null)}function X_(t,o){if(1&t&&(e.TgZ(0,"option",50),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function K_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,51),e.qZA())}function z_(t,o){1&t&&(e.TgZ(0,"option",47),e.SDv(1,53),e.qZA()),2&t&&e.Q6J("ngValue",null)}function Q_(t,o){1&t&&(e.TgZ(0,"option",47),e.SDv(1,54),e.qZA()),2&t&&e.Q6J("ngValue",null)}function Y_(t,o){if(1&t&&(e.TgZ(0,"option",50),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_.name),e.xp6(1),e.Oqu(_.description)}}function J_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,55),e.qZA())}function 
V_(t,o){if(1&t&&(e.TgZ(0,"select",52),e.YNc(1,z_,2,1,"option",18),e.YNc(2,Q_,2,1,"option",18),e.YNc(3,Y_,2,2,"option",19),e.qZA(),e.YNc(4,J_,2,0,"span",14)),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(1),e.Q6J("ngIf",null===n.placementTargets),e.xp6(1),e.Q6J("ngIf",null!==n.placementTargets),e.xp6(1),e.Q6J("ngForOf",n.placementTargets),e.xp6(1),e.Q6J("ngIf",n.bucketForm.showError("placement-target",_,"required"))}}function j_(t,o){1&t&&(e.ynx(0),e._UZ(1,"input",56),e.BQk())}function et(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset"),e.TgZ(1,"legend",25),e.SDv(2,57),e.qZA(),e.TgZ(3,"div",9),e.TgZ(4,"div",27),e.TgZ(5,"div",28),e.TgZ(6,"input",58),e.NdJ("change",function(){return e.CHM(_),e.oxw(2).setMfaDeleteValidators()}),e.qZA(),e.TgZ(7,"label",59),e.SDv(8,60),e.qZA(),e.TgZ(9,"cd-helper"),e.TgZ(10,"span"),e.SDv(11,61),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}}function _t(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,70),e.qZA())}function tt(t,o){if(1&t&&(e.TgZ(0,"div",9),e.TgZ(1,"label",67),e.SDv(2,68),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",69),e.YNc(5,_t,2,0,"span",14),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.bucketForm.showError("mfa-token-serial",_,"required"))}}function nt(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,74),e.qZA())}function ot(t,o){if(1&t&&(e.TgZ(0,"div",9),e.TgZ(1,"label",71),e.SDv(2,72),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",73),e.YNc(5,nt,2,0,"span",14),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.bucketForm.showError("mfa-token-pin",_,"required"))}}function it(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset"),e.TgZ(1,"legend",25),e.SDv(2,62),e.qZA(),e.TgZ(3,"div",9),e.TgZ(4,"div",27),e.TgZ(5,"div",28),e.TgZ(6,"input",63),e.NdJ("change",function(){return 
e.CHM(_),e.oxw(2).setMfaDeleteValidators()}),e.qZA(),e.TgZ(7,"label",64),e.SDv(8,65),e.qZA(),e.TgZ(9,"cd-helper"),e.TgZ(10,"span"),e.SDv(11,66),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(12,tt,6,1,"div",8),e.YNc(13,ot,6,1,"div",8),e.qZA()}if(2&t){const _=e.oxw(2);e.xp6(12),e.Q6J("ngIf",_.areMfaCredentialsRequired()),e.xp6(1),e.Q6J("ngIf",_.areMfaCredentialsRequired())}}function st(t,o){1&t&&(e.TgZ(0,"div",9),e.TgZ(1,"label",75),e.SDv(2,76),e.qZA(),e.TgZ(3,"div",12),e.TgZ(4,"select",77),e.TgZ(5,"option",78),e.SDv(6,79),e.qZA(),e.TgZ(7,"option",80),e.SDv(8,81),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function at(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,86),e.qZA())}function rt(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,87),e.qZA())}function lt(t,o){if(1&t&&(e.TgZ(0,"div",9),e.TgZ(1,"label",82),e.ynx(2),e.SDv(3,83),e.BQk(),e.TgZ(4,"cd-helper"),e.SDv(5,84),e.qZA(),e.qZA(),e.TgZ(6,"div",12),e._UZ(7,"input",85),e.YNc(8,at,2,0,"span",14),e.YNc(9,rt,2,0,"span",14),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(8),e.Q6J("ngIf",n.bucketForm.showError("lock_retention_period_days",_,"pattern")),e.xp6(1),e.Q6J("ngIf",n.bucketForm.showError("lock_retention_period_days",_,"lockDays"))}}const Le=function(t){return{required:t}};function ct(t,o){if(1&t){const 
_=e.EpF();e.TgZ(0,"div",1),e.TgZ(1,"form",2,3),e.TgZ(3,"div",4),e.TgZ(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.YNc(9,v_,5,0,"div",8),e.TgZ(10,"div",9),e.TgZ(11,"label",10),e.SDv(12,11),e.qZA(),e.TgZ(13,"div",12),e._UZ(14,"input",13),e.YNc(15,F_,2,0,"span",14),e.YNc(16,L_,2,0,"span",14),e.YNc(17,D_,2,0,"span",14),e.YNc(18,y_,2,0,"span",14),e.YNc(19,x_,2,0,"span",14),e.YNc(20,q_,2,0,"span",14),e.YNc(21,w_,2,0,"span",14),e.YNc(22,k_,2,0,"span",14),e.qZA(),e.qZA(),e.TgZ(23,"div",9),e.TgZ(24,"label",15),e.SDv(25,16),e.qZA(),e.TgZ(26,"div",12),e.TgZ(27,"select",17),e.YNc(28,B_,2,1,"option",18),e.YNc(29,H_,2,1,"option",18),e.YNc(30,X_,2,2,"option",19),e.qZA(),e.YNc(31,K_,2,0,"span",14),e.qZA(),e.qZA(),e.TgZ(32,"div",9),e.TgZ(33,"label",20),e.SDv(34,21),e.qZA(),e.TgZ(35,"div",12),e.YNc(36,V_,5,4,"ng-template",null,22,e.W1O),e.YNc(38,j_,2,0,"ng-container",23),e.qZA(),e.qZA(),e.YNc(39,et,12,0,"fieldset",24),e.YNc(40,it,14,2,"fieldset",24),e.TgZ(41,"fieldset"),e.TgZ(42,"legend",25),e.SDv(43,26),e.qZA(),e.TgZ(44,"div",9),e.TgZ(45,"div",27),e.TgZ(46,"div",28),e._UZ(47,"input",29),e.TgZ(48,"label",30),e.SDv(49,31),e.qZA(),e.TgZ(50,"cd-helper"),e.TgZ(51,"span"),e.SDv(52,32),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(53,st,9,0,"div",8),e.YNc(54,lt,10,2,"div",8),e.qZA(),e.qZA(),e.TgZ(55,"div",33),e.TgZ(56,"cd-form-button-panel",34),e.NdJ("submitActionEvent",function(){return e.CHM(_),e.oxw().submit()}),e.ALo(57,"titlecase"),e.ALo(58,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&t){const 
_=e.MAs(2),n=e.MAs(37),i=e.oxw();e.xp6(1),e.Q6J("formGroup",i.bucketForm),e.xp6(6),e.pQV(e.lcZ(6,29,i.action))(e.lcZ(7,31,i.resource)),e.QtT(5),e.xp6(2),e.Q6J("ngIf",i.editing),e.xp6(2),e.Q6J("ngClass",e.VKq(37,Le,!i.editing)),e.xp6(3),e.Q6J("readonly",i.editing)("autofocus",!i.editing),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"required")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"bucketNameInvalid")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"bucketNameNotAllowed")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"containsUpperCase")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"lowerCaseOrNumber")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"ipAddress")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"onlyLowerCaseAndNumbers")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"shouldBeInRange")),e.xp6(5),e.Q6J("autofocus",i.editing),e.xp6(1),e.Q6J("ngIf",null===i.owners),e.xp6(1),e.Q6J("ngIf",null!==i.owners),e.xp6(1),e.Q6J("ngForOf",i.owners),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("owner",_,"required")),e.xp6(2),e.Q6J("ngClass",e.VKq(39,Le,!i.editing)),e.xp6(5),e.Q6J("ngIf",i.editing)("ngIfElse",n),e.xp6(1),e.Q6J("ngIf",i.editing),e.xp6(1),e.Q6J("ngIf",i.editing),e.xp6(13),e.Q6J("ngIf",i.bucketForm.getValue("lock_enabled")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.getValue("lock_enabled")),e.xp6(2),e.Q6J("form",i.bucketForm)("submitText",e.lcZ(57,33,i.action)+" "+e.lcZ(58,35,i.resource))}}let De=(()=>{class t extends 
Ie.E{constructor(_,n,i,s,c,d,E,g){super(),this.route=_,this.router=n,this.formBuilder=i,this.rgwBucketService=s,this.rgwSiteService=c,this.rgwUserService=d,this.notificationService=E,this.actionLabels=g,this.editing=!1,this.owners=null,this.placementTargets=[],this.isVersioningAlreadyEnabled=!1,this.isMfaDeleteAlreadyEnabled=!1,this.icons=D.P,this.editing=this.router.url.startsWith(`/rgw/bucket/${A.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.resource="bucket",this.createForm()}get isVersioningEnabled(){return this.bucketForm.getValue("versioning")}get isMfaDeleteEnabled(){return this.bucketForm.getValue("mfa-delete")}createForm(){const _=this,n=m.h.custom("lockDays",()=>{if(!_.bucketForm||!u().get(_.bucketForm.getRawValue(),"lock_enabled"))return!1;const i=Number(_.bucketForm.getValue("lock_retention_period_days"));return!Number.isInteger(i)||0===i});this.bucketForm=this.formBuilder.group({id:[null],bid:[null,[a.kI.required],this.editing?[]:[m.h.bucketName(),m.h.bucketExistence(!1,this.rgwBucketService)]],owner:[null,[a.kI.required]],"placement-target":[null,this.editing?[]:[a.kI.required]],versioning:[null],"mfa-delete":[null],"mfa-token-serial":[""],"mfa-token-pin":[""],lock_enabled:[{value:!1,disabled:this.editing}],lock_mode:["COMPLIANCE"],lock_retention_period_days:[0,[m.h.number(!1),n]]})}ngOnInit(){const _={owners:this.rgwUserService.enumerate()};this.editing||(_.getPlacementTargets=this.rgwSiteService.get("placement-targets")),this.route.params.subscribe(n=>{if(n.hasOwnProperty("bid")){const i=decodeURIComponent(n.bid);_.getBid=this.rgwBucketService.get(i)}(0,Y.D)(_).subscribe(i=>{if(this.owners=i.owners.sort(),i.getPlacementTargets){const s=i.getPlacementTargets;this.zonegroup=s.zonegroup,u().forEach(s.placement_targets,c=>{c.description=`${c.name} (${"pool"}: 
${c.data_pool})`,this.placementTargets.push(c)}),1===this.placementTargets.length&&this.bucketForm.get("placement-target").setValue(this.placementTargets[0].name)}if(i.getBid){const s=i.getBid,c=u().clone(this.bucketForm.getRawValue());let d=u().pick(s,u().keys(c));d.lock_retention_period_days=this.rgwBucketService.getLockDays(s),d["placement-target"]=s.placement_rule,d.versioning=s.versioning===B.ENABLED,d["mfa-delete"]=s.mfa_delete===k.ENABLED,d=u().merge(c,d),this.bucketForm.setValue(d),this.editing&&(this.isVersioningAlreadyEnabled=this.isVersioningEnabled,this.isMfaDeleteAlreadyEnabled=this.isMfaDeleteEnabled,this.setMfaDeleteValidators(),d.lock_enabled&&this.bucketForm.controls.versioning.disable())}this.loadingReady()})})}goToListView(){this.router.navigate(["/rgw/bucket"])}submit(){if(this.bucketForm.pristine)return void this.goToListView();const _=this.bucketForm.value;if(this.editing){const n=this.getVersioningStatus(),i=this.getMfaDeleteStatus();this.rgwBucketService.update(_.bid,_.id,_.owner,n,i,_["mfa-token-serial"],_["mfa-token-pin"],_.lock_mode,_.lock_retention_period_days).subscribe(()=>{this.notificationService.show(Ae.k.success,"Updated Object Gateway bucket '" + _.bid + "'."),this.goToListView()},()=>{this.bucketForm.setErrors({cdSubmitButton:!0})})}else this.rgwBucketService.create(_.bid,_.owner,this.zonegroup,_["placement-target"],_.lock_enabled,_.lock_mode,_.lock_retention_period_days).subscribe(()=>{this.notificationService.show(Ae.k.success,"Created Object Gateway bucket '" + _.bid + "'"),this.goToListView()},()=>{this.bucketForm.setErrors({cdSubmitButton:!0})})}areMfaCredentialsRequired(){return this.isMfaDeleteEnabled!==this.isMfaDeleteAlreadyEnabled||this.isMfaDeleteAlreadyEnabled&&this.isVersioningEnabled!==this.isVersioningAlreadyEnabled}setMfaDeleteValidators(){const 
_=this.bucketForm.get("mfa-token-serial"),n=this.bucketForm.get("mfa-token-pin");this.areMfaCredentialsRequired()?(_.setValidators(a.kI.required),n.setValidators(a.kI.required)):(_.setValidators(null),n.setValidators(null)),_.updateValueAndValidity(),n.updateValueAndValidity()}getVersioningStatus(){return this.isVersioningEnabled?B.ENABLED:B.SUSPENDED}getMfaDeleteStatus(){return this.isMfaDeleteEnabled?k.ENABLED:k.DISABLED}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(w.gz),e.Y36(w.F0),e.Y36(J.O),e.Y36(Me.o),e.Y36(We.I),e.Y36(O),e.Y36(ve.g),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let o,_,n,i,s,c,d,E,g,C,b,P,G,N,p,U,W,Z,$,h,I,v,F,T,y,x,S,_e,te,ne,oe,ie,se,ae,re,le,ce,de,ue,Re;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",n="Name...",i="Owner",s="Placement target",c="Locking",d="Enabled",E="Enables locking for the objects in the bucket. Locking can only be enabled while creating a bucket.",g="Id",C="This field is required.",b="Bucket names can only contain lowercase letters, numbers, periods and hyphens.",P="The chosen name is already in use.",G="Bucket names must not contain uppercase characters or underscores.",N="Each label must start and end with a lowercase letter or a number.",p="Bucket names cannot be formatted as IP address.",U="Bucket labels cannot be empty and can only contain lowercase letters, numbers and hyphens.",W="Bucket names must be 3 to 63 characters long.",Z="Loading...",$="-- Select a user --",h="This field is required.",I="Loading...",v="-- Select a placement target --",F="This field is required.",T="Versioning",y="Enabled",x="Enables versioning for the objects in the bucket.",S="Multi-Factor Authentication",_e="Delete enabled",te="Enables MFA (multi-factor authentication) Delete, which requires additional authentication for changing the bucket versioning state.",ne="Token Serial Number",oe="This field is required.",ie="Token 
PIN",se="This field is required.",ae="Mode",re="Compliance",le="Governance",ce="Days",de="The number of days that you want to specify for the default retention period that will be applied to new objects placed in this bucket.",ue="The entered value must be a positive integer.",Re="Retention Days must be a positive integer.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","bucketForm","novalidate","",3,"formGroup"],["frm","ngForm"],[1,"card"],[1,"card-header"],o,[1,"card-body"],["class","form-group row",4,"ngIf"],[1,"form-group","row"],["for","bid",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","bid","name","bid","type","text","placeholder",n,"formControlName","bid",1,"form-control",3,"readonly","autofocus"],["class","invalid-feedback",4,"ngIf"],["for","owner",1,"cd-col-form-label","required"],i,["id","owner","name","owner","formControlName","owner",1,"form-control",3,"autofocus"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],["for","placement-target",1,"cd-col-form-label",3,"ngClass"],s,["placementTargetSelect",""],[4,"ngIf","ngIfElse"],[4,"ngIf"],[1,"cd-header"],c,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","lock_enabled","formControlName","lock_enabled","type","checkbox",1,"custom-control-input"],["for","lock_enabled",1,"custom-control-label"],d,E,[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],["for","id",1,"cd-col-form-label"],g,["id","id","name","id","type","text","formControlName","id","readonly","",1,"form-control"],[1,"invalid-feedback"],C,b,P,G,N,p,U,W,[3,"ngValue"],Z,$,[3,"value"],h,["id","placement-target","name","placement-target","formControlName","placement-target",1,"form-control"],I,v,F,["id","placement-target","name","placement-target","formControlName","placement-target","type","text","readonly","",1,"form-control"],T,["type","checkbox","id","versioning","name","versioning","formControlName","versioning",1,"custom-control-input",3,
"change"],["for","versioning",1,"custom-control-label"],y,x,S,["type","checkbox","id","mfa-delete","name","mfa-delete","formControlName","mfa-delete",1,"custom-control-input",3,"change"],["for","mfa-delete",1,"custom-control-label"],_e,te,["for","mfa-token-serial",1,"cd-col-form-label"],ne,["type","text","id","mfa-token-serial","name","mfa-token-serial","formControlName","mfa-token-serial",1,"form-control"],oe,["for","mfa-token-pin",1,"cd-col-form-label"],ie,["type","text","id","mfa-token-pin","name","mfa-token-pin","formControlName","mfa-token-pin",1,"form-control"],se,["for","lock_mode",1,"cd-col-form-label"],ae,["formControlName","lock_mode","name","lock_mode","id","lock_mode",1,"form-control"],["value","COMPLIANCE"],re,["value","GOVERNANCE"],le,["for","lock_retention_period_days",1,"cd-col-form-label"],ce,de,["type","number","id","lock_retention_period_days","formControlName","lock_retention_period_days","min","0",1,"form-control"],ue,Re]},template:function(_,n){1&_&&e.YNc(0,ct,59,41,"div",0),2&_&&e.Q6J("cdFormLoading",n.loading)},directives:[Fe.y,a._Y,a.JL,V.V,a.sg,f.O5,H.P,f.mk,q.o,a.Fj,X.b,a.JJ,a.u,j.U,a.EJ,f.sg,a.Wl,I_.S,ee.p,a.YN,a.Kr,a.wV,a.qQ],pipes:[f.rS,K.m],styles:[""]}),t})();var ye=r(18891),be=r(68136),xe=r(30982),z=r(35905),Pe=r(68774),qe=r(47557),we=r(66369),Q=r(51847),Ee=r(74937),Te=r(63285),ke=r(94928),dt=r(96102),Be=r(68962);function ut(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,24),e.qZA())}function Rt(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimless"),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.selection.bucket_quota.max_size)," ")}}function gt(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,25),e.qZA())}function Et(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.hij(" ",_.selection.bucket_quota.max_objects," ")}}function 
Tt(t,o){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"legend"),e.SDv(2,20),e.qZA(),e.TgZ(3,"table",1),e.TgZ(4,"tbody"),e.TgZ(5,"tr"),e.TgZ(6,"td",2),e.SDv(7,21),e.qZA(),e.TgZ(8,"td",4),e._uU(9),e.ALo(10,"booleanText"),e.qZA(),e.qZA(),e.TgZ(11,"tr"),e.TgZ(12,"td",5),e.SDv(13,22),e.qZA(),e.YNc(14,ut,2,0,"td",0),e.YNc(15,Rt,3,3,"td",0),e.qZA(),e.TgZ(16,"tr"),e.TgZ(17,"td",5),e.SDv(18,23),e.qZA(),e.YNc(19,gt,2,0,"td",0),e.YNc(20,Et,2,1,"td",0),e.qZA(),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw(2);e.xp6(9),e.Oqu(e.lcZ(10,5,_.selection.bucket_quota.enabled)),e.xp6(5),e.Q6J("ngIf",_.selection.bucket_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.selection.bucket_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",_.selection.bucket_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.selection.bucket_quota.max_objects>-1)}}function St(t,o){if(1&t&&(e.ynx(0),e.TgZ(1,"tr"),e.TgZ(2,"td",5),e.SDv(3,26),e.qZA(),e.TgZ(4,"td"),e._uU(5),e.qZA(),e.qZA(),e.TgZ(6,"tr"),e.TgZ(7,"td",5),e.SDv(8,27),e.qZA(),e.TgZ(9,"td"),e._uU(10),e.qZA(),e.qZA(),e.BQk()),2&t){const _=e.oxw(2);e.xp6(5),e.Oqu(_.selection.lock_mode),e.xp6(5),e.Oqu(_.selection.lock_retention_period_days)}}function 
ft(t,o){if(1&t&&(e.ynx(0),e.TgZ(1,"table",1),e.TgZ(2,"tbody"),e.TgZ(3,"tr"),e.TgZ(4,"td",2),e.SDv(5,3),e.qZA(),e.TgZ(6,"td",4),e._uU(7),e.qZA(),e.qZA(),e.TgZ(8,"tr"),e.TgZ(9,"td",5),e.SDv(10,6),e.qZA(),e.TgZ(11,"td"),e._uU(12),e.qZA(),e.qZA(),e.TgZ(13,"tr"),e.TgZ(14,"td",5),e.SDv(15,7),e.qZA(),e.TgZ(16,"td"),e._uU(17),e.qZA(),e.qZA(),e.TgZ(18,"tr"),e.TgZ(19,"td",5),e.SDv(20,8),e.qZA(),e.TgZ(21,"td"),e._uU(22),e.qZA(),e.qZA(),e.TgZ(23,"tr"),e.TgZ(24,"td",5),e.SDv(25,9),e.qZA(),e.TgZ(26,"td"),e._uU(27),e.qZA(),e.qZA(),e.TgZ(28,"tr"),e.TgZ(29,"td",5),e.SDv(30,10),e.qZA(),e.TgZ(31,"td"),e._uU(32),e.qZA(),e.qZA(),e.TgZ(33,"tr"),e.TgZ(34,"td",5),e.SDv(35,11),e.qZA(),e.TgZ(36,"td"),e._uU(37),e.qZA(),e.qZA(),e.TgZ(38,"tr"),e.TgZ(39,"td",5),e.SDv(40,12),e.qZA(),e.TgZ(41,"td"),e._uU(42),e.qZA(),e.qZA(),e.TgZ(43,"tr"),e.TgZ(44,"td",5),e.SDv(45,13),e.qZA(),e.TgZ(46,"td"),e._uU(47),e.qZA(),e.qZA(),e.TgZ(48,"tr"),e.TgZ(49,"td",5),e.SDv(50,14),e.qZA(),e.TgZ(51,"td"),e._uU(52),e.ALo(53,"cdDate"),e.qZA(),e.qZA(),e.TgZ(54,"tr"),e.TgZ(55,"td",5),e.SDv(56,15),e.qZA(),e.TgZ(57,"td"),e._uU(58),e.qZA(),e.qZA(),e.TgZ(59,"tr"),e.TgZ(60,"td",5),e.SDv(61,16),e.qZA(),e.TgZ(62,"td"),e._uU(63),e.qZA(),e.qZA(),e.TgZ(64,"tr"),e.TgZ(65,"td",5),e.SDv(66,17),e.qZA(),e.TgZ(67,"td"),e._uU(68),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(69,Tt,21,7,"div",0),e.TgZ(70,"legend"),e.SDv(71,18),e.qZA(),e.TgZ(72,"table",1),e.TgZ(73,"tbody"),e.TgZ(74,"tr"),e.TgZ(75,"td",2),e.SDv(76,19),e.qZA(),e.TgZ(77,"td",4),e._uU(78),e.ALo(79,"booleanText"),e.qZA(),e.qZA(),e.YNc(80,St,11,2,"ng-container",0),e.qZA(),e.qZA(),e.BQk()),2&t){const 
_=e.oxw();e.xp6(7),e.Oqu(_.selection.bid),e.xp6(5),e.Oqu(_.selection.id),e.xp6(5),e.Oqu(_.selection.owner),e.xp6(5),e.Oqu(_.selection.index_type),e.xp6(5),e.Oqu(_.selection.placement_rule),e.xp6(5),e.Oqu(_.selection.marker),e.xp6(5),e.Oqu(_.selection.max_marker),e.xp6(5),e.Oqu(_.selection.ver),e.xp6(5),e.Oqu(_.selection.master_ver),e.xp6(5),e.Oqu(e.lcZ(53,16,_.selection.mtime)),e.xp6(6),e.Oqu(_.selection.zonegroup),e.xp6(5),e.Oqu(_.selection.versioning),e.xp6(5),e.Oqu(_.selection.mfa_delete),e.xp6(1),e.Q6J("ngIf",_.selection.bucket_quota),e.xp6(9),e.Oqu(e.lcZ(79,18,_.selection.lock_enabled)),e.xp6(2),e.Q6J("ngIf",_.selection.lock_enabled)}}let Ct=(()=>{class t{constructor(_){this.rgwBucketService=_}ngOnChanges(){this.selection&&this.rgwBucketService.get(this.selection.bid).subscribe(_=>{_.lock_retention_period_days=this.rgwBucketService.getLockDays(_),this.selection=_})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Me.o))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-details"]],inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let o,_,n,i,s,c,d,E,g,C,b,P,G,N,p,U,W,Z,$,h,I,v,F;return o="Name",_="ID",n="Owner",i="Index type",s="Placement rule",c="Marker",d="Maximum marker",E="Version",g="Master version",C="Modification time",b="Zonegroup",P="Versioning",G="MFA Delete",N="Locking",p="Enabled",U="Bucket quota",W="Enabled",Z="Maximum size",$="Maximum objects",h="Unlimited",I="Unlimited",v="Mode",F="Days",[[4,"ngIf"],[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],o,[1,"w-75"],[1,"bold"],_,n,i,s,c,d,E,g,C,b,P,G,N,p,U,W,Z,$,h,I,v,F]},template:function(_,n){1&_&&e.YNc(0,ft,81,20,"ng-container",0),2&_&&e.Q6J("ngIf",n.selection)},directives:[f.O5],pipes:[dt.N,Be.T,we.n],styles:["table[_ngcontent-%COMP%]{table-layout:fixed}table[_ngcontent-%COMP%] td[_ngcontent-%COMP%]{word-wrap:break-word}"]}),t})();var He=r(60251);const pt=["bucketSizeTpl"],Mt=["bucketObjectTpl"];function 
mt(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",8),2&t){const _=e.oxw().row;e.Q6J("total",_.bucket_quota.max_size)("used",_.bucket_size)}}function At(t,o){1&t&&e.SDv(0,9)}function bt(t,o){if(1&t&&(e.YNc(0,mt,1,2,"cd-usage-bar",6),e.YNc(1,At,1,0,"ng-template",null,7,e.W1O)),2&t){const _=o.row,n=e.MAs(2);e.Q6J("ngIf",_.bucket_quota.max_size>0&&_.bucket_quota.enabled)("ngIfElse",n)}}function Pt(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",12),2&t){const _=e.oxw().row;e.Q6J("total",_.bucket_quota.max_objects)("used",_.num_objects)("isBinary",!1)}}function Gt(t,o){1&t&&e.SDv(0,13)}function Nt(t,o){if(1&t&&(e.YNc(0,Pt,1,3,"cd-usage-bar",10),e.YNc(1,Gt,1,0,"ng-template",null,11,e.W1O)),2&t){const _=o.row,n=e.MAs(2);e.Q6J("ngIf",_.bucket_quota.max_objects>0&&_.bucket_quota.enabled)("ngIfElse",n)}}let Ut=(()=>{class t extends be.o{constructor(_,n,i,s,c,d,E,g){super(g),this.authStorageService=_,this.dimlessBinaryPipe=n,this.dimlessPipe=i,this.rgwBucketService=s,this.modalService=c,this.urlBuilder=d,this.actionLabels=E,this.ngZone=g,this.columns=[],this.buckets=[],this.selection=new Pe.r}ngOnInit(){this.permission=this.authStorageService.getPermissions().rgw,this.columns=[{name:"Name",prop:"bid",flexGrow:2},{name:"Owner",prop:"owner",flexGrow:2.5},{name:"Used Capacity",prop:"bucket_size",flexGrow:.6,pipe:this.dimlessBinaryPipe},{name:"Capacity Limit %",prop:"size_usage",cellTemplate:this.bucketSizeTpl,flexGrow:.8},{name:"Objects",prop:"num_objects",flexGrow:.6,pipe:this.dimlessPipe},{name:"Object Limit %",prop:"object_usage",cellTemplate:this.bucketObjectTpl,flexGrow:.8}];const 
_=()=>this.selection.first()&&`${encodeURIComponent(this.selection.first().bid)}`;this.tableActions=[{permission:"create",icon:D.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE,canBePrimary:c=>!c.hasSelection},{permission:"update",icon:D.P.edit,routerLink:()=>this.urlBuilder.getEdit(_()),name:this.actionLabels.EDIT},{permission:"delete",icon:D.P.destroy,click:()=>this.deleteAction(),disable:()=>!this.selection.hasSelection,name:this.actionLabels.DELETE,canBePrimary:c=>c.hasMultiSelection}],this.setTableRefreshTimeout()}transformBucketData(){u().forEach(this.buckets,_=>{const n=_.bucket_quota.max_size,i=_.bucket_quota.max_objects;_.bucket_size=0,_.num_objects=0,u().isEmpty(_.usage)||(_.bucket_size=_.usage["rgw.main"].size_actual,_.num_objects=_.usage["rgw.main"].num_objects),_.size_usage=n>0?_.bucket_size/n:void 0,_.object_usage=i>0?_.num_objects/i:void 0})}getBucketList(_){this.setTableRefreshTimeout(),this.rgwBucketService.list(!0).subscribe(n=>{this.buckets=n,this.transformBucketData()},()=>{_.error()})}updateSelection(_){this.selection=_}deleteAction(){this.modalService.show(xe.M,{itemDescription:this.selection.hasSingleSelection?"bucket":"buckets",itemNames:this.selection.selected.map(_=>_.bid),submitActionObservable:()=>new ye.y(_=>{(0,Y.D)(this.selection.selected.map(n=>this.rgwBucketService.delete(n.bid))).subscribe({error:n=>{_.error(n),this.table.refreshBtn()},complete:()=>{_.complete(),this.table.refreshBtn()}})})})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Ee.j),e.Y36(qe.$),e.Y36(we.n),e.Y36(Me.o),e.Y36(Te.Z),e.Y36(Q.F),e.Y36(A.p4),e.Y36(e.R0b))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-list"]],viewQuery:function(_,n){if(1&_&&(e.Gf(z.a,7),e.Gf(pt,7),e.Gf(Mt,7)),2&_){let i;e.iGM(i=e.CRH())&&(n.table=i.first),e.iGM(i=e.CRH())&&(n.bucketSizeTpl=i.first),e.iGM(i=e.CRH())&&(n.bucketObjectTpl=i.first)}},features:[e._Bn([{provide:Q.F,useValue:new 
Q.F("rgw/bucket")}]),e.qOj],decls:8,vars:9,consts:function(){let o,_;return o="No Limit",_="No Limit",[["columnMode","flex","selectionType","multiClick","identifier","bid",3,"autoReload","data","columns","hasDetails","status","setExpandedRow","updateSelection","fetchData"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["bucketSizeTpl",""],["bucketObjectTpl",""],[3,"total","used",4,"ngIf","ngIfElse"],["noSizeQuota",""],[3,"total","used"],o,[3,"total","used","isBinary",4,"ngIf","ngIfElse"],["noObjectQuota",""],[3,"total","used","isBinary"],_]},template:function(_,n){1&_&&(e.TgZ(0,"cd-table",0,1),e.NdJ("setExpandedRow",function(s){return n.setExpandedRow(s)})("updateSelection",function(s){return n.updateSelection(s)})("fetchData",function(s){return n.getBucketList(s)}),e._UZ(2,"cd-table-actions",2),e._UZ(3,"cd-rgw-bucket-details",3),e.qZA(),e.YNc(4,bt,3,2,"ng-template",null,4,e.W1O),e.YNc(6,Nt,3,2,"ng-template",null,5,e.W1O)),2&_&&(e.Q6J("autoReload",!1)("data",n.buckets)("columns",n.columns)("hasDetails",!0)("status",n.tableStatus),e.xp6(2),e.Q6J("permission",n.permission)("selection",n.selection)("tableActions",n.tableActions),e.xp6(1),e.Q6J("selection",n.expandedRow))},directives:[z.a,ke.K,Ct,f.O5,He.O],styles:[""]}),t})();var Wt=r(58111),Xe=r(59376),Zt=r(61350),$t=r(98056),Ke=r(76317);function ht(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table-key-value",11),e.NdJ("fetchData",function(){return e.CHM(_),e.oxw(2).getMetaData()}),e.qZA()}if(2&t){const _=e.oxw(2);e.Q6J("data",_.metadata)}}function It(t,o){if(1&t&&e._UZ(0,"cd-table-performance-counter",12),2&t){const _=e.oxw(2);e.Q6J("serviceId",_.serviceMapId)}}function vt(t,o){if(1&t&&e._UZ(0,"cd-grafana",15),2&t){const _=e.oxw(3);e.Q6J("grafanaPath","rgw-instance-detail?var-rgw_servers=rgw."+_.serviceId)("type","metrics")}}function Ft(t,o){1&t&&(e.TgZ(0,"li",13),e.TgZ(1,"a",4),e.SDv(2,14),e.qZA(),e.YNc(3,vt,1,2,"ng-template",6),e.qZA())}function 
Lt(t,o){if(1&t&&(e.ynx(0),e.TgZ(1,"ul",1,2),e.TgZ(3,"li",3),e.TgZ(4,"a",4),e.SDv(5,5),e.qZA(),e.YNc(6,ht,1,1,"ng-template",6),e.qZA(),e.TgZ(7,"li",7),e.TgZ(8,"a",4),e.SDv(9,8),e.qZA(),e.YNc(10,It,1,1,"ng-template",6),e.qZA(),e.YNc(11,Ft,4,0,"li",9),e.qZA(),e._UZ(12,"div",10),e.BQk()),2&t){const _=e.MAs(2),n=e.oxw();e.xp6(11),e.Q6J("ngIf",n.grafanaPermission.read),e.xp6(1),e.Q6J("ngbNavOutlet",_)}}let Dt=(()=>{class t{constructor(_,n){this.rgwDaemonService=_,this.authStorageService=n,this.serviceId="",this.serviceMapId="",this.grafanaPermission=this.authStorageService.getPermissions().grafana}ngOnChanges(){this.selection&&(this.serviceId=this.selection.id,this.serviceMapId=this.selection.service_map_id)}getMetaData(){u().isEmpty(this.serviceId)||this.rgwDaemonService.get(this.serviceId).subscribe(_=>{this.metadata=_.rgw_metadata})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ge.b),e.Y36(Ee.j))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-daemon-details"]],inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let o,_,n;return o="Details",_="Performance Counters",n="Performance Details",[[4,"ngIf"],["ngbNav","","cdStatefulTab","rgw-daemon-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],o,["ngbNavContent",""],["ngbNavItem","performance-counters"],_,["ngbNavItem","performance-details",4,"ngIf"],[3,"ngbNavOutlet"],[3,"data","fetchData"],["serviceType","rgw",3,"serviceId"],["ngbNavItem","performance-details"],n,["uid","x5ARzZtmk","grafanaStyle","one",3,"grafanaPath","type"]]},template:function(_,n){1&_&&e.YNc(0,Lt,13,2,"ng-container",0),2&_&&e.Q6J("ngIf",n.selection)},directives:[f.O5,M.Pz,Xe.m,M.nv,M.Vx,M.uN,M.tO,Zt.b,$t.p,Ke.F],styles:[""]}),t})();function yt(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table",8),e.NdJ("setExpandedRow",function(i){return e.CHM(_),e.oxw().setExpandedRow(i)})("fetchData",function(i){return 
e.CHM(_),e.oxw().getDaemonList(i)}),e._UZ(1,"cd-rgw-daemon-details",9),e.qZA()}if(2&t){const _=e.oxw();e.Q6J("data",_.daemons)("columns",_.columns)("hasDetails",!0),e.xp6(1),e.Q6J("selection",_.expandedRow)}}function xt(t,o){1&t&&e._UZ(0,"cd-grafana",11),2&t&&e.Q6J("grafanaPath","rgw-overview?")("type","metrics")}function qt(t,o){1&t&&(e.TgZ(0,"li",2),e.TgZ(1,"a",3),e.SDv(2,10),e.qZA(),e.YNc(3,xt,1,2,"ng-template",5),e.qZA())}function wt(t,o){1&t&&e._UZ(0,"cd-grafana",13),2&t&&e.Q6J("grafanaPath","radosgw-sync-overview?")("type","metrics")}function kt(t,o){1&t&&(e.TgZ(0,"li",2),e.TgZ(1,"a",3),e.SDv(2,12),e.qZA(),e.YNc(3,wt,1,2,"ng-template",5),e.qZA())}let Bt=(()=>{class t extends be.o{constructor(_,n,i,s){super(),this.rgwDaemonService=_,this.authStorageService=n,this.cephShortVersionPipe=i,this.rgwSiteService=s,this.columns=[],this.daemons=[],this.updateDaemons=c=>{this.daemons=c}}ngOnInit(){this.grafanaPermission=this.authStorageService.getPermissions().grafana,this.columns=[{name:"ID",prop:"id",flexGrow:2},{name:"Hostname",prop:"server_hostname",flexGrow:2},{name:"Zone",prop:"zone_name",flexGrow:2},{name:"Zone Group",prop:"zonegroup_name",flexGrow:2},{name:"Realm",prop:"realm_name",flexGrow:2},{name:"Version",prop:"version",flexGrow:1,pipe:this.cephShortVersionPipe}],this.rgwSiteService.get("realms").subscribe(_=>this.isMultiSite=_.length>0)}getDaemonList(_){this.rgwDaemonService.list().subscribe(this.updateDaemons,()=>{_.error()})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ge.b),e.Y36(Ee.j),e.Y36(Wt.F),e.Y36(We.I))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-daemon-list"]],features:[e.qOj],decls:9,vars:3,consts:function(){let o,_,n;return o="Daemons List",_="Overall Performance",n="Sync 
Performance",[["ngbNav","",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem",""],["ngbNavLink",""],o,["ngbNavContent",""],["ngbNavItem","",4,"ngIf"],[3,"ngbNavOutlet"],["columnMode","flex",3,"data","columns","hasDetails","setExpandedRow","fetchData"],["cdTableDetail","",3,"selection"],_,["uid","WAkugZpiz","grafanaStyle","two",3,"grafanaPath","type"],n,["uid","rgw-sync-overview","grafanaStyle","two",3,"grafanaPath","type"]]},template:function(_,n){if(1&_&&(e.TgZ(0,"ul",0,1),e.TgZ(2,"li",2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,yt,2,4,"ng-template",5),e.qZA(),e.YNc(6,qt,4,0,"li",6),e.YNc(7,kt,4,0,"li",6),e.qZA(),e._UZ(8,"div",7)),2&_){const i=e.MAs(1);e.xp6(6),e.Q6J("ngIf",n.grafanaPermission.read),e.xp6(1),e.Q6J("ngIf",n.grafanaPermission.read&&n.isMultiSite),e.xp6(1),e.Q6J("ngbNavOutlet",i)}},directives:[M.Pz,M.nv,M.Vx,M.uN,f.O5,M.tO,z.a,Dt,Ke.F],styles:[""]}),t})();var Ht=r(58071),Ge=r(28211),Se=(()=>{return(t=Se||(Se={})).USERS="users",t.BUCKETS="buckets",t.METADATA="metadata",t.USAGE="usage",t.ZONE="zone",Se;var t})();let ze=(()=>{class t{static getAll(){return Object.values(t.capabilities)}}return t.capabilities=Se,t})();var fe=r(60312);function Xt(t,o){1&t&&e._UZ(0,"input",22),2&t&&e.Q6J("readonly",!0)}function Kt(t,o){1&t&&(e.TgZ(0,"option",17),e.SDv(1,25),e.qZA()),2&t&&e.Q6J("ngValue",null)}function zt(t,o){if(1&t&&(e.TgZ(0,"option",26),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function Qt(t,o){if(1&t&&(e.TgZ(0,"select",23),e.YNc(1,Kt,2,1,"option",24),e.YNc(2,zt,2,2,"option",19),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf",null!==_.types),e.xp6(1),e.Q6J("ngForOf",_.types)}}function Yt(t,o){1&t&&(e.TgZ(0,"span",27),e.SDv(1,28),e.qZA())}function Jt(t,o){if(1&t&&(e.TgZ(0,"option",26),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Vt(t,o){1&t&&(e.TgZ(0,"span",27),e.SDv(1,29),e.qZA())}const jt=function(t){return{required:t}},en=function(){return["read","write","*"]};let 
_n=(()=>{class t{constructor(_,n,i){this.formBuilder=_,this.activeModal=n,this.actionLabels=i,this.submitAction=new e.vpe,this.editing=!0,this.types=[],this.resource="capability",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({type:[null,[a.kI.required]],perm:[null,[a.kI.required]]})}setEditing(_=!0){this.editing=_,this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.ADD}setValues(_,n){this.formGroup.setValue({type:_,perm:n})}setCapabilities(_){const n=[];_.forEach(i=>{n.push(i.type)}),this.types=[],ze.getAll().forEach(i=>{-1===u().indexOf(n,i)&&this.types.push(i)})}onSubmit(){this.submitAction.emit(this.formGroup.value),this.activeModal.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(J.O),e.Y36(M.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-capability-modal"]],outputs:{submitAction:"submitAction"},decls:29,vars:24,consts:function(){let o,_,n,i,s,c,d;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Type",n="Permission",i="-- Select a permission --",s="-- Select a type --",c="This field is required.",d="This field is 
required.",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","type",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","type","class","form-control","type","text","formControlName","type",3,"readonly",4,"ngIf"],["id","type","class","form-control","formControlName","type","autofocus","",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["for","perm",1,"cd-col-form-label","required"],n,["id","perm","formControlName","perm",1,"form-control"],[3,"ngValue"],i,[3,"value",4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["id","type","type","text","formControlName","type",1,"form-control",3,"readonly"],["id","type","formControlName","type","autofocus","",1,"form-control"],[3,"ngValue",4,"ngIf"],s,[3,"value"],[1,"invalid-feedback"],c,d]},template:function(_,n){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e.YNc(13,Xt,1,1,"input",11),e.YNc(14,Qt,3,2,"select",12),e.YNc(15,Yt,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(16,"div",7),e.TgZ(17,"label",14),e.SDv(18,15),e.qZA(),e.TgZ(19,"div",10),e.TgZ(20,"select",16),e.TgZ(21,"option",17),e.SDv(22,18),e.qZA(),e.YNc(23,Jt,2,2,"option",19),e.qZA(),e.YNc(24,Vt,2,0,"span",13),e.qZA(),e.qZA(),e.qZA(),e.TgZ(25,"div",20),e.TgZ(26,"cd-form-button-panel",21),e.NdJ("submitActionEvent",function(){return n.onSubmit()}),e.ALo(27,"titlecase"),e.ALo(28,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_){const 
i=e.MAs(7);e.Q6J("modalRef",n.activeModal),e.xp6(4),e.pQV(e.lcZ(3,13,n.action))(e.lcZ(4,15,n.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",n.formGroup),e.xp6(4),e.Q6J("ngClass",e.VKq(21,jt,!n.editing)),e.xp6(3),e.Q6J("ngIf",n.editing),e.xp6(1),e.Q6J("ngIf",!n.editing),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("type",i,"required")),e.xp6(6),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",e.DdM(23,en)),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("perm",i,"required")),e.xp6(2),e.Q6J("form",n.formGroup)("submitText",e.lcZ(27,17,n.action)+" "+e.lcZ(28,19,n.resource))}},directives:[fe.z,a._Y,a.JL,V.V,a.sg,H.P,f.mk,f.O5,q.o,a.EJ,X.b,a.JJ,a.u,a.YN,a.Kr,f.sg,ee.p,a.Fj,j.U],pipes:[f.rS,K.m],styles:[""]}),t})();var Ce=r(4416),pe=r(58039);function tn(t,o){1&t&&e._UZ(0,"input",17),2&t&&e.Q6J("readonly",!0)}function nn(t,o){1&t&&(e.TgZ(0,"option",21),e.SDv(1,22),e.qZA()),2&t&&e.Q6J("ngValue",null)}function on(t,o){if(1&t&&(e.TgZ(0,"option",23),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function sn(t,o){if(1&t&&(e.TgZ(0,"select",18),e.YNc(1,nn,2,1,"option",19),e.YNc(2,on,2,2,"option",20),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf",null!==_.userCandidates),e.xp6(1),e.Q6J("ngForOf",_.userCandidates)}}function an(t,o){1&t&&(e.TgZ(0,"span",24),e.SDv(1,25),e.qZA())}function rn(t,o){1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"div",26),e.TgZ(2,"div",27),e._UZ(3,"input",28),e.TgZ(4,"label",29),e.SDv(5,30),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function ln(t,o){1&t&&(e.TgZ(0,"span",24),e.SDv(1,38),e.qZA())}const Ne=function(t){return{required:t}};function cn(t,o){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",31),e.SDv(2,32),e.qZA(),e.TgZ(3,"div",10),e.TgZ(4,"div",33),e._UZ(5,"input",34),e.TgZ(6,"span",35),e._UZ(7,"button",36),e._UZ(8,"cd-copy-2-clipboard-button",37),e.qZA(),e.qZA(),e.YNc(9,ln,2,0,"span",13),e.qZA(),e.qZA()),2&t){const 
_=e.oxw(),n=e.MAs(7);e.xp6(1),e.Q6J("ngClass",e.VKq(3,Ne,!_.viewing)),e.xp6(4),e.Q6J("readonly",_.viewing),e.xp6(4),e.Q6J("ngIf",_.formGroup.showError("access_key",n,"required"))}}function dn(t,o){1&t&&(e.TgZ(0,"span",24),e.SDv(1,44),e.qZA())}function un(t,o){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",39),e.SDv(2,40),e.qZA(),e.TgZ(3,"div",10),e.TgZ(4,"div",33),e._UZ(5,"input",41),e.TgZ(6,"span",35),e._UZ(7,"button",42),e._UZ(8,"cd-copy-2-clipboard-button",43),e.qZA(),e.qZA(),e.YNc(9,dn,2,0,"span",13),e.qZA(),e.qZA()),2&t){const _=e.oxw(),n=e.MAs(7);e.xp6(1),e.Q6J("ngClass",e.VKq(3,Ne,!_.viewing)),e.xp6(4),e.Q6J("readonly",_.viewing),e.xp6(4),e.Q6J("ngIf",_.formGroup.showError("secret_key",n,"required"))}}let Qe=(()=>{class t{constructor(_,n,i){this.formBuilder=_,this.activeModal=n,this.actionLabels=i,this.submitAction=new e.vpe,this.viewing=!0,this.userCandidates=[],this.resource="S3 Key",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({user:[null,[a.kI.required]],generate_key:[!0],access_key:[null,[m.h.requiredIf({generate_key:!1})]],secret_key:[null,[m.h.requiredIf({generate_key:!1})]]})}setViewing(_=!0){this.viewing=_,this.action=this.viewing?this.actionLabels.SHOW:this.actionLabels.CREATE}setValues(_,n,i){this.formGroup.setValue({user:_,generate_key:u().isEmpty(n),access_key:n,secret_key:i})}setUserCandidates(_){this.userCandidates=_}onSubmit(){this.submitAction.emit(this.formGroup.value),this.activeModal.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(J.O),e.Y36(M.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-s3-key-modal"]],outputs:{submitAction:"submitAction"},decls:23,vars:24,consts:function(){let o,_,n,i,s,c,d,E,g;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",n="-- Select a username --",i="This field is required.",s="Auto-generate key",c="Access key",d="This field is required.",E="Secret key",g="This field is 
required.",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","user",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","user","class","form-control","type","text","formControlName","user",3,"readonly",4,"ngIf"],["id","user","class","form-control","formControlName","user","autofocus","",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["class","form-group row",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","showSubmit","submitActionEvent"],["id","user","type","text","formControlName","user",1,"form-control",3,"readonly"],["id","user","formControlName","user","autofocus","",1,"form-control"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],[3,"ngValue"],n,[3,"value"],[1,"invalid-feedback"],i,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","generate_key","type","checkbox","formControlName","generate_key",1,"custom-control-input"],["for","generate_key",1,"custom-control-label"],s,["for","access_key",1,"cd-col-form-label",3,"ngClass"],c,[1,"input-group"],["id","access_key","type","password","formControlName","access_key",1,"form-control",3,"readonly"],[1,"input-group-append"],["type","button","cdPasswordButton","access_key",1,"btn","btn-light"],["source","access_key"],d,["for","secret_key",1,"cd-col-form-label",3,"ngClass"],E,["id","secret_key","type","password","formControlName","secret_key",1,"form-control",3,"readonly"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],g]},template:function(_,n){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e.YNc(13,tn,1,1,"input",11),e.YNc(14,sn,3,2,"select",12),e.YNc(15,an,2,0,"span",13),e.qZA(),e.qZA(),e.YNc(16,rn,6,0,"div",14),e.YNc(17,cn,10,5,"div",14),e.YNc(18
,un,10,5,"div",14),e.qZA(),e.TgZ(19,"div",15),e.TgZ(20,"cd-form-button-panel",16),e.NdJ("submitActionEvent",function(){return n.onSubmit()}),e.ALo(21,"titlecase"),e.ALo(22,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",n.activeModal),e.xp6(4),e.pQV(e.lcZ(3,14,n.action))(e.lcZ(4,16,n.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",n.formGroup),e.xp6(4),e.Q6J("ngClass",e.VKq(22,Ne,!n.viewing)),e.xp6(3),e.Q6J("ngIf",n.viewing),e.xp6(1),e.Q6J("ngIf",!n.viewing),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",!n.viewing),e.xp6(1),e.Q6J("ngIf",!n.formGroup.getValue("generate_key")),e.xp6(1),e.Q6J("ngIf",!n.formGroup.getValue("generate_key")),e.xp6(2),e.Q6J("form",n.formGroup)("submitText",e.lcZ(21,18,n.action)+" "+e.lcZ(22,20,n.resource))("showSubmit",!n.viewing)}},directives:[fe.z,a._Y,a.JL,V.V,a.sg,H.P,f.mk,f.O5,ee.p,q.o,a.Fj,X.b,a.JJ,a.u,a.EJ,j.U,f.sg,a.YN,a.Kr,a.Wl,Ce.C,pe.s],pipes:[f.rS,K.m],styles:[""]}),t})();class Rn{}function gn(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,30),e.qZA())}function En(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,31),e.qZA())}function Tn(t,o){if(1&t&&(e.TgZ(0,"option",32),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Sn(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,33),e.qZA())}function fn(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,48),e.qZA())}function Cn(t,o){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",41),e.SDv(2,42),e.qZA(),e.TgZ(3,"div",10),e.TgZ(4,"div",43),e._UZ(5,"input",44),e.TgZ(6,"span",45),e._UZ(7,"button",46),e._UZ(8,"cd-copy-2-clipboard-button",47),e.qZA(),e.qZA(),e.YNc(9,fn,2,0,"span",15),e.qZA(),e.qZA()),2&t){const _=e.oxw(2),n=e.MAs(7);e.xp6(9),e.Q6J("ngIf",_.formGroup.showError("secret_key",n,"required"))}}function 
pn(t,o){if(1&t&&(e.TgZ(0,"fieldset"),e.TgZ(1,"legend"),e.SDv(2,34),e.qZA(),e.TgZ(3,"div",7),e.TgZ(4,"div",35),e.TgZ(5,"div",36),e._UZ(6,"input",37),e.TgZ(7,"label",38),e.SDv(8,39),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(9,Cn,10,1,"div",40),e.qZA()),2&t){const _=e.oxw();e.xp6(9),e.Q6J("ngIf",!_.editing&&!_.formGroup.getValue("generate_secret"))}}const Mn=function(t){return{required:t}},mn=function(){return["read","write"]};let An=(()=>{class t{constructor(_,n,i){this.formBuilder=_,this.bsModalRef=n,this.actionLabels=i,this.submitAction=new e.vpe,this.editing=!0,this.subusers=[],this.resource="Subuser",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({uid:[null],subuid:[null,[a.kI.required,this.subuserValidator()]],perm:[null,[a.kI.required]],generate_secret:[!0],secret_key:[null,[m.h.requiredIf({generate_secret:!1})]]})}subuserValidator(){const _=this;return n=>_.editing||(0,m.P)(n.value)?null:_.subusers.some(s=>u().isEqual(_.getSubuserName(s.id),n.value))?{subuserIdExists:!0}:null}getSubuserName(_){if(u().isEmpty(_))return _;const n=_.match(/([^:]+)(:(.+))?/);return u().isUndefined(n[3])?n[1]:n[3]}setEditing(_=!0){this.editing=_,this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE}setValues(_,n="",i=""){this.formGroup.setValue({uid:_,subuid:this.getSubuserName(n),perm:i,generate_secret:!0,secret_key:null})}setSubusers(_){this.subusers=_}onSubmit(){const _=this.formGroup.value,n=new Rn;n.id=`${_.uid}:${_.subuid}`,n.permissions=_.perm,n.generate_secret=_.generate_secret,n.secret_key=_.secret_key,this.submitAction.emit(n),this.bsModalRef.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(J.O),e.Y36(M.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-subuser-modal"]],outputs:{submitAction:"submitAction"},decls:39,vars:26,consts:function(){let o,_,n,i,s,c,d,E,g,C,b,P,G,N;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",n="Subuser",i="Permission",s="-- Select a permission 
--",c="read, write",d="full",E="This field is required.",g="The chosen subuser ID is already in use.",C="This field is required.",b="Swift key",P="Auto-generate secret",G="Secret key",N="This field is required.",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","uid",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["id","uid","type","text","formControlName","uid",1,"form-control",3,"readonly"],["for","subuid",1,"cd-col-form-label",3,"ngClass"],n,["id","subuid","type","text","formControlName","subuid","autofocus","",1,"form-control",3,"readonly"],["class","invalid-feedback",4,"ngIf"],["for","perm",1,"cd-col-form-label","required"],i,["id","perm","formControlName","perm",1,"form-control"],[3,"ngValue"],s,[3,"value",4,"ngFor","ngForOf"],["value","read-write"],c,["value","full-control"],d,[4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],E,g,[3,"value"],C,b,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","generate_secret","type","checkbox","formControlName","generate_secret",1,"custom-control-input"],["for","generate_secret",1,"custom-control-label"],P,["class","form-group 
row",4,"ngIf"],["for","secret_key",1,"cd-col-form-label","required"],G,[1,"input-group"],["id","secret_key","type","password","formControlName","secret_key",1,"form-control"],[1,"input-group-append"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],N]},template:function(_,n){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.qZA(),e.qZA(),e.TgZ(14,"div",7),e.TgZ(15,"label",12),e.SDv(16,13),e.qZA(),e.TgZ(17,"div",10),e._UZ(18,"input",14),e.YNc(19,gn,2,0,"span",15),e.YNc(20,En,2,0,"span",15),e.qZA(),e.qZA(),e.TgZ(21,"div",7),e.TgZ(22,"label",16),e.SDv(23,17),e.qZA(),e.TgZ(24,"div",10),e.TgZ(25,"select",18),e.TgZ(26,"option",19),e.SDv(27,20),e.qZA(),e.YNc(28,Tn,2,2,"option",21),e.TgZ(29,"option",22),e.SDv(30,23),e.qZA(),e.TgZ(31,"option",24),e.SDv(32,25),e.qZA(),e.qZA(),e.YNc(33,Sn,2,0,"span",15),e.qZA(),e.qZA(),e.YNc(34,pn,10,1,"fieldset",26),e.qZA(),e.TgZ(35,"div",27),e.TgZ(36,"cd-form-button-panel",28),e.NdJ("submitActionEvent",function(){return n.onSubmit()}),e.ALo(37,"titlecase"),e.ALo(38,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",n.bsModalRef),e.xp6(4),e.pQV(e.lcZ(3,15,n.action))(e.lcZ(4,17,n.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",n.formGroup),e.xp6(7),e.Q6J("readonly",!0),e.xp6(2),e.Q6J("ngClass",e.VKq(23,Mn,!n.editing)),e.xp6(3),e.Q6J("readonly",n.editing),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("subuid",i,"required")),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("subuid",i,"subuserIdExists")),e.xp6(6),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",e.DdM(25,mn)),e.xp6(5),e.Q6J("ngIf",n.formGroup.showError("perm",i,"required")),e.xp6(1),e.Q6J("ngIf",!n.editing),e.xp6(2),e.Q6J("form",n.formGroup)("submitText",e.lcZ(37,19,n.action)+" 
"+e.lcZ(38,21,n.resource))}},directives:[fe.z,a._Y,a.JL,V.V,a.sg,H.P,q.o,a.Fj,X.b,a.JJ,a.u,f.mk,j.U,f.O5,a.EJ,a.YN,a.Kr,f.sg,ee.p,a.Wl,Ce.C,pe.s],pipes:[f.rS,K.m],styles:[""]}),t})();var bn=r(13472);let Ye=(()=>{class t{constructor(_,n){this.activeModal=_,this.actionLabels=n,this.resource="Swift Key",this.action=this.actionLabels.SHOW}setValues(_,n){this.user=_,this.secret_key=n}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(M.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-swift-key-modal"]],decls:24,vars:11,consts:function(){let o,_,n;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",n="Secret key",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],[1,"modal-body"],["novalidate",""],[1,"form-group","row"],["for","user",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["id","user","name","user","type","text",1,"form-control",3,"readonly","ngModel","ngModelChange"],["for","secret_key",1,"cd-col-form-label"],n,[1,"input-group"],["id","secret_key","name","secret_key","type","password",1,"form-control",3,"ngModel","readonly","ngModelChange"],[1,"input-group-append"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],[1,"modal-footer"],[3,"backAction"]]},template:function(_,n){1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"div",4),e.TgZ(7,"form",5),e.TgZ(8,"div",6),e.TgZ(9,"label",7),e.SDv(10,8),e.qZA(),e.TgZ(11,"div",9),e.TgZ(12,"input",10),e.NdJ("ngModelChange",function(s){return n.user=s}),e.qZA(),e.qZA(),e.qZA(),e.TgZ(13,"div",6),e.TgZ(14,"label",11),e.SDv(15,12),e.qZA(),e.TgZ(16,"div",9),e.TgZ(17,"div",13),e.TgZ(18,"input",14),e.NdJ("ngModelChange",function(s){return 
n.secret_key=s}),e.qZA(),e.TgZ(19,"span",15),e._UZ(20,"button",16),e._UZ(21,"cd-copy-2-clipboard-button",17),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.TgZ(22,"div",18),e.TgZ(23,"cd-back-button",19),e.NdJ("backAction",function(){return n.activeModal.close()}),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_&&(e.Q6J("modalRef",n.activeModal),e.xp6(4),e.pQV(e.lcZ(3,7,n.action))(e.lcZ(4,9,n.resource)),e.QtT(2),e.xp6(8),e.Q6J("readonly",!0)("ngModel",n.user),e.xp6(6),e.Q6J("ngModel",n.secret_key)("readonly",!0))},directives:[fe.z,a._Y,a.JL,a.F,H.P,q.o,a.Fj,X.b,a.JJ,a.On,Ce.C,pe.s,bn.W],pipes:[f.rS,K.m],styles:[""]}),t})();var Pn=r(17932);function Gn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,50),e.qZA())}function Nn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,51),e.qZA())}function On(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,52),e.qZA())}function Un(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,56),e.qZA())}function Wn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,57),e.qZA())}function Zn(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",53),e.SDv(2,54),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",55),e.YNc(5,Un,2,0,"span",13),e.YNc(6,Wn,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(4),e.Q6J("readonly",n.editing),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("tenant",_,"pattern")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("tenant",_,"notUnique"))}}function $n(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,58),e.qZA())}function hn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,59),e.qZA())}function In(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,60),e.qZA())}function vn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,61),e.qZA())}function Fn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,64),e.qZA())}function Ln(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,65),e.qZA())}function Dn(t,o){if(1&t&&(e.TgZ(0,"div",8),e._UZ(1,"label",62),e.TgZ(2,"div",11),e._UZ(3,"input",63),e.YNc(4,Fn,2,0,"span",13),e.YNc(5,Ln,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const 
_=e.MAs(2),n=e.oxw();e.xp6(4),e.Q6J("ngIf",n.userForm.showError("max_buckets",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("max_buckets",_,"min"))}}function yn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,77),e.qZA())}function xn(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",70),e.SDv(2,71),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"div",72),e._UZ(5,"input",73),e.TgZ(6,"span",74),e._UZ(7,"button",75),e._UZ(8,"cd-copy-2-clipboard-button",76),e.qZA(),e.qZA(),e.YNc(9,yn,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(9),e.Q6J("ngIf",n.userForm.showError("access_key",_,"required"))}}function qn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,83),e.qZA())}function wn(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",78),e.SDv(2,79),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"div",72),e._UZ(5,"input",80),e.TgZ(6,"span",74),e._UZ(7,"button",81),e._UZ(8,"cd-copy-2-clipboard-button",82),e.qZA(),e.qZA(),e.YNc(9,qn,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(9),e.Q6J("ngIf",n.userForm.showError("secret_key",_,"required"))}}function kn(t,o){if(1&t&&(e.TgZ(0,"fieldset"),e.TgZ(1,"legend"),e.SDv(2,66),e.qZA(),e.TgZ(3,"div",8),e.TgZ(4,"div",14),e.TgZ(5,"div",15),e._UZ(6,"input",67),e.TgZ(7,"label",68),e.SDv(8,69),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(9,xn,10,1,"div",19),e.YNc(10,wn,10,1,"div",19),e.qZA()),2&t){const _=e.oxw(2);e.xp6(9),e.Q6J("ngIf",!_.editing&&!_.userForm.getValue("generate_key")),e.xp6(1),e.Q6J("ngIf",!_.editing&&!_.userForm.getValue("generate_key"))}}function Bn(t,o){1&t&&(e.TgZ(0,"span",93),e.TgZ(1,"span",94),e.SDv(2,95),e.qZA(),e.qZA())}const L=function(t){return[t]};function Hn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"span"),e.TgZ(1,"div",72),e.TgZ(2,"div",96),e.TgZ(3,"span",97),e._UZ(4,"i"),e.qZA(),e.qZA(),e._UZ(5,"input",98),e.TgZ(6,"div",99),e.TgZ(7,"span",97),e._UZ(8,"i"),e.qZA(),e.qZA(),e._UZ(9,"input",98),e.TgZ(10,"span",74),e.TgZ(11,"button",100),e.NdJ("click",function(){const s=e.CHM(_).index;return 
e.oxw(3).showSubuserModal(s)}),e._UZ(12,"i",90),e.qZA(),e.TgZ(13,"button",101),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).deleteSubuser(s)}),e._UZ(14,"i",90),e.qZA(),e.qZA(),e.qZA(),e._UZ(15,"span",94),e.qZA()}if(2&t){const _=o.$implicit,n=e.oxw(3);e.xp6(4),e.Tol(n.icons.user),e.xp6(1),e.s9C("value",_.id),e.xp6(3),e.Tol(n.icons.share),e.xp6(1),e.s9C("value","full-control"===_.permissions?"full":_.permissions),e.xp6(3),e.Q6J("ngClass",e.VKq(10,L,n.icons.edit)),e.xp6(2),e.Q6J("ngClass",e.VKq(12,L,n.icons.destroy))}}function Xn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset"),e.TgZ(1,"legend"),e.SDv(2,84),e.qZA(),e.TgZ(3,"div",85),e.TgZ(4,"div",14),e.YNc(5,Bn,3,0,"span",86),e.YNc(6,Hn,16,14,"span",87),e.TgZ(7,"div",85),e.TgZ(8,"div",88),e.TgZ(9,"button",89),e.NdJ("click",function(){return e.CHM(_),e.oxw(2).showSubuserModal()}),e._UZ(10,"i",90),e.ynx(11),e.SDv(12,91),e.ALo(13,"titlecase"),e.ALo(14,"upperFirst"),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(15,"span",92),e.qZA(),e.qZA(),e.qZA()}if(2&t){const _=e.oxw(2);e.xp6(5),e.Q6J("ngIf",0===_.subusers.length),e.xp6(1),e.Q6J("ngForOf",_.subusers),e.xp6(4),e.Q6J("ngClass",e.VKq(9,L,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(13,5,_.actionLabels.CREATE))(e.lcZ(14,7,_.subuserLabel)),e.QtT(12)}}function Kn(t,o){1&t&&(e.TgZ(0,"span",93),e.TgZ(1,"span",94),e.SDv(2,107),e.qZA(),e.qZA())}function zn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"span"),e.TgZ(1,"div",72),e.TgZ(2,"div",96),e.TgZ(3,"div",97),e._UZ(4,"i"),e.qZA(),e.qZA(),e._UZ(5,"input",98),e.TgZ(6,"span",74),e.TgZ(7,"button",108),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).showS3KeyModal(s)}),e._UZ(8,"i",90),e.qZA(),e.TgZ(9,"button",109),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).deleteS3Key(s)}),e._UZ(10,"i",90),e.qZA(),e.qZA(),e.qZA(),e._UZ(11,"span",94),e.qZA()}if(2&t){const 
_=o.$implicit,n=e.oxw(3);e.xp6(4),e.Tol(n.icons.key),e.xp6(1),e.s9C("value",_.user),e.xp6(3),e.Q6J("ngClass",e.VKq(6,L,n.icons.show)),e.xp6(2),e.Q6J("ngClass",e.VKq(8,L,n.icons.destroy))}}function Qn(t,o){1&t&&(e.TgZ(0,"span",93),e.TgZ(1,"span",94),e.SDv(2,110),e.qZA(),e.qZA())}function Yn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"span"),e.TgZ(1,"div",72),e.TgZ(2,"div",96),e.TgZ(3,"span",97),e._UZ(4,"i"),e.qZA(),e.qZA(),e._UZ(5,"input",98),e.TgZ(6,"span",74),e.TgZ(7,"button",111),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).showSwiftKeyModal(s)}),e._UZ(8,"i",90),e.qZA(),e.qZA(),e.qZA(),e._UZ(9,"span",94),e.qZA()}if(2&t){const _=o.$implicit,n=e.oxw(3);e.xp6(4),e.Tol(n.icons.key),e.xp6(1),e.s9C("value",_.user),e.xp6(3),e.Q6J("ngClass",e.VKq(5,L,n.icons.show))}}function Jn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset"),e.TgZ(1,"legend"),e.SDv(2,102),e.qZA(),e.TgZ(3,"div",8),e.TgZ(4,"label",62),e.SDv(5,103),e.qZA(),e.TgZ(6,"div",11),e.YNc(7,Kn,3,0,"span",86),e.YNc(8,zn,12,10,"span",87),e.TgZ(9,"div",85),e.TgZ(10,"div",88),e.TgZ(11,"button",104),e.NdJ("click",function(){return e.CHM(_),e.oxw(2).showS3KeyModal()}),e._UZ(12,"i",90),e.ynx(13),e.SDv(14,105),e.ALo(15,"titlecase"),e.ALo(16,"upperFirst"),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(17,"span",92),e.qZA(),e._UZ(18,"hr"),e.qZA(),e.TgZ(19,"div",8),e.TgZ(20,"label",62),e.SDv(21,106),e.qZA(),e.TgZ(22,"div",11),e.YNc(23,Qn,3,0,"span",86),e.YNc(24,Yn,10,7,"span",87),e.qZA(),e.qZA(),e.qZA()}if(2&t){const _=e.oxw(2);e.xp6(7),e.Q6J("ngIf",0===_.s3Keys.length),e.xp6(1),e.Q6J("ngForOf",_.s3Keys),e.xp6(4),e.Q6J("ngClass",e.VKq(11,L,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(15,7,_.actionLabels.CREATE))(e.lcZ(16,9,_.s3keyLabel)),e.QtT(14),e.xp6(7),e.Q6J("ngIf",0===_.swiftKeys.length),e.xp6(1),e.Q6J("ngForOf",_.swiftKeys)}}function Vn(t,o){1&t&&(e.TgZ(0,"span",93),e.TgZ(1,"span",94),e.SDv(2,115),e.qZA(),e.qZA())}function jn(t,o){if(1&t){const 
_=e.EpF();e.TgZ(0,"span"),e.TgZ(1,"div",72),e.TgZ(2,"span",96),e.TgZ(3,"div",97),e._UZ(4,"i"),e.qZA(),e.qZA(),e._UZ(5,"input",98),e.TgZ(6,"span",74),e.TgZ(7,"button",116),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).showCapabilityModal(s)}),e._UZ(8,"i",90),e.qZA(),e.TgZ(9,"button",117),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).deleteCapability(s)}),e._UZ(10,"i",90),e.qZA(),e.qZA(),e.qZA(),e._UZ(11,"span",94),e.qZA()}if(2&t){const _=o.$implicit,n=e.oxw(3);e.xp6(4),e.Tol(n.icons.share),e.xp6(1),e.hYB("value","",_.type,":",_.perm,""),e.xp6(3),e.Q6J("ngClass",e.VKq(7,L,n.icons.edit)),e.xp6(2),e.Q6J("ngClass",e.VKq(9,L,n.icons.destroy))}}function eo(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset"),e.TgZ(1,"legend"),e.SDv(2,112),e.qZA(),e.TgZ(3,"div",8),e.TgZ(4,"div",14),e.YNc(5,Vn,3,0,"span",86),e.YNc(6,jn,12,11,"span",87),e.TgZ(7,"div",85),e.TgZ(8,"div",88),e.TgZ(9,"button",113),e.NdJ("click",function(){return e.CHM(_),e.oxw(2).showCapabilityModal()}),e.ALo(10,"pipeFunction"),e.ALo(11,"pipeFunction"),e._UZ(12,"i",90),e.ynx(13),e.SDv(14,114),e.ALo(15,"titlecase"),e.ALo(16,"upperFirst"),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(17,"span",92),e.qZA(),e.qZA(),e.qZA()}if(2&t){const _=e.oxw(2);e.xp6(5),e.Q6J("ngIf",0===_.capabilities.length),e.xp6(1),e.Q6J("ngForOf",_.capabilities),e.xp6(3),e.Q6J("disabled",e.xi3(10,7,_.capabilities,_.hasAllCapabilities))("disableTooltip",!e.xi3(11,10,_.capabilities,_.hasAllCapabilities)),e.xp6(3),e.Q6J("ngClass",e.VKq(17,L,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(15,13,_.actionLabels.ADD))(e.lcZ(16,15,_.capabilityLabel)),e.QtT(14)}}function _o(t,o){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"div",14),e.TgZ(2,"div",15),e._UZ(3,"input",118),e.TgZ(4,"label",119),e.SDv(5,120),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function to(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,124),e.qZA())}function no(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,125),e.qZA())}function 
oo(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",121),e.SDv(2,122),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",123),e.YNc(5,to,2,0,"span",13),e.YNc(6,no,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("user_quota_max_size",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_quota_max_size",_,"quotaMaxSize"))}}function io(t,o){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"div",14),e.TgZ(2,"div",15),e._UZ(3,"input",126),e.TgZ(4,"label",127),e.SDv(5,128),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function so(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,132),e.qZA())}function ao(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,133),e.qZA())}function ro(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",129),e.SDv(2,130),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",131),e.YNc(5,so,2,0,"span",13),e.YNc(6,ao,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("user_quota_max_objects",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_quota_max_objects",_,"min"))}}function lo(t,o){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"div",14),e.TgZ(2,"div",15),e._UZ(3,"input",134),e.TgZ(4,"label",135),e.SDv(5,136),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function co(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,140),e.qZA())}function uo(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,141),e.qZA())}function Ro(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",137),e.SDv(2,138),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",139),e.YNc(5,co,2,0,"span",13),e.YNc(6,uo,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_size",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_size",_,"quotaMaxSize"))}}function go(t,o){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"div",14),e.TgZ(2,"div",15),e._UZ(3,"input",142),e.TgZ(4,"label",143),e.SDv(5,144),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function 
Eo(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,148),e.qZA())}function To(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,149),e.qZA())}function So(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",145),e.SDv(2,146),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",147),e.YNc(5,Eo,2,0,"span",13),e.YNc(6,To,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_objects",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_objects",_,"min"))}}const Je=function(t){return{required:t}};function fo(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"div",1),e.TgZ(1,"form",2,3),e.TgZ(3,"div",4),e.TgZ(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.TgZ(9,"div",8),e.TgZ(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",12),e.YNc(14,Gn,2,0,"span",13),e.YNc(15,Nn,2,0,"span",13),e.YNc(16,On,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(17,"div",8),e.TgZ(18,"div",14),e.TgZ(19,"div",15),e.TgZ(20,"input",16),e.NdJ("click",function(){return e.CHM(_),e.oxw().updateFieldsWhenTenanted()}),e.qZA(),e.TgZ(21,"label",17),e.SDv(22,18),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(23,Zn,7,3,"div",19),e.TgZ(24,"div",8),e.TgZ(25,"label",20),e.SDv(26,21),e.qZA(),e.TgZ(27,"div",11),e._UZ(28,"input",22),e.YNc(29,$n,2,0,"span",13),e.YNc(30,hn,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(31,"div",8),e.TgZ(32,"label",23),e.SDv(33,24),e.qZA(),e.TgZ(34,"div",11),e._UZ(35,"input",25),e.YNc(36,In,2,0,"span",13),e.YNc(37,vn,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(38,"div",8),e.TgZ(39,"label",26),e.SDv(40,27),e.qZA(),e.TgZ(41,"div",11),e.TgZ(42,"select",28),e.NdJ("change",function(i){return 
e.CHM(_),e.oxw().onMaxBucketsModeChange(i.target.value)}),e.TgZ(43,"option",29),e.SDv(44,30),e.qZA(),e.TgZ(45,"option",31),e.SDv(46,32),e.qZA(),e.TgZ(47,"option",33),e.SDv(48,34),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(49,Dn,6,2,"div",19),e.TgZ(50,"div",8),e.TgZ(51,"div",14),e.TgZ(52,"div",15),e._UZ(53,"input",35),e.TgZ(54,"label",36),e.SDv(55,37),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(56,kn,11,2,"fieldset",38),e.YNc(57,Xn,16,11,"fieldset",38),e.YNc(58,Jn,25,13,"fieldset",38),e.YNc(59,eo,18,19,"fieldset",38),e.TgZ(60,"fieldset"),e.TgZ(61,"legend"),e.SDv(62,39),e.qZA(),e.TgZ(63,"div",8),e.TgZ(64,"div",14),e.TgZ(65,"div",15),e._UZ(66,"input",40),e.TgZ(67,"label",41),e.SDv(68,42),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(69,_o,6,0,"div",19),e.YNc(70,oo,7,2,"div",19),e.YNc(71,io,6,0,"div",19),e.YNc(72,ro,7,2,"div",19),e.qZA(),e.TgZ(73,"fieldset"),e.TgZ(74,"legend"),e.SDv(75,43),e.qZA(),e.TgZ(76,"div",8),e.TgZ(77,"div",14),e.TgZ(78,"div",15),e._UZ(79,"input",44),e.TgZ(80,"label",45),e.SDv(81,46),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(82,lo,6,0,"div",19),e.YNc(83,Ro,7,2,"div",19),e.YNc(84,go,6,0,"div",19),e.YNc(85,So,7,2,"div",19),e.qZA(),e.qZA(),e.TgZ(86,"div",47),e.TgZ(87,"cd-form-button-panel",48),e.NdJ("submitActionEvent",function(){return e.CHM(_),e.oxw().onSubmit()}),e.ALo(88,"titlecase"),e.ALo(89,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&t){const 
_=e.MAs(2),n=e.oxw();e.xp6(1),e.Q6J("formGroup",n.userForm),e.xp6(6),e.pQV(e.lcZ(6,30,n.action))(e.lcZ(7,32,n.resource)),e.QtT(5),e.xp6(3),e.Q6J("ngClass",e.VKq(38,Je,!n.editing)),e.xp6(3),e.Q6J("readonly",n.editing),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_id",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_id",_,"pattern")),e.xp6(1),e.Q6J("ngIf",!n.userForm.getValue("show_tenant")&&n.userForm.showError("user_id",_,"notUnique")),e.xp6(4),e.Q6J("readonly",!0),e.xp6(3),e.Q6J("ngIf",n.userForm.getValue("show_tenant")),e.xp6(2),e.Q6J("ngClass",e.VKq(40,Je,!n.editing)),e.xp6(4),e.Q6J("ngIf",n.userForm.showError("display_name",_,"pattern")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("display_name",_,"required")),e.xp6(6),e.Q6J("ngIf",n.userForm.showError("email",_,"email")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("email",_,"notUnique")),e.xp6(12),e.Q6J("ngIf",1==n.userForm.get("max_buckets_mode").value),e.xp6(7),e.Q6J("ngIf",!n.editing),e.xp6(1),e.Q6J("ngIf",n.editing),e.xp6(1),e.Q6J("ngIf",n.editing),e.xp6(1),e.Q6J("ngIf",n.editing),e.xp6(10),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value&&!n.userForm.getValue("user_quota_max_size_unlimited")),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value&&!n.userForm.getValue("user_quota_max_objects_unlimited")),e.xp6(10),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value&&!n.userForm.getValue("bucket_quota_max_size_unlimited")),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value&&!n.userForm.getValue("bucket_quota_max_objects_unlimited")),e.xp6(2),e.Q6J("form",n.userForm)("submitText",e.lcZ(88,34,n.action)+" "+e.lcZ(89,36,n.resource))}}let Ve=(()=>{class t extends 
Ie.E{constructor(_,n,i,s,c,d,E){super(),this.formBuilder=_,this.route=n,this.router=i,this.rgwUserService=s,this.modalService=c,this.notificationService=d,this.actionLabels=E,this.editing=!1,this.submitObservables=[],this.icons=D.P,this.subusers=[],this.s3Keys=[],this.swiftKeys=[],this.capabilities=[],this.showTenant=!1,this.previousTenant=null,this.resource="user",this.subuserLabel="subuser",this.s3keyLabel="S3 Key",this.capabilityLabel="capability",this.editing=this.router.url.startsWith(`/rgw/user/${A.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.createForm()}createForm(){this.userForm=this.formBuilder.group({user_id:[null,[a.kI.required,a.kI.pattern(/^[a-zA-Z0-9!@#%^&*()_-]+$/)],this.editing?[]:[m.h.unique(this.rgwUserService.exists,this.rgwUserService,()=>this.userForm.getValue("tenant"))]],show_tenant:[this.editing],tenant:[null,[a.kI.pattern(/^[a-zA-Z0-9!@#%^&*()_-]+$/)],this.editing?[]:[m.h.unique(this.rgwUserService.exists,this.rgwUserService,()=>this.userForm.getValue("user_id"),!0)]],display_name:[null,[a.kI.required,a.kI.pattern(/^[a-zA-Z0-9!@#%^&*()_ 
-]+$/)]],email:[null,[m.h.email],[m.h.unique(this.rgwUserService.emailExists,this.rgwUserService)]],max_buckets_mode:[1],max_buckets:[1e3,[m.h.requiredIf({max_buckets_mode:"1"}),m.h.number(!1)]],suspended:[!1],generate_key:[!0],access_key:[null,[m.h.requiredIf({generate_key:!1})]],secret_key:[null,[m.h.requiredIf({generate_key:!1})]],user_quota_enabled:[!1],user_quota_max_size_unlimited:[!0],user_quota_max_size:[null,[m.h.composeIf({user_quota_enabled:!0,user_quota_max_size_unlimited:!1},[a.kI.required,this.quotaMaxSizeValidator])]],user_quota_max_objects_unlimited:[!0],user_quota_max_objects:[null,[m.h.requiredIf({user_quota_enabled:!0,user_quota_max_objects_unlimited:!1})]],bucket_quota_enabled:[!1],bucket_quota_max_size_unlimited:[!0],bucket_quota_max_size:[null,[m.h.composeIf({bucket_quota_enabled:!0,bucket_quota_max_size_unlimited:!1},[a.kI.required,this.quotaMaxSizeValidator])]],bucket_quota_max_objects_unlimited:[!0],bucket_quota_max_objects:[null,[m.h.requiredIf({bucket_quota_enabled:!0,bucket_quota_max_objects_unlimited:!1})]]})}ngOnInit(){this.route.params.subscribe(_=>{if(!_.hasOwnProperty("uid"))return void this.loadingReady();const n=decodeURIComponent(_.uid),i=[];i.push(this.rgwUserService.get(n)),i.push(this.rgwUserService.getQuota(n)),(0,Y.D)(i).subscribe(s=>{const c=u().clone(this.userForm.value);let d=u().pick(s[0],u().keys(this.userForm.value));switch(d.max_buckets){case-1:d.max_buckets_mode=-1,d.max_buckets="";break;case 0:d.max_buckets_mode=0,d.max_buckets="";break;default:d.max_buckets_mode=1}["user","bucket"].forEach(g=>{const C=s[1][g+"_quota"];d[g+"_quota_enabled"]=C.enabled,C.max_size<0?(d[g+"_quota_max_size_unlimited"]=!0,d[g+"_quota_max_size"]=null):(d[g+"_quota_max_size_unlimited"]=!1,d[g+"_quota_max_size"]=`${C.max_size} 
B`),C.max_objects<0?(d[g+"_quota_max_objects_unlimited"]=!0,d[g+"_quota_max_objects"]=null):(d[g+"_quota_max_objects_unlimited"]=!1,d[g+"_quota_max_objects"]=C.max_objects)}),d=u().merge(c,d),this.userForm.setValue(d),this.subusers=s[0].subusers,this.s3Keys=s[0].keys,this.swiftKeys=s[0].swift_keys;const E={"read, write":"*"};s[0].caps.forEach(g=>{g.perm in E&&(g.perm=E[g.perm])}),this.capabilities=s[0].caps,this.loadingReady()},()=>{this.loadingError()})})}goToListView(){this.router.navigate(["/rgw/user"])}onSubmit(){let _;if(this.userForm.pristine)return void this.goToListView();const n=this.getUID();if(this.editing){if(this._isGeneralDirty()){const i=this._getUpdateArgs();this.submitObservables.push(this.rgwUserService.update(n,i))}_="Updated Object Gateway user '" + n + "'"}else{const i=this._getCreateArgs();this.submitObservables.push(this.rgwUserService.create(i)),_="Created Object Gateway user '" + n + "'"}if(this._isUserQuotaDirty()){const i=this._getUserQuotaArgs();this.submitObservables.push(this.rgwUserService.updateQuota(n,i))}if(this._isBucketQuotaDirty()){const i=this._getBucketQuotaArgs();this.submitObservables.push(this.rgwUserService.updateQuota(n,i))}(0,Ht.z)(...this.submitObservables).subscribe({error:()=>{this.userForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.notificationService.show(Ae.k.success,_),this.goToListView()}})}updateFieldsWhenTenanted(){this.showTenant=this.userForm.getValue("show_tenant"),this.showTenant?(this.userForm.get("user_id").markAsTouched(),this.previousTenant=this.userForm.get("tenant").value,this.userForm.get("tenant").patchValue(null)):(this.userForm.get("user_id").markAsUntouched(),this.userForm.get("tenant").patchValue(this.previousTenant))}getUID(){var _;let n=this.userForm.getValue("user_id");const i=null===(_=this.userForm)||void 0===_?void 0:_.getValue("tenant");return 
i&&i.length>0&&(n=`${this.userForm.getValue("tenant")}$${n}`),n}quotaMaxSizeValidator(_){return(0,m.P)(_.value)?null:null===RegExp("^(\\d+(\\.\\d+)?)\\s*(B|K(B|iB)?|M(B|iB)?|G(B|iB)?|T(B|iB)?)?$","i").exec(_.value)||(new Ge.H).toBytes(_.value)<1024?{quotaMaxSize:!0}:null}setSubuser(_,n){const i={"full-control":"full","read-write":"readwrite"},s=this.getUID();this.submitObservables.push(this.rgwUserService.createSubuser(s,{subuser:_.id,access:_.permissions in i?i[_.permissions]:_.permissions,key_type:"swift",secret_key:_.secret_key,generate_secret:_.generate_secret?"true":"false"})),u().isNumber(n)?this.subusers[n]=_:(this.subusers.push(_),this.swiftKeys.push({user:_.id,secret_key:_.generate_secret?"Apply your changes first...":_.secret_key})),this.userForm.markAsDirty()}deleteSubuser(_){const n=this.subusers[_];this.submitObservables.push(this.rgwUserService.deleteSubuser(this.getUID(),n.id)),this.s3Keys=this.s3Keys.filter(i=>i.user!==n.id),this.swiftKeys=this.swiftKeys.filter(i=>i.user!==n.id),this.subusers.splice(_,1),this.userForm.markAsDirty()}setCapability(_,n){const i=this.getUID();if(u().isNumber(n)){const s=this.capabilities[n];this.submitObservables.push(this.rgwUserService.deleteCapability(i,s.type,s.perm)),this.submitObservables.push(this.rgwUserService.addCapability(i,_.type,_.perm)),this.capabilities[n]=_}else this.submitObservables.push(this.rgwUserService.addCapability(i,_.type,_.perm)),this.capabilities=[...this.capabilities,_];this.userForm.markAsDirty()}deleteCapability(_){const n=this.capabilities[_];this.submitObservables.push(this.rgwUserService.deleteCapability(this.getUID(),n.type,n.perm)),this.capabilities.splice(_,1),this.capabilities=[...this.capabilities],this.userForm.markAsDirty()}hasAllCapabilities(_){return!u().difference(ze.getAll(),u().map(_,"type")).length}setS3Key(_,n){if(!u().isNumber(n)){const 
i=_.user.match(/([^:]+)(:(.+))?/),s=i[1],c={subuser:i[2]?i[3]:"",generate_key:_.generate_key?"true":"false"};"false"===c.generate_key&&(u().isNil(_.access_key)||(c.access_key=_.access_key),u().isNil(_.secret_key)||(c.secret_key=_.secret_key)),this.submitObservables.push(this.rgwUserService.addS3Key(s,c)),this.s3Keys.push({user:_.user,access_key:_.generate_key?"Apply your changes first...":_.access_key,secret_key:_.generate_key?"Apply your changes first...":_.secret_key})}this.userForm.markAsDirty()}deleteS3Key(_){const n=this.s3Keys[_];this.submitObservables.push(this.rgwUserService.deleteS3Key(this.getUID(),n.access_key)),this.s3Keys.splice(_,1),this.userForm.markAsDirty()}showSubuserModal(_){const n=this.getUID(),i=this.modalService.show(An);if(u().isNumber(_)){const s=this.subusers[_];i.componentInstance.setEditing(),i.componentInstance.setValues(n,s.id,s.permissions)}else i.componentInstance.setEditing(!1),i.componentInstance.setValues(n),i.componentInstance.setSubusers(this.subusers);i.componentInstance.submitAction.subscribe(s=>{this.setSubuser(s,_)})}showS3KeyModal(_){const n=this.modalService.show(Qe);if(u().isNumber(_)){const i=this.s3Keys[_];n.componentInstance.setViewing(),n.componentInstance.setValues(i.user,i.access_key,i.secret_key)}else{const i=this._getS3KeyUserCandidates();n.componentInstance.setViewing(!1),n.componentInstance.setUserCandidates(i),n.componentInstance.submitAction.subscribe(s=>{this.setS3Key(s)})}}showSwiftKeyModal(_){const n=this.modalService.show(Ye),i=this.swiftKeys[_];n.componentInstance.setValues(i.user,i.secret_key)}showCapabilityModal(_){const n=this.modalService.show(_n);if(u().isNumber(_)){const i=this.capabilities[_];n.componentInstance.setEditing(),n.componentInstance.setValues(i.type,i.perm)}else 
n.componentInstance.setEditing(!1),n.componentInstance.setCapabilities(this.capabilities);n.componentInstance.submitAction.subscribe(i=>{this.setCapability(i,_)})}_isGeneralDirty(){return["display_name","email","max_buckets_mode","max_buckets","suspended"].some(_=>this.userForm.get(_).dirty)}_isUserQuotaDirty(){return["user_quota_enabled","user_quota_max_size_unlimited","user_quota_max_size","user_quota_max_objects_unlimited","user_quota_max_objects"].some(_=>this.userForm.get(_).dirty)}_isBucketQuotaDirty(){return["bucket_quota_enabled","bucket_quota_max_size_unlimited","bucket_quota_max_size","bucket_quota_max_objects_unlimited","bucket_quota_max_objects"].some(_=>this.userForm.get(_).dirty)}_getCreateArgs(){const _={uid:this.getUID(),display_name:this.userForm.getValue("display_name"),suspended:this.userForm.getValue("suspended"),email:"",max_buckets:this.userForm.getValue("max_buckets"),generate_key:this.userForm.getValue("generate_key"),access_key:"",secret_key:""},n=this.userForm.getValue("email");u().isString(n)&&n.length>0&&u().merge(_,{email:n}),this.userForm.getValue("generate_key")||u().merge(_,{generate_key:!1,access_key:this.userForm.getValue("access_key"),secret_key:this.userForm.getValue("secret_key")});const s=parseInt(this.userForm.getValue("max_buckets_mode"),10);return u().includes([-1,0],s)&&u().merge(_,{max_buckets:s}),_}_getUpdateArgs(){const _={},n=["display_name","email","max_buckets","suspended"];for(const s of n)_[s]=this.userForm.getValue(s);const i=parseInt(this.userForm.getValue("max_buckets_mode"),10);return u().includes([-1,0],i)&&(_.max_buckets=i),_}_getUserQuotaArgs(){const _={quota_type:"user",enabled:this.userForm.getValue("user_quota_enabled"),max_size_kb:-1,max_objects:-1};if(!this.userForm.getValue("user_quota_max_size_unlimited")){const n=(new Ge.H).toBytes(this.userForm.getValue("user_quota_max_size"));_.max_size_kb=(n/1024).toFixed(0)}return 
this.userForm.getValue("user_quota_max_objects_unlimited")||(_.max_objects=this.userForm.getValue("user_quota_max_objects")),_}_getBucketQuotaArgs(){const _={quota_type:"bucket",enabled:this.userForm.getValue("bucket_quota_enabled"),max_size_kb:-1,max_objects:-1};if(!this.userForm.getValue("bucket_quota_max_size_unlimited")){const n=(new Ge.H).toBytes(this.userForm.getValue("bucket_quota_max_size"));_.max_size_kb=(n/1024).toFixed(0)}return this.userForm.getValue("bucket_quota_max_objects_unlimited")||(_.max_objects=this.userForm.getValue("bucket_quota_max_objects")),_}_getS3KeyUserCandidates(){let _=[];const n=this.getUID();return u().isString(n)&&!u().isEmpty(n)&&_.push(n),this.subusers.forEach(i=>{_.push(i.id)}),this.s3Keys.forEach(i=>{_.push(i.user)}),_=u().uniq(_),_}onMaxBucketsModeChange(_){"1"===_&&(this.userForm.get("max_buckets").valid||this.userForm.patchValue({max_buckets:1e3}))}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(J.O),e.Y36(w.gz),e.Y36(w.F0),e.Y36(O),e.Y36(Te.Z),e.Y36(ve.g),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let o,_,n,i,s,c,d,E,g,C,b,P,G,N,p,U,W,Z,$,h,I,v,F,T,y,x,S,_e,te,ne,oe,ie,se,ae,re,le,ce,de,ue,Re,R,__,t_,n_,o_,i_,s_,a_,r_,l_,c_,d_,u_,R_,g_,E_,T_,S_,f_,C_,p_,M_,m_,A_,b_,P_,G_,N_;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="User ID",n="Show Tenant",i="Full name",s="Email address",c="Max. 
buckets",d="Disabled",E="Unlimited",g="Custom",C="Suspended",b="User quota",P="Enabled",G="Bucket quota",N="Enabled",p="This field is required.",U="The value is not valid.",W="The chosen user ID is already in use.",Z="Tenant",$="The value is not valid.",h="The chosen user ID exists in this tenant.",I="The value is not valid.",v="This field is required.",F="This is not a valid email address.",T="The chosen email address is already in use.",y="This field is required.",x="The entered value must be >= 1.",S="S3 key",_e="Auto-generate key",te="Access key",ne="This field is required.",oe="Secret key",ie="This field is required.",se="Subusers",ae="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",re="There are no subusers.",le="Edit",ce="Delete",de="Keys",ue="S3",Re="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",R="Swift",__="There are no keys.",t_="Show",n_="Delete",o_="There are no keys.",i_="Show",s_="Capabilities",a_="All capabilities are already added.",r_="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",l_="There are no capabilities.",c_="Edit",d_="Delete",u_="Unlimited size",R_="Max. size",g_="This field is required.",E_="The value is not valid.",T_="Unlimited objects",S_="Max. objects",f_="This field is required.",C_="The entered value must be >= 0.",p_="Unlimited size",M_="Max. size",m_="This field is required.",A_="The value is not valid.",b_="Unlimited objects",P_="Max. 
objects",G_="This field is required.",N_="The entered value must be >= 0.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"card"],[1,"card-header"],o,[1,"card-body"],[1,"form-group","row"],["for","user_id",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","user_id","type","text","formControlName","user_id",1,"form-control",3,"readonly"],["class","invalid-feedback",4,"ngIf"],[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","show_tenant","type","checkbox","formControlName","show_tenant",1,"custom-control-input",3,"readonly","click"],["for","show_tenant",1,"custom-control-label"],n,["class","form-group row",4,"ngIf"],["for","display_name",1,"cd-col-form-label",3,"ngClass"],i,["id","display_name","type","text","formControlName","display_name",1,"form-control"],["for","email",1,"cd-col-form-label"],s,["id","email","type","text","formControlName","email",1,"form-control"],["for","max_buckets_mode",1,"cd-col-form-label"],c,["formControlName","max_buckets_mode","name","max_buckets_mode","id","max_buckets_mode",1,"form-control",3,"change"],["value","-1"],d,["value","0"],E,["value","1"],g,["id","suspended","type","checkbox","formControlName","suspended",1,"custom-control-input"],["for","suspended",1,"custom-control-label"],C,[4,"ngIf"],b,["id","user_quota_enabled","type","checkbox","formControlName","user_quota_enabled",1,"custom-control-input"],["for","user_quota_enabled",1,"custom-control-label"],P,G,["id","bucket_quota_enabled","type","checkbox","formControlName","bucket_quota_enabled",1,"custom-control-input"],["for","bucket_quota_enabled",1,"custom-control-label"],N,[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],p,U,W,["for","tenant",1,"cd-col-form-label"],Z,["id","tenant","type","text","formControlName","tenant","autofocus","",1,"form-control",3,"readonly"],$,h,I,v,F,T,[1,"cd-col-form-label"],["id","ma
x_buckets","type","number","formControlName","max_buckets","min","1",1,"form-control"],y,x,S,["id","generate_key","type","checkbox","formControlName","generate_key",1,"custom-control-input"],["for","generate_key",1,"custom-control-label"],_e,["for","access_key",1,"cd-col-form-label","required"],te,[1,"input-group"],["id","access_key","type","password","formControlName","access_key",1,"form-control"],[1,"input-group-append"],["type","button","cdPasswordButton","access_key",1,"btn","btn-light"],["source","access_key"],ne,["for","secret_key",1,"cd-col-form-label","required"],oe,["id","secret_key","type","password","formControlName","secret_key",1,"form-control"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],ie,se,[1,"row"],["class","no-border",4,"ngIf"],[4,"ngFor","ngForOf"],[1,"col-12"],["type","button",1,"btn","btn-light","float-right","tc_addSubuserButton",3,"click"],[3,"ngClass"],ae,[1,"help-block"],[1,"no-border"],[1,"form-text","text-muted"],re,[1,"input-group-prepend"],[1,"input-group-text"],["type","text","readonly","",1,"cd-form-control",3,"value"],[1,"input-group-prepend","border-left-0","border-right-0"],["type","button","ngbTooltip",le,1,"btn","btn-light","tc_showSubuserButton",3,"click"],["type","button","ngbTooltip",ce,1,"btn","btn-light","tc_deleteSubuserButton",3,"click"],de,ue,["type","button",1,"btn","btn-light","float-right","tc_addS3KeyButton",3,"click"],Re,R,__,["type","button","ngbTooltip",t_,1,"btn","btn-light","tc_showS3KeyButton",3,"click"],["type","button","ngbTooltip",n_,1,"btn","btn-light","tc_deleteS3KeyButton",3,"click"],o_,["type","button","ngbTooltip",i_,1,"btn","btn-light","tc_showSwiftKeyButton",3,"click"],s_,["type","button","ngbTooltip",a_,"triggers","pointerenter:pointerleave",1,"btn","btn-light","float-right","tc_addCapButton",3,"disabled","disableTooltip","click"],r_,l_,["type","button","ngbTooltip",c_,1,"btn","btn-light","tc_editCapButton",3,"click"],["type","button","ngbTooltip",d_
,1,"btn","btn-light","tc_deleteCapButton",3,"click"],["id","user_quota_max_size_unlimited","type","checkbox","formControlName","user_quota_max_size_unlimited",1,"custom-control-input"],["for","user_quota_max_size_unlimited",1,"custom-control-label"],u_,["for","user_quota_max_size",1,"cd-col-form-label","required"],R_,["id","user_quota_max_size","type","text","formControlName","user_quota_max_size","cdDimlessBinary","",1,"form-control"],g_,E_,["id","user_quota_max_objects_unlimited","type","checkbox","formControlName","user_quota_max_objects_unlimited",1,"custom-control-input"],["for","user_quota_max_objects_unlimited",1,"custom-control-label"],T_,["for","user_quota_max_objects",1,"cd-col-form-label","required"],S_,["id","user_quota_max_objects","type","number","formControlName","user_quota_max_objects","min","0",1,"form-control"],f_,C_,["id","bucket_quota_max_size_unlimited","type","checkbox","formControlName","bucket_quota_max_size_unlimited",1,"custom-control-input"],["for","bucket_quota_max_size_unlimited",1,"custom-control-label"],p_,["for","bucket_quota_max_size",1,"cd-col-form-label","required"],M_,["id","bucket_quota_max_size","type","text","formControlName","bucket_quota_max_size","cdDimlessBinary","",1,"form-control"],m_,A_,["id","bucket_quota_max_objects_unlimited","type","checkbox","formControlName","bucket_quota_max_objects_unlimited",1,"custom-control-input"],["for","bucket_quota_max_objects_unlimited",1,"custom-control-label"],b_,["for","bucket_quota_max_objects",1,"cd-col-form-label","required"],P_,["id","bucket_quota_max_objects","type","number","formControlName","bucket_quota_max_objects","min","0",1,"form-control"],G_,N_]},template:function(_,n){1&_&&e.YNc(0,fo,90,42,"div",0),2&_&&e.Q6J("cdFormLoading",n.loading)},directives:[Fe.y,a._Y,a.JL,V.V,a.sg,H.P,f.mk,q.o,a.Fj,X.b,a.JJ,a.u,f.O5,a.Wl,a.EJ,a.YN,a.Kr,ee.p,j.U,a.wV,a.qQ,Ce.C,pe.s,f.sg,M._L,Pn.Q],pipes:[f.rS,K.m,Ue.i],styles:[""]}),t})();var je=r(99466),Co=r(78877),po=r(86969);const 
Mo=["accessKeyTpl"],mo=["secretKeyTpl"];function Ao(t,o){if(1&t&&(e.TgZ(0,"tr"),e.TgZ(1,"td",15),e.SDv(2,20),e.qZA(),e.TgZ(3,"td"),e._uU(4),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(4),e.Oqu(_.user.email)}}function bo(t,o){if(1&t&&(e.TgZ(0,"div"),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.xp6(1),e.AsE(" ",_.id," (",_.permissions,") ")}}function Po(t,o){if(1&t&&(e.TgZ(0,"tr"),e.TgZ(1,"td",15),e.SDv(2,21),e.qZA(),e.TgZ(3,"td"),e.YNc(4,bo,2,2,"div",22),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(4),e.Q6J("ngForOf",_.user.subusers)}}function Go(t,o){if(1&t&&(e.TgZ(0,"div"),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.xp6(1),e.AsE(" ",_.type," (",_.perm,") ")}}function No(t,o){if(1&t&&(e.TgZ(0,"tr"),e.TgZ(1,"td",15),e.SDv(2,23),e.qZA(),e.TgZ(3,"td"),e.YNc(4,Go,2,2,"div",22),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(4),e.Q6J("ngForOf",_.user.caps)}}function Oo(t,o){if(1&t&&(e.TgZ(0,"tr"),e.TgZ(1,"td",15),e.SDv(2,24),e.qZA(),e.TgZ(3,"td"),e._uU(4),e.ALo(5,"join"),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(4),e.Oqu(e.lcZ(5,1,_.user.mfa_ids))}}function Uo(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function Wo(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,29),e.qZA())}function Zo(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&t){const _=e.oxw(5);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.user.user_quota.max_size)," ")}}function $o(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function ho(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,30),e.qZA())}function Io(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(5);e.xp6(1),e.hij(" ",_.user.user_quota.max_objects," ")}}function 
vo(t,o){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"legend"),e.SDv(2,25),e.qZA(),e.TgZ(3,"table",9),e.TgZ(4,"tbody"),e.TgZ(5,"tr"),e.TgZ(6,"td",10),e.SDv(7,26),e.qZA(),e.TgZ(8,"td",12),e._uU(9),e.ALo(10,"booleanText"),e.qZA(),e.qZA(),e.TgZ(11,"tr"),e.TgZ(12,"td",15),e.SDv(13,27),e.qZA(),e.YNc(14,Uo,2,0,"td",0),e.YNc(15,Wo,2,0,"td",0),e.YNc(16,Zo,3,3,"td",0),e.qZA(),e.TgZ(17,"tr"),e.TgZ(18,"td",15),e.SDv(19,28),e.qZA(),e.YNc(20,$o,2,0,"td",0),e.YNc(21,ho,2,0,"td",0),e.YNc(22,Io,2,1,"td",0),e.qZA(),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(9),e.Oqu(e.lcZ(10,7,_.user.user_quota.enabled)),e.xp6(5),e.Q6J("ngIf",!_.user.user_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",!_.user.user_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_objects>-1)}}function Fo(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function Lo(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,35),e.qZA())}function Do(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&t){const _=e.oxw(5);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.user.bucket_quota.max_size)," ")}}function yo(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function xo(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,36),e.qZA())}function qo(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(5);e.xp6(1),e.hij(" ",_.user.bucket_quota.max_objects," ")}}function 
wo(t,o){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"legend"),e.SDv(2,31),e.qZA(),e.TgZ(3,"table",9),e.TgZ(4,"tbody"),e.TgZ(5,"tr"),e.TgZ(6,"td",10),e.SDv(7,32),e.qZA(),e.TgZ(8,"td",12),e._uU(9),e.ALo(10,"booleanText"),e.qZA(),e.qZA(),e.TgZ(11,"tr"),e.TgZ(12,"td",15),e.SDv(13,33),e.qZA(),e.YNc(14,Fo,2,0,"td",0),e.YNc(15,Lo,2,0,"td",0),e.YNc(16,Do,3,3,"td",0),e.qZA(),e.TgZ(17,"tr"),e.TgZ(18,"td",15),e.SDv(19,34),e.qZA(),e.YNc(20,yo,2,0,"td",0),e.YNc(21,xo,2,0,"td",0),e.YNc(22,qo,2,1,"td",0),e.qZA(),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(9),e.Oqu(e.lcZ(10,7,_.user.bucket_quota.enabled)),e.xp6(5),e.Q6J("ngIf",!_.user.bucket_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",!_.user.bucket_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_objects>-1)}}function 
ko(t,o){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"table",9),e.TgZ(2,"tbody"),e.TgZ(3,"tr"),e.TgZ(4,"td",10),e.SDv(5,11),e.qZA(),e.TgZ(6,"td",12),e._uU(7),e.qZA(),e.qZA(),e.TgZ(8,"tr"),e.TgZ(9,"td",10),e.SDv(10,13),e.qZA(),e.TgZ(11,"td",12),e._uU(12),e.qZA(),e.qZA(),e.TgZ(13,"tr"),e.TgZ(14,"td",10),e.SDv(15,14),e.qZA(),e.TgZ(16,"td",12),e._uU(17),e.qZA(),e.qZA(),e.TgZ(18,"tr"),e.TgZ(19,"td",15),e.SDv(20,16),e.qZA(),e.TgZ(21,"td"),e._uU(22),e.qZA(),e.qZA(),e.YNc(23,Ao,5,1,"tr",0),e.TgZ(24,"tr"),e.TgZ(25,"td",15),e.SDv(26,17),e.qZA(),e.TgZ(27,"td"),e._uU(28),e.ALo(29,"booleanText"),e.qZA(),e.qZA(),e.TgZ(30,"tr"),e.TgZ(31,"td",15),e.SDv(32,18),e.qZA(),e.TgZ(33,"td"),e._uU(34),e.ALo(35,"booleanText"),e.qZA(),e.qZA(),e.TgZ(36,"tr"),e.TgZ(37,"td",15),e.SDv(38,19),e.qZA(),e.TgZ(39,"td"),e._uU(40),e.ALo(41,"map"),e.qZA(),e.qZA(),e.YNc(42,Po,5,1,"tr",0),e.YNc(43,No,5,1,"tr",0),e.YNc(44,Oo,6,3,"tr",0),e.qZA(),e.qZA(),e.YNc(45,vo,23,9,"div",0),e.YNc(46,wo,23,9,"div",0),e.qZA()),2&t){const _=e.oxw(3);e.xp6(7),e.Oqu(_.user.tenant),e.xp6(5),e.Oqu(_.user.user_id),e.xp6(5),e.Oqu(_.user.uid),e.xp6(5),e.Oqu(_.user.display_name),e.xp6(1),e.Q6J("ngIf",null==_.user.email?null:_.user.email.length),e.xp6(5),e.Oqu(e.lcZ(29,13,_.user.suspended)),e.xp6(6),e.Oqu(e.lcZ(35,15,"true"===_.user.system)),e.xp6(6),e.Oqu(e.xi3(41,17,_.user.max_buckets,_.maxBucketsMap)),e.xp6(2),e.Q6J("ngIf",_.user.subusers&&_.user.subusers.length),e.xp6(1),e.Q6J("ngIf",_.user.caps&&_.user.caps.length),e.xp6(1),e.Q6J("ngIf",null==_.user.mfa_ids?null:_.user.mfa_ids.length),e.xp6(1),e.Q6J("ngIf",_.user.user_quota),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota)}}function Bo(t,o){if(1&t&&e.YNc(0,ko,47,20,"div",0),2&t){const _=e.oxw(2);e.Q6J("ngIf",_.user)}}const Ho=function(t){return[t]};function Xo(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table",39),e.NdJ("updateSelection",function(i){return e.CHM(_),e.oxw(3).updateKeysSelection(i)}),e.TgZ(1,"div",40),e.TgZ(2,"div",41),e.TgZ(3,"button",42),e.NdJ("click",function(){return 
e.CHM(_),e.oxw(3).showKeyModal()}),e._UZ(4,"i",43),e.ynx(5),e.SDv(6,44),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&t){const _=e.oxw(3);e.Q6J("data",_.keys)("columns",_.keysColumns),e.xp6(3),e.Q6J("disabled",!_.keysSelection.hasSingleSelection),e.xp6(1),e.Q6J("ngClass",e.VKq(4,Ho,_.icons.show))}}function Ko(t,o){1&t&&(e.TgZ(0,"li",37),e.TgZ(1,"a",4),e.SDv(2,38),e.qZA(),e.YNc(3,Xo,7,6,"ng-template",6),e.qZA())}function zo(t,o){if(1&t&&(e.ynx(0),e.TgZ(1,"ul",1,2),e.TgZ(3,"li",3),e.TgZ(4,"a",4),e.SDv(5,5),e.qZA(),e.YNc(6,Bo,1,1,"ng-template",6),e.qZA(),e.YNc(7,Ko,4,0,"li",7),e.qZA(),e._UZ(8,"div",8),e.BQk()),2&t){const _=e.MAs(2),n=e.oxw();e.xp6(7),e.Q6J("ngIf",n.keys.length),e.xp6(1),e.Q6J("ngbNavOutlet",_)}}let Qo=(()=>{class t{constructor(_,n){this.rgwUserService=_,this.modalService=n,this.keys=[],this.keysColumns=[],this.keysSelection=new Pe.r,this.icons=D.P}ngOnInit(){this.keysColumns=[{name:"Username",prop:"username",flexGrow:1},{name:"Type",prop:"type",flexGrow:1}],this.maxBucketsMap={"-1":"Disabled",0:"Unlimited"}}ngOnChanges(){this.selection&&(this.user=this.selection,this.user.subusers=u().sortBy(this.user.subusers,"id"),this.user.caps=u().sortBy(this.user.caps,"type"),this.rgwUserService.getQuota(this.user.uid).subscribe(_=>{u().extend(this.user,_)}),this.keys=[],this.user.keys&&this.user.keys.forEach(_=>{this.keys.push({id:this.keys.length+1,type:"S3",username:_.user,ref:_})}),this.user.swift_keys&&this.user.swift_keys.forEach(_=>{this.keys.push({id:this.keys.length+1,type:"Swift",username:_.user,ref:_})}),this.keys=u().sortBy(this.keys,"user"))}updateKeysSelection(_){this.keysSelection=_}showKeyModal(){const _=this.keysSelection.first(),n=this.modalService.show("S3"===_.type?Qe:Ye);switch(_.type){case"S3":n.componentInstance.setViewing(),n.componentInstance.setValues(_.ref.user,_.ref.access_key,_.ref.secret_key);break;case"Swift":n.componentInstance.setValues(_.ref.user,_.ref.secret_key)}}}return t.\u0275fac=function(_){return 
new(_||t)(e.Y36(O),e.Y36(Te.Z))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-details"]],viewQuery:function(_,n){if(1&_&&(e.Gf(Mo,5),e.Gf(mo,5)),2&_){let i;e.iGM(i=e.CRH())&&(n.accessKeyTpl=i.first),e.iGM(i=e.CRH())&&(n.secretKeyTpl=i.first)}},inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let o,_,n,i,s,c,d,E,g,C,b,P,G,N,p,U,W,Z,$,h,I,v,F,T,y,x;return o="Details",_="Tenant",n="User ID",i="Username",s="Full name",c="Suspended",d="System",E="Maximum buckets",g="Email address",C="Subusers",b="Capabilities",P="MFAs(Id)",G="User quota",N="Enabled",p="Maximum size",U="Maximum objects",W="Unlimited",Z="Unlimited",$="Bucket quota",h="Enabled",I="Maximum size",v="Maximum objects",F="Unlimited",T="Unlimited",y="Keys",x="Show",[[4,"ngIf"],["ngbNav","","cdStatefulTab","rgw-user-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],o,["ngbNavContent",""],["ngbNavItem","keys",4,"ngIf"],[3,"ngbNavOutlet"],[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],_,[1,"w-75"],n,i,[1,"bold"],s,c,d,E,g,C,[4,"ngFor","ngForOf"],b,P,G,N,p,U,W,Z,$,h,I,v,F,T,["ngbNavItem","keys"],y,["columnMode","flex","selectionType","multi","forceIdentifier","true",3,"data","columns","updateSelection"],[1,"table-actions"],["dropdown","",1,"btn-group"],["type","button",1,"btn","btn-accent",3,"disabled","click"],[3,"ngClass"],x]},template:function(_,n){1&_&&e.YNc(0,zo,9,2,"ng-container",0),2&_&&e.Q6J("ngIf",n.selection)},directives:[f.O5,M.Pz,Xe.m,M.nv,M.Vx,M.uN,M.tO,f.sg,z.a,q.o,f.mk],pipes:[Be.T,Co.b,po.A,qe.$],styles:[""]}),t})();const Yo=["userSizeTpl"],Jo=["userObjectTpl"];function Vo(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",8),2&t){const _=e.oxw().row;e.Q6J("total",_.user_quota.max_size)("used",_.stats.size_actual)}}function jo(t,o){1&t&&e.SDv(0,9)}function ei(t,o){if(1&t&&(e.YNc(0,Vo,1,2,"cd-usage-bar",6),e.YNc(1,jo,1,0,"ng-template",null,7,e.W1O)),2&t){const 
_=o.row,n=e.MAs(2);e.Q6J("ngIf",_.user_quota.max_size>0&&_.user_quota.enabled)("ngIfElse",n)}}function _i(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",12),2&t){const _=e.oxw().row;e.Q6J("total",_.user_quota.max_objects)("used",_.stats.num_objects)("isBinary",!1)}}function ti(t,o){1&t&&e.SDv(0,13)}function ni(t,o){if(1&t&&(e.YNc(0,_i,1,3,"cd-usage-bar",10),e.YNc(1,ti,1,0,"ng-template",null,11,e.W1O)),2&t){const _=o.row,n=e.MAs(2);e.Q6J("ngIf",_.user_quota.max_objects>0&&_.user_quota.enabled)("ngIfElse",n)}}let ii=(()=>{class t extends be.o{constructor(_,n,i,s,c,d){super(d),this.authStorageService=_,this.rgwUserService=n,this.modalService=i,this.urlBuilder=s,this.actionLabels=c,this.ngZone=d,this.columns=[],this.users=[],this.selection=new Pe.r}ngOnInit(){this.permission=this.authStorageService.getPermissions().rgw,this.columns=[{name:"Username",prop:"uid",flexGrow:1},{name:"Tenant",prop:"tenant",flexGrow:1},{name:"Full name",prop:"display_name",flexGrow:1},{name:"Email address",prop:"email",flexGrow:1},{name:"Suspended",prop:"suspended",flexGrow:1,cellClass:"text-center",cellTransformation:je.e.checkIcon},{name:"Max. 
buckets",prop:"max_buckets",flexGrow:1,cellTransformation:je.e.map,customTemplateConfig:{"-1":"Disabled",0:"Unlimited"}},{name:"Capacity Limit %",prop:"size_usage",cellTemplate:this.userSizeTpl,flexGrow:.8},{name:"Object Limit %",prop:"object_usage",cellTemplate:this.userObjectTpl,flexGrow:.8}];const _=()=>this.selection.first()&&`${encodeURIComponent(this.selection.first().uid)}`;this.tableActions=[{permission:"create",icon:D.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE,canBePrimary:c=>!c.hasSelection},{permission:"update",icon:D.P.edit,routerLink:()=>this.urlBuilder.getEdit(_()),name:this.actionLabels.EDIT},{permission:"delete",icon:D.P.destroy,click:()=>this.deleteAction(),disable:()=>!this.selection.hasSelection,name:this.actionLabels.DELETE,canBePrimary:c=>c.hasMultiSelection}],this.setTableRefreshTimeout()}getUserList(_){this.setTableRefreshTimeout(),this.rgwUserService.list().subscribe(n=>{this.users=n},()=>{_.error()})}updateSelection(_){this.selection=_}deleteAction(){this.modalService.show(xe.M,{itemDescription:this.selection.hasSingleSelection?"user":"users",itemNames:this.selection.selected.map(_=>_.uid),submitActionObservable:()=>new ye.y(_=>{(0,Y.D)(this.selection.selected.map(n=>this.rgwUserService.delete(n.uid))).subscribe({error:n=>{_.error(n),this.table.refreshBtn()},complete:()=>{_.complete(),this.table.refreshBtn()}})})})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Ee.j),e.Y36(O),e.Y36(Te.Z),e.Y36(Q.F),e.Y36(A.p4),e.Y36(e.R0b))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-list"]],viewQuery:function(_,n){if(1&_&&(e.Gf(z.a,7),e.Gf(Yo,7),e.Gf(Jo,7)),2&_){let i;e.iGM(i=e.CRH())&&(n.table=i.first),e.iGM(i=e.CRH())&&(n.userSizeTpl=i.first),e.iGM(i=e.CRH())&&(n.userObjectTpl=i.first)}},features:[e._Bn([{provide:Q.F,useValue:new Q.F("rgw/user")}]),e.qOj],decls:8,vars:9,consts:function(){let o,_;return o="No Limit",_="No 
Limit",[["columnMode","flex","selectionType","multiClick","identifier","uid",3,"autoReload","data","columns","hasDetails","status","setExpandedRow","updateSelection","fetchData"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["userSizeTpl",""],["userObjectTpl",""],[3,"total","used",4,"ngIf","ngIfElse"],["noSizeQuota",""],[3,"total","used"],o,[3,"total","used","isBinary",4,"ngIf","ngIfElse"],["noObjectQuota",""],[3,"total","used","isBinary"],_]},template:function(_,n){1&_&&(e.TgZ(0,"cd-table",0,1),e.NdJ("setExpandedRow",function(s){return n.setExpandedRow(s)})("updateSelection",function(s){return n.updateSelection(s)})("fetchData",function(s){return n.getUserList(s)}),e._UZ(2,"cd-table-actions",2),e._UZ(3,"cd-rgw-user-details",3),e.qZA(),e.YNc(4,ei,3,2,"ng-template",null,4,e.W1O),e.YNc(6,ni,3,2,"ng-template",null,5,e.W1O)),2&_&&(e.Q6J("autoReload",!1)("data",n.users)("columns",n.columns)("hasDetails",!0)("status",n.tableStatus),e.xp6(2),e.Q6J("permission",n.permission)("selection",n.selection)("tableActions",n.tableActions),e.xp6(1),e.Q6J("selection",n.expandedRow))},directives:[z.a,ke.K,Qo,f.O5,He.O],styles:[""]}),t})(),e_=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[f.ez,O_.m,a.u5,a.UX,U_.B,M.Oz,w.Bz,M.HK,Ue.b]]}),t})();const si=[{path:""},{path:"daemon",component:Bt,data:{breadcrumbs:"Daemons"}},{path:"user",data:{breadcrumbs:"Users"},children:[{path:"",component:ii},{path:A.MQ.CREATE,component:Ve,data:{breadcrumbs:A.Qn.CREATE}},{path:`${A.MQ.EDIT}/:uid`,component:Ve,data:{breadcrumbs:A.Qn.EDIT}}]},{path:"bucket",data:{breadcrumbs:"Buckets"},children:[{path:"",component:Ut},{path:A.MQ.CREATE,component:De,data:{breadcrumbs:A.Qn.CREATE}},{path:`${A.MQ.EDIT}/:bid`,component:De,data:{breadcrumbs:A.Qn.EDIT}}]}];let ai=(()=>{class t{}return t.\u0275fac=function(_){return 
new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[e_,w.Bz.forChild(si)]]}),t})()}}]); \ No newline at end of file diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/281.cd14092ccedeaf2d7d79.js b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/281.cd14092ccedeaf2d7d79.js deleted file mode 100644 index a064d6a98..000000000 --- a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/281.cd14092ccedeaf2d7d79.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[281],{59281:(si,Oe,r)=>{r.r(Oe),r.d(Oe,{RgwModule:()=>e_,RoutedRgwModule:()=>ii});var f=r(12057),a=r(24751),w=r(6283),M=r(38549),Ue=r(37496),A=r(79512),N_=r(44466),O_=r(66265),U_=r(23815),u=r.n(U_),Y=r(35758),Me=r(95152),We=r(33394),Ze=r(64762),$e=r(58497),me=r(25917),he=r(19773),W_=r(96736),Z_=r(5304),ge=r(20523),$_=r(93523),e=r(74788);let O=class{constructor(o,_){this.http=o,this.rgwDaemonService=_,this.url="api/rgw/user"}list(){return this.enumerate().pipe((0,he.zg)(o=>o.length>0?(0,Y.D)(o.map(_=>this.get(_))):(0,me.of)([])))}enumerate(){return this.rgwDaemonService.request(o=>this.http.get(this.url,{params:o}))}enumerateEmail(){return this.rgwDaemonService.request(o=>this.http.get(`${this.url}/get_emails`,{params:o}))}get(o){return this.rgwDaemonService.request(_=>this.http.get(`${this.url}/${o}`,{params:_}))}getQuota(o){return this.rgwDaemonService.request(_=>this.http.get(`${this.url}/${o}/quota`,{params:_}))}create(o){return this.rgwDaemonService.request(_=>(u().keys(o).forEach(n=>{_=_.append(n,o[n])}),this.http.post(this.url,null,{params:_})))}update(o,_){return this.rgwDaemonService.request(n=>(u().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.put(`${this.url}/${o}`,null,{params:n})))}updateQuota(o,_){return this.rgwDaemonService.request(n=>(u().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.put(`${this.url}/${o}/quota`,null,{params:n})))}delete(o){return 
this.rgwDaemonService.request(_=>this.http.delete(`${this.url}/${o}`,{params:_}))}createSubuser(o,_){return this.rgwDaemonService.request(n=>(u().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.post(`${this.url}/${o}/subuser`,null,{params:n})))}deleteSubuser(o,_){return this.rgwDaemonService.request(n=>this.http.delete(`${this.url}/${o}/subuser/${_}`,{params:n}))}addCapability(o,_,n){return this.rgwDaemonService.request(i=>(i=(i=i.append("type",_)).append("perm",n),this.http.post(`${this.url}/${o}/capability`,null,{params:i})))}deleteCapability(o,_,n){return this.rgwDaemonService.request(i=>(i=(i=i.append("type",_)).append("perm",n),this.http.delete(`${this.url}/${o}/capability`,{params:i})))}addS3Key(o,_){return this.rgwDaemonService.request(n=>(n=n.append("key_type","s3"),u().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.post(`${this.url}/${o}/key`,null,{params:n})))}deleteS3Key(o,_){return this.rgwDaemonService.request(n=>(n=(n=n.append("key_type","s3")).append("access_key",_),this.http.delete(`${this.url}/${o}/key`,{params:n})))}exists(o){return this.get(o).pipe((0,W_.h)(!0),(0,Z_.K)(_=>(u().isFunction(_.preventDefault)&&_.preventDefault(),(0,me.of)(!1))))}emailExists(o){return o=decodeURIComponent(o),this.enumerateEmail().pipe((0,he.zg)(_=>{const n=u().indexOf(_,o);return(0,me.of)(-1!==n)}))}};O.\u0275fac=function(o){return new(o||O)(e.LFG($e.eN),e.LFG(ge.b))},O.\u0275prov=e.Yz7({token:O,factory:O.\u0275fac,providedIn:"root"}),O=(0,Ze.gn)([$_.o,(0,Ze.w6)("design:paramtypes",[$e.eN,ge.b])],O);var D=r(65862),Ae=r(18001),Ie=r(93614),m=r(77205),ve=r(97161),k=(()=>{return(t=k||(k={})).ENABLED="Enabled",t.DISABLED="Disabled",k;var t})(),B=(()=>{return(t=B||(B={})).ENABLED="Enabled",t.SUSPENDED="Suspended",B;var t})(),J=r(62862),Fe=r(63622),V=r(41582),H=r(56310),q=r(87925),X=r(94276),j=r(82945),h_=r(18372),ee=r(30839),K=r(10545);function 
I_(t,o){1&t&&(e.TgZ(0,"div",9),e.TgZ(1,"label",35),e.SDv(2,36),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",37),e.qZA(),e.qZA())}function v_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,39),e.qZA())}function F_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,40),e.qZA())}function L_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,41),e.qZA())}function D_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,42),e.qZA())}function x_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,43),e.qZA())}function y_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,44),e.qZA())}function q_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,45),e.qZA())}function w_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,46),e.qZA())}function k_(t,o){1&t&&(e.TgZ(0,"option",47),e.SDv(1,48),e.qZA()),2&t&&e.Q6J("ngValue",null)}function B_(t,o){1&t&&(e.TgZ(0,"option",47),e.SDv(1,49),e.qZA()),2&t&&e.Q6J("ngValue",null)}function H_(t,o){if(1&t&&(e.TgZ(0,"option",50),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function X_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,51),e.qZA())}function K_(t,o){1&t&&(e.TgZ(0,"option",47),e.SDv(1,53),e.qZA()),2&t&&e.Q6J("ngValue",null)}function z_(t,o){1&t&&(e.TgZ(0,"option",47),e.SDv(1,54),e.qZA()),2&t&&e.Q6J("ngValue",null)}function Q_(t,o){if(1&t&&(e.TgZ(0,"option",50),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_.name),e.xp6(1),e.Oqu(_.description)}}function Y_(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,55),e.qZA())}function J_(t,o){if(1&t&&(e.TgZ(0,"select",52),e.YNc(1,K_,2,1,"option",18),e.YNc(2,z_,2,1,"option",18),e.YNc(3,Q_,2,2,"option",19),e.qZA(),e.YNc(4,Y_,2,0,"span",14)),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(1),e.Q6J("ngIf",null===n.placementTargets),e.xp6(1),e.Q6J("ngIf",null!==n.placementTargets),e.xp6(1),e.Q6J("ngForOf",n.placementTargets),e.xp6(1),e.Q6J("ngIf",n.bucketForm.showError("placement-target",_,"required"))}}function V_(t,o){1&t&&(e.ynx(0),e._UZ(1,"input",56),e.BQk())}function j_(t,o){if(1&t){const 
_=e.EpF();e.TgZ(0,"fieldset"),e.TgZ(1,"legend",25),e.SDv(2,57),e.qZA(),e.TgZ(3,"div",9),e.TgZ(4,"div",27),e.TgZ(5,"div",28),e.TgZ(6,"input",58),e.NdJ("change",function(){return e.CHM(_),e.oxw(2).setMfaDeleteValidators()}),e.qZA(),e.TgZ(7,"label",59),e.SDv(8,60),e.qZA(),e.TgZ(9,"cd-helper"),e.TgZ(10,"span"),e.SDv(11,61),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}}function et(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,70),e.qZA())}function _t(t,o){if(1&t&&(e.TgZ(0,"div",9),e.TgZ(1,"label",67),e.SDv(2,68),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",69),e.YNc(5,et,2,0,"span",14),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.bucketForm.showError("mfa-token-serial",_,"required"))}}function tt(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,74),e.qZA())}function nt(t,o){if(1&t&&(e.TgZ(0,"div",9),e.TgZ(1,"label",71),e.SDv(2,72),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",73),e.YNc(5,tt,2,0,"span",14),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.bucketForm.showError("mfa-token-pin",_,"required"))}}function ot(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset"),e.TgZ(1,"legend",25),e.SDv(2,62),e.qZA(),e.TgZ(3,"div",9),e.TgZ(4,"div",27),e.TgZ(5,"div",28),e.TgZ(6,"input",63),e.NdJ("change",function(){return e.CHM(_),e.oxw(2).setMfaDeleteValidators()}),e.qZA(),e.TgZ(7,"label",64),e.SDv(8,65),e.qZA(),e.TgZ(9,"cd-helper"),e.TgZ(10,"span"),e.SDv(11,66),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(12,_t,6,1,"div",8),e.YNc(13,nt,6,1,"div",8),e.qZA()}if(2&t){const _=e.oxw(2);e.xp6(12),e.Q6J("ngIf",_.areMfaCredentialsRequired()),e.xp6(1),e.Q6J("ngIf",_.areMfaCredentialsRequired())}}function it(t,o){1&t&&(e.TgZ(0,"div",9),e.TgZ(1,"label",75),e.SDv(2,76),e.qZA(),e.TgZ(3,"div",12),e.TgZ(4,"select",77),e.TgZ(5,"option",78),e.SDv(6,79),e.qZA(),e.TgZ(7,"option",80),e.SDv(8,81),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function st(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,86),e.qZA())}function 
at(t,o){1&t&&(e.TgZ(0,"span",38),e.SDv(1,87),e.qZA())}function rt(t,o){if(1&t&&(e.TgZ(0,"div",9),e.TgZ(1,"label",82),e.ynx(2),e.SDv(3,83),e.BQk(),e.TgZ(4,"cd-helper"),e.SDv(5,84),e.qZA(),e.qZA(),e.TgZ(6,"div",12),e._UZ(7,"input",85),e.YNc(8,st,2,0,"span",14),e.YNc(9,at,2,0,"span",14),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(8),e.Q6J("ngIf",n.bucketForm.showError("lock_retention_period_days",_,"pattern")),e.xp6(1),e.Q6J("ngIf",n.bucketForm.showError("lock_retention_period_days",_,"lockDays"))}}const Le=function(t){return{required:t}};function lt(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"div",1),e.TgZ(1,"form",2,3),e.TgZ(3,"div",4),e.TgZ(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.YNc(9,I_,5,0,"div",8),e.TgZ(10,"div",9),e.TgZ(11,"label",10),e.SDv(12,11),e.qZA(),e.TgZ(13,"div",12),e._UZ(14,"input",13),e.YNc(15,v_,2,0,"span",14),e.YNc(16,F_,2,0,"span",14),e.YNc(17,L_,2,0,"span",14),e.YNc(18,D_,2,0,"span",14),e.YNc(19,x_,2,0,"span",14),e.YNc(20,y_,2,0,"span",14),e.YNc(21,q_,2,0,"span",14),e.YNc(22,w_,2,0,"span",14),e.qZA(),e.qZA(),e.TgZ(23,"div",9),e.TgZ(24,"label",15),e.SDv(25,16),e.qZA(),e.TgZ(26,"div",12),e.TgZ(27,"select",17),e.YNc(28,k_,2,1,"option",18),e.YNc(29,B_,2,1,"option",18),e.YNc(30,H_,2,2,"option",19),e.qZA(),e.YNc(31,X_,2,0,"span",14),e.qZA(),e.qZA(),e.TgZ(32,"div",9),e.TgZ(33,"label",20),e.SDv(34,21),e.qZA(),e.TgZ(35,"div",12),e.YNc(36,J_,5,4,"ng-template",null,22,e.W1O),e.YNc(38,V_,2,0,"ng-container",23),e.qZA(),e.qZA(),e.YNc(39,j_,12,0,"fieldset",24),e.YNc(40,ot,14,2,"fieldset",24),e.TgZ(41,"fieldset"),e.TgZ(42,"legend",25),e.SDv(43,26),e.qZA(),e.TgZ(44,"div",9),e.TgZ(45,"div",27),e.TgZ(46,"div",28),e._UZ(47,"input",29),e.TgZ(48,"label",30),e.SDv(49,31),e.qZA(),e.TgZ(50,"cd-helper"),e.TgZ(51,"span"),e.SDv(52,32),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(53,it,9,0,"div",8),e.YNc(54,rt,10,2,"div",8),e.qZA(),e.qZA(),e.TgZ(55,"div",33),e.TgZ(56,"cd-form-button-panel",34),e.NdJ("submitActi
onEvent",function(){return e.CHM(_),e.oxw().submit()}),e.ALo(57,"titlecase"),e.ALo(58,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&t){const _=e.MAs(2),n=e.MAs(37),i=e.oxw();e.xp6(1),e.Q6J("formGroup",i.bucketForm),e.xp6(6),e.pQV(e.lcZ(6,29,i.action))(e.lcZ(7,31,i.resource)),e.QtT(5),e.xp6(2),e.Q6J("ngIf",i.editing),e.xp6(2),e.Q6J("ngClass",e.VKq(37,Le,!i.editing)),e.xp6(3),e.Q6J("readonly",i.editing)("autofocus",!i.editing),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"required")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"bucketNameInvalid")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"bucketNameNotAllowed")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"containsUpperCase")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"lowerCaseOrNumber")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"ipAddress")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"onlyLowerCaseAndNumbers")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"shouldBeInRange")),e.xp6(5),e.Q6J("autofocus",i.editing),e.xp6(1),e.Q6J("ngIf",null===i.owners),e.xp6(1),e.Q6J("ngIf",null!==i.owners),e.xp6(1),e.Q6J("ngForOf",i.owners),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("owner",_,"required")),e.xp6(2),e.Q6J("ngClass",e.VKq(39,Le,!i.editing)),e.xp6(5),e.Q6J("ngIf",i.editing)("ngIfElse",n),e.xp6(1),e.Q6J("ngIf",i.editing),e.xp6(1),e.Q6J("ngIf",i.editing),e.xp6(13),e.Q6J("ngIf",i.bucketForm.getValue("lock_enabled")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.getValue("lock_enabled")),e.xp6(2),e.Q6J("form",i.bucketForm)("submitText",e.lcZ(57,33,i.action)+" "+e.lcZ(58,35,i.resource))}}let De=(()=>{class t extends 
Ie.E{constructor(_,n,i,s,c,d,E,g){super(),this.route=_,this.router=n,this.formBuilder=i,this.rgwBucketService=s,this.rgwSiteService=c,this.rgwUserService=d,this.notificationService=E,this.actionLabels=g,this.editing=!1,this.owners=null,this.placementTargets=[],this.isVersioningAlreadyEnabled=!1,this.isMfaDeleteAlreadyEnabled=!1,this.icons=D.P,this.editing=this.router.url.startsWith(`/rgw/bucket/${A.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.resource="bucket",this.createForm()}get isVersioningEnabled(){return this.bucketForm.getValue("versioning")}get isMfaDeleteEnabled(){return this.bucketForm.getValue("mfa-delete")}createForm(){const _=this,n=m.h.custom("lockDays",()=>{if(!_.bucketForm||!u().get(_.bucketForm.getRawValue(),"lock_enabled"))return!1;const i=Number(_.bucketForm.getValue("lock_retention_period_days"));return!Number.isInteger(i)||0===i});this.bucketForm=this.formBuilder.group({id:[null],bid:[null,[a.kI.required],this.editing?[]:[m.h.bucketName(),m.h.bucketExistence(!1,this.rgwBucketService)]],owner:[null,[a.kI.required]],"placement-target":[null,this.editing?[]:[a.kI.required]],versioning:[null],"mfa-delete":[null],"mfa-token-serial":[""],"mfa-token-pin":[""],lock_enabled:[{value:!1,disabled:this.editing}],lock_mode:["COMPLIANCE"],lock_retention_period_days:[0,[m.h.number(!1),n]]})}ngOnInit(){const _={owners:this.rgwUserService.enumerate()};this.editing||(_.getPlacementTargets=this.rgwSiteService.get("placement-targets")),this.route.params.subscribe(n=>{if(n.hasOwnProperty("bid")){const i=decodeURIComponent(n.bid);_.getBid=this.rgwBucketService.get(i)}(0,Y.D)(_).subscribe(i=>{if(this.owners=i.owners.sort(),i.getPlacementTargets){const s=i.getPlacementTargets;this.zonegroup=s.zonegroup,u().forEach(s.placement_targets,c=>{c.description=`${c.name} (${"pool"}: 
${c.data_pool})`,this.placementTargets.push(c)}),1===this.placementTargets.length&&this.bucketForm.get("placement-target").setValue(this.placementTargets[0].name)}if(i.getBid){const s=i.getBid,c=u().clone(this.bucketForm.getRawValue());let d=u().pick(s,u().keys(c));d.lock_retention_period_days=this.rgwBucketService.getLockDays(s),d["placement-target"]=s.placement_rule,d.versioning=s.versioning===B.ENABLED,d["mfa-delete"]=s.mfa_delete===k.ENABLED,d=u().merge(c,d),this.bucketForm.setValue(d),this.editing&&(this.isVersioningAlreadyEnabled=this.isVersioningEnabled,this.isMfaDeleteAlreadyEnabled=this.isMfaDeleteEnabled,this.setMfaDeleteValidators(),d.lock_enabled&&this.bucketForm.controls.versioning.disable())}this.loadingReady()})})}goToListView(){this.router.navigate(["/rgw/bucket"])}submit(){if(this.bucketForm.pristine)return void this.goToListView();const _=this.bucketForm.value;if(this.editing){const n=this.getVersioningStatus(),i=this.getMfaDeleteStatus();this.rgwBucketService.update(_.bid,_.id,_.owner,n,i,_["mfa-token-serial"],_["mfa-token-pin"],_.lock_mode,_.lock_retention_period_days).subscribe(()=>{this.notificationService.show(Ae.k.success,"Updated Object Gateway bucket '" + _.bid + "'."),this.goToListView()},()=>{this.bucketForm.setErrors({cdSubmitButton:!0})})}else this.rgwBucketService.create(_.bid,_.owner,this.zonegroup,_["placement-target"],_.lock_enabled,_.lock_mode,_.lock_retention_period_days).subscribe(()=>{this.notificationService.show(Ae.k.success,"Created Object Gateway bucket '" + _.bid + "'"),this.goToListView()},()=>{this.bucketForm.setErrors({cdSubmitButton:!0})})}areMfaCredentialsRequired(){return this.isMfaDeleteEnabled!==this.isMfaDeleteAlreadyEnabled||this.isMfaDeleteAlreadyEnabled&&this.isVersioningEnabled!==this.isVersioningAlreadyEnabled}setMfaDeleteValidators(){const 
_=this.bucketForm.get("mfa-token-serial"),n=this.bucketForm.get("mfa-token-pin");this.areMfaCredentialsRequired()?(_.setValidators(a.kI.required),n.setValidators(a.kI.required)):(_.setValidators(null),n.setValidators(null)),_.updateValueAndValidity(),n.updateValueAndValidity()}getVersioningStatus(){return this.isVersioningEnabled?B.ENABLED:B.SUSPENDED}getMfaDeleteStatus(){return this.isMfaDeleteEnabled?k.ENABLED:k.DISABLED}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(w.gz),e.Y36(w.F0),e.Y36(J.O),e.Y36(Me.o),e.Y36(We.I),e.Y36(O),e.Y36(ve.g),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let o,_,n,i,s,c,d,E,g,C,b,P,G,N,p,U,W,Z,$,h,I,v,F,T,x,y,S,_e,te,ne,oe,ie,se,ae,re,le,ce,de,ue,Re;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",n="Name...",i="Owner",s="Placement target",c="Locking",d="Enabled",E="Enables locking for the objects in the bucket. Locking can only be enabled while creating a bucket.",g="Id",C="This field is required.",b="Bucket names can only contain lowercase letters, numbers, periods and hyphens.",P="The chosen name is already in use.",G="Bucket names must not contain uppercase characters or underscores.",N="Each label must start and end with a lowercase letter or a number.",p="Bucket names cannot be formatted as IP address.",U="Bucket labels cannot be empty and can only contain lowercase letters, numbers and hyphens.",W="Bucket names must be 3 to 63 characters long.",Z="Loading...",$="-- Select a user --",h="This field is required.",I="Loading...",v="-- Select a placement target --",F="This field is required.",T="Versioning",x="Enabled",y="Enables versioning for the objects in the bucket.",S="Multi-Factor Authentication",_e="Delete enabled",te="Enables MFA (multi-factor authentication) Delete, which requires additional authentication for changing the bucket versioning state.",ne="Token Serial Number",oe="This field is required.",ie="Token 
PIN",se="This field is required.",ae="Mode",re="Compliance",le="Governance",ce="Days",de="The number of days that you want to specify for the default retention period that will be applied to new objects placed in this bucket.",ue="The entered value must be a positive integer.",Re="Retention Days must be a positive integer.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","bucketForm","novalidate","",3,"formGroup"],["frm","ngForm"],[1,"card"],[1,"card-header"],o,[1,"card-body"],["class","form-group row",4,"ngIf"],[1,"form-group","row"],["for","bid",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","bid","name","bid","type","text","placeholder",n,"formControlName","bid",1,"form-control",3,"readonly","autofocus"],["class","invalid-feedback",4,"ngIf"],["for","owner",1,"cd-col-form-label","required"],i,["id","owner","name","owner","formControlName","owner",1,"form-control",3,"autofocus"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],["for","placement-target",1,"cd-col-form-label",3,"ngClass"],s,["placementTargetSelect",""],[4,"ngIf","ngIfElse"],[4,"ngIf"],[1,"cd-header"],c,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","lock_enabled","formControlName","lock_enabled","type","checkbox",1,"custom-control-input"],["for","lock_enabled",1,"custom-control-label"],d,E,[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],["for","id",1,"cd-col-form-label"],g,["id","id","name","id","type","text","formControlName","id","readonly","",1,"form-control"],[1,"invalid-feedback"],C,b,P,G,N,p,U,W,[3,"ngValue"],Z,$,[3,"value"],h,["id","placement-target","name","placement-target","formControlName","placement-target",1,"form-control"],I,v,F,["id","placement-target","name","placement-target","formControlName","placement-target","type","text","readonly","",1,"form-control"],T,["type","checkbox","id","versioning","name","versioning","formControlName","versioning",1,"custom-control-input",3,
"change"],["for","versioning",1,"custom-control-label"],x,y,S,["type","checkbox","id","mfa-delete","name","mfa-delete","formControlName","mfa-delete",1,"custom-control-input",3,"change"],["for","mfa-delete",1,"custom-control-label"],_e,te,["for","mfa-token-serial",1,"cd-col-form-label"],ne,["type","text","id","mfa-token-serial","name","mfa-token-serial","formControlName","mfa-token-serial",1,"form-control"],oe,["for","mfa-token-pin",1,"cd-col-form-label"],ie,["type","text","id","mfa-token-pin","name","mfa-token-pin","formControlName","mfa-token-pin",1,"form-control"],se,["for","lock_mode",1,"cd-col-form-label"],ae,["formControlName","lock_mode","name","lock_mode","id","lock_mode",1,"form-control"],["value","COMPLIANCE"],re,["value","GOVERNANCE"],le,["for","lock_retention_period_days",1,"cd-col-form-label"],ce,de,["type","number","id","lock_retention_period_days","formControlName","lock_retention_period_days","min","0",1,"form-control"],ue,Re]},template:function(_,n){1&_&&e.YNc(0,lt,59,41,"div",0),2&_&&e.Q6J("cdFormLoading",n.loading)},directives:[Fe.y,a._Y,a.JL,V.V,a.sg,f.O5,H.P,f.mk,q.o,a.Fj,X.b,a.JJ,a.u,j.U,a.EJ,f.sg,a.Wl,h_.S,ee.p,a.YN,a.Kr,a.wV,a.qQ],pipes:[f.rS,K.m],styles:[""]}),t})();var xe=r(18891),be=r(68136),ye=r(30982),z=r(64337),Pe=r(68774),qe=r(47557),we=r(66369),Q=r(51847),Ee=r(74937),Te=r(63285),ke=r(94928),ct=r(96102),Be=r(68962);function dt(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,24),e.qZA())}function ut(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimless"),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.selection.bucket_quota.max_size)," ")}}function Rt(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,25),e.qZA())}function gt(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.hij(" ",_.selection.bucket_quota.max_objects," ")}}function 
Et(t,o){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"legend"),e.SDv(2,20),e.qZA(),e.TgZ(3,"table",1),e.TgZ(4,"tbody"),e.TgZ(5,"tr"),e.TgZ(6,"td",2),e.SDv(7,21),e.qZA(),e.TgZ(8,"td",4),e._uU(9),e.ALo(10,"booleanText"),e.qZA(),e.qZA(),e.TgZ(11,"tr"),e.TgZ(12,"td",5),e.SDv(13,22),e.qZA(),e.YNc(14,dt,2,0,"td",0),e.YNc(15,ut,3,3,"td",0),e.qZA(),e.TgZ(16,"tr"),e.TgZ(17,"td",5),e.SDv(18,23),e.qZA(),e.YNc(19,Rt,2,0,"td",0),e.YNc(20,gt,2,1,"td",0),e.qZA(),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw(2);e.xp6(9),e.Oqu(e.lcZ(10,5,_.selection.bucket_quota.enabled)),e.xp6(5),e.Q6J("ngIf",_.selection.bucket_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.selection.bucket_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",_.selection.bucket_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.selection.bucket_quota.max_objects>-1)}}function Tt(t,o){if(1&t&&(e.ynx(0),e.TgZ(1,"tr"),e.TgZ(2,"td",5),e.SDv(3,26),e.qZA(),e.TgZ(4,"td"),e._uU(5),e.qZA(),e.qZA(),e.TgZ(6,"tr"),e.TgZ(7,"td",5),e.SDv(8,27),e.qZA(),e.TgZ(9,"td"),e._uU(10),e.qZA(),e.qZA(),e.BQk()),2&t){const _=e.oxw(2);e.xp6(5),e.Oqu(_.selection.lock_mode),e.xp6(5),e.Oqu(_.selection.lock_retention_period_days)}}function 
St(t,o){if(1&t&&(e.ynx(0),e.TgZ(1,"table",1),e.TgZ(2,"tbody"),e.TgZ(3,"tr"),e.TgZ(4,"td",2),e.SDv(5,3),e.qZA(),e.TgZ(6,"td",4),e._uU(7),e.qZA(),e.qZA(),e.TgZ(8,"tr"),e.TgZ(9,"td",5),e.SDv(10,6),e.qZA(),e.TgZ(11,"td"),e._uU(12),e.qZA(),e.qZA(),e.TgZ(13,"tr"),e.TgZ(14,"td",5),e.SDv(15,7),e.qZA(),e.TgZ(16,"td"),e._uU(17),e.qZA(),e.qZA(),e.TgZ(18,"tr"),e.TgZ(19,"td",5),e.SDv(20,8),e.qZA(),e.TgZ(21,"td"),e._uU(22),e.qZA(),e.qZA(),e.TgZ(23,"tr"),e.TgZ(24,"td",5),e.SDv(25,9),e.qZA(),e.TgZ(26,"td"),e._uU(27),e.qZA(),e.qZA(),e.TgZ(28,"tr"),e.TgZ(29,"td",5),e.SDv(30,10),e.qZA(),e.TgZ(31,"td"),e._uU(32),e.qZA(),e.qZA(),e.TgZ(33,"tr"),e.TgZ(34,"td",5),e.SDv(35,11),e.qZA(),e.TgZ(36,"td"),e._uU(37),e.qZA(),e.qZA(),e.TgZ(38,"tr"),e.TgZ(39,"td",5),e.SDv(40,12),e.qZA(),e.TgZ(41,"td"),e._uU(42),e.qZA(),e.qZA(),e.TgZ(43,"tr"),e.TgZ(44,"td",5),e.SDv(45,13),e.qZA(),e.TgZ(46,"td"),e._uU(47),e.qZA(),e.qZA(),e.TgZ(48,"tr"),e.TgZ(49,"td",5),e.SDv(50,14),e.qZA(),e.TgZ(51,"td"),e._uU(52),e.ALo(53,"cdDate"),e.qZA(),e.qZA(),e.TgZ(54,"tr"),e.TgZ(55,"td",5),e.SDv(56,15),e.qZA(),e.TgZ(57,"td"),e._uU(58),e.qZA(),e.qZA(),e.TgZ(59,"tr"),e.TgZ(60,"td",5),e.SDv(61,16),e.qZA(),e.TgZ(62,"td"),e._uU(63),e.qZA(),e.qZA(),e.TgZ(64,"tr"),e.TgZ(65,"td",5),e.SDv(66,17),e.qZA(),e.TgZ(67,"td"),e._uU(68),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(69,Et,21,7,"div",0),e.TgZ(70,"legend"),e.SDv(71,18),e.qZA(),e.TgZ(72,"table",1),e.TgZ(73,"tbody"),e.TgZ(74,"tr"),e.TgZ(75,"td",2),e.SDv(76,19),e.qZA(),e.TgZ(77,"td",4),e._uU(78),e.ALo(79,"booleanText"),e.qZA(),e.qZA(),e.YNc(80,Tt,11,2,"ng-container",0),e.qZA(),e.qZA(),e.BQk()),2&t){const 
_=e.oxw();e.xp6(7),e.Oqu(_.selection.bid),e.xp6(5),e.Oqu(_.selection.id),e.xp6(5),e.Oqu(_.selection.owner),e.xp6(5),e.Oqu(_.selection.index_type),e.xp6(5),e.Oqu(_.selection.placement_rule),e.xp6(5),e.Oqu(_.selection.marker),e.xp6(5),e.Oqu(_.selection.max_marker),e.xp6(5),e.Oqu(_.selection.ver),e.xp6(5),e.Oqu(_.selection.master_ver),e.xp6(5),e.Oqu(e.lcZ(53,16,_.selection.mtime)),e.xp6(6),e.Oqu(_.selection.zonegroup),e.xp6(5),e.Oqu(_.selection.versioning),e.xp6(5),e.Oqu(_.selection.mfa_delete),e.xp6(1),e.Q6J("ngIf",_.selection.bucket_quota),e.xp6(9),e.Oqu(e.lcZ(79,18,_.selection.lock_enabled)),e.xp6(2),e.Q6J("ngIf",_.selection.lock_enabled)}}let ft=(()=>{class t{constructor(_){this.rgwBucketService=_}ngOnChanges(){this.selection&&this.rgwBucketService.get(this.selection.bid).subscribe(_=>{_.lock_retention_period_days=this.rgwBucketService.getLockDays(_),this.selection=_})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Me.o))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-details"]],inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let o,_,n,i,s,c,d,E,g,C,b,P,G,N,p,U,W,Z,$,h,I,v,F;return o="Name",_="ID",n="Owner",i="Index type",s="Placement rule",c="Marker",d="Maximum marker",E="Version",g="Master version",C="Modification time",b="Zonegroup",P="Versioning",G="MFA Delete",N="Locking",p="Enabled",U="Bucket quota",W="Enabled",Z="Maximum size",$="Maximum objects",h="Unlimited",I="Unlimited",v="Mode",F="Days",[[4,"ngIf"],[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],o,[1,"w-75"],[1,"bold"],_,n,i,s,c,d,E,g,C,b,P,G,N,p,U,W,Z,$,h,I,v,F]},template:function(_,n){1&_&&e.YNc(0,St,81,20,"ng-container",0),2&_&&e.Q6J("ngIf",n.selection)},directives:[f.O5],pipes:[ct.N,Be.T,we.n],styles:["table[_ngcontent-%COMP%]{table-layout:fixed}table[_ngcontent-%COMP%] td[_ngcontent-%COMP%]{word-wrap:break-word}"]}),t})();var He=r(60251);const Ct=["bucketSizeTpl"],pt=["bucketObjectTpl"];function 
Mt(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",8),2&t){const _=e.oxw().row;e.Q6J("total",_.bucket_quota.max_size)("used",_.bucket_size)}}function mt(t,o){1&t&&e.SDv(0,9)}function At(t,o){if(1&t&&(e.YNc(0,Mt,1,2,"cd-usage-bar",6),e.YNc(1,mt,1,0,"ng-template",null,7,e.W1O)),2&t){const _=o.row,n=e.MAs(2);e.Q6J("ngIf",_.bucket_quota.max_size>0&&_.bucket_quota.enabled)("ngIfElse",n)}}function bt(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",12),2&t){const _=e.oxw().row;e.Q6J("total",_.bucket_quota.max_objects)("used",_.num_objects)("isBinary",!1)}}function Pt(t,o){1&t&&e.SDv(0,13)}function Gt(t,o){if(1&t&&(e.YNc(0,bt,1,3,"cd-usage-bar",10),e.YNc(1,Pt,1,0,"ng-template",null,11,e.W1O)),2&t){const _=o.row,n=e.MAs(2);e.Q6J("ngIf",_.bucket_quota.max_objects>0&&_.bucket_quota.enabled)("ngIfElse",n)}}let Ot=(()=>{class t extends be.o{constructor(_,n,i,s,c,d,E,g){super(g),this.authStorageService=_,this.dimlessBinaryPipe=n,this.dimlessPipe=i,this.rgwBucketService=s,this.modalService=c,this.urlBuilder=d,this.actionLabels=E,this.ngZone=g,this.columns=[],this.buckets=[],this.selection=new Pe.r}ngOnInit(){this.permission=this.authStorageService.getPermissions().rgw,this.columns=[{name:"Name",prop:"bid",flexGrow:2},{name:"Owner",prop:"owner",flexGrow:2.5},{name:"Used Capacity",prop:"bucket_size",flexGrow:.6,pipe:this.dimlessBinaryPipe},{name:"Capacity Limit %",prop:"size_usage",cellTemplate:this.bucketSizeTpl,flexGrow:.8},{name:"Objects",prop:"num_objects",flexGrow:.6,pipe:this.dimlessPipe},{name:"Object Limit %",prop:"object_usage",cellTemplate:this.bucketObjectTpl,flexGrow:.8}];const 
_=()=>this.selection.first()&&`${encodeURIComponent(this.selection.first().bid)}`;this.tableActions=[{permission:"create",icon:D.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE,canBePrimary:c=>!c.hasSelection},{permission:"update",icon:D.P.edit,routerLink:()=>this.urlBuilder.getEdit(_()),name:this.actionLabels.EDIT},{permission:"delete",icon:D.P.destroy,click:()=>this.deleteAction(),disable:()=>!this.selection.hasSelection,name:this.actionLabels.DELETE,canBePrimary:c=>c.hasMultiSelection}],this.setTableRefreshTimeout()}transformBucketData(){u().forEach(this.buckets,_=>{const n=_.bucket_quota.max_size,i=_.bucket_quota.max_objects;_.bucket_size=0,_.num_objects=0,u().isEmpty(_.usage)||(_.bucket_size=_.usage["rgw.main"].size_actual,_.num_objects=_.usage["rgw.main"].num_objects),_.size_usage=n>0?_.bucket_size/n:void 0,_.object_usage=i>0?_.num_objects/i:void 0})}getBucketList(_){this.setTableRefreshTimeout(),this.rgwBucketService.list(!0).subscribe(n=>{this.buckets=n,this.transformBucketData()},()=>{_.error()})}updateSelection(_){this.selection=_}deleteAction(){this.modalService.show(ye.M,{itemDescription:this.selection.hasSingleSelection?"bucket":"buckets",itemNames:this.selection.selected.map(_=>_.bid),submitActionObservable:()=>new xe.y(_=>{(0,Y.D)(this.selection.selected.map(n=>this.rgwBucketService.delete(n.bid))).subscribe({error:n=>{_.error(n),this.table.refreshBtn()},complete:()=>{_.complete(),this.table.refreshBtn()}})})})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Ee.j),e.Y36(qe.$),e.Y36(we.n),e.Y36(Me.o),e.Y36(Te.Z),e.Y36(Q.F),e.Y36(A.p4),e.Y36(e.R0b))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-list"]],viewQuery:function(_,n){if(1&_&&(e.Gf(z.a,7),e.Gf(Ct,7),e.Gf(pt,7)),2&_){let i;e.iGM(i=e.CRH())&&(n.table=i.first),e.iGM(i=e.CRH())&&(n.bucketSizeTpl=i.first),e.iGM(i=e.CRH())&&(n.bucketObjectTpl=i.first)}},features:[e._Bn([{provide:Q.F,useValue:new 
Q.F("rgw/bucket")}]),e.qOj],decls:8,vars:9,consts:function(){let o,_;return o="No Limit",_="No Limit",[["columnMode","flex","selectionType","multiClick","identifier","bid",3,"autoReload","data","columns","hasDetails","status","setExpandedRow","updateSelection","fetchData"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["bucketSizeTpl",""],["bucketObjectTpl",""],[3,"total","used",4,"ngIf","ngIfElse"],["noSizeQuota",""],[3,"total","used"],o,[3,"total","used","isBinary",4,"ngIf","ngIfElse"],["noObjectQuota",""],[3,"total","used","isBinary"],_]},template:function(_,n){1&_&&(e.TgZ(0,"cd-table",0,1),e.NdJ("setExpandedRow",function(s){return n.setExpandedRow(s)})("updateSelection",function(s){return n.updateSelection(s)})("fetchData",function(s){return n.getBucketList(s)}),e._UZ(2,"cd-table-actions",2),e._UZ(3,"cd-rgw-bucket-details",3),e.qZA(),e.YNc(4,At,3,2,"ng-template",null,4,e.W1O),e.YNc(6,Gt,3,2,"ng-template",null,5,e.W1O)),2&_&&(e.Q6J("autoReload",!1)("data",n.buckets)("columns",n.columns)("hasDetails",!0)("status",n.tableStatus),e.xp6(2),e.Q6J("permission",n.permission)("selection",n.selection)("tableActions",n.tableActions),e.xp6(1),e.Q6J("selection",n.expandedRow))},directives:[z.a,ke.K,ft,f.O5,He.O],styles:[""]}),t})();var Ut=r(58111),Xe=r(59376),Wt=r(61350),Zt=r(98056),Ke=r(76317);function $t(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table-key-value",11),e.NdJ("fetchData",function(){return e.CHM(_),e.oxw(2).getMetaData()}),e.qZA()}if(2&t){const _=e.oxw(2);e.Q6J("data",_.metadata)}}function ht(t,o){if(1&t&&e._UZ(0,"cd-table-performance-counter",12),2&t){const _=e.oxw(2);e.Q6J("serviceId",_.serviceMapId)}}function It(t,o){if(1&t&&e._UZ(0,"cd-grafana",15),2&t){const _=e.oxw(3);e.Q6J("grafanaPath","rgw-instance-detail?var-rgw_servers=rgw."+_.serviceId)}}function vt(t,o){1&t&&(e.TgZ(0,"li",13),e.TgZ(1,"a",4),e.SDv(2,14),e.qZA(),e.YNc(3,It,1,1,"ng-template",6),e.qZA())}function 
Ft(t,o){if(1&t&&(e.ynx(0),e.TgZ(1,"ul",1,2),e.TgZ(3,"li",3),e.TgZ(4,"a",4),e.SDv(5,5),e.qZA(),e.YNc(6,$t,1,1,"ng-template",6),e.qZA(),e.TgZ(7,"li",7),e.TgZ(8,"a",4),e.SDv(9,8),e.qZA(),e.YNc(10,ht,1,1,"ng-template",6),e.qZA(),e.YNc(11,vt,4,0,"li",9),e.qZA(),e._UZ(12,"div",10),e.BQk()),2&t){const _=e.MAs(2),n=e.oxw();e.xp6(11),e.Q6J("ngIf",n.grafanaPermission.read),e.xp6(1),e.Q6J("ngbNavOutlet",_)}}let Lt=(()=>{class t{constructor(_,n){this.rgwDaemonService=_,this.authStorageService=n,this.serviceId="",this.serviceMapId="",this.grafanaPermission=this.authStorageService.getPermissions().grafana}ngOnChanges(){this.selection&&(this.serviceId=this.selection.id,this.serviceMapId=this.selection.service_map_id)}getMetaData(){u().isEmpty(this.serviceId)||this.rgwDaemonService.get(this.serviceId).subscribe(_=>{this.metadata=_.rgw_metadata})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ge.b),e.Y36(Ee.j))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-daemon-details"]],inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let o,_,n;return o="Details",_="Performance Counters",n="Performance Details",[[4,"ngIf"],["ngbNav","","cdStatefulTab","rgw-daemon-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],o,["ngbNavContent",""],["ngbNavItem","performance-counters"],_,["ngbNavItem","performance-details",4,"ngIf"],[3,"ngbNavOutlet"],[3,"data","fetchData"],["serviceType","rgw",3,"serviceId"],["ngbNavItem","performance-details"],n,["uid","x5ARzZtmk","grafanaStyle","one",3,"grafanaPath"]]},template:function(_,n){1&_&&e.YNc(0,Ft,13,2,"ng-container",0),2&_&&e.Q6J("ngIf",n.selection)},directives:[f.O5,M.Pz,Xe.m,M.nv,M.Vx,M.uN,M.tO,Wt.b,Zt.p,Ke.F],styles:[""]}),t})();function Dt(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table",8),e.NdJ("setExpandedRow",function(i){return e.CHM(_),e.oxw().setExpandedRow(i)})("fetchData",function(i){return 
e.CHM(_),e.oxw().getDaemonList(i)}),e._UZ(1,"cd-rgw-daemon-details",9),e.qZA()}if(2&t){const _=e.oxw();e.Q6J("data",_.daemons)("columns",_.columns)("hasDetails",!0),e.xp6(1),e.Q6J("selection",_.expandedRow)}}function xt(t,o){1&t&&e._UZ(0,"cd-grafana",11),2&t&&e.Q6J("grafanaPath","rgw-overview?")}function yt(t,o){1&t&&(e.TgZ(0,"li",2),e.TgZ(1,"a",3),e.SDv(2,10),e.qZA(),e.YNc(3,xt,1,1,"ng-template",5),e.qZA())}function qt(t,o){1&t&&e._UZ(0,"cd-grafana",13),2&t&&e.Q6J("grafanaPath","radosgw-sync-overview?")}function wt(t,o){1&t&&(e.TgZ(0,"li",2),e.TgZ(1,"a",3),e.SDv(2,12),e.qZA(),e.YNc(3,qt,1,1,"ng-template",5),e.qZA())}let kt=(()=>{class t extends be.o{constructor(_,n,i,s){super(),this.rgwDaemonService=_,this.authStorageService=n,this.cephShortVersionPipe=i,this.rgwSiteService=s,this.columns=[],this.daemons=[],this.updateDaemons=c=>{this.daemons=c}}ngOnInit(){this.grafanaPermission=this.authStorageService.getPermissions().grafana,this.columns=[{name:"ID",prop:"id",flexGrow:2},{name:"Hostname",prop:"server_hostname",flexGrow:2},{name:"Zone",prop:"zone_name",flexGrow:2},{name:"Zone Group",prop:"zonegroup_name",flexGrow:2},{name:"Realm",prop:"realm_name",flexGrow:2},{name:"Version",prop:"version",flexGrow:1,pipe:this.cephShortVersionPipe}],this.rgwSiteService.get("realms").subscribe(_=>this.isMultiSite=_.length>0)}getDaemonList(_){this.rgwDaemonService.list().subscribe(this.updateDaemons,()=>{_.error()})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ge.b),e.Y36(Ee.j),e.Y36(Ut.F),e.Y36(We.I))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-daemon-list"]],features:[e.qOj],decls:9,vars:3,consts:function(){let o,_,n;return o="Daemons List",_="Overall Performance",n="Sync 
Performance",[["ngbNav","",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem",""],["ngbNavLink",""],o,["ngbNavContent",""],["ngbNavItem","",4,"ngIf"],[3,"ngbNavOutlet"],["columnMode","flex",3,"data","columns","hasDetails","setExpandedRow","fetchData"],["cdTableDetail","",3,"selection"],_,["uid","WAkugZpiz","grafanaStyle","two",3,"grafanaPath"],n,["uid","rgw-sync-overview","grafanaStyle","two",3,"grafanaPath"]]},template:function(_,n){if(1&_&&(e.TgZ(0,"ul",0,1),e.TgZ(2,"li",2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,Dt,2,4,"ng-template",5),e.qZA(),e.YNc(6,yt,4,0,"li",6),e.YNc(7,wt,4,0,"li",6),e.qZA(),e._UZ(8,"div",7)),2&_){const i=e.MAs(1);e.xp6(6),e.Q6J("ngIf",n.grafanaPermission.read),e.xp6(1),e.Q6J("ngIf",n.grafanaPermission.read&&n.isMultiSite),e.xp6(1),e.Q6J("ngbNavOutlet",i)}},directives:[M.Pz,M.nv,M.Vx,M.uN,f.O5,M.tO,z.a,Lt,Ke.F],styles:[""]}),t})();var Bt=r(58071),Ge=r(28211),Se=(()=>{return(t=Se||(Se={})).USERS="users",t.BUCKETS="buckets",t.METADATA="metadata",t.USAGE="usage",t.ZONE="zone",Se;var t})();let ze=(()=>{class t{static getAll(){return Object.values(t.capabilities)}}return t.capabilities=Se,t})();var fe=r(60312);function Ht(t,o){1&t&&e._UZ(0,"input",22),2&t&&e.Q6J("readonly",!0)}function Xt(t,o){1&t&&(e.TgZ(0,"option",17),e.SDv(1,25),e.qZA()),2&t&&e.Q6J("ngValue",null)}function Kt(t,o){if(1&t&&(e.TgZ(0,"option",26),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function zt(t,o){if(1&t&&(e.TgZ(0,"select",23),e.YNc(1,Xt,2,1,"option",24),e.YNc(2,Kt,2,2,"option",19),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf",null!==_.types),e.xp6(1),e.Q6J("ngForOf",_.types)}}function Qt(t,o){1&t&&(e.TgZ(0,"span",27),e.SDv(1,28),e.qZA())}function Yt(t,o){if(1&t&&(e.TgZ(0,"option",26),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Jt(t,o){1&t&&(e.TgZ(0,"span",27),e.SDv(1,29),e.qZA())}const Vt=function(t){return{required:t}},jt=function(){return["read","write","*"]};let en=(()=>{class 
t{constructor(_,n,i){this.formBuilder=_,this.activeModal=n,this.actionLabels=i,this.submitAction=new e.vpe,this.editing=!0,this.types=[],this.resource="capability",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({type:[null,[a.kI.required]],perm:[null,[a.kI.required]]})}setEditing(_=!0){this.editing=_,this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.ADD}setValues(_,n){this.formGroup.setValue({type:_,perm:n})}setCapabilities(_){const n=[];_.forEach(i=>{n.push(i.type)}),this.types=[],ze.getAll().forEach(i=>{-1===u().indexOf(n,i)&&this.types.push(i)})}onSubmit(){this.submitAction.emit(this.formGroup.value),this.activeModal.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(J.O),e.Y36(M.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-capability-modal"]],outputs:{submitAction:"submitAction"},decls:29,vars:24,consts:function(){let o,_,n,i,s,c,d;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Type",n="Permission",i="-- Select a permission --",s="-- Select a type --",c="This field is required.",d="This field is 
required.",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","type",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","type","class","form-control","type","text","formControlName","type",3,"readonly",4,"ngIf"],["id","type","class","form-control","formControlName","type","autofocus","",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["for","perm",1,"cd-col-form-label","required"],n,["id","perm","formControlName","perm",1,"form-control"],[3,"ngValue"],i,[3,"value",4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["id","type","type","text","formControlName","type",1,"form-control",3,"readonly"],["id","type","formControlName","type","autofocus","",1,"form-control"],[3,"ngValue",4,"ngIf"],s,[3,"value"],[1,"invalid-feedback"],c,d]},template:function(_,n){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e.YNc(13,Ht,1,1,"input",11),e.YNc(14,zt,3,2,"select",12),e.YNc(15,Qt,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(16,"div",7),e.TgZ(17,"label",14),e.SDv(18,15),e.qZA(),e.TgZ(19,"div",10),e.TgZ(20,"select",16),e.TgZ(21,"option",17),e.SDv(22,18),e.qZA(),e.YNc(23,Yt,2,2,"option",19),e.qZA(),e.YNc(24,Jt,2,0,"span",13),e.qZA(),e.qZA(),e.qZA(),e.TgZ(25,"div",20),e.TgZ(26,"cd-form-button-panel",21),e.NdJ("submitActionEvent",function(){return n.onSubmit()}),e.ALo(27,"titlecase"),e.ALo(28,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_){const 
i=e.MAs(7);e.Q6J("modalRef",n.activeModal),e.xp6(4),e.pQV(e.lcZ(3,13,n.action))(e.lcZ(4,15,n.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",n.formGroup),e.xp6(4),e.Q6J("ngClass",e.VKq(21,Vt,!n.editing)),e.xp6(3),e.Q6J("ngIf",n.editing),e.xp6(1),e.Q6J("ngIf",!n.editing),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("type",i,"required")),e.xp6(6),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",e.DdM(23,jt)),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("perm",i,"required")),e.xp6(2),e.Q6J("form",n.formGroup)("submitText",e.lcZ(27,17,n.action)+" "+e.lcZ(28,19,n.resource))}},directives:[fe.z,a._Y,a.JL,V.V,a.sg,H.P,f.mk,f.O5,q.o,a.EJ,X.b,a.JJ,a.u,a.YN,a.Kr,f.sg,ee.p,a.Fj,j.U],pipes:[f.rS,K.m],styles:[""]}),t})();var Ce=r(4416),pe=r(58039);function _n(t,o){1&t&&e._UZ(0,"input",17),2&t&&e.Q6J("readonly",!0)}function tn(t,o){1&t&&(e.TgZ(0,"option",21),e.SDv(1,22),e.qZA()),2&t&&e.Q6J("ngValue",null)}function nn(t,o){if(1&t&&(e.TgZ(0,"option",23),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function on(t,o){if(1&t&&(e.TgZ(0,"select",18),e.YNc(1,tn,2,1,"option",19),e.YNc(2,nn,2,2,"option",20),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf",null!==_.userCandidates),e.xp6(1),e.Q6J("ngForOf",_.userCandidates)}}function sn(t,o){1&t&&(e.TgZ(0,"span",24),e.SDv(1,25),e.qZA())}function an(t,o){1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"div",26),e.TgZ(2,"div",27),e._UZ(3,"input",28),e.TgZ(4,"label",29),e.SDv(5,30),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function rn(t,o){1&t&&(e.TgZ(0,"span",24),e.SDv(1,38),e.qZA())}const Ne=function(t){return{required:t}};function ln(t,o){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",31),e.SDv(2,32),e.qZA(),e.TgZ(3,"div",10),e.TgZ(4,"div",33),e._UZ(5,"input",34),e.TgZ(6,"span",35),e._UZ(7,"button",36),e._UZ(8,"cd-copy-2-clipboard-button",37),e.qZA(),e.qZA(),e.YNc(9,rn,2,0,"span",13),e.qZA(),e.qZA()),2&t){const 
_=e.oxw(),n=e.MAs(7);e.xp6(1),e.Q6J("ngClass",e.VKq(3,Ne,!_.viewing)),e.xp6(4),e.Q6J("readonly",_.viewing),e.xp6(4),e.Q6J("ngIf",_.formGroup.showError("access_key",n,"required"))}}function cn(t,o){1&t&&(e.TgZ(0,"span",24),e.SDv(1,44),e.qZA())}function dn(t,o){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",39),e.SDv(2,40),e.qZA(),e.TgZ(3,"div",10),e.TgZ(4,"div",33),e._UZ(5,"input",41),e.TgZ(6,"span",35),e._UZ(7,"button",42),e._UZ(8,"cd-copy-2-clipboard-button",43),e.qZA(),e.qZA(),e.YNc(9,cn,2,0,"span",13),e.qZA(),e.qZA()),2&t){const _=e.oxw(),n=e.MAs(7);e.xp6(1),e.Q6J("ngClass",e.VKq(3,Ne,!_.viewing)),e.xp6(4),e.Q6J("readonly",_.viewing),e.xp6(4),e.Q6J("ngIf",_.formGroup.showError("secret_key",n,"required"))}}let Qe=(()=>{class t{constructor(_,n,i){this.formBuilder=_,this.activeModal=n,this.actionLabels=i,this.submitAction=new e.vpe,this.viewing=!0,this.userCandidates=[],this.resource="S3 Key",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({user:[null,[a.kI.required]],generate_key:[!0],access_key:[null,[m.h.requiredIf({generate_key:!1})]],secret_key:[null,[m.h.requiredIf({generate_key:!1})]]})}setViewing(_=!0){this.viewing=_,this.action=this.viewing?this.actionLabels.SHOW:this.actionLabels.CREATE}setValues(_,n,i){this.formGroup.setValue({user:_,generate_key:u().isEmpty(n),access_key:n,secret_key:i})}setUserCandidates(_){this.userCandidates=_}onSubmit(){this.submitAction.emit(this.formGroup.value),this.activeModal.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(J.O),e.Y36(M.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-s3-key-modal"]],outputs:{submitAction:"submitAction"},decls:23,vars:24,consts:function(){let o,_,n,i,s,c,d,E,g;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",n="-- Select a username --",i="This field is required.",s="Auto-generate key",c="Access key",d="This field is required.",E="Secret key",g="This field is 
required.",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","user",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","user","class","form-control","type","text","formControlName","user",3,"readonly",4,"ngIf"],["id","user","class","form-control","formControlName","user","autofocus","",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["class","form-group row",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","showSubmit","submitActionEvent"],["id","user","type","text","formControlName","user",1,"form-control",3,"readonly"],["id","user","formControlName","user","autofocus","",1,"form-control"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],[3,"ngValue"],n,[3,"value"],[1,"invalid-feedback"],i,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","generate_key","type","checkbox","formControlName","generate_key",1,"custom-control-input"],["for","generate_key",1,"custom-control-label"],s,["for","access_key",1,"cd-col-form-label",3,"ngClass"],c,[1,"input-group"],["id","access_key","type","password","formControlName","access_key",1,"form-control",3,"readonly"],[1,"input-group-append"],["type","button","cdPasswordButton","access_key",1,"btn","btn-light"],["source","access_key"],d,["for","secret_key",1,"cd-col-form-label",3,"ngClass"],E,["id","secret_key","type","password","formControlName","secret_key",1,"form-control",3,"readonly"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],g]},template:function(_,n){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e.YNc(13,_n,1,1,"input",11),e.YNc(14,on,3,2,"select",12),e.YNc(15,sn,2,0,"span",13),e.qZA(),e.qZA(),e.YNc(16,an,6,0,"div",14),e.YNc(17,ln,10,5,"div",14),e.YNc(18
,dn,10,5,"div",14),e.qZA(),e.TgZ(19,"div",15),e.TgZ(20,"cd-form-button-panel",16),e.NdJ("submitActionEvent",function(){return n.onSubmit()}),e.ALo(21,"titlecase"),e.ALo(22,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",n.activeModal),e.xp6(4),e.pQV(e.lcZ(3,14,n.action))(e.lcZ(4,16,n.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",n.formGroup),e.xp6(4),e.Q6J("ngClass",e.VKq(22,Ne,!n.viewing)),e.xp6(3),e.Q6J("ngIf",n.viewing),e.xp6(1),e.Q6J("ngIf",!n.viewing),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",!n.viewing),e.xp6(1),e.Q6J("ngIf",!n.formGroup.getValue("generate_key")),e.xp6(1),e.Q6J("ngIf",!n.formGroup.getValue("generate_key")),e.xp6(2),e.Q6J("form",n.formGroup)("submitText",e.lcZ(21,18,n.action)+" "+e.lcZ(22,20,n.resource))("showSubmit",!n.viewing)}},directives:[fe.z,a._Y,a.JL,V.V,a.sg,H.P,f.mk,f.O5,ee.p,q.o,a.Fj,X.b,a.JJ,a.u,a.EJ,j.U,f.sg,a.YN,a.Kr,a.Wl,Ce.C,pe.s],pipes:[f.rS,K.m],styles:[""]}),t})();class un{}function Rn(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,30),e.qZA())}function gn(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,31),e.qZA())}function En(t,o){if(1&t&&(e.TgZ(0,"option",32),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Tn(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,33),e.qZA())}function Sn(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,48),e.qZA())}function fn(t,o){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",41),e.SDv(2,42),e.qZA(),e.TgZ(3,"div",10),e.TgZ(4,"div",43),e._UZ(5,"input",44),e.TgZ(6,"span",45),e._UZ(7,"button",46),e._UZ(8,"cd-copy-2-clipboard-button",47),e.qZA(),e.qZA(),e.YNc(9,Sn,2,0,"span",15),e.qZA(),e.qZA()),2&t){const _=e.oxw(2),n=e.MAs(7);e.xp6(9),e.Q6J("ngIf",_.formGroup.showError("secret_key",n,"required"))}}function 
Cn(t,o){if(1&t&&(e.TgZ(0,"fieldset"),e.TgZ(1,"legend"),e.SDv(2,34),e.qZA(),e.TgZ(3,"div",7),e.TgZ(4,"div",35),e.TgZ(5,"div",36),e._UZ(6,"input",37),e.TgZ(7,"label",38),e.SDv(8,39),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(9,fn,10,1,"div",40),e.qZA()),2&t){const _=e.oxw();e.xp6(9),e.Q6J("ngIf",!_.editing&&!_.formGroup.getValue("generate_secret"))}}const pn=function(t){return{required:t}},Mn=function(){return["read","write"]};let mn=(()=>{class t{constructor(_,n,i){this.formBuilder=_,this.bsModalRef=n,this.actionLabels=i,this.submitAction=new e.vpe,this.editing=!0,this.subusers=[],this.resource="Subuser",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({uid:[null],subuid:[null,[a.kI.required,this.subuserValidator()]],perm:[null,[a.kI.required]],generate_secret:[!0],secret_key:[null,[m.h.requiredIf({generate_secret:!1})]]})}subuserValidator(){const _=this;return n=>_.editing||(0,m.P)(n.value)?null:_.subusers.some(s=>u().isEqual(_.getSubuserName(s.id),n.value))?{subuserIdExists:!0}:null}getSubuserName(_){if(u().isEmpty(_))return _;const n=_.match(/([^:]+)(:(.+))?/);return u().isUndefined(n[3])?n[1]:n[3]}setEditing(_=!0){this.editing=_,this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE}setValues(_,n="",i=""){this.formGroup.setValue({uid:_,subuid:this.getSubuserName(n),perm:i,generate_secret:!0,secret_key:null})}setSubusers(_){this.subusers=_}onSubmit(){const _=this.formGroup.value,n=new un;n.id=`${_.uid}:${_.subuid}`,n.permissions=_.perm,n.generate_secret=_.generate_secret,n.secret_key=_.secret_key,this.submitAction.emit(n),this.bsModalRef.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(J.O),e.Y36(M.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-subuser-modal"]],outputs:{submitAction:"submitAction"},decls:39,vars:26,consts:function(){let o,_,n,i,s,c,d,E,g,C,b,P,G,N;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",n="Subuser",i="Permission",s="-- Select a permission 
--",c="read, write",d="full",E="This field is required.",g="The chosen subuser ID is already in use.",C="This field is required.",b="Swift key",P="Auto-generate secret",G="Secret key",N="This field is required.",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","uid",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["id","uid","type","text","formControlName","uid",1,"form-control",3,"readonly"],["for","subuid",1,"cd-col-form-label",3,"ngClass"],n,["id","subuid","type","text","formControlName","subuid","autofocus","",1,"form-control",3,"readonly"],["class","invalid-feedback",4,"ngIf"],["for","perm",1,"cd-col-form-label","required"],i,["id","perm","formControlName","perm",1,"form-control"],[3,"ngValue"],s,[3,"value",4,"ngFor","ngForOf"],["value","read-write"],c,["value","full-control"],d,[4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],E,g,[3,"value"],C,b,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","generate_secret","type","checkbox","formControlName","generate_secret",1,"custom-control-input"],["for","generate_secret",1,"custom-control-label"],P,["class","form-group 
row",4,"ngIf"],["for","secret_key",1,"cd-col-form-label","required"],G,[1,"input-group"],["id","secret_key","type","password","formControlName","secret_key",1,"form-control"],[1,"input-group-append"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],N]},template:function(_,n){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.qZA(),e.qZA(),e.TgZ(14,"div",7),e.TgZ(15,"label",12),e.SDv(16,13),e.qZA(),e.TgZ(17,"div",10),e._UZ(18,"input",14),e.YNc(19,Rn,2,0,"span",15),e.YNc(20,gn,2,0,"span",15),e.qZA(),e.qZA(),e.TgZ(21,"div",7),e.TgZ(22,"label",16),e.SDv(23,17),e.qZA(),e.TgZ(24,"div",10),e.TgZ(25,"select",18),e.TgZ(26,"option",19),e.SDv(27,20),e.qZA(),e.YNc(28,En,2,2,"option",21),e.TgZ(29,"option",22),e.SDv(30,23),e.qZA(),e.TgZ(31,"option",24),e.SDv(32,25),e.qZA(),e.qZA(),e.YNc(33,Tn,2,0,"span",15),e.qZA(),e.qZA(),e.YNc(34,Cn,10,1,"fieldset",26),e.qZA(),e.TgZ(35,"div",27),e.TgZ(36,"cd-form-button-panel",28),e.NdJ("submitActionEvent",function(){return n.onSubmit()}),e.ALo(37,"titlecase"),e.ALo(38,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",n.bsModalRef),e.xp6(4),e.pQV(e.lcZ(3,15,n.action))(e.lcZ(4,17,n.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",n.formGroup),e.xp6(7),e.Q6J("readonly",!0),e.xp6(2),e.Q6J("ngClass",e.VKq(23,pn,!n.editing)),e.xp6(3),e.Q6J("readonly",n.editing),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("subuid",i,"required")),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("subuid",i,"subuserIdExists")),e.xp6(6),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",e.DdM(25,Mn)),e.xp6(5),e.Q6J("ngIf",n.formGroup.showError("perm",i,"required")),e.xp6(1),e.Q6J("ngIf",!n.editing),e.xp6(2),e.Q6J("form",n.formGroup)("submitText",e.lcZ(37,19,n.action)+" 
"+e.lcZ(38,21,n.resource))}},directives:[fe.z,a._Y,a.JL,V.V,a.sg,H.P,q.o,a.Fj,X.b,a.JJ,a.u,f.mk,j.U,f.O5,a.EJ,a.YN,a.Kr,f.sg,ee.p,a.Wl,Ce.C,pe.s],pipes:[f.rS,K.m],styles:[""]}),t})();var An=r(13472);let Ye=(()=>{class t{constructor(_,n){this.activeModal=_,this.actionLabels=n,this.resource="Swift Key",this.action=this.actionLabels.SHOW}setValues(_,n){this.user=_,this.secret_key=n}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(M.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-swift-key-modal"]],decls:24,vars:11,consts:function(){let o,_,n;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",n="Secret key",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],[1,"modal-body"],["novalidate",""],[1,"form-group","row"],["for","user",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["id","user","name","user","type","text",1,"form-control",3,"readonly","ngModel","ngModelChange"],["for","secret_key",1,"cd-col-form-label"],n,[1,"input-group"],["id","secret_key","name","secret_key","type","password",1,"form-control",3,"ngModel","readonly","ngModelChange"],[1,"input-group-append"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],[1,"modal-footer"],[3,"backAction"]]},template:function(_,n){1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"div",4),e.TgZ(7,"form",5),e.TgZ(8,"div",6),e.TgZ(9,"label",7),e.SDv(10,8),e.qZA(),e.TgZ(11,"div",9),e.TgZ(12,"input",10),e.NdJ("ngModelChange",function(s){return n.user=s}),e.qZA(),e.qZA(),e.qZA(),e.TgZ(13,"div",6),e.TgZ(14,"label",11),e.SDv(15,12),e.qZA(),e.TgZ(16,"div",9),e.TgZ(17,"div",13),e.TgZ(18,"input",14),e.NdJ("ngModelChange",function(s){return 
n.secret_key=s}),e.qZA(),e.TgZ(19,"span",15),e._UZ(20,"button",16),e._UZ(21,"cd-copy-2-clipboard-button",17),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.TgZ(22,"div",18),e.TgZ(23,"cd-back-button",19),e.NdJ("backAction",function(){return n.activeModal.close()}),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_&&(e.Q6J("modalRef",n.activeModal),e.xp6(4),e.pQV(e.lcZ(3,7,n.action))(e.lcZ(4,9,n.resource)),e.QtT(2),e.xp6(8),e.Q6J("readonly",!0)("ngModel",n.user),e.xp6(6),e.Q6J("ngModel",n.secret_key)("readonly",!0))},directives:[fe.z,a._Y,a.JL,a.F,H.P,q.o,a.Fj,X.b,a.JJ,a.On,Ce.C,pe.s,An.W],pipes:[f.rS,K.m],styles:[""]}),t})();var bn=r(17932);function Pn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,50),e.qZA())}function Gn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,51),e.qZA())}function Nn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,52),e.qZA())}function On(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,56),e.qZA())}function Un(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,57),e.qZA())}function Wn(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",53),e.SDv(2,54),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",55),e.YNc(5,On,2,0,"span",13),e.YNc(6,Un,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(4),e.Q6J("readonly",n.editing),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("tenant",_,"pattern")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("tenant",_,"notUnique"))}}function Zn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,58),e.qZA())}function $n(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,59),e.qZA())}function hn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,60),e.qZA())}function In(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,63),e.qZA())}function vn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,64),e.qZA())}function Fn(t,o){if(1&t&&(e.TgZ(0,"div",8),e._UZ(1,"label",61),e.TgZ(2,"div",11),e._UZ(3,"input",62),e.YNc(4,In,2,0,"span",13),e.YNc(5,vn,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const 
_=e.MAs(2),n=e.oxw();e.xp6(4),e.Q6J("ngIf",n.userForm.showError("max_buckets",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("max_buckets",_,"min"))}}function Ln(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,76),e.qZA())}function Dn(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",69),e.SDv(2,70),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"div",71),e._UZ(5,"input",72),e.TgZ(6,"span",73),e._UZ(7,"button",74),e._UZ(8,"cd-copy-2-clipboard-button",75),e.qZA(),e.qZA(),e.YNc(9,Ln,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(9),e.Q6J("ngIf",n.userForm.showError("access_key",_,"required"))}}function xn(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,82),e.qZA())}function yn(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",77),e.SDv(2,78),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"div",71),e._UZ(5,"input",79),e.TgZ(6,"span",73),e._UZ(7,"button",80),e._UZ(8,"cd-copy-2-clipboard-button",81),e.qZA(),e.qZA(),e.YNc(9,xn,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(9),e.Q6J("ngIf",n.userForm.showError("secret_key",_,"required"))}}function qn(t,o){if(1&t&&(e.TgZ(0,"fieldset"),e.TgZ(1,"legend"),e.SDv(2,65),e.qZA(),e.TgZ(3,"div",8),e.TgZ(4,"div",14),e.TgZ(5,"div",15),e._UZ(6,"input",66),e.TgZ(7,"label",67),e.SDv(8,68),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(9,Dn,10,1,"div",19),e.YNc(10,yn,10,1,"div",19),e.qZA()),2&t){const _=e.oxw(2);e.xp6(9),e.Q6J("ngIf",!_.editing&&!_.userForm.getValue("generate_key")),e.xp6(1),e.Q6J("ngIf",!_.editing&&!_.userForm.getValue("generate_key"))}}function wn(t,o){1&t&&(e.TgZ(0,"span",92),e.TgZ(1,"span",93),e.SDv(2,94),e.qZA(),e.qZA())}const L=function(t){return[t]};function kn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"span"),e.TgZ(1,"div",71),e.TgZ(2,"div",95),e.TgZ(3,"span",96),e._UZ(4,"i"),e.qZA(),e.qZA(),e._UZ(5,"input",97),e.TgZ(6,"div",98),e.TgZ(7,"span",96),e._UZ(8,"i"),e.qZA(),e.qZA(),e._UZ(9,"input",97),e.TgZ(10,"span",73),e.TgZ(11,"button",99),e.NdJ("click",function(){const s=e.CHM(_).index;return 
e.oxw(3).showSubuserModal(s)}),e._UZ(12,"i",89),e.qZA(),e.TgZ(13,"button",100),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).deleteSubuser(s)}),e._UZ(14,"i",89),e.qZA(),e.qZA(),e.qZA(),e._UZ(15,"span",93),e.qZA()}if(2&t){const _=o.$implicit,n=e.oxw(3);e.xp6(4),e.Tol(n.icons.user),e.xp6(1),e.s9C("value",_.id),e.xp6(3),e.Tol(n.icons.share),e.xp6(1),e.s9C("value","full-control"===_.permissions?"full":_.permissions),e.xp6(3),e.Q6J("ngClass",e.VKq(10,L,n.icons.edit)),e.xp6(2),e.Q6J("ngClass",e.VKq(12,L,n.icons.destroy))}}function Bn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset"),e.TgZ(1,"legend"),e.SDv(2,83),e.qZA(),e.TgZ(3,"div",84),e.TgZ(4,"div",14),e.YNc(5,wn,3,0,"span",85),e.YNc(6,kn,16,14,"span",86),e.TgZ(7,"div",84),e.TgZ(8,"div",87),e.TgZ(9,"button",88),e.NdJ("click",function(){return e.CHM(_),e.oxw(2).showSubuserModal()}),e._UZ(10,"i",89),e.ynx(11),e.SDv(12,90),e.ALo(13,"titlecase"),e.ALo(14,"upperFirst"),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(15,"span",91),e.qZA(),e.qZA(),e.qZA()}if(2&t){const _=e.oxw(2);e.xp6(5),e.Q6J("ngIf",0===_.subusers.length),e.xp6(1),e.Q6J("ngForOf",_.subusers),e.xp6(4),e.Q6J("ngClass",e.VKq(9,L,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(13,5,_.actionLabels.CREATE))(e.lcZ(14,7,_.subuserLabel)),e.QtT(12)}}function Hn(t,o){1&t&&(e.TgZ(0,"span",92),e.TgZ(1,"span",93),e.SDv(2,106),e.qZA(),e.qZA())}function Xn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"span"),e.TgZ(1,"div",71),e.TgZ(2,"div",95),e.TgZ(3,"div",96),e._UZ(4,"i"),e.qZA(),e.qZA(),e._UZ(5,"input",97),e.TgZ(6,"span",73),e.TgZ(7,"button",107),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).showS3KeyModal(s)}),e._UZ(8,"i",89),e.qZA(),e.TgZ(9,"button",108),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).deleteS3Key(s)}),e._UZ(10,"i",89),e.qZA(),e.qZA(),e.qZA(),e._UZ(11,"span",93),e.qZA()}if(2&t){const 
_=o.$implicit,n=e.oxw(3);e.xp6(4),e.Tol(n.icons.key),e.xp6(1),e.s9C("value",_.user),e.xp6(3),e.Q6J("ngClass",e.VKq(6,L,n.icons.show)),e.xp6(2),e.Q6J("ngClass",e.VKq(8,L,n.icons.destroy))}}function Kn(t,o){1&t&&(e.TgZ(0,"span",92),e.TgZ(1,"span",93),e.SDv(2,109),e.qZA(),e.qZA())}function zn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"span"),e.TgZ(1,"div",71),e.TgZ(2,"div",95),e.TgZ(3,"span",96),e._UZ(4,"i"),e.qZA(),e.qZA(),e._UZ(5,"input",97),e.TgZ(6,"span",73),e.TgZ(7,"button",110),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).showSwiftKeyModal(s)}),e._UZ(8,"i",89),e.qZA(),e.qZA(),e.qZA(),e._UZ(9,"span",93),e.qZA()}if(2&t){const _=o.$implicit,n=e.oxw(3);e.xp6(4),e.Tol(n.icons.key),e.xp6(1),e.s9C("value",_.user),e.xp6(3),e.Q6J("ngClass",e.VKq(5,L,n.icons.show))}}function Qn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset"),e.TgZ(1,"legend"),e.SDv(2,101),e.qZA(),e.TgZ(3,"div",8),e.TgZ(4,"label",61),e.SDv(5,102),e.qZA(),e.TgZ(6,"div",11),e.YNc(7,Hn,3,0,"span",85),e.YNc(8,Xn,12,10,"span",86),e.TgZ(9,"div",84),e.TgZ(10,"div",87),e.TgZ(11,"button",103),e.NdJ("click",function(){return e.CHM(_),e.oxw(2).showS3KeyModal()}),e._UZ(12,"i",89),e.ynx(13),e.SDv(14,104),e.ALo(15,"titlecase"),e.ALo(16,"upperFirst"),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(17,"span",91),e.qZA(),e._UZ(18,"hr"),e.qZA(),e.TgZ(19,"div",8),e.TgZ(20,"label",61),e.SDv(21,105),e.qZA(),e.TgZ(22,"div",11),e.YNc(23,Kn,3,0,"span",85),e.YNc(24,zn,10,7,"span",86),e.qZA(),e.qZA(),e.qZA()}if(2&t){const _=e.oxw(2);e.xp6(7),e.Q6J("ngIf",0===_.s3Keys.length),e.xp6(1),e.Q6J("ngForOf",_.s3Keys),e.xp6(4),e.Q6J("ngClass",e.VKq(11,L,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(15,7,_.actionLabels.CREATE))(e.lcZ(16,9,_.s3keyLabel)),e.QtT(14),e.xp6(7),e.Q6J("ngIf",0===_.swiftKeys.length),e.xp6(1),e.Q6J("ngForOf",_.swiftKeys)}}function Yn(t,o){1&t&&(e.TgZ(0,"span",92),e.TgZ(1,"span",93),e.SDv(2,114),e.qZA(),e.qZA())}function Jn(t,o){if(1&t){const 
_=e.EpF();e.TgZ(0,"span"),e.TgZ(1,"div",71),e.TgZ(2,"span",95),e.TgZ(3,"div",96),e._UZ(4,"i"),e.qZA(),e.qZA(),e._UZ(5,"input",97),e.TgZ(6,"span",73),e.TgZ(7,"button",115),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).showCapabilityModal(s)}),e._UZ(8,"i",89),e.qZA(),e.TgZ(9,"button",116),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).deleteCapability(s)}),e._UZ(10,"i",89),e.qZA(),e.qZA(),e.qZA(),e._UZ(11,"span",93),e.qZA()}if(2&t){const _=o.$implicit,n=e.oxw(3);e.xp6(4),e.Tol(n.icons.share),e.xp6(1),e.hYB("value","",_.type,":",_.perm,""),e.xp6(3),e.Q6J("ngClass",e.VKq(7,L,n.icons.edit)),e.xp6(2),e.Q6J("ngClass",e.VKq(9,L,n.icons.destroy))}}function Vn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset"),e.TgZ(1,"legend"),e.SDv(2,111),e.qZA(),e.TgZ(3,"div",8),e.TgZ(4,"div",14),e.YNc(5,Yn,3,0,"span",85),e.YNc(6,Jn,12,11,"span",86),e.TgZ(7,"div",84),e.TgZ(8,"div",87),e.TgZ(9,"button",112),e.NdJ("click",function(){return e.CHM(_),e.oxw(2).showCapabilityModal()}),e.ALo(10,"pipeFunction"),e.ALo(11,"pipeFunction"),e._UZ(12,"i",89),e.ynx(13),e.SDv(14,113),e.ALo(15,"titlecase"),e.ALo(16,"upperFirst"),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(17,"span",91),e.qZA(),e.qZA(),e.qZA()}if(2&t){const _=e.oxw(2);e.xp6(5),e.Q6J("ngIf",0===_.capabilities.length),e.xp6(1),e.Q6J("ngForOf",_.capabilities),e.xp6(3),e.Q6J("disabled",e.xi3(10,7,_.capabilities,_.hasAllCapabilities))("disableTooltip",!e.xi3(11,10,_.capabilities,_.hasAllCapabilities)),e.xp6(3),e.Q6J("ngClass",e.VKq(17,L,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(15,13,_.actionLabels.ADD))(e.lcZ(16,15,_.capabilityLabel)),e.QtT(14)}}function jn(t,o){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"div",14),e.TgZ(2,"div",15),e._UZ(3,"input",117),e.TgZ(4,"label",118),e.SDv(5,119),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function eo(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,123),e.qZA())}function _o(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,124),e.qZA())}function 
to(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",120),e.SDv(2,121),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",122),e.YNc(5,eo,2,0,"span",13),e.YNc(6,_o,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("user_quota_max_size",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_quota_max_size",_,"quotaMaxSize"))}}function no(t,o){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"div",14),e.TgZ(2,"div",15),e._UZ(3,"input",125),e.TgZ(4,"label",126),e.SDv(5,127),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function oo(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,131),e.qZA())}function io(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,132),e.qZA())}function so(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",128),e.SDv(2,129),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",130),e.YNc(5,oo,2,0,"span",13),e.YNc(6,io,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("user_quota_max_objects",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_quota_max_objects",_,"min"))}}function ao(t,o){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"div",14),e.TgZ(2,"div",15),e._UZ(3,"input",133),e.TgZ(4,"label",134),e.SDv(5,135),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function ro(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,139),e.qZA())}function lo(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,140),e.qZA())}function co(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",136),e.SDv(2,137),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",138),e.YNc(5,ro,2,0,"span",13),e.YNc(6,lo,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_size",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_size",_,"quotaMaxSize"))}}function uo(t,o){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"div",14),e.TgZ(2,"div",15),e._UZ(3,"input",141),e.TgZ(4,"label",142),e.SDv(5,143),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function 
Ro(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,147),e.qZA())}function go(t,o){1&t&&(e.TgZ(0,"span",49),e.SDv(1,148),e.qZA())}function Eo(t,o){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",144),e.SDv(2,145),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",146),e.YNc(5,Ro,2,0,"span",13),e.YNc(6,go,2,0,"span",13),e.qZA(),e.qZA()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_objects",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_objects",_,"min"))}}const Je=function(t){return{required:t}};function To(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"div",1),e.TgZ(1,"form",2,3),e.TgZ(3,"div",4),e.TgZ(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.TgZ(9,"div",8),e.TgZ(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",12),e.YNc(14,Pn,2,0,"span",13),e.YNc(15,Gn,2,0,"span",13),e.YNc(16,Nn,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(17,"div",8),e.TgZ(18,"div",14),e.TgZ(19,"div",15),e.TgZ(20,"input",16),e.NdJ("click",function(){return e.CHM(_),e.oxw().updateFieldsWhenTenanted()}),e.qZA(),e.TgZ(21,"label",17),e.SDv(22,18),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(23,Wn,7,3,"div",19),e.TgZ(24,"div",8),e.TgZ(25,"label",20),e.SDv(26,21),e.qZA(),e.TgZ(27,"div",11),e._UZ(28,"input",22),e.YNc(29,Zn,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(30,"div",8),e.TgZ(31,"label",23),e.SDv(32,24),e.qZA(),e.TgZ(33,"div",11),e._UZ(34,"input",25),e.YNc(35,$n,2,0,"span",13),e.YNc(36,hn,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(37,"div",8),e.TgZ(38,"label",26),e.SDv(39,27),e.qZA(),e.TgZ(40,"div",11),e.TgZ(41,"select",28),e.NdJ("change",function(i){return 
e.CHM(_),e.oxw().onMaxBucketsModeChange(i.target.value)}),e.TgZ(42,"option",29),e.SDv(43,30),e.qZA(),e.TgZ(44,"option",31),e.SDv(45,32),e.qZA(),e.TgZ(46,"option",33),e.SDv(47,34),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(48,Fn,6,2,"div",19),e.TgZ(49,"div",8),e.TgZ(50,"div",14),e.TgZ(51,"div",15),e._UZ(52,"input",35),e.TgZ(53,"label",36),e.SDv(54,37),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(55,qn,11,2,"fieldset",38),e.YNc(56,Bn,16,11,"fieldset",38),e.YNc(57,Qn,25,13,"fieldset",38),e.YNc(58,Vn,18,19,"fieldset",38),e.TgZ(59,"fieldset"),e.TgZ(60,"legend"),e.SDv(61,39),e.qZA(),e.TgZ(62,"div",8),e.TgZ(63,"div",14),e.TgZ(64,"div",15),e._UZ(65,"input",40),e.TgZ(66,"label",41),e.SDv(67,42),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(68,jn,6,0,"div",19),e.YNc(69,to,7,2,"div",19),e.YNc(70,no,6,0,"div",19),e.YNc(71,so,7,2,"div",19),e.qZA(),e.TgZ(72,"fieldset"),e.TgZ(73,"legend"),e.SDv(74,43),e.qZA(),e.TgZ(75,"div",8),e.TgZ(76,"div",14),e.TgZ(77,"div",15),e._UZ(78,"input",44),e.TgZ(79,"label",45),e.SDv(80,46),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.YNc(81,ao,6,0,"div",19),e.YNc(82,co,7,2,"div",19),e.YNc(83,uo,6,0,"div",19),e.YNc(84,Eo,7,2,"div",19),e.qZA(),e.qZA(),e.TgZ(85,"div",47),e.TgZ(86,"cd-form-button-panel",48),e.NdJ("submitActionEvent",function(){return e.CHM(_),e.oxw().onSubmit()}),e.ALo(87,"titlecase"),e.ALo(88,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&t){const 
_=e.MAs(2),n=e.oxw();e.xp6(1),e.Q6J("formGroup",n.userForm),e.xp6(6),e.pQV(e.lcZ(6,29,n.action))(e.lcZ(7,31,n.resource)),e.QtT(5),e.xp6(3),e.Q6J("ngClass",e.VKq(37,Je,!n.editing)),e.xp6(3),e.Q6J("readonly",n.editing),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_id",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_id",_,"pattern")),e.xp6(1),e.Q6J("ngIf",!n.userForm.getValue("show_tenant")&&n.userForm.showError("user_id",_,"notUnique")),e.xp6(4),e.Q6J("readonly",!0),e.xp6(3),e.Q6J("ngIf",n.userForm.getValue("show_tenant")),e.xp6(2),e.Q6J("ngClass",e.VKq(39,Je,!n.editing)),e.xp6(4),e.Q6J("ngIf",n.userForm.showError("display_name",_,"required")),e.xp6(6),e.Q6J("ngIf",n.userForm.showError("email",_,"email")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("email",_,"notUnique")),e.xp6(12),e.Q6J("ngIf",1==n.userForm.get("max_buckets_mode").value),e.xp6(7),e.Q6J("ngIf",!n.editing),e.xp6(1),e.Q6J("ngIf",n.editing),e.xp6(1),e.Q6J("ngIf",n.editing),e.xp6(1),e.Q6J("ngIf",n.editing),e.xp6(10),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value&&!n.userForm.getValue("user_quota_max_size_unlimited")),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value&&!n.userForm.getValue("user_quota_max_objects_unlimited")),e.xp6(10),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value&&!n.userForm.getValue("bucket_quota_max_size_unlimited")),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value&&!n.userForm.getValue("bucket_quota_max_objects_unlimited")),e.xp6(2),e.Q6J("form",n.userForm)("submitText",e.lcZ(87,33,n.action)+" "+e.lcZ(88,35,n.resource))}}let Ve=(()=>{class t extends 
Ie.E{constructor(_,n,i,s,c,d,E){super(),this.formBuilder=_,this.route=n,this.router=i,this.rgwUserService=s,this.modalService=c,this.notificationService=d,this.actionLabels=E,this.editing=!1,this.submitObservables=[],this.icons=D.P,this.subusers=[],this.s3Keys=[],this.swiftKeys=[],this.capabilities=[],this.showTenant=!1,this.previousTenant=null,this.resource="user",this.subuserLabel="subuser",this.s3keyLabel="S3 Key",this.capabilityLabel="capability",this.editing=this.router.url.startsWith(`/rgw/user/${A.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.createForm()}createForm(){this.userForm=this.formBuilder.group({user_id:[null,[a.kI.required,a.kI.pattern(/^[a-zA-Z0-9!@#%^&*()_-]+$/)],this.editing?[]:[m.h.unique(this.rgwUserService.exists,this.rgwUserService,()=>this.userForm.getValue("tenant"))]],show_tenant:[this.editing],tenant:[null,[a.kI.pattern(/^[a-zA-Z0-9!@#%^&*()_-]+$/)],this.editing?[]:[m.h.unique(this.rgwUserService.exists,this.rgwUserService,()=>this.userForm.getValue("user_id"),!0)]],display_name:[null,[a.kI.required]],email:[null,[m.h.email],[m.h.unique(this.rgwUserService.emailExists,this.rgwUserService)]],max_buckets_mode:[1],max_buckets:[1e3,[m.h.requiredIf({max_buckets_mode:"1"}),m.h.number(!1)]],suspended:[!1],generate_key:[!0],access_key:[null,[m.h.requiredIf({generate_key:!1})]],secret_key:[null,[m.h.requiredIf({generate_key:!1})]],user_quota_enabled:[!1],user_quota_max_size_unlimited:[!0],user_quota_max_size:[null,[m.h.composeIf({user_quota_enabled:!0,user_quota_max_size_unlimited:!1},[a.kI.required,this.quotaMaxSizeValidator])]],user_quota_max_objects_unlimited:[!0],user_quota_max_objects:[null,[m.h.requiredIf({user_quota_enabled:!0,user_quota_max_objects_unlimited:!1})]],bucket_quota_enabled:[!1],bucket_quota_max_size_unlimited:[!0],bucket_quota_max_size:[null,[m.h.composeIf({bucket_quota_enabled:!0,bucket_quota_max_size_unlimited:!1},[a.kI.required,this.quotaMaxSizeValidator])]],bucket_quota_max_objec
ts_unlimited:[!0],bucket_quota_max_objects:[null,[m.h.requiredIf({bucket_quota_enabled:!0,bucket_quota_max_objects_unlimited:!1})]]})}ngOnInit(){this.route.params.subscribe(_=>{if(!_.hasOwnProperty("uid"))return void this.loadingReady();const n=decodeURIComponent(_.uid),i=[];i.push(this.rgwUserService.get(n)),i.push(this.rgwUserService.getQuota(n)),(0,Y.D)(i).subscribe(s=>{const c=u().clone(this.userForm.value);let d=u().pick(s[0],u().keys(this.userForm.value));switch(d.max_buckets){case-1:d.max_buckets_mode=-1,d.max_buckets="";break;case 0:d.max_buckets_mode=0,d.max_buckets="";break;default:d.max_buckets_mode=1}["user","bucket"].forEach(g=>{const C=s[1][g+"_quota"];d[g+"_quota_enabled"]=C.enabled,C.max_size<0?(d[g+"_quota_max_size_unlimited"]=!0,d[g+"_quota_max_size"]=null):(d[g+"_quota_max_size_unlimited"]=!1,d[g+"_quota_max_size"]=`${C.max_size} B`),C.max_objects<0?(d[g+"_quota_max_objects_unlimited"]=!0,d[g+"_quota_max_objects"]=null):(d[g+"_quota_max_objects_unlimited"]=!1,d[g+"_quota_max_objects"]=C.max_objects)}),d=u().merge(c,d),this.userForm.setValue(d),this.subusers=s[0].subusers,this.s3Keys=s[0].keys,this.swiftKeys=s[0].swift_keys;const E={"read, write":"*"};s[0].caps.forEach(g=>{g.perm in E&&(g.perm=E[g.perm])}),this.capabilities=s[0].caps,this.loadingReady()},()=>{this.loadingError()})})}goToListView(){this.router.navigate(["/rgw/user"])}onSubmit(){let _;if(this.userForm.pristine)return void this.goToListView();const n=this.getUID();if(this.editing){if(this._isGeneralDirty()){const i=this._getUpdateArgs();this.submitObservables.push(this.rgwUserService.update(n,i))}_="Updated Object Gateway user '" + n + "'"}else{const i=this._getCreateArgs();this.submitObservables.push(this.rgwUserService.create(i)),_="Created Object Gateway user '" + n + "'"}if(this._isUserQuotaDirty()){const i=this._getUserQuotaArgs();this.submitObservables.push(this.rgwUserService.updateQuota(n,i))}if(this._isBucketQuotaDirty()){const 
i=this._getBucketQuotaArgs();this.submitObservables.push(this.rgwUserService.updateQuota(n,i))}(0,Bt.z)(...this.submitObservables).subscribe({error:()=>{this.userForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.notificationService.show(Ae.k.success,_),this.goToListView()}})}updateFieldsWhenTenanted(){this.showTenant=this.userForm.getValue("show_tenant"),this.showTenant?(this.userForm.get("user_id").markAsTouched(),this.previousTenant=this.userForm.get("tenant").value,this.userForm.get("tenant").patchValue(null)):(this.userForm.get("user_id").markAsUntouched(),this.userForm.get("tenant").patchValue(this.previousTenant))}getUID(){var _;let n=this.userForm.getValue("user_id");const i=null===(_=this.userForm)||void 0===_?void 0:_.getValue("tenant");return i&&i.length>0&&(n=`${this.userForm.getValue("tenant")}$${n}`),n}quotaMaxSizeValidator(_){return(0,m.P)(_.value)?null:null===RegExp("^(\\d+(\\.\\d+)?)\\s*(B|K(B|iB)?|M(B|iB)?|G(B|iB)?|T(B|iB)?)?$","i").exec(_.value)||(new Ge.H).toBytes(_.value)<1024?{quotaMaxSize:!0}:null}setSubuser(_,n){const i={"full-control":"full","read-write":"readwrite"},s=this.getUID();this.submitObservables.push(this.rgwUserService.createSubuser(s,{subuser:_.id,access:_.permissions in i?i[_.permissions]:_.permissions,key_type:"swift",secret_key:_.secret_key,generate_secret:_.generate_secret?"true":"false"})),u().isNumber(n)?this.subusers[n]=_:(this.subusers.push(_),this.swiftKeys.push({user:_.id,secret_key:_.generate_secret?"Apply your changes first...":_.secret_key})),this.userForm.markAsDirty()}deleteSubuser(_){const n=this.subusers[_];this.submitObservables.push(this.rgwUserService.deleteSubuser(this.getUID(),n.id)),this.s3Keys=this.s3Keys.filter(i=>i.user!==n.id),this.swiftKeys=this.swiftKeys.filter(i=>i.user!==n.id),this.subusers.splice(_,1),this.userForm.markAsDirty()}setCapability(_,n){const i=this.getUID();if(u().isNumber(n)){const 
s=this.capabilities[n];this.submitObservables.push(this.rgwUserService.deleteCapability(i,s.type,s.perm)),this.submitObservables.push(this.rgwUserService.addCapability(i,_.type,_.perm)),this.capabilities[n]=_}else this.submitObservables.push(this.rgwUserService.addCapability(i,_.type,_.perm)),this.capabilities=[...this.capabilities,_];this.userForm.markAsDirty()}deleteCapability(_){const n=this.capabilities[_];this.submitObservables.push(this.rgwUserService.deleteCapability(this.getUID(),n.type,n.perm)),this.capabilities.splice(_,1),this.capabilities=[...this.capabilities],this.userForm.markAsDirty()}hasAllCapabilities(_){return!u().difference(ze.getAll(),u().map(_,"type")).length}setS3Key(_,n){if(!u().isNumber(n)){const i=_.user.match(/([^:]+)(:(.+))?/),s=i[1],c={subuser:i[2]?i[3]:"",generate_key:_.generate_key?"true":"false"};"false"===c.generate_key&&(u().isNil(_.access_key)||(c.access_key=_.access_key),u().isNil(_.secret_key)||(c.secret_key=_.secret_key)),this.submitObservables.push(this.rgwUserService.addS3Key(s,c)),this.s3Keys.push({user:_.user,access_key:_.generate_key?"Apply your changes first...":_.access_key,secret_key:_.generate_key?"Apply your changes first...":_.secret_key})}this.userForm.markAsDirty()}deleteS3Key(_){const n=this.s3Keys[_];this.submitObservables.push(this.rgwUserService.deleteS3Key(this.getUID(),n.access_key)),this.s3Keys.splice(_,1),this.userForm.markAsDirty()}showSubuserModal(_){const n=this.getUID(),i=this.modalService.show(mn);if(u().isNumber(_)){const s=this.subusers[_];i.componentInstance.setEditing(),i.componentInstance.setValues(n,s.id,s.permissions)}else i.componentInstance.setEditing(!1),i.componentInstance.setValues(n),i.componentInstance.setSubusers(this.subusers);i.componentInstance.submitAction.subscribe(s=>{this.setSubuser(s,_)})}showS3KeyModal(_){const n=this.modalService.show(Qe);if(u().isNumber(_)){const 
i=this.s3Keys[_];n.componentInstance.setViewing(),n.componentInstance.setValues(i.user,i.access_key,i.secret_key)}else{const i=this._getS3KeyUserCandidates();n.componentInstance.setViewing(!1),n.componentInstance.setUserCandidates(i),n.componentInstance.submitAction.subscribe(s=>{this.setS3Key(s)})}}showSwiftKeyModal(_){const n=this.modalService.show(Ye),i=this.swiftKeys[_];n.componentInstance.setValues(i.user,i.secret_key)}showCapabilityModal(_){const n=this.modalService.show(en);if(u().isNumber(_)){const i=this.capabilities[_];n.componentInstance.setEditing(),n.componentInstance.setValues(i.type,i.perm)}else n.componentInstance.setEditing(!1),n.componentInstance.setCapabilities(this.capabilities);n.componentInstance.submitAction.subscribe(i=>{this.setCapability(i,_)})}_isGeneralDirty(){return["display_name","email","max_buckets_mode","max_buckets","suspended"].some(_=>this.userForm.get(_).dirty)}_isUserQuotaDirty(){return["user_quota_enabled","user_quota_max_size_unlimited","user_quota_max_size","user_quota_max_objects_unlimited","user_quota_max_objects"].some(_=>this.userForm.get(_).dirty)}_isBucketQuotaDirty(){return["bucket_quota_enabled","bucket_quota_max_size_unlimited","bucket_quota_max_size","bucket_quota_max_objects_unlimited","bucket_quota_max_objects"].some(_=>this.userForm.get(_).dirty)}_getCreateArgs(){const _={uid:this.getUID(),display_name:this.userForm.getValue("display_name"),suspended:this.userForm.getValue("suspended"),email:"",max_buckets:this.userForm.getValue("max_buckets"),generate_key:this.userForm.getValue("generate_key"),access_key:"",secret_key:""},n=this.userForm.getValue("email");u().isString(n)&&n.length>0&&u().merge(_,{email:n}),this.userForm.getValue("generate_key")||u().merge(_,{generate_key:!1,access_key:this.userForm.getValue("access_key"),secret_key:this.userForm.getValue("secret_key")});const s=parseInt(this.userForm.getValue("max_buckets_mode"),10);return 
u().includes([-1,0],s)&&u().merge(_,{max_buckets:s}),_}_getUpdateArgs(){const _={},n=["display_name","email","max_buckets","suspended"];for(const s of n)_[s]=this.userForm.getValue(s);const i=parseInt(this.userForm.getValue("max_buckets_mode"),10);return u().includes([-1,0],i)&&(_.max_buckets=i),_}_getUserQuotaArgs(){const _={quota_type:"user",enabled:this.userForm.getValue("user_quota_enabled"),max_size_kb:-1,max_objects:-1};if(!this.userForm.getValue("user_quota_max_size_unlimited")){const n=(new Ge.H).toBytes(this.userForm.getValue("user_quota_max_size"));_.max_size_kb=(n/1024).toFixed(0)}return this.userForm.getValue("user_quota_max_objects_unlimited")||(_.max_objects=this.userForm.getValue("user_quota_max_objects")),_}_getBucketQuotaArgs(){const _={quota_type:"bucket",enabled:this.userForm.getValue("bucket_quota_enabled"),max_size_kb:-1,max_objects:-1};if(!this.userForm.getValue("bucket_quota_max_size_unlimited")){const n=(new Ge.H).toBytes(this.userForm.getValue("bucket_quota_max_size"));_.max_size_kb=(n/1024).toFixed(0)}return this.userForm.getValue("bucket_quota_max_objects_unlimited")||(_.max_objects=this.userForm.getValue("bucket_quota_max_objects")),_}_getS3KeyUserCandidates(){let _=[];const n=this.getUID();return u().isString(n)&&!u().isEmpty(n)&&_.push(n),this.subusers.forEach(i=>{_.push(i.id)}),this.s3Keys.forEach(i=>{_.push(i.user)}),_=u().uniq(_),_}onMaxBucketsModeChange(_){"1"===_&&(this.userForm.get("max_buckets").valid||this.userForm.patchValue({max_buckets:1e3}))}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(J.O),e.Y36(w.gz),e.Y36(w.F0),e.Y36(O),e.Y36(Te.Z),e.Y36(ve.g),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let o,_,n,i,s,c,d,E,g,C,b,P,G,N,p,U,W,Z,$,h,I,v,F,T,x,y,S,_e,te,ne,oe,ie,se,ae,re,le,ce,de,ue,Re,R,__,t_,n_,o_,i_,s_,a_,r_,l_,c_,d_,u_,R_,g_,E_,T_,S_,f_,C_,p_,M_,m_,A_,b_,P_,G_;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="User 
ID",n="Show Tenant",i="Full name",s="Email address",c="Max. buckets",d="Disabled",E="Unlimited",g="Custom",C="Suspended",b="User quota",P="Enabled",G="Bucket quota",N="Enabled",p="This field is required.",U="The value is not valid.",W="The chosen user ID is already in use.",Z="Tenant",$="The value is not valid.",h="The chosen user ID exists in this tenant.",I="This field is required.",v="This is not a valid email address.",F="The chosen email address is already in use.",T="This field is required.",x="The entered value must be >= 1.",y="S3 key",S="Auto-generate key",_e="Access key",te="This field is required.",ne="Secret key",oe="This field is required.",ie="Subusers",se="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",ae="There are no subusers.",re="Edit",le="Delete",ce="Keys",de="S3",ue="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",Re="Swift",R="There are no keys.",__="Show",t_="Delete",n_="There are no keys.",o_="Show",i_="Capabilities",s_="All capabilities are already added.",a_="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",r_="There are no capabilities.",l_="Edit",c_="Delete",d_="Unlimited size",u_="Max. size",R_="This field is required.",g_="The value is not valid.",E_="Unlimited objects",T_="Max. objects",S_="This field is required.",f_="The entered value must be >= 0.",C_="Unlimited size",p_="Max. size",M_="This field is required.",m_="The value is not valid.",A_="Unlimited objects",b_="Max. 
objects",P_="This field is required.",G_="The entered value must be >= 0.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"card"],[1,"card-header"],o,[1,"card-body"],[1,"form-group","row"],["for","user_id",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","user_id","type","text","formControlName","user_id",1,"form-control",3,"readonly"],["class","invalid-feedback",4,"ngIf"],[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","show_tenant","type","checkbox","formControlName","show_tenant",1,"custom-control-input",3,"readonly","click"],["for","show_tenant",1,"custom-control-label"],n,["class","form-group row",4,"ngIf"],["for","display_name",1,"cd-col-form-label",3,"ngClass"],i,["id","display_name","type","text","formControlName","display_name",1,"form-control"],["for","email",1,"cd-col-form-label"],s,["id","email","type","text","formControlName","email",1,"form-control"],["for","max_buckets_mode",1,"cd-col-form-label"],c,["formControlName","max_buckets_mode","name","max_buckets_mode","id","max_buckets_mode",1,"form-control",3,"change"],["value","-1"],d,["value","0"],E,["value","1"],g,["id","suspended","type","checkbox","formControlName","suspended",1,"custom-control-input"],["for","suspended",1,"custom-control-label"],C,[4,"ngIf"],b,["id","user_quota_enabled","type","checkbox","formControlName","user_quota_enabled",1,"custom-control-input"],["for","user_quota_enabled",1,"custom-control-label"],P,G,["id","bucket_quota_enabled","type","checkbox","formControlName","bucket_quota_enabled",1,"custom-control-input"],["for","bucket_quota_enabled",1,"custom-control-label"],N,[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],p,U,W,["for","tenant",1,"cd-col-form-label"],Z,["id","tenant","type","text","formControlName","tenant","autofocus","",1,"form-control",3,"readonly"],$,h,I,v,F,[1,"cd-col-form-label"],["id","max_
buckets","type","number","formControlName","max_buckets","min","1",1,"form-control"],T,x,y,["id","generate_key","type","checkbox","formControlName","generate_key",1,"custom-control-input"],["for","generate_key",1,"custom-control-label"],S,["for","access_key",1,"cd-col-form-label","required"],_e,[1,"input-group"],["id","access_key","type","password","formControlName","access_key",1,"form-control"],[1,"input-group-append"],["type","button","cdPasswordButton","access_key",1,"btn","btn-light"],["source","access_key"],te,["for","secret_key",1,"cd-col-form-label","required"],ne,["id","secret_key","type","password","formControlName","secret_key",1,"form-control"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],oe,ie,[1,"row"],["class","no-border",4,"ngIf"],[4,"ngFor","ngForOf"],[1,"col-12"],["type","button",1,"btn","btn-light","float-right","tc_addSubuserButton",3,"click"],[3,"ngClass"],se,[1,"help-block"],[1,"no-border"],[1,"form-text","text-muted"],ae,[1,"input-group-prepend"],[1,"input-group-text"],["type","text","readonly","",1,"cd-form-control",3,"value"],[1,"input-group-prepend","border-left-0","border-right-0"],["type","button","ngbTooltip",re,1,"btn","btn-light","tc_showSubuserButton",3,"click"],["type","button","ngbTooltip",le,1,"btn","btn-light","tc_deleteSubuserButton",3,"click"],ce,de,["type","button",1,"btn","btn-light","float-right","tc_addS3KeyButton",3,"click"],ue,Re,R,["type","button","ngbTooltip",__,1,"btn","btn-light","tc_showS3KeyButton",3,"click"],["type","button","ngbTooltip",t_,1,"btn","btn-light","tc_deleteS3KeyButton",3,"click"],n_,["type","button","ngbTooltip",o_,1,"btn","btn-light","tc_showSwiftKeyButton",3,"click"],i_,["type","button","ngbTooltip",s_,"triggers","pointerenter:pointerleave",1,"btn","btn-light","float-right","tc_addCapButton",3,"disabled","disableTooltip","click"],a_,r_,["type","button","ngbTooltip",l_,1,"btn","btn-light","tc_editCapButton",3,"click"],["type","button","ngbTooltip",c_,1,
"btn","btn-light","tc_deleteCapButton",3,"click"],["id","user_quota_max_size_unlimited","type","checkbox","formControlName","user_quota_max_size_unlimited",1,"custom-control-input"],["for","user_quota_max_size_unlimited",1,"custom-control-label"],d_,["for","user_quota_max_size",1,"cd-col-form-label","required"],u_,["id","user_quota_max_size","type","text","formControlName","user_quota_max_size","cdDimlessBinary","",1,"form-control"],R_,g_,["id","user_quota_max_objects_unlimited","type","checkbox","formControlName","user_quota_max_objects_unlimited",1,"custom-control-input"],["for","user_quota_max_objects_unlimited",1,"custom-control-label"],E_,["for","user_quota_max_objects",1,"cd-col-form-label","required"],T_,["id","user_quota_max_objects","type","number","formControlName","user_quota_max_objects","min","0",1,"form-control"],S_,f_,["id","bucket_quota_max_size_unlimited","type","checkbox","formControlName","bucket_quota_max_size_unlimited",1,"custom-control-input"],["for","bucket_quota_max_size_unlimited",1,"custom-control-label"],C_,["for","bucket_quota_max_size",1,"cd-col-form-label","required"],p_,["id","bucket_quota_max_size","type","text","formControlName","bucket_quota_max_size","cdDimlessBinary","",1,"form-control"],M_,m_,["id","bucket_quota_max_objects_unlimited","type","checkbox","formControlName","bucket_quota_max_objects_unlimited",1,"custom-control-input"],["for","bucket_quota_max_objects_unlimited",1,"custom-control-label"],A_,["for","bucket_quota_max_objects",1,"cd-col-form-label","required"],b_,["id","bucket_quota_max_objects","type","number","formControlName","bucket_quota_max_objects","min","0",1,"form-control"],P_,G_]},template:function(_,n){1&_&&e.YNc(0,To,89,41,"div",0),2&_&&e.Q6J("cdFormLoading",n.loading)},directives:[Fe.y,a._Y,a.JL,V.V,a.sg,H.P,f.mk,q.o,a.Fj,X.b,a.JJ,a.u,f.O5,a.Wl,a.EJ,a.YN,a.Kr,ee.p,j.U,a.wV,a.qQ,Ce.C,pe.s,f.sg,M._L,bn.Q],pipes:[f.rS,K.m,Ue.i],styles:[""]}),t})();var je=r(99466),So=r(78877),fo=r(86969);const 
Co=["accessKeyTpl"],po=["secretKeyTpl"];function Mo(t,o){if(1&t&&(e.TgZ(0,"tr"),e.TgZ(1,"td",15),e.SDv(2,20),e.qZA(),e.TgZ(3,"td"),e._uU(4),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(4),e.Oqu(_.user.email)}}function mo(t,o){if(1&t&&(e.TgZ(0,"div"),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.xp6(1),e.AsE(" ",_.id," (",_.permissions,") ")}}function Ao(t,o){if(1&t&&(e.TgZ(0,"tr"),e.TgZ(1,"td",15),e.SDv(2,21),e.qZA(),e.TgZ(3,"td"),e.YNc(4,mo,2,2,"div",22),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(4),e.Q6J("ngForOf",_.user.subusers)}}function bo(t,o){if(1&t&&(e.TgZ(0,"div"),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.xp6(1),e.AsE(" ",_.type," (",_.perm,") ")}}function Po(t,o){if(1&t&&(e.TgZ(0,"tr"),e.TgZ(1,"td",15),e.SDv(2,23),e.qZA(),e.TgZ(3,"td"),e.YNc(4,bo,2,2,"div",22),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(4),e.Q6J("ngForOf",_.user.caps)}}function Go(t,o){if(1&t&&(e.TgZ(0,"tr"),e.TgZ(1,"td",15),e.SDv(2,24),e.qZA(),e.TgZ(3,"td"),e._uU(4),e.ALo(5,"join"),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(4),e.Oqu(e.lcZ(5,1,_.user.mfa_ids))}}function No(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function Oo(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,29),e.qZA())}function Uo(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&t){const _=e.oxw(5);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.user.user_quota.max_size)," ")}}function Wo(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function Zo(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,30),e.qZA())}function $o(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(5);e.xp6(1),e.hij(" ",_.user.user_quota.max_objects," ")}}function 
ho(t,o){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"legend"),e.SDv(2,25),e.qZA(),e.TgZ(3,"table",9),e.TgZ(4,"tbody"),e.TgZ(5,"tr"),e.TgZ(6,"td",10),e.SDv(7,26),e.qZA(),e.TgZ(8,"td",12),e._uU(9),e.ALo(10,"booleanText"),e.qZA(),e.qZA(),e.TgZ(11,"tr"),e.TgZ(12,"td",15),e.SDv(13,27),e.qZA(),e.YNc(14,No,2,0,"td",0),e.YNc(15,Oo,2,0,"td",0),e.YNc(16,Uo,3,3,"td",0),e.qZA(),e.TgZ(17,"tr"),e.TgZ(18,"td",15),e.SDv(19,28),e.qZA(),e.YNc(20,Wo,2,0,"td",0),e.YNc(21,Zo,2,0,"td",0),e.YNc(22,$o,2,1,"td",0),e.qZA(),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(9),e.Oqu(e.lcZ(10,7,_.user.user_quota.enabled)),e.xp6(5),e.Q6J("ngIf",!_.user.user_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",!_.user.user_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_objects>-1)}}function Io(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function vo(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,35),e.qZA())}function Fo(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&t){const _=e.oxw(5);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.user.bucket_quota.max_size)," ")}}function Lo(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function Do(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,36),e.qZA())}function xo(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(5);e.xp6(1),e.hij(" ",_.user.bucket_quota.max_objects," ")}}function 
yo(t,o){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"legend"),e.SDv(2,31),e.qZA(),e.TgZ(3,"table",9),e.TgZ(4,"tbody"),e.TgZ(5,"tr"),e.TgZ(6,"td",10),e.SDv(7,32),e.qZA(),e.TgZ(8,"td",12),e._uU(9),e.ALo(10,"booleanText"),e.qZA(),e.qZA(),e.TgZ(11,"tr"),e.TgZ(12,"td",15),e.SDv(13,33),e.qZA(),e.YNc(14,Io,2,0,"td",0),e.YNc(15,vo,2,0,"td",0),e.YNc(16,Fo,3,3,"td",0),e.qZA(),e.TgZ(17,"tr"),e.TgZ(18,"td",15),e.SDv(19,34),e.qZA(),e.YNc(20,Lo,2,0,"td",0),e.YNc(21,Do,2,0,"td",0),e.YNc(22,xo,2,1,"td",0),e.qZA(),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(9),e.Oqu(e.lcZ(10,7,_.user.bucket_quota.enabled)),e.xp6(5),e.Q6J("ngIf",!_.user.bucket_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",!_.user.bucket_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_objects>-1)}}function 
qo(t,o){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"table",9),e.TgZ(2,"tbody"),e.TgZ(3,"tr"),e.TgZ(4,"td",10),e.SDv(5,11),e.qZA(),e.TgZ(6,"td",12),e._uU(7),e.qZA(),e.qZA(),e.TgZ(8,"tr"),e.TgZ(9,"td",10),e.SDv(10,13),e.qZA(),e.TgZ(11,"td",12),e._uU(12),e.qZA(),e.qZA(),e.TgZ(13,"tr"),e.TgZ(14,"td",10),e.SDv(15,14),e.qZA(),e.TgZ(16,"td",12),e._uU(17),e.qZA(),e.qZA(),e.TgZ(18,"tr"),e.TgZ(19,"td",15),e.SDv(20,16),e.qZA(),e.TgZ(21,"td"),e._uU(22),e.qZA(),e.qZA(),e.YNc(23,Mo,5,1,"tr",0),e.TgZ(24,"tr"),e.TgZ(25,"td",15),e.SDv(26,17),e.qZA(),e.TgZ(27,"td"),e._uU(28),e.ALo(29,"booleanText"),e.qZA(),e.qZA(),e.TgZ(30,"tr"),e.TgZ(31,"td",15),e.SDv(32,18),e.qZA(),e.TgZ(33,"td"),e._uU(34),e.ALo(35,"booleanText"),e.qZA(),e.qZA(),e.TgZ(36,"tr"),e.TgZ(37,"td",15),e.SDv(38,19),e.qZA(),e.TgZ(39,"td"),e._uU(40),e.ALo(41,"map"),e.qZA(),e.qZA(),e.YNc(42,Ao,5,1,"tr",0),e.YNc(43,Po,5,1,"tr",0),e.YNc(44,Go,6,3,"tr",0),e.qZA(),e.qZA(),e.YNc(45,ho,23,9,"div",0),e.YNc(46,yo,23,9,"div",0),e.qZA()),2&t){const _=e.oxw(3);e.xp6(7),e.Oqu(_.user.tenant),e.xp6(5),e.Oqu(_.user.user_id),e.xp6(5),e.Oqu(_.user.uid),e.xp6(5),e.Oqu(_.user.display_name),e.xp6(1),e.Q6J("ngIf",null==_.user.email?null:_.user.email.length),e.xp6(5),e.Oqu(e.lcZ(29,13,_.user.suspended)),e.xp6(6),e.Oqu(e.lcZ(35,15,"true"===_.user.system)),e.xp6(6),e.Oqu(e.xi3(41,17,_.user.max_buckets,_.maxBucketsMap)),e.xp6(2),e.Q6J("ngIf",_.user.subusers&&_.user.subusers.length),e.xp6(1),e.Q6J("ngIf",_.user.caps&&_.user.caps.length),e.xp6(1),e.Q6J("ngIf",null==_.user.mfa_ids?null:_.user.mfa_ids.length),e.xp6(1),e.Q6J("ngIf",_.user.user_quota),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota)}}function wo(t,o){if(1&t&&e.YNc(0,qo,47,20,"div",0),2&t){const _=e.oxw(2);e.Q6J("ngIf",_.user)}}const ko=function(t){return[t]};function Bo(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table",39),e.NdJ("updateSelection",function(i){return e.CHM(_),e.oxw(3).updateKeysSelection(i)}),e.TgZ(1,"div",40),e.TgZ(2,"div",41),e.TgZ(3,"button",42),e.NdJ("click",function(){return 
e.CHM(_),e.oxw(3).showKeyModal()}),e._UZ(4,"i",43),e.ynx(5),e.SDv(6,44),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&t){const _=e.oxw(3);e.Q6J("data",_.keys)("columns",_.keysColumns),e.xp6(3),e.Q6J("disabled",!_.keysSelection.hasSingleSelection),e.xp6(1),e.Q6J("ngClass",e.VKq(4,ko,_.icons.show))}}function Ho(t,o){1&t&&(e.TgZ(0,"li",37),e.TgZ(1,"a",4),e.SDv(2,38),e.qZA(),e.YNc(3,Bo,7,6,"ng-template",6),e.qZA())}function Xo(t,o){if(1&t&&(e.ynx(0),e.TgZ(1,"ul",1,2),e.TgZ(3,"li",3),e.TgZ(4,"a",4),e.SDv(5,5),e.qZA(),e.YNc(6,wo,1,1,"ng-template",6),e.qZA(),e.YNc(7,Ho,4,0,"li",7),e.qZA(),e._UZ(8,"div",8),e.BQk()),2&t){const _=e.MAs(2),n=e.oxw();e.xp6(7),e.Q6J("ngIf",n.keys.length),e.xp6(1),e.Q6J("ngbNavOutlet",_)}}let Ko=(()=>{class t{constructor(_,n){this.rgwUserService=_,this.modalService=n,this.keys=[],this.keysColumns=[],this.keysSelection=new Pe.r,this.icons=D.P}ngOnInit(){this.keysColumns=[{name:"Username",prop:"username",flexGrow:1},{name:"Type",prop:"type",flexGrow:1}],this.maxBucketsMap={"-1":"Disabled",0:"Unlimited"}}ngOnChanges(){this.selection&&(this.user=this.selection,this.user.subusers=u().sortBy(this.user.subusers,"id"),this.user.caps=u().sortBy(this.user.caps,"type"),this.rgwUserService.getQuota(this.user.uid).subscribe(_=>{u().extend(this.user,_)}),this.keys=[],this.user.keys&&this.user.keys.forEach(_=>{this.keys.push({id:this.keys.length+1,type:"S3",username:_.user,ref:_})}),this.user.swift_keys&&this.user.swift_keys.forEach(_=>{this.keys.push({id:this.keys.length+1,type:"Swift",username:_.user,ref:_})}),this.keys=u().sortBy(this.keys,"user"))}updateKeysSelection(_){this.keysSelection=_}showKeyModal(){const _=this.keysSelection.first(),n=this.modalService.show("S3"===_.type?Qe:Ye);switch(_.type){case"S3":n.componentInstance.setViewing(),n.componentInstance.setValues(_.ref.user,_.ref.access_key,_.ref.secret_key);break;case"Swift":n.componentInstance.setValues(_.ref.user,_.ref.secret_key)}}}return t.\u0275fac=function(_){return 
new(_||t)(e.Y36(O),e.Y36(Te.Z))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-details"]],viewQuery:function(_,n){if(1&_&&(e.Gf(Co,5),e.Gf(po,5)),2&_){let i;e.iGM(i=e.CRH())&&(n.accessKeyTpl=i.first),e.iGM(i=e.CRH())&&(n.secretKeyTpl=i.first)}},inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let o,_,n,i,s,c,d,E,g,C,b,P,G,N,p,U,W,Z,$,h,I,v,F,T,x,y;return o="Details",_="Tenant",n="User ID",i="Username",s="Full name",c="Suspended",d="System",E="Maximum buckets",g="Email address",C="Subusers",b="Capabilities",P="MFAs(Id)",G="User quota",N="Enabled",p="Maximum size",U="Maximum objects",W="Unlimited",Z="Unlimited",$="Bucket quota",h="Enabled",I="Maximum size",v="Maximum objects",F="Unlimited",T="Unlimited",x="Keys",y="Show",[[4,"ngIf"],["ngbNav","","cdStatefulTab","rgw-user-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],o,["ngbNavContent",""],["ngbNavItem","keys",4,"ngIf"],[3,"ngbNavOutlet"],[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],_,[1,"w-75"],n,i,[1,"bold"],s,c,d,E,g,C,[4,"ngFor","ngForOf"],b,P,G,N,p,U,W,Z,$,h,I,v,F,T,["ngbNavItem","keys"],x,["columnMode","flex","selectionType","multi","forceIdentifier","true",3,"data","columns","updateSelection"],[1,"table-actions"],["dropdown","",1,"btn-group"],["type","button",1,"btn","btn-accent",3,"disabled","click"],[3,"ngClass"],y]},template:function(_,n){1&_&&e.YNc(0,Xo,9,2,"ng-container",0),2&_&&e.Q6J("ngIf",n.selection)},directives:[f.O5,M.Pz,Xe.m,M.nv,M.Vx,M.uN,M.tO,f.sg,z.a,q.o,f.mk],pipes:[Be.T,So.b,fo.A,qe.$],styles:[""]}),t})();const zo=["userSizeTpl"],Qo=["userObjectTpl"];function Yo(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",8),2&t){const _=e.oxw().row;e.Q6J("total",_.user_quota.max_size)("used",_.stats.size_actual)}}function Jo(t,o){1&t&&e.SDv(0,9)}function Vo(t,o){if(1&t&&(e.YNc(0,Yo,1,2,"cd-usage-bar",6),e.YNc(1,Jo,1,0,"ng-template",null,7,e.W1O)),2&t){const 
_=o.row,n=e.MAs(2);e.Q6J("ngIf",_.user_quota.max_size>0&&_.user_quota.enabled)("ngIfElse",n)}}function jo(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",12),2&t){const _=e.oxw().row;e.Q6J("total",_.user_quota.max_objects)("used",_.stats.num_objects)("isBinary",!1)}}function ei(t,o){1&t&&e.SDv(0,13)}function _i(t,o){if(1&t&&(e.YNc(0,jo,1,3,"cd-usage-bar",10),e.YNc(1,ei,1,0,"ng-template",null,11,e.W1O)),2&t){const _=o.row,n=e.MAs(2);e.Q6J("ngIf",_.user_quota.max_objects>0&&_.user_quota.enabled)("ngIfElse",n)}}let ni=(()=>{class t extends be.o{constructor(_,n,i,s,c,d){super(d),this.authStorageService=_,this.rgwUserService=n,this.modalService=i,this.urlBuilder=s,this.actionLabels=c,this.ngZone=d,this.columns=[],this.users=[],this.selection=new Pe.r}ngOnInit(){this.permission=this.authStorageService.getPermissions().rgw,this.columns=[{name:"Username",prop:"uid",flexGrow:1},{name:"Tenant",prop:"tenant",flexGrow:1},{name:"Full name",prop:"display_name",flexGrow:1},{name:"Email address",prop:"email",flexGrow:1},{name:"Suspended",prop:"suspended",flexGrow:1,cellClass:"text-center",cellTransformation:je.e.checkIcon},{name:"Max. 
buckets",prop:"max_buckets",flexGrow:1,cellTransformation:je.e.map,customTemplateConfig:{"-1":"Disabled",0:"Unlimited"}},{name:"Capacity Limit %",prop:"size_usage",cellTemplate:this.userSizeTpl,flexGrow:.8},{name:"Object Limit %",prop:"object_usage",cellTemplate:this.userObjectTpl,flexGrow:.8}];const _=()=>this.selection.first()&&`${encodeURIComponent(this.selection.first().uid)}`;this.tableActions=[{permission:"create",icon:D.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE,canBePrimary:c=>!c.hasSelection},{permission:"update",icon:D.P.edit,routerLink:()=>this.urlBuilder.getEdit(_()),name:this.actionLabels.EDIT},{permission:"delete",icon:D.P.destroy,click:()=>this.deleteAction(),disable:()=>!this.selection.hasSelection,name:this.actionLabels.DELETE,canBePrimary:c=>c.hasMultiSelection}],this.setTableRefreshTimeout()}getUserList(_){this.setTableRefreshTimeout(),this.rgwUserService.list().subscribe(n=>{this.users=n},()=>{_.error()})}updateSelection(_){this.selection=_}deleteAction(){this.modalService.show(ye.M,{itemDescription:this.selection.hasSingleSelection?"user":"users",itemNames:this.selection.selected.map(_=>_.uid),submitActionObservable:()=>new xe.y(_=>{(0,Y.D)(this.selection.selected.map(n=>this.rgwUserService.delete(n.uid))).subscribe({error:n=>{_.error(n),this.table.refreshBtn()},complete:()=>{_.complete(),this.table.refreshBtn()}})})})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Ee.j),e.Y36(O),e.Y36(Te.Z),e.Y36(Q.F),e.Y36(A.p4),e.Y36(e.R0b))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-list"]],viewQuery:function(_,n){if(1&_&&(e.Gf(z.a,7),e.Gf(zo,7),e.Gf(Qo,7)),2&_){let i;e.iGM(i=e.CRH())&&(n.table=i.first),e.iGM(i=e.CRH())&&(n.userSizeTpl=i.first),e.iGM(i=e.CRH())&&(n.userObjectTpl=i.first)}},features:[e._Bn([{provide:Q.F,useValue:new Q.F("rgw/user")}]),e.qOj],decls:8,vars:9,consts:function(){let o,_;return o="No Limit",_="No 
Limit",[["columnMode","flex","selectionType","multiClick","identifier","uid",3,"autoReload","data","columns","hasDetails","status","setExpandedRow","updateSelection","fetchData"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["userSizeTpl",""],["userObjectTpl",""],[3,"total","used",4,"ngIf","ngIfElse"],["noSizeQuota",""],[3,"total","used"],o,[3,"total","used","isBinary",4,"ngIf","ngIfElse"],["noObjectQuota",""],[3,"total","used","isBinary"],_]},template:function(_,n){1&_&&(e.TgZ(0,"cd-table",0,1),e.NdJ("setExpandedRow",function(s){return n.setExpandedRow(s)})("updateSelection",function(s){return n.updateSelection(s)})("fetchData",function(s){return n.getUserList(s)}),e._UZ(2,"cd-table-actions",2),e._UZ(3,"cd-rgw-user-details",3),e.qZA(),e.YNc(4,Vo,3,2,"ng-template",null,4,e.W1O),e.YNc(6,_i,3,2,"ng-template",null,5,e.W1O)),2&_&&(e.Q6J("autoReload",!1)("data",n.users)("columns",n.columns)("hasDetails",!0)("status",n.tableStatus),e.xp6(2),e.Q6J("permission",n.permission)("selection",n.selection)("tableActions",n.tableActions),e.xp6(1),e.Q6J("selection",n.expandedRow))},directives:[z.a,ke.K,Ko,f.O5,He.O],styles:[""]}),t})(),e_=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[f.ez,N_.m,a.u5,a.UX,O_.B,M.Oz,w.Bz,M.HK,Ue.b]]}),t})();const oi=[{path:""},{path:"daemon",component:kt,data:{breadcrumbs:"Daemons"}},{path:"user",data:{breadcrumbs:"Users"},children:[{path:"",component:ni},{path:A.MQ.CREATE,component:Ve,data:{breadcrumbs:A.Qn.CREATE}},{path:`${A.MQ.EDIT}/:uid`,component:Ve,data:{breadcrumbs:A.Qn.EDIT}}]},{path:"bucket",data:{breadcrumbs:"Buckets"},children:[{path:"",component:Ot},{path:A.MQ.CREATE,component:De,data:{breadcrumbs:A.Qn.CREATE}},{path:`${A.MQ.EDIT}/:bid`,component:De,data:{breadcrumbs:A.Qn.EDIT}}]}];let ii=(()=>{class t{}return t.\u0275fac=function(_){return 
new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[e_,w.Bz.forChild(oi)]]}),t})()}}]); \ No newline at end of file diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/330.4192d10f1b1db19145cc.js b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/330.4192d10f1b1db19145cc.js deleted file mode 100644 index e1d1907d2..000000000 --- a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/330.4192d10f1b1db19145cc.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[330],{91330:(it,Oe,p)=>{p.r(Oe),p.d(Oe,{BlockModule:()=>Mt,RoutedBlockModule:()=>$a});var l=p(12057),r=p(24751),m=p(6283),ne=p(19723),I=p(38549),Ie=p(37496),D=p(79512),j=p(4222),_e=p(44466),be=p(23815),C=p.n(be),W=p(35758),b=p(64762),ie=p(58497),Y=p(93523),e=p(74788);let k=class{constructor(_){this.http=_}listTargets(){return this.http.get("api/iscsi/target")}getTarget(_){return this.http.get(`api/iscsi/target/${_}`)}updateTarget(_,t){return this.http.put(`api/iscsi/target/${_}`,t,{observe:"response"})}status(){return this.http.get("ui-api/iscsi/status")}settings(){return this.http.get("ui-api/iscsi/settings")}version(){return this.http.get("ui-api/iscsi/version")}portals(){return this.http.get("ui-api/iscsi/portals")}createTarget(_){return this.http.post("api/iscsi/target",_,{observe:"response"})}deleteTarget(_){return this.http.delete(`api/iscsi/target/${_}`,{observe:"response"})}getDiscovery(){return this.http.get("api/iscsi/discoveryauth")}updateDiscovery(_){return this.http.put("api/iscsi/discoveryauth",_)}overview(){return this.http.get("ui-api/iscsi/overview")}};k.\u0275fac=function(_){return new(_||k)(e.LFG(ie.eN))},k.\u0275prov=e.Yz7({token:k,factory:k.\u0275fac,providedIn:"root"}),k=(0,b.gn)([Y.o,(0,b.w6)("design:paramtypes",[ie.eN])],k);var Ne=p(88002),Z=p(19358),Ae=p(34089);let 
x=class{constructor(_,t){this.http=_,this.rbdConfigurationService=t}isRBDPool(_){return-1!==C().indexOf(_.application_metadata,"rbd")&&!_.pool_name.includes("/")}create(_){return this.http.post("api/block/image",_,{observe:"response"})}delete(_){return this.http.delete(`api/block/image/${_.toStringEncoded()}`,{observe:"response"})}update(_,t){return this.http.put(`api/block/image/${_.toStringEncoded()}`,t,{observe:"response"})}get(_){return this.http.get(`api/block/image/${_.toStringEncoded()}`)}list(){return this.http.get("api/block/image").pipe((0,Ne.U)(_=>_.map(t=>(t.value.map(o=>(o.configuration&&o.configuration.map(i=>Object.assign(i,this.rbdConfigurationService.getOptionByName(i.name))),o)),t))))}copy(_,t){return this.http.post(`api/block/image/${_.toStringEncoded()}/copy`,t,{observe:"response"})}flatten(_){return this.http.post(`api/block/image/${_.toStringEncoded()}/flatten`,null,{observe:"response"})}defaultFeatures(){return this.http.get("api/block/image/default_features")}cloneFormatVersion(){return this.http.get("api/block/image/clone_format_version")}createSnapshot(_,t){const o={snapshot_name:t};return this.http.post(`api/block/image/${_.toStringEncoded()}/snap`,o,{observe:"response"})}renameSnapshot(_,t,o){const i={new_snap_name:o};return this.http.put(`api/block/image/${_.toStringEncoded()}/snap/${t}`,i,{observe:"response"})}protectSnapshot(_,t,o){const i={is_protected:o};return this.http.put(`api/block/image/${_.toStringEncoded()}/snap/${t}`,i,{observe:"response"})}rollbackSnapshot(_,t){return this.http.post(`api/block/image/${_.toStringEncoded()}/snap/${t}/rollback`,null,{observe:"response"})}cloneSnapshot(_,t,o){return this.http.post(`api/block/image/${_.toStringEncoded()}/snap/${t}/clone`,o,{observe:"response"})}deleteSnapshot(_,t){return this.http.delete(`api/block/image/${_.toStringEncoded()}/snap/${t}`,{observe:"response"})}listTrash(){return this.http.get("api/block/image/trash/")}createNamespace(_,t){return 
this.http.post(`api/block/pool/${_}/namespace`,{namespace:t},{observe:"response"})}listNamespaces(_){return this.http.get(`api/block/pool/${_}/namespace/`)}deleteNamespace(_,t){return this.http.delete(`api/block/pool/${_}/namespace/${t}`,{observe:"response"})}moveTrash(_,t){return this.http.post(`api/block/image/${_.toStringEncoded()}/move_trash`,{delay:t},{observe:"response"})}purgeTrash(_){return this.http.post(`api/block/image/trash/purge/?pool_name=${_}`,null,{observe:"response"})}restoreTrash(_,t){return this.http.post(`api/block/image/trash/${_.toStringEncoded()}/restore`,{new_image_name:t},{observe:"response"})}removeTrash(_,t=!1){return this.http.delete(`api/block/image/trash/${_.toStringEncoded()}/?force=${t}`,{observe:"response"})}};x.\u0275fac=function(_){return new(_||x)(e.LFG(ie.eN),e.LFG(Ae.n))},x.\u0275prov=e.Yz7({token:x,factory:x.\u0275fac,providedIn:"root"}),(0,b.gn)([(0,b.fM)(1,Y.G),(0,b.w6)("design:type",Function),(0,b.w6)("design:paramtypes",[Z.N,String]),(0,b.w6)("design:returntype",void 0)],x.prototype,"createSnapshot",null),(0,b.gn)([(0,b.fM)(2,Y.G),(0,b.w6)("design:type",Function),(0,b.w6)("design:paramtypes",[Z.N,String,String]),(0,b.w6)("design:returntype",void 0)],x.prototype,"renameSnapshot",null),(0,b.gn)([(0,b.fM)(2,Y.G),(0,b.w6)("design:type",Function),(0,b.w6)("design:paramtypes",[Z.N,String,Boolean]),(0,b.w6)("design:returntype",void 0)],x.prototype,"protectSnapshot",null),(0,b.gn)([(0,b.fM)(1,Y.G),(0,b.w6)("design:type",Function),(0,b.w6)("design:paramtypes",[Z.N,String]),(0,b.w6)("design:returntype",void 0)],x.prototype,"restoreTrash",null),x=(0,b.gn)([Y.o,(0,b.w6)("design:paramtypes",[ie.eN,Ae.n])],x);var ae=p(7022),V=p(14745),T=p(65862),q=p(93614),M=p(95463),B=p(77205),F=p(76111),Q=p(32337),R=p(60312),v=p(41582),g=p(56310),f=p(87925),h=p(94276),O=p(30839);function H(n,_){if(1&n&&(e.TgZ(0,"option",6),e._uU(1),e.qZA()),2&n){const t=_.$implicit;e.Q6J("ngValue",t),e.xp6(1),e.Oqu(t)}}function 
De(n,_){if(1&n&&(e.TgZ(0,"select",5),e._UZ(1,"option",6),e.YNc(2,H,2,2,"option",7),e.qZA()),2&n){const t=e.oxw();e.s9C("id",t.setting),e.s9C("name",t.setting),e.Q6J("formControlName",t.setting),e.xp6(1),e.Q6J("ngValue",null),e.xp6(1),e.Q6J("ngForOf",t.limits.values)}}function Jt(n,_){if(1&n&&e._UZ(0,"input",10),2&n){const t=e.oxw(2);e.Q6J("formControlName",t.setting)}}function Yt(n,_){if(1&n&&e._UZ(0,"input",11),2&n){const t=e.oxw(2);e.Q6J("formControlName",t.setting)}}function Vt(n,_){if(1&n&&(e.ynx(0),e._UZ(1,"br"),e.TgZ(2,"div",12),e._UZ(3,"input",13),e.TgZ(4,"label",14),e._uU(5,"Yes"),e.qZA(),e.qZA(),e.TgZ(6,"div",12),e._UZ(7,"input",13),e.TgZ(8,"label",14),e._uU(9,"No"),e.qZA(),e.qZA(),e.BQk()),2&n){const t=e.oxw(2);e.xp6(3),e.Q6J("id",t.setting+"True")("value",!0)("formControlName",t.setting),e.xp6(1),e.Q6J("for",t.setting+"True"),e.xp6(3),e.Q6J("id",t.setting+"False")("value",!1)("formControlName",t.setting),e.xp6(1),e.Q6J("for",t.setting+"False")}}function Ut(n,_){if(1&n&&(e.TgZ(0,"span"),e.YNc(1,Jt,1,1,"input",8),e.YNc(2,Yt,1,1,"input",9),e.YNc(3,Vt,10,8,"ng-container",3),e.qZA()),2&n){const t=e.oxw();e.xp6(1),e.Q6J("ngIf","int"===t.limits.type),e.xp6(1),e.Q6J("ngIf","str"===t.limits.type),e.xp6(1),e.Q6J("ngIf","bool"===t.limits.type)}}function jt(n,_){if(1&n&&(e.TgZ(0,"span",15),e.ynx(1),e.SDv(2,16),e.BQk(),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.pQV(t.limits.min),e.QtT(2)}}function Wt(n,_){if(1&n&&(e.TgZ(0,"span",15),e.ynx(1),e.SDv(2,17),e.BQk(),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.pQV(t.limits.max),e.QtT(2)}}let _t=(()=>{class n{ngOnInit(){const t=[];"min"in this.limits&&t.push(r.kI.min(this.limits.min)),"max"in this.limits&&t.push(r.kI.max(this.limits.max)),this.settingsForm.get(this.setting).setValidators(t)}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-setting"]],inputs:{settingsForm:"settingsForm",formDir:"formDir",setting:"setting",limits:"limits"},decls:7,vars:7,consts:function(){let 
_,t;return _="Must be greater than or equal to " + "\ufffd0\ufffd" + ".",t="Must be less than or equal to " + "\ufffd0\ufffd" + ".",[[1,"form-group",3,"formGroup"],[1,"col-form-label",3,"for"],["class","form-control",3,"id","name","formControlName",4,"ngIf"],[4,"ngIf"],["class","invalid-feedback",4,"ngIf"],[1,"form-control",3,"id","name","formControlName"],[3,"ngValue"],[3,"ngValue",4,"ngFor","ngForOf"],["type","number","class","form-control",3,"formControlName",4,"ngIf"],["type","text","class","form-control",3,"formControlName",4,"ngIf"],["type","number",1,"form-control",3,"formControlName"],["type","text",1,"form-control",3,"formControlName"],[1,"custom-control","custom-radio","custom-control-inline"],["type","radio",1,"custom-control-input",3,"id","value","formControlName"],[1,"custom-control-label",3,"for"],[1,"invalid-feedback"],_,t]},template:function(t,o){1&t&&(e.TgZ(0,"div",0),e.TgZ(1,"label",1),e._uU(2),e.qZA(),e.YNc(3,De,3,5,"select",2),e.YNc(4,Ut,4,3,"span",3),e.YNc(5,jt,3,1,"span",4),e.YNc(6,Wt,3,1,"span",4),e.qZA()),2&t&&(e.Q6J("formGroup",o.settingsForm),e.xp6(1),e.s9C("for",o.setting),e.xp6(1),e.Oqu(o.setting),e.xp6(1),e.Q6J("ngIf","enum"===o.limits.type),e.xp6(1),e.Q6J("ngIf","enum"!==o.limits.type),e.xp6(1),e.Q6J("ngIf",o.settingsForm.showError(o.setting,o.formDir,"min")),e.xp6(1),e.Q6J("ngIf",o.settingsForm.showError(o.setting,o.formDir,"max")))},directives:[g.P,r.JL,r.sg,v.V,l.O5,f.o,r.EJ,h.b,r.JJ,r.u,r.YN,r.Kr,l.sg,r.wV,r.Fj,r._],styles:[""]}),n})();var He=p(88820);function eo(n,_){1&n&&(e.TgZ(0,"span",29),e.SDv(1,30),e.qZA())}function to(n,_){if(1&n&&(e.TgZ(0,"span"),e.TgZ(1,"legend",10),e.SDv(2,21),e.qZA(),e.TgZ(3,"div",12),e.TgZ(4,"div",13),e.TgZ(5,"label",22),e.SDv(6,23),e.qZA(),e._UZ(7,"input",24),e.YNc(8,eo,2,0,"span",25),e.qZA(),e.qZA(),e.TgZ(9,"div",12),e.TgZ(10,"div",13),e.TgZ(11,"label",26),e.SDv(12,27),e.qZA(),e._UZ(13,"input",28),e.qZA(),e.qZA(),e.qZA()),2&n){const 
t=e.oxw(),o=e.MAs(9);e.xp6(8),e.Q6J("ngIf",t.settingsForm.showError("lun",o,"required"))}}function oo(n,_){if(1&n&&(e.TgZ(0,"option",31),e._uU(1),e.ALo(2,"iscsiBackstore"),e.qZA()),2&n){const t=_.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(e.lcZ(2,2,t))}}function no(n,_){if(1&n&&(e.TgZ(0,"div",12),e.TgZ(1,"div",13),e._UZ(2,"cd-iscsi-setting",33),e.qZA(),e.qZA()),2&n){const t=_.$implicit,o=e.oxw(2).$implicit,i=e.oxw(),s=e.MAs(9);e.xp6(2),e.Q6J("settingsForm",i.settingsForm)("formDir",s)("setting",t.key)("limits",i.getDiskControlLimits(o,t.key))}}function io(n,_){if(1&n&&(e.ynx(0),e.YNc(1,no,3,4,"div",32),e.ALo(2,"keyvalue"),e.BQk()),2&n){const t=e.oxw().$implicit,o=e.oxw();e.xp6(1),e.Q6J("ngForOf",e.lcZ(2,1,o.disk_default_controls[t]))}}function _o(n,_){if(1&n&&(e.ynx(0),e.YNc(1,io,3,3,"ng-container",9),e.BQk()),2&n){const t=_.$implicit,o=e.oxw();e.xp6(1),e.Q6J("ngIf",o.settingsForm.value.backstore===t)}}let so=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.iscsiService=o,this.actionLabels=i}ngOnInit(){const t={backstore:new r.NI(this.imagesSettings[this.image].backstore),lun:new r.NI(this.imagesSettings[this.image].lun),wwn:new r.NI(this.imagesSettings[this.image].wwn)};C().forEach(this.backstores,o=>{const i=this.imagesSettings[this.image][o]||{};C().forIn(this.disk_default_controls[o],(s,a)=>{t[a]=new r.NI(i[a])})}),this.settingsForm=new M.d(t)}getDiskControlLimits(t,o){return this.disk_controls_limits?this.disk_controls_limits[t][o]:{type:"int"}}save(){const t=this.settingsForm.controls.backstore.value,o=this.settingsForm.controls.lun.value,i=this.settingsForm.controls.wwn.value,s={};C().forIn(this.settingsForm.controls,(a,d)=>{""!==a.value&&null!==a.value&&d in this.disk_default_controls[this.settingsForm.value.backstore]&&(s[d]=a.value,C().forEach(this.backstores,c=>{c!==t&&d 
in(this.imagesSettings[this.image][c]||{})&&(this.imagesSettings[this.image][c][d]=a.value)}))}),this.imagesSettings[this.image].backstore=t,this.imagesSettings[this.image].lun=o,this.imagesSettings[this.image].wwn=i,this.imagesSettings[this.image][t]=s,this.imagesSettings=Object.assign({},this.imagesSettings),this.control.updateValueAndValidity({emitEvent:!1}),this.activeModal.close()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(I.Kz),e.Y36(k),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-image-settings-modal"]],decls:25,vars:8,consts:function(){let _,t,o,i,s,a,d,c;return _="Configure",t="Changing these parameters from their default values is usually not necessary.",o="Settings",i="Backstore",s="Identifier",a="lun",d="wwn",c="This field is required.",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","settingsForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"alert-warning"],t,[4,"ngIf"],[1,"cd-header"],o,[1,"form-group","row"],[1,"col-sm-12"],[1,"col-form-label"],i,["id","backstore","name","backstore","formControlName","backstore",1,"form-control"],[3,"value",4,"ngFor","ngForOf"],[4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],s,["for","lun",1,"col-form-label","required"],a,["type","number","id","lun","name","lun","formControlName","lun",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","wwn",1,"col-form-label"],d,["type","text","id","wwn","name","wwn","formControlName","wwn",1,"form-control"],[1,"invalid-feedback"],c,[3,"value"],["class","form-group row",4,"ngFor","ngForOf"],[3,"settingsForm","formDir","setting","limits"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.ynx(2),e.SDv(3,2),e.BQk(),e._uU(4,"\xa0 
"),e.TgZ(5,"small"),e._uU(6),e.qZA(),e.BQk(),e.ynx(7,3),e.TgZ(8,"form",4,5),e.TgZ(10,"div",6),e.TgZ(11,"p",7),e.SDv(12,8),e.qZA(),e.YNc(13,to,14,1,"span",9),e.TgZ(14,"legend",10),e.SDv(15,11),e.qZA(),e.TgZ(16,"div",12),e.TgZ(17,"div",13),e.TgZ(18,"label",14),e.SDv(19,15),e.qZA(),e.TgZ(20,"select",16),e.YNc(21,oo,3,4,"option",17),e.qZA(),e.qZA(),e.qZA(),e.YNc(22,_o,2,1,"ng-container",18),e.qZA(),e.TgZ(23,"div",19),e.TgZ(24,"cd-form-button-panel",20),e.NdJ("submitActionEvent",function(){return o.save()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(6),e.Oqu(o.image),e.xp6(2),e.Q6J("formGroup",o.settingsForm),e.xp6(5),e.Q6J("ngIf",o.api_version>=1),e.xp6(8),e.Q6J("ngForOf",o.backstores),e.xp6(1),e.Q6J("ngForOf",o.backstores),e.xp6(2),e.Q6J("form",o.settingsForm)("submitText",o.actionLabels.UPDATE))},directives:[R.z,r._Y,r.JL,r.sg,v.V,l.O5,g.P,f.o,r.EJ,h.b,r.JJ,r.u,l.sg,O.p,r.wV,r.Fj,r.YN,r.Kr,_t],pipes:[He.V,l.Nd],styles:[""]}),n})();function ao(n,_){if(1&n&&(e.TgZ(0,"div",12),e.TgZ(1,"div",13),e._UZ(2,"cd-iscsi-setting",14),e.qZA(),e.qZA()),2&n){const t=_.$implicit,o=e.oxw(),i=e.MAs(5);e.xp6(2),e.Q6J("settingsForm",o.settingsForm)("formDir",i)("setting",t.key)("limits",o.getTargetControlLimits(t.key))}}let ro=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.iscsiService=o,this.actionLabels=i}ngOnInit(){const t={};C().forIn(this.target_default_controls,(o,i)=>{t[i]=new r.NI(this.target_controls.value[i])}),this.settingsForm=new M.d(t)}save(){const t={};C().forIn(this.settingsForm.controls,(o,i)=>{""===o.value||null===o.value||(t[i]=o.value)}),this.target_controls.setValue(t),this.activeModal.close()}getTargetControlLimits(t){return this.target_controls_limits?this.target_controls_limits[t]:["Yes","No"].includes(this.target_default_controls[t])?{type:"bool"}:{type:"int"}}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(I.Kz),e.Y36(k),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-iqn-settings-modal"]],decls:13,vars:7,consts:function(){let _,t;return _="Advanced Settings",t="Changing these parameters from their default values is usually not necessary.",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","settingsForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"alert-warning"],t,["class","form-group row",4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"form-group","row"],[1,"col-sm-12"],[3,"settingsForm","formDir","setting","limits"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p",7),e.SDv(8,8),e.qZA(),e.YNc(9,ao,3,4,"div",9),e.ALo(10,"keyvalue"),e.qZA(),e.TgZ(11,"div",10),e.TgZ(12,"cd-form-button-panel",11),e.NdJ("submitActionEvent",function(){return o.save()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.settingsForm),e.xp6(5),e.Q6J("ngForOf",e.lcZ(10,5,o.settingsForm.controls)),e.xp6(3),e.Q6J("form",o.settingsForm)("submitText",o.actionLabels.UPDATE))},directives:[R.z,r._Y,r.JL,r.sg,v.V,l.sg,O.p,g.P,_t],pipes:[l.Nd],styles:[""]}),n})();var re=p(63285),st=p(63622);let lo=(()=>{class n{constructor(t){this.ngControl=t}onInput(t){this.setValue(t)}setValue(t){t=C().isString(t)?t.trim():t,this.ngControl.control.setValue(t)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(r.a5))},n.\u0275dir=e.lG2({type:n,selectors:[["","cdTrim",""]],hostBindings:function(t,o){1&t&&e.NdJ("input",function(s){return o.onInput(s.target.value)})}}),n})();var co=p(39092),at=p(4416),Je=p(58039),Ye=p(10545);function po(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,42),e.qZA())}function go(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,43),e.qZA())}function 
uo(n,_){1&n&&(e.TgZ(0,"span",41),e.ynx(1),e.SDv(2,44),e.BQk(),e._UZ(3,"br"),e.ynx(4),e.SDv(5,45),e.BQk(),e._UZ(6,"br"),e.TgZ(7,"a",46),e.SDv(8,47),e.qZA(),e.qZA())}function mo(n,_){1&n&&(e.TgZ(0,"span",48),e.SDv(1,49),e.qZA())}const z=function(n){return[n]};function To(n,_){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",50),e._UZ(2,"input",51),e.TgZ(3,"span",14),e.TgZ(4,"button",52),e.NdJ("click",function(){const i=e.CHM(t),s=i.index,a=i.$implicit;return e.oxw(2).removePortal(s,a)}),e._UZ(5,"i",16),e.qZA(),e.qZA(),e.qZA(),e.BQk()}if(2&n){const t=_.$implicit,o=e.oxw(2);e.xp6(2),e.Q6J("value",t),e.xp6(3),e.Q6J("ngClass",e.VKq(2,z,o.icons.destroy))}}function fo(n,_){if(1&n&&(e.TgZ(0,"span",41),e.SDv(1,53),e.qZA()),2&n){const t=e.oxw(2);e.xp6(1),e.pQV(t.minimum_gateways),e.QtT(1)}}function Co(n,_){if(1&n&&(e.TgZ(0,"div",56),e._uU(1),e.qZA()),2&n){const t=e.oxw().$implicit,o=e.oxw(2);e.xp6(1),e.hij("lun: ",o.imagesSettings[t].lun,"")}}function So(n,_){if(1&n&&(e.ynx(0),e.SDv(1,57),e.ALo(2,"iscsiBackstore"),e.BQk()),2&n){const t=e.oxw().$implicit,o=e.oxw(2);e.xp6(2),e.pQV(e.lcZ(2,1,o.imagesSettings[t].backstore)),e.QtT(1)}}function Eo(n,_){1&n&&(e.ynx(0),e.SDv(1,58),e.BQk())}function Ro(n,_){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",50),e._UZ(2,"input",51),e.TgZ(3,"span",14),e.YNc(4,Co,2,1,"div",54),e.TgZ(5,"button",52),e.NdJ("click",function(){const s=e.CHM(t).$implicit;return e.oxw(2).imageSettingsModal(s)}),e._UZ(6,"i",16),e.qZA(),e.TgZ(7,"button",52),e.NdJ("click",function(){const i=e.CHM(t),s=i.index,a=i.$implicit;return e.oxw(2).removeImage(s,a)}),e._UZ(8,"i",16),e.qZA(),e.qZA(),e.qZA(),e.TgZ(9,"span",48),e.YNc(10,So,3,3,"ng-container",55),e.YNc(11,Eo,2,0,"ng-container",55),e.qZA(),e.BQk()}if(2&n){const 
t=_.$implicit,o=e.oxw(2);e.xp6(2),e.Q6J("value",t),e.xp6(2),e.Q6J("ngIf",o.api_version>=1),e.xp6(2),e.Q6J("ngClass",e.VKq(6,z,o.icons.deepCheck)),e.xp6(2),e.Q6J("ngClass",e.VKq(8,z,o.icons.destroy)),e.xp6(2),e.Q6J("ngIf",o.backstores.length>1),e.xp6(1),e.Q6J("ngIf",o.hasAdvancedSettings(o.imagesSettings[t][o.imagesSettings[t].backstore]))}}function Mo(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,59),e.qZA())}function Oo(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,60),e.qZA())}function Ao(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,78),e.qZA())}function ho(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,79),e.qZA())}function Po(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,80),e.qZA())}function Io(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,81),e.qZA())}function bo(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,82),e.qZA())}function No(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,83),e.qZA())}function Do(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,84),e.qZA())}function vo(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,85),e.qZA())}function Lo(n,_){if(1&n&&(e.TgZ(0,"div",61),e.TgZ(1,"div",8),e.TgZ(2,"label",62),e.ynx(3),e.SDv(4,63),e.BQk(),e.qZA(),e.TgZ(5,"div",11),e._UZ(6,"input",64),e.YNc(7,Ao,2,0,"span",17),e.YNc(8,ho,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(9,"div",8),e.TgZ(10,"label",65),e.ynx(11),e.SDv(12,66),e.BQk(),e.qZA(),e.TgZ(13,"div",11),e.TgZ(14,"div",12),e._UZ(15,"input",67),e.TgZ(16,"span",14),e._UZ(17,"button",68),e._UZ(18,"cd-copy-2-clipboard-button",69),e.qZA(),e.qZA(),e.YNc(19,Po,2,0,"span",17),e.YNc(20,Io,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(21,"div",8),e.TgZ(22,"label",70),e.ynx(23),e.SDv(24,71),e.BQk(),e.qZA(),e.TgZ(25,"div",11),e._UZ(26,"input",72),e.YNc(27,bo,2,0,"span",17),e.YNc(28,No,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(29,"div",8),e.TgZ(30,"label",73),e.ynx(31),e.SDv(32,74),e.BQk(),e.qZA(),e.TgZ(33,"div",11),e.TgZ(34,"div",12),e._UZ(35,"input",75),e.TgZ(36,"span",14),e._UZ(37,"button",76),e._UZ(38,"cd-copy-2-clipboard-button",77),e.qZA(),e.qZA(),e.YNc(39,Do,2,0,"span",17),e.YNc(40,vo,2,0,"span",17),e.qZA(),e.qZA(),e.q
ZA()),2&n){e.oxw();const t=e.MAs(2),o=e.oxw();e.xp6(7),e.Q6J("ngIf",o.targetForm.showError("user",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("user",t,"pattern")),e.xp6(11),e.Q6J("ngIf",o.targetForm.showError("password",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("password",t,"pattern")),e.xp6(7),e.Q6J("ngIf",o.targetForm.showError("mutual_user",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("mutual_user",t,"pattern")),e.xp6(11),e.Q6J("ngIf",o.targetForm.showError("mutual_password",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("mutual_password",t,"pattern"))}}function Fo(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,115),e.qZA())}function $o(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,116),e.qZA())}function Zo(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,117),e.qZA())}function Bo(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,118),e.qZA())}function Go(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,119),e.qZA())}function yo(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,120),e.qZA())}function xo(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,121),e.qZA())}function wo(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,122),e.qZA())}function qo(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,123),e.qZA())}function Ho(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,124),e.qZA())}function Ko(n,_){1&n&&(e.TgZ(0,"span",41),e.SDv(1,125),e.qZA())}function ko(n,_){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",50),e._UZ(2,"input",51),e.TgZ(3,"span",14),e.TgZ(4,"button",52),e.NdJ("click",function(){const i=e.CHM(t),s=i.index,a=i.$implicit,d=e.oxw(),c=d.$implicit,u=d.index;return e.oxw(3).removeInitiatorImage(c,s,u,a)}),e._UZ(5,"i",16),e.qZA(),e.qZA(),e.qZA(),e.BQk()}if(2&n){const t=_.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(3),e.Q6J("ngClass",e.VKq(2,z,o.icons.destroy))}}function Xo(n,_){1&n&&(e.TgZ(0,"span"),e.SDv(1,126),e.qZA())}function 
Qo(n,_){if(1&n&&(e.TgZ(0,"div",22),e.TgZ(1,"div",23),e.TgZ(2,"cd-select",127),e._UZ(3,"i",25),e.ynx(4),e.SDv(5,128),e.BQk(),e.qZA(),e.qZA(),e.qZA()),2&n){const t=e.oxw(),o=t.$implicit,i=t.index,s=e.oxw(3);e.xp6(2),e.Q6J("data",o.getValue("luns"))("options",s.imagesInitiatorSelections[i])("messages",s.messages.initiatorImage),e.xp6(1),e.Q6J("ngClass",e.VKq(4,z,s.icons.add))}}function zo(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"div",92),e.TgZ(1,"div",5),e.ynx(2),e.SDv(3,93),e.BQk(),e._uU(4),e.TgZ(5,"button",94),e.NdJ("click",function(){const s=e.CHM(t).index;return e.oxw(3).removeInitiator(s)}),e._UZ(6,"i",25),e.qZA(),e.qZA(),e.TgZ(7,"div",7),e.TgZ(8,"div",8),e.TgZ(9,"label",95),e.SDv(10,96),e.qZA(),e.TgZ(11,"div",11),e.TgZ(12,"input",97),e.NdJ("blur",function(){return e.CHM(t),e.oxw(3).updatedInitiatorSelector()}),e.qZA(),e.YNc(13,Fo,2,0,"span",17),e.YNc(14,$o,2,0,"span",17),e.YNc(15,Zo,2,0,"span",17),e.qZA(),e.qZA(),e.ynx(16,61),e.TgZ(17,"div",8),e.TgZ(18,"label",98),e.SDv(19,99),e.qZA(),e.TgZ(20,"div",11),e._UZ(21,"input",100),e.YNc(22,Bo,2,0,"span",17),e.YNc(23,Go,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(24,"div",8),e.TgZ(25,"label",101),e.SDv(26,102),e.qZA(),e.TgZ(27,"div",11),e.TgZ(28,"div",12),e._UZ(29,"input",103),e.TgZ(30,"span",14),e._UZ(31,"button",104),e._UZ(32,"cd-copy-2-clipboard-button",105),e.qZA(),e.qZA(),e.YNc(33,yo,2,0,"span",17),e.YNc(34,xo,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(35,"div",8),e.TgZ(36,"label",106),e.ynx(37),e.SDv(38,107),e.BQk(),e.qZA(),e.TgZ(39,"div",11),e._UZ(40,"input",108),e.YNc(41,wo,2,0,"span",17),e.YNc(42,qo,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(43,"div",8),e.TgZ(44,"label",109),e.SDv(45,110),e.qZA(),e.TgZ(46,"div",11),e.TgZ(47,"div",12),e._UZ(48,"input",111),e.TgZ(49,"span",14),e._UZ(50,"button",104),e._UZ(51,"cd-copy-2-clipboard-button",105),e.qZA(),e.qZA(),e.YNc(52,Ho,2,0,"span",17),e.YNc(53,Ko,2,0,"span",17),e.qZA(),e.qZA(),e.BQk(),e.TgZ(54,"div",8),e.TgZ(55,"label",112),e.SDv(56,113),e.qZA(),e.TgZ(57,"div",11),e.YNc(58,ko,6,4,"ng-c
ontainer",21),e.YNc(59,Xo,2,0,"span",55),e.YNc(60,Qo,6,6,"div",114),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&n){const t=_.$implicit,o=_.index;e.oxw(2);const i=e.MAs(2),s=e.oxw();e.Q6J("formGroup",t),e.xp6(4),e.hij(": ",t.getValue("client_iqn")," "),e.xp6(2),e.Q6J("ngClass",e.VKq(25,z,s.icons.destroy)),e.xp6(7),e.Q6J("ngIf",t.showError("client_iqn",i,"notUnique")),e.xp6(1),e.Q6J("ngIf",t.showError("client_iqn",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("client_iqn",i,"pattern")),e.xp6(6),e.Q6J("id","user"+o),e.xp6(1),e.Q6J("ngIf",t.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("user",i,"pattern")),e.xp6(6),e.Q6J("id","password"+o),e.xp6(2),e.Q6J("cdPasswordButton","password"+o),e.xp6(1),e.Q6J("source","password"+o),e.xp6(1),e.Q6J("ngIf",t.showError("password",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("password",i,"pattern")),e.xp6(6),e.Q6J("id","mutual_user"+o),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_user",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_user",i,"pattern")),e.xp6(6),e.Q6J("id","mutual_password"+o),e.xp6(2),e.Q6J("cdPasswordButton","mutual_password"+o),e.xp6(1),e.Q6J("source","mutual_password"+o),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_password",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_password",i,"pattern")),e.xp6(5),e.Q6J("ngForOf",t.getValue("luns")),e.xp6(1),e.Q6J("ngIf",t.getValue("cdIsInGroup")),e.xp6(1),e.Q6J("ngIf",!t.getValue("cdIsInGroup"))}}function Jo(n,_){1&n&&(e.TgZ(0,"span",48),e.SDv(1,129),e.qZA())}function Yo(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"div",8),e.TgZ(1,"label",86),e.SDv(2,87),e.qZA(),e.TgZ(3,"div",88),e.YNc(4,zo,61,27,"div",89),e.TgZ(5,"div",22),e.TgZ(6,"div",23),e.YNc(7,Jo,2,0,"span",18),e.TgZ(8,"button",90),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).addInitiator(),!1}),e._UZ(9,"i",25),e.ynx(10),e.SDv(11,91),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(12,"hr"),e.qZA(),e.qZA()}if(2&n){const 
t=e.oxw(2);e.xp6(4),e.Q6J("ngForOf",t.initiators.controls),e.xp6(3),e.Q6J("ngIf",0===t.initiators.controls.length),e.xp6(2),e.Q6J("ngClass",e.VKq(3,z,t.icons.add))}}function Vo(n,_){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",50),e._UZ(2,"input",51),e.TgZ(3,"span",14),e.TgZ(4,"button",52),e.NdJ("click",function(){const s=e.CHM(t).index,a=e.oxw(),d=a.$implicit,c=a.index;return e.oxw(3).removeGroupInitiator(d,s,c)}),e._UZ(5,"i",16),e.qZA(),e.qZA(),e.qZA(),e.BQk()}if(2&n){const t=_.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(3),e.Q6J("ngClass",e.VKq(2,z,o.icons.destroy))}}function Uo(n,_){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",50),e._UZ(2,"input",51),e.TgZ(3,"span",14),e.TgZ(4,"button",52),e.NdJ("click",function(){const s=e.CHM(t).index,a=e.oxw(),d=a.$implicit,c=a.index;return e.oxw(3).removeGroupDisk(d,s,c)}),e._UZ(5,"i",16),e.qZA(),e.qZA(),e.qZA(),e.BQk()}if(2&n){const t=_.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(3),e.Q6J("ngClass",e.VKq(2,z,o.icons.destroy))}}function jo(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"div",92),e.TgZ(1,"div",5),e.ynx(2),e.SDv(3,133),e.BQk(),e._uU(4),e.TgZ(5,"button",94),e.NdJ("click",function(){const s=e.CHM(t).index;return e.oxw(3).removeGroup(s)}),e._UZ(6,"i",25),e.qZA(),e.qZA(),e.TgZ(7,"div",7),e.TgZ(8,"div",8),e.TgZ(9,"label",134),e.SDv(10,135),e.qZA(),e.TgZ(11,"div",11),e._UZ(12,"input",136),e.qZA(),e.qZA(),e.TgZ(13,"div",8),e.TgZ(14,"label",137),e.ynx(15),e.SDv(16,138),e.BQk(),e.qZA(),e.TgZ(17,"div",11),e.YNc(18,Vo,6,4,"ng-container",21),e.TgZ(19,"div",22),e.TgZ(20,"div",23),e.TgZ(21,"cd-select",24),e.NdJ("selection",function(i){const a=e.CHM(t).index;return 
e.oxw(3).onGroupMemberSelection(i,a)}),e._UZ(22,"i",25),e.ynx(23),e.SDv(24,139),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(25,"hr"),e.qZA(),e.qZA(),e.TgZ(26,"div",8),e.TgZ(27,"label",28),e.ynx(28),e.SDv(29,140),e.BQk(),e.qZA(),e.TgZ(30,"div",11),e.YNc(31,Uo,6,4,"ng-container",21),e.TgZ(32,"div",22),e.TgZ(33,"div",23),e.TgZ(34,"cd-select",127),e._UZ(35,"i",25),e.ynx(36),e.SDv(37,141),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(38,"hr"),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&n){const t=_.$implicit,o=_.index,i=e.oxw(3);e.Q6J("formGroup",t),e.xp6(4),e.hij(": ",t.getValue("group_id")," "),e.xp6(2),e.Q6J("ngClass",e.VKq(13,z,i.icons.destroy)),e.xp6(12),e.Q6J("ngForOf",t.getValue("members")),e.xp6(3),e.Q6J("data",t.getValue("members"))("options",i.groupMembersSelections[o])("messages",i.messages.groupInitiator),e.xp6(1),e.Q6J("ngClass",e.VKq(15,z,i.icons.add)),e.xp6(9),e.Q6J("ngForOf",t.getValue("disks")),e.xp6(3),e.Q6J("data",t.getValue("disks"))("options",i.groupDiskSelections[o])("messages",i.messages.initiatorImage),e.xp6(1),e.Q6J("ngClass",e.VKq(17,z,i.icons.add))}}function Wo(n,_){1&n&&(e.TgZ(0,"span",48),e.SDv(1,142),e.qZA())}function en(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"div",8),e.TgZ(1,"label",86),e.SDv(2,130),e.qZA(),e.TgZ(3,"div",131),e.YNc(4,jo,39,19,"div",89),e.TgZ(5,"div",22),e.TgZ(6,"div",23),e.YNc(7,Wo,2,0,"span",18),e.TgZ(8,"button",90),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).addGroup(),!1}),e._UZ(9,"i",25),e.ynx(10),e.SDv(11,132),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("ngForOf",t.groups.controls),e.xp6(3),e.Q6J("ngIf",0===t.groups.controls.length),e.xp6(2),e.Q6J("ngClass",e.VKq(3,z,t.icons.add))}}function tn(n,_){if(1&n){const 
t=e.EpF();e.TgZ(0,"div",1),e.TgZ(1,"form",2,3),e.TgZ(3,"div",4),e.TgZ(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.TgZ(9,"div",8),e.TgZ(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11),e.TgZ(13,"div",12),e._UZ(14,"input",13),e.TgZ(15,"span",14),e.TgZ(16,"button",15),e.NdJ("click",function(){return e.CHM(t),e.oxw().targetSettingsModal()}),e._UZ(17,"i",16),e.qZA(),e.qZA(),e.qZA(),e.YNc(18,po,2,0,"span",17),e.YNc(19,go,2,0,"span",17),e.YNc(20,uo,9,0,"span",17),e.YNc(21,mo,2,0,"span",18),e._UZ(22,"hr"),e.qZA(),e.qZA(),e.TgZ(23,"div",8),e.TgZ(24,"label",19),e.SDv(25,20),e.qZA(),e.TgZ(26,"div",11),e.YNc(27,To,6,4,"ng-container",21),e.TgZ(28,"div",22),e.TgZ(29,"div",23),e.TgZ(30,"cd-select",24),e.NdJ("selection",function(i){return e.CHM(t),e.oxw().onPortalSelection(i)}),e._UZ(31,"i",25),e.ynx(32),e.SDv(33,26),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(34,"input",27),e.YNc(35,fo,2,1,"span",17),e._UZ(36,"hr"),e.qZA(),e.qZA(),e.TgZ(37,"div",8),e.TgZ(38,"label",28),e.SDv(39,29),e.qZA(),e.TgZ(40,"div",11),e.YNc(41,Ro,12,10,"ng-container",21),e._UZ(42,"input",30),e.YNc(43,Mo,2,0,"span",17),e.YNc(44,Oo,2,0,"span",17),e.TgZ(45,"div",22),e.TgZ(46,"div",23),e.TgZ(47,"cd-select",24),e.NdJ("selection",function(i){return e.CHM(t),e.oxw().onImageSelection(i)}),e._UZ(48,"i",25),e.ynx(49),e.SDv(50,31),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(51,"hr"),e.qZA(),e.qZA(),e.TgZ(52,"div",8),e.TgZ(53,"div",32),e.TgZ(54,"div",33),e._UZ(55,"input",34),e.TgZ(56,"label",35),e.SDv(57,36),e.qZA(),e.qZA(),e._UZ(58,"hr"),e.qZA(),e.qZA(),e.YNc(59,Lo,41,8,"div",37),e.YNc(60,Yo,13,5,"div",38),e.YNc(61,en,12,5,"div",38),e.qZA(),e.TgZ(62,"div",39),e.TgZ(63,"cd-form-button-panel",40),e.NdJ("submitActionEvent",function(){return e.CHM(t),e.oxw().submit()}),e.ALo(64,"titlecase"),e.ALo(65,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&n){const 
t=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.targetForm),e.xp6(6),e.pQV(e.lcZ(6,26,o.action))(e.lcZ(7,28,o.resource)),e.QtT(5),e.xp6(10),e.Q6J("ngClass",e.VKq(34,z,o.icons.deepCheck)),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"pattern")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"iqn")),e.xp6(1),e.Q6J("ngIf",o.hasAdvancedSettings(o.targetForm.getValue("target_controls"))),e.xp6(6),e.Q6J("ngForOf",o.portals.value),e.xp6(3),e.Q6J("data",o.portals.value)("options",o.portalsSelections)("messages",o.messages.portals),e.xp6(1),e.Q6J("ngClass",e.VKq(36,z,o.icons.add)),e.xp6(4),e.Q6J("ngIf",o.targetForm.showError("portals",t,"minGateways")),e.xp6(6),e.Q6J("ngForOf",o.targetForm.getValue("disks")),e.xp6(2),e.Q6J("ngIf",o.targetForm.showError("disks",t,"dupLunId")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("disks",t,"dupWwn")),e.xp6(3),e.Q6J("data",o.disks.value)("options",o.imagesSelections)("messages",o.messages.images),e.xp6(1),e.Q6J("ngClass",e.VKq(38,z,o.icons.add)),e.xp6(11),e.Q6J("ngIf",o.cephIscsiConfigVersion>10&&!o.targetForm.getValue("acl_enabled")),e.xp6(1),e.Q6J("ngIf",o.targetForm.getValue("acl_enabled")),e.xp6(1),e.Q6J("ngIf",o.targetForm.getValue("acl_enabled")),e.xp6(2),e.Q6J("form",o.targetForm)("submitText",e.lcZ(64,30,o.action)+" "+e.lcZ(65,32,o.resource))}}let rt=(()=>{class n extends q.E{constructor(t,o,i,s,a,d,c){super(),this.iscsiService=t,this.modalService=o,this.rbdService=i,this.router=s,this.route=a,this.taskWrapper=d,this.actionLabels=c,this.api_version=0,this.minimum_gateways=1,this.icons=T.P,this.isEdit=!1,this.portalsSelections=[],this.imagesInitiatorSelections=[],this.groupDiskSelections=[],this.groupMembersSelections=[],this.imagesSettings={},this.messages={portals:new ae.a({noOptions:"There are no portals available."}),images:new ae.a({noOptions:"There are no images available."}),initiatorImage:new ae.a({noOptions:"There are 
no images available. Please make sure you add an image to the target."}),groupInitiator:new ae.a({noOptions:"There are no initiators available. Please make sure you add an initiator to the target."})},this.IQN_REGEX=/^iqn\.(19|20)\d\d-(0[1-9]|1[0-2])\.\D{2,3}(\.[A-Za-z0-9-]+)+(:[A-Za-z0-9-\.]+)*$/,this.USER_REGEX=/^[\w\.:@_-]{8,64}$/,this.PASSWORD_REGEX=/^[\w@\-_\/]{12,16}$/,this.resource="target"}ngOnInit(){const t=[this.iscsiService.listTargets(),this.rbdService.list(),this.iscsiService.portals(),this.iscsiService.settings(),this.iscsiService.version()];this.router.url.startsWith("/block/iscsi/targets/edit")&&(this.isEdit=!0,this.route.params.subscribe(o=>{this.target_iqn=decodeURIComponent(o.target_iqn),t.push(this.iscsiService.getTarget(this.target_iqn))})),this.action=this.isEdit?this.actionLabels.EDIT:this.actionLabels.CREATE,(0,W.D)(t).subscribe(o=>{const i=C()(o[0]).filter(a=>a.target_iqn!==this.target_iqn).flatMap(a=>a.disks).map(a=>`${a.pool}/${a.image}`).value();"api_version"in o[3]&&(this.api_version=o[3].api_version),this.minimum_gateways=o[3].config.minimum_gateways,this.target_default_controls=o[3].target_default_controls,this.target_controls_limits=o[3].target_controls_limits,this.disk_default_controls=o[3].disk_default_controls,this.disk_controls_limits=o[3].disk_controls_limits,this.backstores=o[3].backstores,this.default_backstore=o[3].default_backstore,this.unsupported_rbd_features=o[3].unsupported_rbd_features,this.required_rbd_features=o[3].required_rbd_features,this.imagesAll=C()(o[1]).flatMap(a=>a.value).filter(a=>!a.namespace&&!(-1!==i.indexOf(`${a.pool_name}/${a.name}`)||0===this.getValidBackstores(a).length)).value(),this.imagesSelections=this.imagesAll.map(a=>new V.$(!1,`${a.pool_name}/${a.name}`,""));const s=[];o[2].forEach(a=>{a.ip_addresses.forEach(d=>{s.push(new 
V.$(!1,a.name+":"+d,""))})}),this.portalsSelections=[...s],this.cephIscsiConfigVersion=o[4].ceph_iscsi_config_version,this.createForm(),o[5]&&this.resolveModel(o[5]),this.loadingReady()})}createForm(){if(this.targetForm=new M.d({target_iqn:new r.NI("iqn.2001-07.com.ceph:"+Date.now(),{validators:[r.kI.required,r.kI.pattern(this.IQN_REGEX)]}),target_controls:new r.NI({}),portals:new r.NI([],{validators:[B.h.custom("minGateways",t=>C().uniq(t.map(i=>i.split(":")[0])).length{const o=this.getLunIds(t);return o.length!==C().uniq(o).length}),B.h.custom("dupWwn",t=>{const o=this.getWwns(t);return o.length!==C().uniq(o).length})]}),initiators:new r.Oe([]),groups:new r.Oe([]),acl_enabled:new r.NI(!1)}),this.cephIscsiConfigVersion>10){const t=new M.d({user:new r.NI(""),password:new r.NI(""),mutual_user:new r.NI(""),mutual_password:new r.NI("")});this.setAuthValidator(t),this.targetForm.addControl("auth",t)}}resolveModel(t){this.targetForm.patchValue({target_iqn:t.target_iqn,target_controls:t.target_controls,acl_enabled:t.acl_enabled}),this.cephIscsiConfigVersion>10&&this.targetForm.patchValue({auth:t.auth});const o=[];C().forEach(t.portals,s=>{o.push(`${s.host}:${s.ip}`)}),this.targetForm.patchValue({portals:o});const i=[];C().forEach(t.disks,s=>{const a=`${s.pool}/${s.image}`;i.push(a),this.imagesSettings[a]={backstore:s.backstore},this.imagesSettings[a][s.backstore]=s.controls,"lun"in s&&(this.imagesSettings[a].lun=s.lun),"wwn"in s&&(this.imagesSettings[a].wwn=s.wwn),this.onImageSelection({option:{name:a,selected:!0}})}),this.targetForm.patchValue({disks:i}),C().forEach(t.clients,s=>{const a=this.addInitiator();s.luns=C().map(s.luns,d=>`${d.pool}/${d.image}`),a.patchValue(s)}),t.groups.forEach((s,a)=>{const d=this.addGroup();s.disks=C().map(s.disks,c=>`${c.pool}/${c.image}`),d.patchValue(s),C().forEach(s.members,c=>{this.onGroupMemberSelection({option:new V.$(!0,c,"")},a)})})}hasAdvancedSettings(t){return Object.values(t).length>0}get portals(){return 
this.targetForm.get("portals")}onPortalSelection(){this.portals.setValue(this.portals.value)}removePortal(t,o){return this.portalsSelections.forEach(i=>{i.name===o&&(i.selected=!1)}),this.portals.value.splice(t,1),this.portals.setValue(this.portals.value),!1}get disks(){return this.targetForm.get("disks")}removeImage(t,o){return this.imagesSelections.forEach(i=>{i.name===o&&(i.selected=!1)}),this.disks.value.splice(t,1),this.removeImageRefs(o),this.targetForm.get("disks").updateValueAndValidity({emitEvent:!1}),!1}removeImageRefs(t){this.initiators.controls.forEach(o=>{const i=o.value.luns.filter(s=>s!==t);o.get("luns").setValue(i)}),this.groups.controls.forEach(o=>{const i=o.value.disks.filter(s=>s!==t);o.get("disks").setValue(i)}),C().forEach(this.imagesInitiatorSelections,(o,i)=>{this.imagesInitiatorSelections[i]=o.filter(s=>s.name!==t)}),C().forEach(this.groupDiskSelections,(o,i)=>{this.groupDiskSelections[i]=o.filter(s=>s.name!==t)})}getDefaultBackstore(t){let o=this.default_backstore;const i=this.getImageById(t);return this.validFeatures(i,this.default_backstore)||this.backstores.forEach(s=>{s!==this.default_backstore&&this.validFeatures(i,s)&&(o=s)}),o}isLunIdInUse(t,o){const i=this.disks.value.filter(s=>s!==o);return this.getLunIds(i).includes(t)}getLunIds(t){return C().map(t,o=>this.imagesSettings[o].lun)}nextLunId(t){const o=this.disks.value.filter(a=>a!==t),i=this.getLunIds(o);let s=0;for(;i.includes(s);)s++;return s}getWwns(t){return C().map(t,i=>this.imagesSettings[i].wwn).filter(i=>C().isString(i)&&""!==i)}onImageSelection(t){const o=t.option;if(o.selected){if(this.imagesSettings[o.name])this.isLunIdInUse(this.imagesSettings[o.name].lun,o.name)&&(this.imagesSettings[o.name].lun=this.nextLunId(o.name));else{const i=this.getDefaultBackstore(o.name);this.imagesSettings[o.name]={backstore:i,lun:this.nextLunId(o.name)},this.imagesSettings[o.name][i]={}}C().forEach(this.imagesInitiatorSelections,(i,s)=>{i.push(new 
V.$(!1,o.name,"")),this.imagesInitiatorSelections[s]=[...i]}),C().forEach(this.groupDiskSelections,(i,s)=>{i.push(new V.$(!1,o.name,"")),this.groupDiskSelections[s]=[...i]})}else this.removeImageRefs(o.name);this.targetForm.get("disks").updateValueAndValidity({emitEvent:!1})}get initiators(){return this.targetForm.get("initiators")}addInitiator(){const t=new M.d({client_iqn:new r.NI("",{validators:[r.kI.required,B.h.custom("notUnique",i=>{const s=this.initiators.controls.reduce(function(a,d){return a.concat(d.value.client_iqn)},[]);return s.indexOf(i)!==s.lastIndexOf(i)}),r.kI.pattern(this.IQN_REGEX)]}),auth:new M.d({user:new r.NI(""),password:new r.NI(""),mutual_user:new r.NI(""),mutual_password:new r.NI("")}),luns:new r.NI([]),cdIsInGroup:new r.NI(!1)});this.setAuthValidator(t),this.initiators.push(t),C().forEach(this.groupMembersSelections,(i,s)=>{i.push(new V.$(!1,"","")),this.groupMembersSelections[s]=[...i]});const o=C().map(this.targetForm.getValue("disks"),i=>new V.$(!1,i,""));return this.imagesInitiatorSelections.push(o),t}setAuthValidator(t){B.h.validateIf(t.get("user"),()=>t.getValue("password")||t.getValue("mutual_user")||t.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.USER_REGEX)],[t.get("password"),t.get("mutual_user"),t.get("mutual_password")]),B.h.validateIf(t.get("password"),()=>t.getValue("user")||t.getValue("mutual_user")||t.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.PASSWORD_REGEX)],[t.get("user"),t.get("mutual_user"),t.get("mutual_password")]),B.h.validateIf(t.get("mutual_user"),()=>t.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.USER_REGEX)],[t.get("user"),t.get("password"),t.get("mutual_password")]),B.h.validateIf(t.get("mutual_password"),()=>t.getValue("mutual_user"),[r.kI.required],[r.kI.pattern(this.PASSWORD_REGEX)],[t.get("user"),t.get("password"),t.get("mutual_user")])}removeInitiator(t){const 
o=this.initiators.value[t];this.initiators.removeAt(t),C().forEach(this.groupMembersSelections,(i,s)=>{i.splice(t,1),this.groupMembersSelections[s]=[...i]}),this.groups.controls.forEach(i=>{const s=i.value.members.filter(a=>a!==o.client_iqn);i.get("members").setValue(s)}),this.imagesInitiatorSelections.splice(t,1)}updatedInitiatorSelector(){this.initiators.controls.forEach(t=>{t.get("client_iqn").updateValueAndValidity({emitEvent:!1})}),C().forEach(this.groupMembersSelections,(t,o)=>{C().forEach(t,(i,s)=>{const a=i.name;i.name=this.initiators.controls[s].value.client_iqn,this.groups.controls.forEach(d=>{const c=d.value.members,u=c.indexOf(a);-1!==u&&(c[u]=i.name),d.get("members").setValue(c)})}),this.groupMembersSelections[o]=[...this.groupMembersSelections[o]]})}removeInitiatorImage(t,o,i,s){const a=t.getValue("luns");return a.splice(o,1),t.patchValue({luns:a}),this.imagesInitiatorSelections[i].forEach(d=>{d.name===s&&(d.selected=!1)}),!1}get groups(){return this.targetForm.get("groups")}addGroup(){const t=new M.d({group_id:new r.NI("",{validators:[r.kI.required]}),members:new r.NI([]),disks:new r.NI([])});this.groups.push(t);const o=C().map(this.targetForm.getValue("disks"),s=>new V.$(!1,s,""));this.groupDiskSelections.push(o);const i=C().map(this.initiators.value,s=>new V.$(!1,s.client_iqn,"",!s.cdIsInGroup));return this.groupMembersSelections.push(i),t}removeGroup(t){this.groups.removeAt(t),this.groupMembersSelections[t].filter(i=>i.selected).forEach(i=>{i.selected=!1,this.onGroupMemberSelection({option:i},t)}),this.groupMembersSelections.splice(t,1),this.groupDiskSelections.splice(t,1)}onGroupMemberSelection(t,o){const i=t.option;let 
s=[];i.selected||(s=this.groupDiskSelections[o].filter(d=>d.selected).map(d=>d.name)),this.initiators.controls.forEach((a,d)=>{a.value.client_iqn===i.name&&(a.patchValue({luns:s}),a.get("cdIsInGroup").setValue(i.selected),C().forEach(this.groupMembersSelections,c=>{c[d].enabled=!i.selected}),this.imagesInitiatorSelections[d].forEach(c=>{c.selected=s.includes(c.name)}))})}removeGroupInitiator(t,o,i){const s=t.getValue("members")[o];t.getValue("members").splice(o,1),this.onGroupMemberSelection({option:new V.$(!1,s,"")},i)}removeGroupDisk(t,o,i){const s=t.getValue("disks")[o];t.getValue("disks").splice(o,1),this.groupDiskSelections[i].forEach(a=>{a.name===s&&(a.selected=!1)}),this.groupDiskSelections[i]=[...this.groupDiskSelections[i]]}submit(){const t=C().cloneDeep(this.targetForm.value),o={target_iqn:this.targetForm.getValue("target_iqn"),target_controls:this.targetForm.getValue("target_controls"),acl_enabled:this.targetForm.getValue("acl_enabled"),portals:[],disks:[],clients:[],groups:[]};if(this.cephIscsiConfigVersion>10){const s=this.targetForm.get("auth");s.getValue("user")||s.get("user").setValue(""),s.getValue("password")||s.get("password").setValue(""),s.getValue("mutual_user")||s.get("mutual_user").setValue(""),s.getValue("mutual_password")||s.get("mutual_password").setValue("");const a=this.targetForm.getValue("acl_enabled");o.auth={user:a?"":s.getValue("user"),password:a?"":s.getValue("password"),mutual_user:a?"":s.getValue("mutual_user"),mutual_password:a?"":s.getValue("mutual_password")}}let i;t.disks.forEach(s=>{const a=s.split("/"),d=this.imagesSettings[s].backstore;o.disks.push({pool:a[0],image:a[1],backstore:d,controls:this.imagesSettings[s][d],lun:this.imagesSettings[s].lun,wwn:this.imagesSettings[s].wwn})}),t.portals.forEach(s=>{const 
a=s.indexOf(":");o.portals.push({host:s.substring(0,a),ip:s.substring(a+1)})}),o.acl_enabled&&(t.initiators.forEach(s=>{s.auth.user||(s.auth.user=""),s.auth.password||(s.auth.password=""),s.auth.mutual_user||(s.auth.mutual_user=""),s.auth.mutual_password||(s.auth.mutual_password=""),delete s.cdIsInGroup;const a=[];s.luns.forEach(d=>{const c=d.split("/");a.push({pool:c[0],image:c[1]})}),s.luns=a}),o.clients=t.initiators),o.acl_enabled&&(t.groups.forEach(s=>{const a=[];s.disks.forEach(d=>{const c=d.split("/");a.push({pool:c[0],image:c[1]})}),s.disks=a}),o.groups=t.groups),this.isEdit?(o.new_target_iqn=o.target_iqn,o.target_iqn=this.target_iqn,i=this.taskWrapper.wrapTaskAroundCall({task:new F.R("iscsi/target/edit",{target_iqn:o.target_iqn}),call:this.iscsiService.updateTarget(this.target_iqn,o)})):i=this.taskWrapper.wrapTaskAroundCall({task:new F.R("iscsi/target/create",{target_iqn:o.target_iqn}),call:this.iscsiService.createTarget(o)}),i.subscribe({error:()=>{this.targetForm.setErrors({cdSubmitButton:!0})},complete:()=>this.router.navigate(["/block/iscsi/targets"])})}targetSettingsModal(){const t={target_controls:this.targetForm.get("target_controls"),target_default_controls:this.target_default_controls,target_controls_limits:this.target_controls_limits};this.modalRef=this.modalService.show(ro,t)}imageSettingsModal(t){const o={imagesSettings:this.imagesSettings,image:t,api_version:this.api_version,disk_default_controls:this.disk_default_controls,disk_controls_limits:this.disk_controls_limits,backstores:this.getValidBackstores(this.getImageById(t)),control:this.targetForm.get("disks")};this.modalRef=this.modalService.show(so,o)}validFeatures(t,o){const i=t.features,s=this.required_rbd_features[o];return(i&s)===s&&0==(i&this.unsupported_rbd_features[o])}getImageById(t){return this.imagesAll.find(o=>t===`${o.pool_name}/${o.name}`)}getValidBackstores(t){return this.backstores.filter(o=>this.validFeatures(t,o))}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(k),e.Y36(re.Z),e.Y36(x),e.Y36(m.F0),e.Y36(m.gz),e.Y36(Q.P),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let _,t,o,i,s,a,d,c,u,S,N,P,$,G,X,J,te,A,w,de,pe,ge,ue,me,Te,fe,Ce,Se,y,Ze,Be,Ge,ye,xe,we,qe,L,Ot,At,ht,Pt,It,bt,Nt,Dt,vt,Lt,Ft,$t,Zt,Bt,Gt,yt,xt,wt,qt,Ht,Kt,kt,Xt,Qt,zt;return _="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Target IQN",o="Portals",i="Add portal",s="Images",a="Add image",d="ACL authentication",c="This field is required.",u="IQN has wrong pattern.",S="An IQN has the following notation 'iqn.$year-$month.$reversedAddress:$definedName'",N="For example: iqn.2016-06.org.dashboard:storage:disk.sn-a8675309",P="More information",$="This target has modified advanced settings.",G="At least " + "\ufffd0\ufffd" + " gateways are required.",X="Backstore: " + "\ufffd0\ufffd" + ".\xA0",J="This image has modified settings.",te="Duplicated LUN numbers.",A="Duplicated WWN.",w="User",de="Password",pe="Mutual User",ge="Mutual Password",ue="This field is required.",me="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",Te="This field is required.",fe="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",Ce="This field is required.",Se="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",y="This field is required.",Ze="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",Be="Initiators",Ge="Add initiator",ye="Initiator",xe="Client IQN",we="User",qe="Password",L="Mutual User",Ot="Mutual Password",At="Images",ht="Initiator IQN needs to be unique.",Pt="This field is required.",It="IQN has wrong pattern.",bt="This field is required.",Nt="User names must have a length of 8 to 64 characters and can contain 
alphanumeric characters, '.', '@', '-', '_' or ':'.",Dt="This field is required.",vt="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",Lt="This field is required.",Ft="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",$t="This field is required.",Zt="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",Bt="Initiator belongs to a group. Images will be configure in the group.",Gt="Add image",yt="No items added.",xt="Groups",wt="Add group",qt="Group",Ht="Name",Kt="Initiators",kt="Add initiator",Xt="Images",Qt="Add image",zt="No items added.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","targetForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],_,[1,"card-body"],[1,"form-group","row"],["for","target_iqn",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],[1,"input-group"],["type","text","id","target_iqn","name","target_iqn","formControlName","target_iqn","cdTrim","",1,"form-control"],[1,"input-group-append"],["id","ecp-info-button","type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],["class","invalid-feedback",4,"ngIf"],["class","form-text text-muted",4,"ngIf"],["for","portals",1,"cd-col-form-label","required"],o,[4,"ngFor","ngForOf"],[1,"row"],[1,"col-md-12"],["elemClass","btn btn-light 
float-right",3,"data","options","messages","selection"],[3,"ngClass"],i,["type","hidden","id","portals","name","portals","formControlName","portals",1,"form-control"],["for","disks",1,"cd-col-form-label"],s,["type","hidden","id","disks","name","disks","formControlName","disks",1,"form-control"],a,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["type","checkbox","formControlName","acl_enabled","name","acl_enabled","id","acl_enabled",1,"custom-control-input"],["for","acl_enabled",1,"custom-control-label"],d,["formGroupName","auth",4,"ngIf"],["class","form-group row",4,"ngIf"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],c,u,S,N,["target","_blank","href","https://en.wikipedia.org/wiki/ISCSI#Addressing"],P,[1,"form-text","text-muted"],$,[1,"input-group","cd-mb"],["type","text","disabled","",1,"cd-form-control",3,"value"],["type","button",1,"btn","btn-light",3,"click"],G,["class","input-group-text",4,"ngIf"],[4,"ngIf"],[1,"input-group-text"],X,J,te,A,["formGroupName","auth"],["for","target_user",1,"cd-col-form-label"],w,["type","text","autocomplete","off","id","target_user","name","target_user","formControlName","user",1,"form-control"],["for","target_password",1,"cd-col-form-label"],de,["type","password","autocomplete","new-password","id","target_password","name","target_password","formControlName","password",1,"form-control"],["type","button","cdPasswordButton","target_password",1,"btn","btn-light"],["source","target_password"],["for","target_mutual_user",1,"cd-col-form-label"],pe,["type","text","autocomplete","off","id","target_mutual_user","name","target_mutual_user","formControlName","mutual_user",1,"form-control"],["for","target_mutual_password",1,"cd-col-form-label"],ge,["type","password","autocomplete","new-password","id","target_mutual_password","name","target_mutual_password","formControlName","mutual_password",1,"form-control"],["type","button","cdPasswordButton","target_mutu
al_password",1,"btn","btn-light"],["source","target_mutual_password"],ue,me,Te,fe,Ce,Se,y,Ze,["for","initiators",1,"cd-col-form-label"],Be,["formArrayName","initiators",1,"cd-col-form-input"],["class","card mb-2",3,"formGroup",4,"ngFor","ngForOf"],[1,"btn","btn-light","float-right",3,"click"],Ge,[1,"card","mb-2",3,"formGroup"],ye,["type","button",1,"close",3,"click"],["for","client_iqn",1,"cd-col-form-label","required"],xe,["type","text","formControlName","client_iqn","cdTrim","",1,"form-control",3,"blur"],["for","user",1,"cd-col-form-label"],we,["formControlName","user","autocomplete","off","type","text",1,"form-control",3,"id"],["for","password",1,"cd-col-form-label"],qe,["formControlName","password","autocomplete","new-password","type","password",1,"form-control",3,"id"],["type","button",1,"btn","btn-light",3,"cdPasswordButton"],[3,"source"],["for","mutual_user",1,"cd-col-form-label"],L,["formControlName","mutual_user","autocomplete","off","type","text",1,"form-control",3,"id"],["for","mutual_password",1,"cd-col-form-label"],Ot,["formControlName","mutual_password","autocomplete","new-password","type","password",1,"form-control",3,"id"],["for","luns",1,"cd-col-form-label"],At,["class","row",4,"ngIf"],ht,Pt,It,bt,Nt,Dt,vt,Lt,Ft,$t,Zt,Bt,["elemClass","btn btn-light float-right",3,"data","options","messages"],Gt,yt,xt,["formArrayName","groups",1,"cd-col-form-input"],wt,qt,["for","group_id",1,"cd-col-form-label","required"],Ht,["type","text","formControlName","group_id",1,"form-control"],["for","members",1,"cd-col-form-label"],Kt,kt,Xt,Qt,zt]},template:function(t,o){1&t&&e.YNc(0,tn,66,40,"div",0),2&t&&e.Q6J("cdFormLoading",o.loading)},directives:[st.y,r._Y,r.JL,r.sg,v.V,g.P,f.o,r.Fj,h.b,r.JJ,r.u,lo,l.mk,l.O5,l.sg,co.H,r.Wl,O.p,r.x0,at.C,Je.s,r.CE],pipes:[l.rS,Ye.m,He.V],styles:[".cd-mb[_ngcontent-%COMP%]{margin-bottom:10px}"]}),n})();var lt=p(68136),he=p(30982),ee=p(64337),ve=p(99466),Ee=p(68774),ct=p(55657),se=p(38047),Ve=p(18001),Le=p(97161),oe=p(74937);function 
on(n,_){1&n&&(e.TgZ(0,"span",30),e.SDv(1,31),e.qZA())}function nn(n,_){1&n&&(e.TgZ(0,"span",30),e.SDv(1,32),e.qZA())}function _n(n,_){1&n&&(e.TgZ(0,"span",30),e.SDv(1,33),e.qZA())}function sn(n,_){1&n&&(e.TgZ(0,"span",30),e.SDv(1,34),e.qZA())}function an(n,_){1&n&&(e.TgZ(0,"span",30),e.SDv(1,35),e.qZA())}function rn(n,_){1&n&&(e.TgZ(0,"span",30),e.SDv(1,36),e.qZA())}function ln(n,_){1&n&&(e.TgZ(0,"span",30),e.SDv(1,37),e.qZA())}function cn(n,_){1&n&&(e.TgZ(0,"span",30),e.SDv(1,38),e.qZA())}let dn=(()=>{class n{constructor(t,o,i,s,a){this.authStorageService=t,this.activeModal=o,this.actionLabels=i,this.iscsiService=s,this.notificationService=a,this.USER_REGEX=/^[\w\.:@_-]{8,64}$/,this.PASSWORD_REGEX=/^[\w@\-_\/]{12,16}$/,this.permission=this.authStorageService.getPermissions().iscsi}ngOnInit(){this.hasPermission=this.permission.update,this.createForm(),this.iscsiService.getDiscovery().subscribe(t=>{this.discoveryForm.patchValue(t)})}createForm(){this.discoveryForm=new M.d({user:new r.NI({value:"",disabled:!this.hasPermission}),password:new r.NI({value:"",disabled:!this.hasPermission}),mutual_user:new r.NI({value:"",disabled:!this.hasPermission}),mutual_password:new 
r.NI({value:"",disabled:!this.hasPermission})}),B.h.validateIf(this.discoveryForm.get("user"),()=>this.discoveryForm.getValue("password")||this.discoveryForm.getValue("mutual_user")||this.discoveryForm.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.USER_REGEX)],[this.discoveryForm.get("password"),this.discoveryForm.get("mutual_user"),this.discoveryForm.get("mutual_password")]),B.h.validateIf(this.discoveryForm.get("password"),()=>this.discoveryForm.getValue("user")||this.discoveryForm.getValue("mutual_user")||this.discoveryForm.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.PASSWORD_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("mutual_user"),this.discoveryForm.get("mutual_password")]),B.h.validateIf(this.discoveryForm.get("mutual_user"),()=>this.discoveryForm.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.USER_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("password"),this.discoveryForm.get("mutual_password")]),B.h.validateIf(this.discoveryForm.get("mutual_password"),()=>this.discoveryForm.getValue("mutual_user"),[r.kI.required],[r.kI.pattern(this.PASSWORD_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("password"),this.discoveryForm.get("mutual_user")])}submitAction(){this.iscsiService.updateDiscovery(this.discoveryForm.value).subscribe(()=>{this.notificationService.show(Ve.k.success,"Updated discovery authentication"),this.activeModal.close()},()=>{this.discoveryForm.setErrors({cdSubmitButton:!0})})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(I.Kz),e.Y36(D.p4),e.Y36(k),e.Y36(Le.g))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-discovery-modal"]],decls:46,vars:13,consts:function(){let _,t,o,i,s,a,d,c,u,S,N,P,$;return _="Discovery Authentication",t="User",o="Password",i="Mutual User",s="Mutual Password",a="This field is required.",d="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', 
'-', '_' or ':'.",c="This field is required.",u="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",S="This field is required.",N="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",P="This field is required.",$="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","discoveryForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","user",1,"cd-col-form-label"],t,[1,"cd-col-form-input"],["id","user","formControlName","user","type","text","autocomplete","off",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","password",1,"cd-col-form-label"],o,[1,"input-group"],["id","password","formControlName","password","type","password","autocomplete","new-password",1,"form-control"],[1,"input-group-append"],["type","button","cdPasswordButton","password",1,"btn","btn-light"],["source","password"],["for","mutual_user",1,"cd-col-form-label"],i,["id","mutual_user","formControlName","mutual_user","type","text","autocomplete","off",1,"form-control"],["for","mutual_password",1,"cd-col-form-label"],s,["id","mutual_password","formControlName","mutual_password","type","password","autocomplete","new-password",1,"form-control"],["type","button","cdPasswordButton","mutual_password",1,"btn","btn-light"],["source","mutual_password"],[1,"modal-footer"],[3,"form","showSubmit","submitText","submitActionEvent"],[1,"invalid-feedback"],a,d,c,u,S,N,P,$]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"div",7),e.TgZ(8,"label",8),e.SDv(9,9),e.qZA(),e.TgZ(10,"div",10),e._UZ(11,"input",11),e.YNc(12,on,2,0,"span",12),e.YNc(13,nn,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(14,"div",7),e.TgZ(15,"label",13),
e.SDv(16,14),e.qZA(),e.TgZ(17,"div",10),e.TgZ(18,"div",15),e._UZ(19,"input",16),e.TgZ(20,"span",17),e._UZ(21,"button",18),e._UZ(22,"cd-copy-2-clipboard-button",19),e.qZA(),e.qZA(),e.YNc(23,_n,2,0,"span",12),e.YNc(24,sn,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(25,"div",7),e.TgZ(26,"label",20),e.ynx(27),e.SDv(28,21),e.BQk(),e.qZA(),e.TgZ(29,"div",10),e._UZ(30,"input",22),e.YNc(31,an,2,0,"span",12),e.YNc(32,rn,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(33,"div",7),e.TgZ(34,"label",23),e.SDv(35,24),e.qZA(),e.TgZ(36,"div",10),e.TgZ(37,"div",15),e._UZ(38,"input",25),e.TgZ(39,"span",17),e._UZ(40,"button",26),e._UZ(41,"cd-copy-2-clipboard-button",27),e.qZA(),e.qZA(),e.YNc(42,ln,2,0,"span",12),e.YNc(43,cn,2,0,"span",12),e.qZA(),e.qZA(),e.qZA(),e.TgZ(44,"div",28),e.TgZ(45,"cd-form-button-panel",29),e.NdJ("submitActionEvent",function(){return o.submitAction()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.discoveryForm),e.xp6(8),e.Q6J("ngIf",o.discoveryForm.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("user",i,"pattern")),e.xp6(10),e.Q6J("ngIf",o.discoveryForm.showError("password",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("password",i,"pattern")),e.xp6(7),e.Q6J("ngIf",o.discoveryForm.showError("mutual_user",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("mutual_user",i,"pattern")),e.xp6(10),e.Q6J("ngIf",o.discoveryForm.showError("mutual_password",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("mutual_password",i,"pattern")),e.xp6(2),e.Q6J("form",o.discoveryForm)("showSubmit",o.hasPermission)("submitText",o.actionLabels.SUBMIT)}},directives:[R.z,r._Y,r.JL,r.sg,v.V,g.P,f.o,r.Fj,h.b,r.JJ,r.u,l.O5,at.C,Je.s,O.p],styles:[""]}),n})();var pn=p(86969);let dt=(()=>{class n{constructor(t){this.router=t}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(m.F0))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-tabs"]],decls:8,vars:1,consts:function(){let _,t;return _="Overview",t="Targets",[["ngbNav","",1,"nav-tabs",3,"activeId","navChange"],["nav","ngbNav"],["ngbNavItem","/block/iscsi/overview"],["ngbNavLink",""],_,["ngbNavItem","/block/iscsi/targets"],t]},template:function(t,o){1&t&&(e.TgZ(0,"ul",0,1),e.NdJ("navChange",function(s){return o.router.navigate([s.nextId])}),e.TgZ(2,"li",2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.qZA(),e.TgZ(5,"li",5),e.TgZ(6,"a",3),e.SDv(7,6),e.qZA(),e.qZA(),e.qZA()),2&t&&e.Q6J("activeId",o.router.url)},directives:[I.Pz,I.nv,I.Vx],styles:[""]}),n})();var pt=p(34501),gn=p(30490),Re=p(94928),un=p(68962);const mn=["highlightTpl"],Tn=["detailTable"],fn=["tree"],Cn=function(){return["logged_in"]},Sn=function(){return["logged_out"]},En=function(n,_){return{"badge-success":n,"badge-danger":_}};function Rn(n,_){if(1&n&&(e._UZ(0,"i"),e.TgZ(1,"span"),e._uU(2),e.qZA(),e._uU(3," \xa0 "),e.TgZ(4,"span",8),e._uU(5),e.qZA()),2&n){const t=_.$implicit;e.Tol(t.data.cdIcon),e.xp6(2),e.Oqu(t.data.name),e.xp6(2),e.Q6J("ngClass",e.WLB(7,En,e.DdM(5,Cn).includes(t.data.status),e.DdM(6,Sn).includes(t.data.status))),e.xp6(1),e.hij(" ",t.data.status," ")}}function Mn(n,_){if(1&n&&(e.TgZ(0,"div",9),e.TgZ(1,"legend"),e._uU(2),e.qZA(),e._UZ(3,"cd-table",10,11),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.Oqu(t.title),e.xp6(1),e.Q6J("data",t.data)("columns",t.columns)("limit",0)}}function On(n,_){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t)}}function An(n,_){if(1&n&&(e.TgZ(0,"strong"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t)}}function hn(n,_){if(1&n&&(e.YNc(0,On,2,1,"span",12),e.YNc(1,An,2,1,"strong",12)),2&n){const t=_.row;e.Q6J("ngIf",void 0===t.default||t.default===t.current),e.xp6(1),e.Q6J("ngIf",void 0!==t.default&&t.default!==t.current)}}let Pn=(()=>{class 
n{constructor(t,o){this.iscsiBackstorePipe=t,this.booleanTextPipe=o,this.icons=T.P,this.metadata={},this.nodes=[],this.treeOptions={useVirtualScroll:!0,actionMapping:{mouse:{click:this.onNodeSelected.bind(this)}}}}set content(t){this.detailTable=t,t&&t.updateColumns()}ngOnInit(){this.columns=[{prop:"displayName",name:"Name",flexGrow:1,cellTemplate:this.highlightTpl},{prop:"current",name:"Current",flexGrow:1,cellTemplate:this.highlightTpl},{prop:"default",name:"Default",flexGrow:1,cellTemplate:this.highlightTpl}]}ngOnChanges(){this.selection&&(this.selectedItem=this.selection,this.generateTree()),this.data=void 0}generateTree(){const t=C().cloneDeep(this.selectedItem.target_controls);this.cephIscsiConfigVersion>10&&C().extend(t,C().cloneDeep(this.selectedItem.auth)),this.metadata={root:t};const o={target:{expanded:C().join(this.selectedItem.cdExecuting?[T.P.large,T.P.spinner,T.P.spin]:[T.P.large,T.P.bullseye]," ")},initiators:{expanded:C().join([T.P.large,T.P.user]," "),leaf:C().join([T.P.user]," ")},groups:{expanded:C().join([T.P.large,T.P.users]," "),leaf:C().join([T.P.users]," ")},disks:{expanded:C().join([T.P.large,T.P.disk]," "),leaf:C().join([T.P.disk]," ")},portals:{expanded:C().join([T.P.large,T.P.server]," "),leaf:C().join([T.P.server]," ")}},i=[];C().forEach(this.selectedItem.disks,c=>{const u="disk_"+c.pool+"_"+c.image;this.metadata[u]={controls:c.controls,backstore:c.backstore},["wwn","lun"].forEach(S=>{S in c&&(this.metadata[u][S]=c[S])}),i.push({name:`${c.pool}/${c.image}`,cdId:u,cdIcon:o.disks.leaf})});const s=[];C().forEach(this.selectedItem.portals,c=>{s.push({name:`${c.host}:${c.ip}`,cdIcon:o.portals.leaf})});const a=[];C().forEach(this.selectedItem.clients,c=>{const u=C().cloneDeep(c.auth);c.info&&(C().extend(u,c.info),delete u.state,C().forEach(Object.keys(c.info.state),P=>{u[P.toLowerCase()]=c.info.state[P]})),this.metadata["client_"+c.client_iqn]=u;const 
S=[];c.luns.forEach(P=>{S.push({name:`${P.pool}/${P.image}`,cdId:"disk_"+P.pool+"_"+P.image,cdIcon:o.disks.leaf})});let N="";c.info&&(N=Object.keys(c.info.state).includes("LOGGED_IN")?"logged_in":"logged_out"),a.push({name:c.client_iqn,status:N,cdId:"client_"+c.client_iqn,children:S,cdIcon:o.initiators.leaf})});const d=[];C().forEach(this.selectedItem.groups,c=>{const u=[];c.disks.forEach(N=>{u.push({name:`${N.pool}/${N.image}`,cdId:"disk_"+N.pool+"_"+N.image,cdIcon:o.disks.leaf})});const S=[];c.members.forEach(N=>{S.push({name:N,cdId:"client_"+N})}),d.push({name:c.group_id,cdIcon:o.groups.leaf,children:[{name:"Disks",children:u,cdIcon:o.disks.expanded},{name:"Initiators",children:S,cdIcon:o.initiators.expanded}]})}),this.nodes=[{name:this.selectedItem.target_iqn,cdId:"root",isExpanded:!0,cdIcon:o.target.expanded,children:[{name:"Disks",isExpanded:!0,children:i,cdIcon:o.disks.expanded},{name:"Portals",isExpanded:!0,children:s,cdIcon:o.portals.expanded},{name:"Initiators",isExpanded:!0,children:a,cdIcon:o.initiators.expanded},{name:"Groups",isExpanded:!0,children:d,cdIcon:o.groups.expanded}]}]}format(t){return"boolean"==typeof t?this.booleanTextPipe.transform(t):t}onNodeSelected(t,o){var i,s,a,d;if(ne.iM.ACTIVATE(t,o,!0),o.data.cdId){this.title=o.data.name;const c=this.metadata[o.data.cdId]||{};"root"===o.data.cdId?(null===(i=this.detailTable)||void 0===i||i.toggleColumn({prop:"default",isHidden:!0}),this.data=C().map(this.settings.target_default_controls,(u,S)=>({displayName:S,default:u=this.format(u),current:C().isUndefined(c[S])?u:this.format(c[S])})),this.cephIscsiConfigVersion>10&&["user","password","mutual_user","mutual_password"].forEach(u=>{this.data.push({displayName:u,default:null,current:c[u]})})):o.data.cdId.toString().startsWith("disk_")?(null===(s=this.detailTable)||void 
0===s||s.toggleColumn({prop:"default",isHidden:!0}),this.data=C().map(this.settings.disk_default_controls[c.backstore],(u,S)=>({displayName:S,default:u=this.format(u),current:C().isUndefined(c.controls[S])?u:this.format(c.controls[S])})),this.data.push({displayName:"backstore",default:this.iscsiBackstorePipe.transform(this.settings.default_backstore),current:this.iscsiBackstorePipe.transform(c.backstore)}),["wwn","lun"].forEach(u=>{u in c&&this.data.push({displayName:u,default:void 0,current:c[u]})})):(null===(a=this.detailTable)||void 0===a||a.toggleColumn({prop:"default",isHidden:!1}),this.data=C().map(c,(u,S)=>({displayName:S,default:void 0,current:this.format(u)})))}else this.data=void 0;null===(d=this.detailTable)||void 0===d||d.updateColumns()}onUpdateData(){this.tree.treeModel.expandAll()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(He.V),e.Y36(un.T))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-details"]],viewQuery:function(t,o){if(1&t&&(e.Gf(mn,7),e.Gf(Tn,5),e.Gf(fn,5)),2&t){let i;e.iGM(i=e.CRH())&&(o.highlightTpl=i.first),e.iGM(i=e.CRH())&&(o.content=i.first),e.iGM(i=e.CRH())&&(o.tree=i.first)}},inputs:{selection:"selection",settings:"settings",cephIscsiConfigVersion:"cephIscsiConfigVersion"},features:[e.TTD],decls:11,vars:3,consts:function(){let _;return _="iSCSI Topology",[[1,"row"],[1,"col-6"],_,[3,"nodes","options","updateData"],["tree",""],["treeNodeTemplate",""],["class","col-6 metadata",4,"ngIf"],["highlightTpl",""],[1,"badge",3,"ngClass"],[1,"col-6","metadata"],["columnMode","flex",3,"data","columns","limit"],["detailTable",""],[4,"ngIf"]]},template:function(t,o){1&t&&(e.TgZ(0,"div",0),e.TgZ(1,"div",1),e.TgZ(2,"legend"),e.SDv(3,2),e.qZA(),e.TgZ(4,"tree-root",3,4),e.NdJ("updateData",function(){return 
o.onUpdateData()}),e.YNc(6,Rn,6,10,"ng-template",null,5,e.W1O),e.qZA(),e.qZA(),e.YNc(8,Mn,5,4,"div",6),e.qZA(),e.YNc(9,hn,2,2,"ng-template",null,7,e.W1O)),2&t&&(e.xp6(4),e.Q6J("nodes",o.nodes)("options",o.treeOptions),e.xp6(4),e.Q6J("ngIf",o.data))},directives:[ne.qr,l.O5,l.mk,ee.a],styles:[""]}),n})();function In(n,_){if(1&n&&(e.ynx(0),e._UZ(1,"br"),e.TgZ(2,"span"),e.SDv(3,6),e.qZA(),e.TgZ(4,"pre"),e._uU(5),e.qZA(),e.BQk()),2&n){const t=e.oxw(2);e.xp6(5),e.Oqu(t.status)}}function bn(n,_){if(1&n&&(e.TgZ(0,"cd-alert-panel",2),e.ynx(1),e.tHW(2,3),e._UZ(3,"cd-doc",4),e.N_p(),e.BQk(),e.YNc(4,In,6,1,"ng-container",5),e.qZA()),2&n){const t=e.oxw();e.xp6(4),e.Q6J("ngIf",t.status)}}function Nn(n,_){if(1&n&&e._UZ(0,"cd-iscsi-target-details",15),2&n){const t=e.oxw(2);e.Q6J("cephIscsiConfigVersion",t.cephIscsiConfigVersion)("selection",t.expandedRow)("settings",t.settings)}}const Dn=function(n){return[n]};function vn(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",7,8),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().getTargets()})("setExpandedRow",function(i){return e.CHM(t),e.oxw().setExpandedRow(i)})("updateSelection",function(i){return e.CHM(t),e.oxw().updateSelection(i)}),e.TgZ(2,"div",9),e._UZ(3,"cd-table-actions",10),e.TgZ(4,"button",11),e.NdJ("click",function(){return e.CHM(t),e.oxw().configureDiscoveryAuth()}),e._UZ(5,"i",12),e.ynx(6),e.SDv(7,13),e.BQk(),e.qZA(),e.qZA(),e.YNc(8,Nn,1,3,"cd-iscsi-target-details",14),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.targets)("columns",t.columns)("hasDetails",!0)("autoReload",!1)("status",t.tableStatus),e.xp6(3),e.Q6J("permission",t.permission)("selection",t.selection)("tableActions",t.tableActions),e.xp6(2),e.Q6J("ngClass",e.VKq(10,Dn,t.icons.key)),e.xp6(3),e.Q6J("ngIf",t.expandedRow)}}let Ln=(()=>{class n extends 
lt.o{constructor(t,o,i,s,a,d,c,u,S){super(S),this.authStorageService=t,this.iscsiService=o,this.joinPipe=i,this.taskListService=s,this.notAvailablePipe=a,this.modalService=d,this.taskWrapper=c,this.actionLabels=u,this.ngZone=S,this.available=void 0,this.selection=new Ee.r,this.targets=[],this.icons=T.P,this.builders={"iscsi/target/create":N=>({target_iqn:N.target_iqn})},this.permission=this.authStorageService.getPermissions().iscsi,this.tableActions=[{permission:"create",icon:T.P.add,routerLink:()=>"/block/iscsi/targets/create",name:this.actionLabels.CREATE},{permission:"update",icon:T.P.edit,routerLink:()=>`/block/iscsi/targets/edit/${this.selection.first().target_iqn}`,name:this.actionLabels.EDIT,disable:()=>this.getEditDisableDesc()},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteIscsiTargetModal(),name:this.actionLabels.DELETE,disable:()=>this.getDeleteDisableDesc()}]}ngOnInit(){this.columns=[{name:"Target",prop:"target_iqn",flexGrow:2,cellTransformation:ve.e.executing},{name:"Portals",prop:"cdPortals",pipe:this.joinPipe,flexGrow:2},{name:"Images",prop:"cdImages",pipe:this.joinPipe,flexGrow:2},{name:"# Sessions",prop:"info.num_sessions",pipe:this.notAvailablePipe,flexGrow:1}],this.iscsiService.status().subscribe(t=>{this.available=t.available,t.available||(this.status=t.message)})}getTargets(){this.available&&(this.setTableRefreshTimeout(),this.iscsiService.version().subscribe(t=>{this.cephIscsiConfigVersion=t.ceph_iscsi_config_version}),this.taskListService.init(()=>this.iscsiService.listTargets(),t=>this.prepareResponse(t),t=>this.targets=t,()=>this.onFetchError(),this.taskFilter,this.itemFilter,this.builders),this.iscsiService.settings().subscribe(t=>{this.settings=t}))}ngOnDestroy(){this.summaryDataSubscription&&this.summaryDataSubscription.unsubscribe()}getEditDisableDesc(){const t=this.selection.first();return t&&(null==t?void 0:t.cdExecuting)?t.cdExecuting:t&&C().isUndefined(null==t?void 0:t.info)?"Unavailable 
gateway(s)":!t}getDeleteDisableDesc(){var t;const o=this.selection.first();return(null==o?void 0:o.cdExecuting)?o.cdExecuting:o&&C().isUndefined(null==o?void 0:o.info)?"Unavailable gateway(s)":o&&(null===(t=null==o?void 0:o.info)||void 0===t?void 0:t.num_sessions)?"Target has active sessions":!o}prepareResponse(t){return t.forEach(o=>{o.cdPortals=o.portals.map(i=>`${i.host}:${i.ip}`),o.cdImages=o.disks.map(i=>`${i.pool}/${i.image}`)}),t}onFetchError(){this.table.reset()}itemFilter(t,o){return t.target_iqn===o.metadata.target_iqn}taskFilter(t){return["iscsi/target/create","iscsi/target/edit","iscsi/target/delete"].includes(t.name)}updateSelection(t){this.selection=t}deleteIscsiTargetModal(){const t=this.selection.first().target_iqn;this.modalRef=this.modalService.show(he.M,{itemDescription:"iSCSI target",itemNames:[t],submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new F.R("iscsi/target/delete",{target_iqn:t}),call:this.iscsiService.deleteTarget(t)})})}configureDiscoveryAuth(){this.modalService.show(dn)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(k),e.Y36(pn.A),e.Y36(se.j),e.Y36(ct.g),e.Y36(re.Z),e.Y36(Q.P),e.Y36(D.p4),e.Y36(e.R0b))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-list"]],viewQuery:function(t,o){if(1&t&&e.Gf(ee.a,5),2&t){let i;e.iGM(i=e.CRH())&&(o.table=i.first)}},features:[e._Bn([se.j]),e.qOj],decls:3,vars:2,consts:function(){let _,t,o,i;return _="iSCSI Targets not available",t="Please consult the " + "\ufffd#3\ufffd" + "" + "\ufffd/#3\ufffd" + " on how to configure and enable the iSCSI Targets management functionality.",o="Available information:",i="Discovery 
authentication",[["type","info","title",_,4,"ngIf"],["columnMode","flex","identifier","target_iqn","forceIdentifier","true","selectionType","single",3,"data","columns","hasDetails","autoReload","status","fetchData","setExpandedRow","updateSelection",4,"ngIf"],["type","info","title",_],t,["section","iscsi"],[4,"ngIf"],o,["columnMode","flex","identifier","target_iqn","forceIdentifier","true","selectionType","single",3,"data","columns","hasDetails","autoReload","status","fetchData","setExpandedRow","updateSelection"],["table",""],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"],["type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],i,["cdTableDetail","",3,"cephIscsiConfigVersion","selection","settings",4,"ngIf"],["cdTableDetail","",3,"cephIscsiConfigVersion","selection","settings"]]},template:function(t,o){1&t&&(e._UZ(0,"cd-iscsi-tabs"),e.YNc(1,bn,5,1,"cd-alert-panel",0),e.YNc(2,vn,9,12,"cd-table",1)),2&t&&(e.xp6(1),e.Q6J("ngIf",!1===o.available),e.xp6(1),e.Q6J("ngIf",!0===o.available))},directives:[dt,l.O5,pt.G,gn.K,ee.a,Re.K,f.o,l.mk,Pn],styles:[""]}),n})();var Ue=p(66369),Fn=p(76446),$n=p(90068);const Zn=["iscsiSparklineTpl"],Bn=["iscsiPerSecondTpl"],Gn=["iscsiRelativeDateTpl"];function yn(n,_){if(1&n&&(e.TgZ(0,"span"),e._UZ(1,"cd-sparkline",9),e.qZA()),2&n){const t=e.oxw(),o=t.value,i=t.row;e.xp6(1),e.Q6J("data",o)("isBinary",i.cdIsBinary)}}function xn(n,_){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function wn(n,_){if(1&n&&(e.YNc(0,yn,2,2,"span",7),e.YNc(1,xn,2,0,"span",8)),2&n){const t=_.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}function qn(n,_){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.hij(" ",t," /s ")}}function Hn(n,_){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function Kn(n,_){if(1&n&&(e.YNc(0,qn,2,1,"span",7),e.YNc(1,Hn,2,0,"span",8)),2&n){const 
t=_.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}function kn(n,_){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"notAvailable"),e.ALo(3,"relativeDate"),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.hij(" ",e.lcZ(2,1,e.lcZ(3,3,t))," ")}}function Xn(n,_){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function Qn(n,_){if(1&n&&(e.YNc(0,kn,4,5,"span",7),e.YNc(1,Xn,2,0,"span",8)),2&n){const t=_.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}let zn=(()=>{class n{constructor(t,o,i){this.iscsiService=t,this.dimlessPipe=o,this.iscsiBackstorePipe=i,this.gateways=[],this.images=[]}ngOnInit(){this.gatewaysColumns=[{name:"Name",prop:"name"},{name:"State",prop:"state",flexGrow:1,cellTransformation:ve.e.badge,customTemplateConfig:{map:{up:{class:"badge-success"},down:{class:"badge-danger"}}}},{name:"# Targets",prop:"num_targets"},{name:"# Sessions",prop:"num_sessions"}],this.imagesColumns=[{name:"Pool",prop:"pool"},{name:"Image",prop:"image"},{name:"Backstore",prop:"backstore",pipe:this.iscsiBackstorePipe},{name:"Read Bytes",prop:"stats_history.rd_bytes",cellTemplate:this.iscsiSparklineTpl},{name:"Write Bytes",prop:"stats_history.wr_bytes",cellTemplate:this.iscsiSparklineTpl},{name:"Read Ops",prop:"stats.rd",pipe:this.dimlessPipe,cellTemplate:this.iscsiPerSecondTpl},{name:"Write Ops",prop:"stats.wr",pipe:this.dimlessPipe,cellTemplate:this.iscsiPerSecondTpl},{name:"A/O Since",prop:"optimized_since",cellTemplate:this.iscsiRelativeDateTpl}]}refresh(){this.iscsiService.overview().subscribe(t=>{this.gateways=t.gateways,this.images=t.images,this.images.map(o=>(o.stats_history&&(o.stats_history.rd_bytes=o.stats_history.rd_bytes.map(i=>i[1]),o.stats_history.wr_bytes=o.stats_history.wr_bytes.map(i=>i[1])),o.cdIsBinary=!0,o))})}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(k),e.Y36(Ue.n),e.Y36(He.V))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi"]],viewQuery:function(t,o){if(1&t&&(e.Gf(Zn,7),e.Gf(Bn,7),e.Gf(Gn,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.iscsiSparklineTpl=i.first),e.iGM(i=e.CRH())&&(o.iscsiPerSecondTpl=i.first),e.iGM(i=e.CRH())&&(o.iscsiRelativeDateTpl=i.first)}},decls:13,vars:4,consts:function(){let _,t;return _="Gateways",t="Images",[_,[3,"data","columns","fetchData"],t,[3,"data","columns"],["iscsiSparklineTpl",""],["iscsiPerSecondTpl",""],["iscsiRelativeDateTpl",""],[4,"ngIf"],["class","text-muted",4,"ngIf"],[3,"data","isBinary"],[1,"text-muted"]]},template:function(t,o){1&t&&(e._UZ(0,"cd-iscsi-tabs"),e.TgZ(1,"legend"),e.SDv(2,0),e.qZA(),e.TgZ(3,"cd-table",1),e.NdJ("fetchData",function(){return o.refresh()}),e.qZA(),e.TgZ(4,"legend"),e.SDv(5,2),e.qZA(),e._UZ(6,"cd-table",3),e.YNc(7,wn,2,2,"ng-template",null,4,e.W1O),e.YNc(9,Kn,2,2,"ng-template",null,5,e.W1O),e.YNc(11,Qn,2,2,"ng-template",null,6,e.W1O)),2&t&&(e.xp6(3),e.Q6J("data",o.gateways)("columns",o.gatewaysColumns),e.xp6(3),e.Q6J("data",o.images)("columns",o.imagesColumns))},directives:[dt,ee.a,l.O5,Fn.l],pipes:[ct.g,$n.h],styles:[""]}),n})(),Jn=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[[l.ez,_e.m,I.Oz,m.Bz,r.u5,r.UX,I.ZQ]]}),n})();var Yn=p(75319),Vn=p(26215),Un=p(45435),gt=p(55358);let K=class{constructor(_,t){this.http=_,this.timerService=t,this.REFRESH_INTERVAL=3e4,this.summaryDataSource=new Vn.X(null),this.summaryData$=this.summaryDataSource.asObservable()}startPolling(){return this.timerService.get(()=>this.retrieveSummaryObservable(),this.REFRESH_INTERVAL).subscribe(this.retrieveSummaryObserver())}refresh(){return this.retrieveSummaryObservable().subscribe(this.retrieveSummaryObserver())}retrieveSummaryObservable(){return this.http.get("api/block/mirroring/summary")}retrieveSummaryObserver(){return _=>{this.summaryDataSource.next(_)}}subscribeSummary(_,t){return 
this.summaryData$.pipe((0,Un.h)(o=>!!o)).subscribe(_,t)}getPool(_){return this.http.get(`api/block/mirroring/pool/${_}`)}updatePool(_,t){return this.http.put(`api/block/mirroring/pool/${_}`,t,{observe:"response"})}getSiteName(){return this.http.get("api/block/mirroring/site_name")}setSiteName(_){return this.http.put("api/block/mirroring/site_name",{site_name:_},{observe:"response"})}createBootstrapToken(_){return this.http.post(`api/block/mirroring/pool/${_}/bootstrap/token`,{})}importBootstrapToken(_,t,o){return this.http.post(`api/block/mirroring/pool/${_}/bootstrap/peer`,{direction:t,token:o},{observe:"response"})}getPeer(_,t){return this.http.get(`api/block/mirroring/pool/${_}/peer/${t}`)}addPeer(_,t){return this.http.post(`api/block/mirroring/pool/${_}/peer`,t,{observe:"response"})}updatePeer(_,t,o){return this.http.put(`api/block/mirroring/pool/${_}/peer/${t}`,o,{observe:"response"})}deletePeer(_,t){return this.http.delete(`api/block/mirroring/pool/${_}/peer/${t}`,{observe:"response"})}};K.\u0275fac=function(_){return new(_||K)(e.LFG(ie.eN),e.LFG(gt.f))},K.\u0275prov=e.Yz7({token:K,factory:K.\u0275fac,providedIn:"root"}),(0,b.gn)([(0,b.fM)(0,Y.G),(0,b.w6)("design:type",Function),(0,b.w6)("design:paramtypes",[String]),(0,b.w6)("design:returntype",void 0)],K.prototype,"setSiteName",null),(0,b.gn)([(0,b.fM)(1,Y.G),(0,b.fM)(2,Y.G),(0,b.w6)("design:type",Function),(0,b.w6)("design:paramtypes",[String,String,String]),(0,b.w6)("design:returntype",void 0)],K.prototype,"importBootstrapToken",null),K=(0,b.gn)([Y.o,(0,b.w6)("design:paramtypes",[ie.eN,gt.f])],K);var je=p(58071),jn=p(68307),ut=p(12627),le=p(82945),Wn=p(39749),ei=p(13472);function ti(n,_){1&n&&(e.TgZ(0,"span",25),e.SDv(1,26),e.qZA())}function oi(n,_){if(1&n&&(e.TgZ(0,"div",27),e._UZ(1,"input",28),e.TgZ(2,"label",29),e._uU(3),e.qZA(),e.qZA()),2&n){const t=_.$implicit;e.xp6(1),e.s9C("id",t.name),e.s9C("name",t.name),e.s9C("formControlName",t.name),e.xp6(1),e.s9C("for",t.name),e.xp6(1),e.Oqu(t.name)}}function 
ni(n,_){1&n&&(e.TgZ(0,"span",25),e.SDv(1,30),e.qZA())}let ii=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.rbdMirroringService=o,this.taskWrapper=i,this.pools=[],this.createForm()}createForm(){this.createBootstrapForm=new M.d({siteName:new r.NI("",{validators:[r.kI.required]}),pools:new r.cw({},{validators:[this.validatePools()]}),token:new r.NI("",{})})}ngOnInit(){this.createBootstrapForm.get("siteName").setValue(this.siteName),this.rbdMirroringService.getSiteName().subscribe(t=>{this.createBootstrapForm.get("siteName").setValue(t.site_name)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.pools=t.content_data.pools.reduce((s,a)=>(s.push({name:a.name,mirror_mode:a.mirror_mode}),s),[]);const i=this.createBootstrapForm.get("pools");C().each(this.pools,s=>{const a=s.name,d="disabled"===s.mirror_mode,c=i.controls[a];c?d&&c.disabled?c.enable():!d&&c.enabled&&(c.disable(),c.setValue(!0)):i.addControl(a,new r.NI({value:!d,disabled:!d}))})})}ngOnDestroy(){this.subs&&this.subs.unsubscribe()}validatePools(){return t=>{let o=0;return C().each(t.controls,i=>{!0===i.value&&++o}),o>0?null:{requirePool:!0}}}generate(){this.createBootstrapForm.get("token").setValue("");let t="";const o=[],i=this.createBootstrapForm.get("pools");C().each(i.controls,(u,S)=>{!0===u.value&&(t=S,u.disabled||o.push(S))});const s={mirror_mode:"image"},a=(0,je.z)(this.rbdMirroringService.setSiteName(this.createBootstrapForm.getValue("siteName")),(0,W.D)(o.map(u=>this.rbdMirroringService.updatePool(u,s))),this.rbdMirroringService.createBootstrapToken(t).pipe((0,jn.b)(u=>this.createBootstrapForm.get("token").setValue(u.token)))).pipe((0,ut.Z)()),d=()=>{this.rbdMirroringService.refresh(),this.createBootstrapForm.setErrors({cdSubmitButton:!0})};this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/mirroring/bootstrap/create",{}),call:a}).subscribe({error:d,complete:d})}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(I.Kz),e.Y36(K),e.Y36(Q.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-bootstrap-create-modal"]],decls:32,vars:6,consts:function(){let _,t,o,i,s,a,d,c,u,S,N;return _="Create Bootstrap Token",t="To create a bootstrap token which can be imported by a peer site cluster, provide the local site's name, select which pools will have mirroring enabled, and click\xA0 " + "\ufffd#10\ufffd" + "Generate" + "\ufffd/#10\ufffd" + ".",o="Site Name",i="Name...",s="Pools",a="Generate",d="Token",c="Generated token...",u="Close",S="This field is required.",N="At least one pool is required.",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","createBootstrapForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","siteName",1,"col-form-label","required"],o,["type","text","placeholder",i,"id","siteName","name","siteName","formControlName","siteName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["formGroupName","pools",1,"form-group"],["for","pools",1,"col-form-label","required"],s,["class","custom-control 
custom-checkbox",4,"ngFor","ngForOf"],[1,"mb-4","float-right",3,"form","submitAction"],a,["for","token",1,"col-form-label"],d,["placeholder",c,"id","token","formControlName","token","readonly","",1,"form-control","resize-vertical"],["source","token",1,"float-right"],[1,"modal-footer"],["name",u,3,"backAction"],[1,"invalid-feedback"],S,[1,"custom-control","custom-checkbox"],["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],N]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(11,"div",8),e.TgZ(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,ti,2,0,"span",12),e.qZA(),e.TgZ(16,"div",13),e.TgZ(17,"label",14),e.SDv(18,15),e.qZA(),e.YNc(19,oi,4,5,"div",16),e.YNc(20,ni,2,0,"span",12),e.qZA(),e.TgZ(21,"cd-submit-button",17),e.NdJ("submitAction",function(){return o.generate()}),e.SDv(22,18),e.qZA(),e.TgZ(23,"div",8),e.TgZ(24,"label",19),e.TgZ(25,"span"),e.SDv(26,20),e.qZA(),e.qZA(),e.TgZ(27,"textarea",21),e._uU(28," "),e.qZA(),e.qZA(),e._UZ(29,"cd-copy-2-clipboard-button",22),e.qZA(),e.TgZ(30,"div",23),e.TgZ(31,"cd-back-button",24),e.NdJ("backAction",function(){return o.activeModal.close()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.createBootstrapForm),e.xp6(11),e.Q6J("ngIf",o.createBootstrapForm.showError("siteName",i,"required")),e.xp6(4),e.Q6J("ngForOf",o.pools),e.xp6(1),e.Q6J("ngIf",o.createBootstrapForm.showError("pools",i,"requirePool")),e.xp6(1),e.Q6J("form",o.createBootstrapForm)}},directives:[R.z,r._Y,r.JL,v.V,r.sg,g.P,f.o,r.Fj,h.b,r.JJ,r.u,le.U,l.O5,r.x0,l.sg,Wn.w,Je.s,ei.W,r.Wl],styles:[".form-group.ng-invalid[_ngcontent-%COMP%] .invalid-feedback[_ngcontent-%COMP%]{display:block}"]}),n})();function 
_i(n,_){1&n&&(e.TgZ(0,"span",26),e.SDv(1,27),e.qZA())}function si(n,_){if(1&n&&(e.TgZ(0,"option",28),e._uU(1),e.qZA()),2&n){const t=_.$implicit;e.Q6J("value",t.key),e.xp6(1),e.Oqu(t.desc)}}function ai(n,_){if(1&n&&(e.TgZ(0,"div",29),e._UZ(1,"input",30),e.TgZ(2,"label",31),e._uU(3),e.qZA(),e.qZA()),2&n){const t=_.$implicit;e.xp6(1),e.s9C("id",t.name),e.s9C("name",t.name),e.s9C("formControlName",t.name),e.xp6(1),e.s9C("for",t.name),e.xp6(1),e.Oqu(t.name)}}function ri(n,_){1&n&&(e.TgZ(0,"span",26),e.SDv(1,32),e.qZA())}function li(n,_){1&n&&(e.TgZ(0,"span",26),e.SDv(1,33),e.qZA())}function ci(n,_){1&n&&(e.TgZ(0,"span",26),e.SDv(1,34),e.qZA())}let di=(()=>{class n{constructor(t,o,i,s){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=s,this.pools=[],this.directions=[{key:"rx-tx",desc:"Bidirectional"},{key:"rx",desc:"Unidirectional (receive-only)"}],this.createForm()}createForm(){this.importBootstrapForm=new M.d({siteName:new r.NI("",{validators:[r.kI.required]}),direction:new r.NI("rx-tx",{}),pools:new r.cw({},{validators:[this.validatePools()]}),token:new r.NI("",{validators:[r.kI.required,this.validateToken()]})})}ngOnInit(){this.rbdMirroringService.getSiteName().subscribe(t=>{this.importBootstrapForm.get("siteName").setValue(t.site_name)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.pools=t.content_data.pools.reduce((s,a)=>(s.push({name:a.name,mirror_mode:a.mirror_mode}),s),[]);const i=this.importBootstrapForm.get("pools");C().each(this.pools,s=>{const a=s.name,d="disabled"===s.mirror_mode,c=i.controls[a];c?d&&c.disabled?c.enable():!d&&c.enabled&&(c.disable(),c.setValue(!0)):i.addControl(a,new r.NI({value:!d,disabled:!d}))})})}ngOnDestroy(){this.subs&&this.subs.unsubscribe()}validatePools(){return t=>{let o=0;return C().each(t.controls,i=>{!0===i.value&&++o}),o>0?null:{requirePool:!0}}}validateToken(){return t=>{try{if(JSON.parse(atob(t.value)))return null}catch(o){}return{invalidToken:!0}}}import(){const 
t=[],o=[],i=this.importBootstrapForm.get("pools");C().each(i.controls,(u,S)=>{!0===u.value&&(t.push(S),u.disabled||o.push(S))});const s={mirror_mode:"image"};let a=(0,je.z)(this.rbdMirroringService.setSiteName(this.importBootstrapForm.getValue("siteName")),(0,W.D)(o.map(u=>this.rbdMirroringService.updatePool(u,s))));a=t.reduce((u,S)=>(0,je.z)(u,this.rbdMirroringService.importBootstrapToken(S,this.importBootstrapForm.getValue("direction"),this.importBootstrapForm.getValue("token"))),a).pipe((0,ut.Z)());const d=()=>{this.rbdMirroringService.refresh(),this.importBootstrapForm.setErrors({cdSubmitButton:!0})};this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/mirroring/bootstrap/import",{}),call:a}).subscribe({error:d,complete:()=>{d(),this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(I.Kz),e.Y36(D.p4),e.Y36(K),e.Y36(Q.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-bootstrap-import-modal"]],decls:36,vars:10,consts:function(){let _,t,o,i,s,a,d,c,u,S,N,P;return _="Import Bootstrap Token",t="To import a bootstrap token which was created by a peer site cluster, provide the local site's name, select which pools will have mirroring enabled, provide the generated token, and click\xA0" + "\ufffd#10\ufffd" + "Import" + "\ufffd/#10\ufffd" + ".",o="Site Name",i="Name...",s="Direction",a="Pools",d="Token",c="Generated token...",u="This field is required.",S="At least one pool is required.",N="This field is required.",P="The token is 
invalid.",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","importBootstrapForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","siteName",1,"col-form-label","required"],o,["type","text","placeholder",i,"id","siteName","name","siteName","formControlName","siteName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","direction",1,"col-form-label"],s,["id","direction","name","direction","formControlName","direction",1,"form-control"],[3,"value",4,"ngFor","ngForOf"],["formGroupName","pools",1,"form-group"],["for","pools",1,"col-form-label","required"],a,["class","custom-control custom-checkbox",4,"ngFor","ngForOf"],["for","token",1,"col-form-label","required"],d,["placeholder",c,"id","token","formControlName","token",1,"form-control","resize-vertical"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],u,[3,"value"],[1,"custom-control","custom-checkbox"],["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],S,N,P]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(11,"div",8),e.TgZ(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,_i,2,0,"span",12),e.qZA(),e.TgZ(16,"div",8),e.TgZ(17,"label",13),e.TgZ(18,"span"),e.SDv(19,14),e.qZA(),e.qZA(),e.TgZ(20,"select",15),e.YNc(21,si,2,2,"option",16),e.qZA(),e.qZA(),e.TgZ(22,"div",17),e.TgZ(23,"label",18),e.SDv(24,19),e.qZA(),e.YNc(25,ai,4,5,"div",20),e.YNc(26,ri,2,0,"span",12),e.qZA(),e.TgZ(27,"div",8),e.TgZ(28,"label",21),e.SDv(29,22),e.qZA(),e.TgZ(30,"textarea",23),e._uU(31," "),e.qZA(),e.YNc(32,li,2,0,"span",12),e.YNc(33,ci,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(34,"div",24),e.TgZ(35,"cd-form-button-panel",25),e.NdJ("submitActionEvent",function(){return 
o.import()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.importBootstrapForm),e.xp6(11),e.Q6J("ngIf",o.importBootstrapForm.showError("siteName",i,"required")),e.xp6(6),e.Q6J("ngForOf",o.directions),e.xp6(4),e.Q6J("ngForOf",o.pools),e.xp6(1),e.Q6J("ngIf",o.importBootstrapForm.showError("pools",i,"requirePool")),e.xp6(6),e.Q6J("ngIf",o.importBootstrapForm.showError("token",i,"required")),e.xp6(1),e.Q6J("ngIf",o.importBootstrapForm.showError("token",i,"invalidToken")),e.xp6(2),e.Q6J("form",o.importBootstrapForm)("submitText",o.actionLabels.SUBMIT)}},directives:[R.z,r._Y,r.JL,v.V,r.sg,g.P,f.o,r.Fj,h.b,r.JJ,r.u,le.U,l.O5,r.EJ,l.sg,r.x0,O.p,r.YN,r.Kr,r.Wl],styles:[""]}),n})(),pi=(()=>{class n{constructor(t,o,i,s){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=s,this.createForm()}createForm(){this.editSiteNameForm=new M.d({siteName:new r.NI("",{})})}ngOnInit(){this.editSiteNameForm.get("siteName").setValue(this.siteName),this.rbdMirroringService.getSiteName().subscribe(t=>{this.editSiteNameForm.get("siteName").setValue(t.site_name)})}update(){this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/mirroring/site_name/edit",{}),call:this.rbdMirroringService.setSiteName(this.editSiteNameForm.getValue("siteName"))}).subscribe({error:()=>this.editSiteNameForm.setErrors({cdSubmitButton:!0}),complete:()=>{this.rbdMirroringService.refresh(),this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(I.Kz),e.Y36(D.p4),e.Y36(K),e.Y36(Q.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-edit-site-mode-modal"]],decls:17,vars:4,consts:function(){let _,t,o,i;return _="Edit site name",t="Edit the site name and click\xA0 " + "\ufffd#10\ufffd" + "Update" + "\ufffd/#10\ufffd" + ".",o="Site 
Name",i="Name...",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","editSiteNameForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","siteName",1,"col-form-label","required"],o,["type","text","placeholder",i,"id","siteName","name","siteName","formControlName","siteName","autofocus","",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(11,"div",8),e.TgZ(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.qZA(),e.qZA(),e.TgZ(15,"div",12),e.TgZ(16,"cd-form-button-panel",13),e.NdJ("submitActionEvent",function(){return o.update()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.editSiteNameForm),e.xp6(12),e.Q6J("form",o.editSiteNameForm)("submitText",o.actionLabels.UPDATE))},directives:[R.z,r._Y,r.JL,v.V,r.sg,g.P,f.o,r.Fj,h.b,r.JJ,r.u,le.U,O.p],styles:[""]}),n})();var U=p(69158),gi=p(58111);let We=(()=>{class n{transform(t){return"warning"===t?"badge badge-warning":"error"===t?"badge badge-danger":"success"===t?"badge badge-success":"badge badge-info"}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275pipe=e.Yjl({name:"mirrorHealthColor",type:n,pure:!0}),n})();const ui=["healthTmpl"];function mi(n,_){if(1&n&&(e.TgZ(0,"span",2),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=_.value;e.Q6J("ngClass",e.lcZ(1,2,_.row.health_color)),e.xp6(2),e.Oqu(o)}}let Ti=(()=>{class n{constructor(t,o){this.rbdMirroringService=t,this.cephShortVersionPipe=o,this.tableStatus=new 
U.E}ngOnInit(){this.columns=[{prop:"instance_id",name:"Instance",flexGrow:2},{prop:"id",name:"ID",flexGrow:2},{prop:"server_hostname",name:"Hostname",flexGrow:2},{prop:"version",name:"Version",pipe:this.cephShortVersionPipe,flexGrow:2},{prop:"health",name:"Health",cellTemplate:this.healthTmpl,flexGrow:1}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.data=t.content_data.daemons,this.tableStatus=new U.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(K),e.Y36(gi.F))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-daemons"]],viewQuery:function(t,o){if(1&t&&e.Gf(ui,7),2&t){let i;e.iGM(i=e.CRH())&&(o.healthTmpl=i.first)}},decls:3,vars:4,consts:[["columnMode","flex",3,"data","columns","autoReload","status","fetchData"],["healthTmpl",""],[3,"ngClass"]],template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()}),e.qZA(),e.YNc(1,mi,3,4,"ng-template",null,1,e.W1O)),2&t&&e.Q6J("data",o.data)("columns",o.columns)("autoReload",-1)("status",o.tableStatus)},directives:[ee.a,l.mk],pipes:[We],styles:[""]}),n})();var fi=p(18891);class Ci{}function Si(n,_){if(1&n&&(e.TgZ(0,"option",16),e._uU(1),e.qZA()),2&n){const t=_.$implicit;e.Q6J("value",t.id),e.xp6(1),e.Oqu(t.name)}}function Ei(n,_){1&n&&(e.TgZ(0,"span",17),e.SDv(1,18),e.qZA())}let Ri=(()=>{class n{constructor(t,o,i,s){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=s,this.bsConfig={containerClass:"theme-default"},this.peerExists=!1,this.mirrorModes=[{id:"disabled",name:"Disabled"},{id:"pool",name:"Pool"},{id:"image",name:"Image"}],this.createForm()}createForm(){this.editModeForm=new M.d({mirrorMode:new 
r.NI("",{validators:[r.kI.required,this.validateMode.bind(this)]})})}ngOnInit(){this.pattern=`${this.poolName}`,this.rbdMirroringService.getPool(this.poolName).subscribe(t=>{this.setResponse(t)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.peerExists=!1;const i=t.content_data.pools.find(s=>this.poolName===s.name);this.peerExists=i&&i.peer_uuids.length})}ngOnDestroy(){this.subs.unsubscribe()}validateMode(t){return"disabled"===t.value&&this.peerExists?{cannotDisable:{value:t.value}}:null}setResponse(t){this.editModeForm.get("mirrorMode").setValue(t.mirror_mode)}update(){const t=new Ci;t.mirror_mode=this.editModeForm.getValue("mirrorMode"),this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/mirroring/pool/edit",{pool_name:this.poolName}),call:this.rbdMirroringService.updatePool(this.poolName,t)}).subscribe({error:()=>this.editModeForm.setErrors({cdSubmitButton:!0}),complete:()=>{this.rbdMirroringService.refresh(),this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(I.Kz),e.Y36(D.p4),e.Y36(K),e.Y36(Q.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-pool-edit-mode-modal"]],decls:21,vars:7,consts:function(){let _,t,o,i;return _="Edit pool mirror mode",t="To edit the mirror mode for pool\xA0 " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd0\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ", select a new mode from the list and click\xA0 " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Update" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ".",t=e.Zx4(t),o="Mode",i="Peer clusters must be removed prior to disabling 
mirror.",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","editModeForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","mirrorMode",1,"col-form-label"],o,["id","mirrorMode","name","mirrorMode","formControlName","mirrorMode",1,"form-control"],[3,"value",4,"ngFor","ngForOf"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[3,"value"],[1,"invalid-feedback"],i]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd"),e._UZ(11,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(12,"div",8),e.TgZ(13,"label",9),e.TgZ(14,"span"),e.SDv(15,10),e.qZA(),e.qZA(),e.TgZ(16,"select",11),e.YNc(17,Si,2,2,"option",12),e.qZA(),e.YNc(18,Ei,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(19,"div",14),e.TgZ(20,"cd-form-button-panel",15),e.NdJ("submitActionEvent",function(){return o.update()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.editModeForm),e.xp6(7),e.pQV(o.poolName),e.QtT(9),e.xp6(6),e.Q6J("ngForOf",o.mirrorModes),e.xp6(1),e.Q6J("ngIf",o.editModeForm.showError("mirrorMode",i,"cannotDisable")),e.xp6(2),e.Q6J("form",o.editModeForm)("submitText",o.actionLabels.UPDATE)}},directives:[R.z,r._Y,r.JL,v.V,r.sg,g.P,f.o,r.EJ,h.b,r.JJ,r.u,l.sg,l.O5,O.p,r.YN,r.Kr],styles:[""]}),n})();class Mi{}function Oi(n,_){1&n&&(e.TgZ(0,"span",24),e.SDv(1,25),e.qZA())}function Ai(n,_){1&n&&(e.TgZ(0,"span",24),e.SDv(1,26),e.qZA())}function hi(n,_){1&n&&(e.TgZ(0,"span",24),e.SDv(1,27),e.qZA())}function Pi(n,_){1&n&&(e.TgZ(0,"span",24),e.SDv(1,28),e.qZA())}function Ii(n,_){1&n&&(e.TgZ(0,"span",24),e.SDv(1,29),e.qZA())}function bi(n,_){1&n&&(e.TgZ(0,"span",24),e.SDv(1,30),e.qZA())}let Ni=(()=>{class 
n{constructor(t,o,i,s){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=s,this.bsConfig={containerClass:"theme-default"},this.createForm()}createForm(){this.editPeerForm=new M.d({clusterName:new r.NI("",{validators:[r.kI.required,this.validateClusterName]}),clientID:new r.NI("",{validators:[r.kI.required,this.validateClientID]}),monAddr:new r.NI("",{validators:[this.validateMonAddr]}),key:new r.NI("",{validators:[this.validateKey]})})}ngOnInit(){this.pattern=`${this.poolName}/${this.peerUUID}`,"edit"===this.mode&&this.rbdMirroringService.getPeer(this.poolName,this.peerUUID).subscribe(t=>{this.setResponse(t)})}validateClusterName(t){if(!t.value.match(/^[\w\-_]*$/))return{invalidClusterName:{value:t.value}}}validateClientID(t){if(!t.value.match(/^(?!client\.)[\w\-_.]*$/))return{invalidClientID:{value:t.value}}}validateMonAddr(t){if(!t.value.match(/^[,; ]*([\w.\-_\[\]]+(:[\d]+)?[,; ]*)*$/))return{invalidMonAddr:{value:t.value}}}validateKey(t){try{if(""===t.value||atob(t.value))return null}catch(o){}return{invalidKey:{value:t.value}}}setResponse(t){this.response=t,this.editPeerForm.get("clusterName").setValue(t.cluster_name),this.editPeerForm.get("clientID").setValue(t.client_id),this.editPeerForm.get("monAddr").setValue(t.mon_host),this.editPeerForm.get("key").setValue(t.key)}update(){const t=new Mi;let o;t.cluster_name=this.editPeerForm.getValue("clusterName"),t.client_id=this.editPeerForm.getValue("clientID"),t.mon_host=this.editPeerForm.getValue("monAddr"),t.key=this.editPeerForm.getValue("key"),o=this.taskWrapper.wrapTaskAroundCall("edit"===this.mode?{task:new F.R("rbd/mirroring/peer/edit",{pool_name:this.poolName}),call:this.rbdMirroringService.updatePeer(this.poolName,this.peerUUID,t)}:{task:new 
F.R("rbd/mirroring/peer/add",{pool_name:this.poolName}),call:this.rbdMirroringService.addPeer(this.poolName,t)}),o.subscribe({error:()=>this.editPeerForm.setErrors({cdSubmitButton:!0}),complete:()=>{this.rbdMirroringService.refresh(),this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(I.Kz),e.Y36(D.p4),e.Y36(K),e.Y36(Q.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-pool-edit-peer-modal"]],decls:38,vars:13,consts:function(){let _,t,o,i,s,a,d,c,u,S,N,P,$,G,X,J,te,A;return _="{VAR_SELECT, select, edit {Edit} other {Add}}",_=e.Zx4(_,{VAR_SELECT:"\ufffd0\ufffd"}),t="" + _ + " pool mirror peer",o="{VAR_SELECT, select, edit {Edit} other {Add}}",o=e.Zx4(o,{VAR_SELECT:"\ufffd0\ufffd"}),i="" + o + " the pool mirror peer attributes for pool " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd1\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + " and click " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Submit" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ".",i=e.Zx4(i),s="Cluster Name",a="Name...",d="CephX ID",c="CephX ID...",u="Monitor Addresses",S="Comma-delimited addresses...",N="CephX Key",P="Base64-encoded key...",$="This field is required.",G="The cluster name is not valid.",X="This field is required.",J="The CephX ID is not valid.",te="The monitory address is not valid.",A="CephX key must be base64 
encoded.",[[3,"modalRef"],[1,"modal-title"],t,[1,"modal-content"],["name","editPeerForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],i,[1,"form-group"],["for","clusterName",1,"col-form-label","required"],s,["type","text","placeholder",a,"id","clusterName","name","clusterName","formControlName","clusterName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","clientID",1,"col-form-label","required"],d,["type","text","placeholder",c,"id","clientID","name","clientID","formControlName","clientID",1,"form-control"],["for","monAddr",1,"col-form-label"],u,["type","text","placeholder",S,"id","monAddr","name","monAddr","formControlName","monAddr",1,"form-control"],["for","key",1,"col-form-label"],N,["type","text","placeholder",P,"id","key","name","key","formControlName","key",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],$,G,X,J,te,A]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.TgZ(1,"span",1),e.SDv(2,2),e.qZA(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.TgZ(8,"span"),e.tHW(9,7),e._UZ(10,"kbd"),e._UZ(11,"kbd"),e.N_p(),e.qZA(),e.qZA(),e.TgZ(12,"div",8),e.TgZ(13,"label",9),e.SDv(14,10),e.qZA(),e._UZ(15,"input",11),e.YNc(16,Oi,2,0,"span",12),e.YNc(17,Ai,2,0,"span",12),e.qZA(),e.TgZ(18,"div",8),e.TgZ(19,"label",13),e.SDv(20,14),e.qZA(),e._UZ(21,"input",15),e.YNc(22,hi,2,0,"span",12),e.YNc(23,Pi,2,0,"span",12),e.qZA(),e.TgZ(24,"div",8),e.TgZ(25,"label",16),e.TgZ(26,"span"),e.SDv(27,17),e.qZA(),e.qZA(),e._UZ(28,"input",18),e.YNc(29,Ii,2,0,"span",12),e.qZA(),e.TgZ(30,"div",8),e.TgZ(31,"label",19),e.TgZ(32,"span"),e.SDv(33,20),e.qZA(),e.qZA(),e._UZ(34,"input",21),e.YNc(35,bi,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(36,"div",22),e.TgZ(37,"cd-form-button-panel",23),e.NdJ("submitActionEvent",function(){return o.update()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const 
i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(2),e.pQV(o.mode),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.editPeerForm),e.xp6(7),e.pQV(o.mode)(o.poolName),e.QtT(9),e.xp6(5),e.Q6J("ngIf",o.editPeerForm.showError("clusterName",i,"required")),e.xp6(1),e.Q6J("ngIf",o.editPeerForm.showError("clusterName",i,"invalidClusterName")),e.xp6(5),e.Q6J("ngIf",o.editPeerForm.showError("clientID",i,"required")),e.xp6(1),e.Q6J("ngIf",o.editPeerForm.showError("clientID",i,"invalidClientID")),e.xp6(6),e.Q6J("ngIf",o.editPeerForm.showError("monAddr",i,"invalidMonAddr")),e.xp6(6),e.Q6J("ngIf",o.editPeerForm.showError("key",i,"invalidKey")),e.xp6(2),e.Q6J("form",o.editPeerForm)("submitText",o.actionLabels.SUBMIT)}},directives:[R.z,r._Y,r.JL,v.V,r.sg,g.P,f.o,r.Fj,h.b,r.JJ,r.u,le.U,l.O5,O.p],styles:[""]}),n})();const Di=["healthTmpl"];function vi(n,_){if(1&n&&(e.TgZ(0,"span",3),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=_.value;e.Q6J("ngClass",e.lcZ(1,2,_.row.health_color)),e.xp6(2),e.Oqu(o)}}let Li=(()=>{class n{constructor(t,o,i,s){this.authStorageService=t,this.rbdMirroringService=o,this.modalService=i,this.taskWrapper=s,this.selection=new Ee.r,this.tableStatus=new U.E,this.data=[],this.permission=this.authStorageService.getPermissions().rbdMirroring;const a={permission:"update",icon:T.P.edit,click:()=>this.editModeModal(),name:"Edit Mode",canBePrimary:()=>!0},d={permission:"create",icon:T.P.add,name:"Add Peer",click:()=>this.editPeersModal("add"),disable:()=>!this.selection.first()||"disabled"===this.selection.first().mirror_mode,visible:()=>!this.getPeerUUID(),canBePrimary:()=>!1},c={permission:"update",icon:T.P.exchange,name:"Edit Peer",click:()=>this.editPeersModal("edit"),visible:()=>!!this.getPeerUUID()},u={permission:"delete",icon:T.P.destroy,name:"Delete 
Peer",click:()=>this.deletePeersModal(),visible:()=>!!this.getPeerUUID()};this.tableActions=[a,d,c,u]}ngOnInit(){this.columns=[{prop:"name",name:"Name",flexGrow:2},{prop:"mirror_mode",name:"Mode",flexGrow:2},{prop:"leader_id",name:"Leader",flexGrow:2},{prop:"image_local_count",name:"# Local",flexGrow:2},{prop:"image_remote_count",name:"# Remote",flexGrow:2},{prop:"health",name:"Health",cellTemplate:this.healthTmpl,flexGrow:1}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.data=t.content_data.pools,this.tableStatus=new U.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}editModeModal(){const t={poolName:this.selection.first().name};this.modalRef=this.modalService.show(Ri,t)}editPeersModal(t){const o={poolName:this.selection.first().name,mode:t};"edit"===t&&(o.peerUUID=this.getPeerUUID()),this.modalRef=this.modalService.show(Ni,o)}deletePeersModal(){const t=this.selection.first().name,o=this.getPeerUUID();this.modalRef=this.modalService.show(he.M,{itemDescription:"mirror peer",itemNames:[`${t} (${o})`],submitActionObservable:()=>new fi.y(i=>{this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/mirroring/peer/delete",{pool_name:t}),call:this.rbdMirroringService.deletePeer(t,o)}).subscribe({error:s=>i.error(s),complete:()=>{this.rbdMirroringService.refresh(),i.complete()}})})})}getPeerUUID(){const t=this.selection.first(),o=this.data.find(i=>t&&t.name===i.name);if(o&&o.peer_uuids)return o.peer_uuids[0]}updateSelection(t){this.selection=t}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(K),e.Y36(re.Z),e.Y36(Q.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-pools"]],viewQuery:function(t,o){if(1&t&&e.Gf(Di,7),2&t){let 
i;e.iGM(i=e.CRH())&&(o.healthTmpl=i.first)}},decls:4,vars:7,consts:[["columnMode","flex","identifier","name","forceIdentifier","true","selectionType","single",3,"data","columns","autoReload","status","fetchData","updateSelection"],[1,"table-actions",3,"permission","selection","tableActions"],["healthTmpl",""],[3,"ngClass"]],template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()})("updateSelection",function(s){return o.updateSelection(s)}),e._UZ(1,"cd-table-actions",1),e.qZA(),e.YNc(2,vi,3,4,"ng-template",null,2,e.W1O)),2&t&&(e.Q6J("data",o.data)("columns",o.columns)("autoReload",-1)("status",o.tableStatus),e.xp6(1),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[ee.a,Re.K,l.mk],pipes:[We],styles:[""]}),n})();var mt=p(59376);const Fi=["stateTmpl"],$i=["syncTmpl"],Zi=["progressTmpl"];function Bi(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",14),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().refresh()}),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.image_error.data)("columns",t.image_error.columns)("autoReload",-1)("status",t.tableStatus)}}function Gi(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",14),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().refresh()}),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.image_syncing.data)("columns",t.image_syncing.columns)("autoReload",-1)("status",t.tableStatus)}}function yi(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",14),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().refresh()}),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.image_ready.data)("columns",t.image_ready.columns)("autoReload",-1)("status",t.tableStatus)}}function xi(n,_){if(1&n&&(e.TgZ(0,"span",15),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=_.value;e.Q6J("ngClass",e.lcZ(1,2,_.row.state_color)),e.xp6(2),e.Oqu(o)}}function wi(n,_){1&n&&(e.TgZ(0,"span",16),e.SDv(1,17),e.qZA())}function 
qi(n,_){1&n&&e._UZ(0,"ngb-progressbar",18),2&n&&e.Q6J("value",_.value)("showValue",!0)}let Hi=(()=>{class n{constructor(t){this.rbdMirroringService=t,this.image_error={data:[],columns:{}},this.image_syncing={data:[],columns:{}},this.image_ready={data:[],columns:{}},this.tableStatus=new U.E}ngOnInit(){this.image_error.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"description",name:"Issue",flexGrow:4},{prop:"state",name:"State",cellTemplate:this.stateTmpl,flexGrow:1}],this.image_syncing.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"progress",name:"Progress",cellTemplate:this.progressTmpl,flexGrow:2},{prop:"state",name:"State",cellTemplate:this.syncTmpl,flexGrow:1}],this.image_ready.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"description",name:"Description",flexGrow:4},{prop:"state",name:"State",cellTemplate:this.stateTmpl,flexGrow:1}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.image_error.data=t.content_data.image_error,this.image_syncing.data=t.content_data.image_syncing,this.image_ready.data=t.content_data.image_ready,this.tableStatus=new U.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(K))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-images"]],viewQuery:function(t,o){if(1&t&&(e.Gf(Fi,7),e.Gf($i,7),e.Gf(Zi,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.stateTmpl=i.first),e.iGM(i=e.CRH())&&(o.syncTmpl=i.first),e.iGM(i=e.CRH())&&(o.progressTmpl=i.first)}},decls:21,vars:1,consts:function(){let _,t,o,i;return 
_="Issues",t="Syncing",o="Ready",i="Syncing",[["ngbNav","","cdStatefulTab","image-list",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","issues"],["ngbNavLink",""],_,["ngbNavContent",""],["ngbNavItem","syncing"],t,["ngbNavItem","ready"],o,[3,"ngbNavOutlet"],["stateTmpl",""],["syncTmpl",""],["progressTmpl",""],["columnMode","flex",3,"data","columns","autoReload","status","fetchData"],[3,"ngClass"],[1,"badge","badge-info"],i,["type","info",3,"value","showValue"]]},template:function(t,o){if(1&t&&(e.TgZ(0,"ul",0,1),e.TgZ(2,"li",2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,Bi,1,4,"ng-template",5),e.qZA(),e.TgZ(6,"li",6),e.TgZ(7,"a",3),e.SDv(8,7),e.qZA(),e.YNc(9,Gi,1,4,"ng-template",5),e.qZA(),e.TgZ(10,"li",8),e.TgZ(11,"a",3),e.SDv(12,9),e.qZA(),e.YNc(13,yi,1,4,"ng-template",5),e.qZA(),e.qZA(),e._UZ(14,"div",10),e.YNc(15,xi,3,4,"ng-template",null,11,e.W1O),e.YNc(17,wi,2,0,"ng-template",null,12,e.W1O),e.YNc(19,qi,1,2,"ng-template",null,13,e.W1O)),2&t){const i=e.MAs(1);e.xp6(14),e.Q6J("ngbNavOutlet",i)}},directives:[I.Pz,mt.m,I.nv,I.Vx,I.uN,I.tO,ee.a,l.mk,I.Ly],pipes:[We],styles:[""]}),n})(),Ki=(()=>{class n{constructor(t,o,i){this.authStorageService=t,this.rbdMirroringService=o,this.modalService=i,this.selection=new Ee.r,this.peersExist=!0,this.subs=new Yn.w,this.permission=this.authStorageService.getPermissions().rbdMirroring;const s={permission:"update",icon:T.P.edit,click:()=>this.editSiteNameModal(),name:"Edit Site Name",canBePrimary:()=>!0,disable:()=>!1},a={permission:"update",icon:T.P.upload,click:()=>this.createBootstrapModal(),name:"Create Bootstrap Token",disable:()=>!1},d={permission:"update",icon:T.P.download,click:()=>this.importBootstrapModal(),name:"Import Bootstrap 
Token",disable:()=>this.peersExist};this.tableActions=[s,a,d]}ngOnInit(){this.subs.add(this.rbdMirroringService.startPolling()),this.subs.add(this.rbdMirroringService.subscribeSummary(t=>{this.status=t.content_data.status,this.siteName=t.site_name,this.peersExist=!!t.content_data.pools.find(o=>o.peer_uuids.length>0)}))}ngOnDestroy(){this.subs.unsubscribe()}editSiteNameModal(){this.modalRef=this.modalService.show(pi,{siteName:this.siteName})}createBootstrapModal(){this.modalRef=this.modalService.show(ii,{siteName:this.siteName})}importBootstrapModal(){this.modalRef=this.modalService.show(di,{siteName:this.siteName})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(K),e.Y36(re.Z))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring"]],decls:21,vars:4,consts:function(){let _,t,o,i;return _="Site Name:",t="Daemons",o="Pools",i="Images",[[1,"row"],[1,"col-md-12"],_,[1,"table-actions","float-right",3,"permission","selection","tableActions"],[1,"col-sm-6"],t,o,i]},template:function(t,o){1&t&&(e.TgZ(0,"div",0),e.TgZ(1,"div",1),e.TgZ(2,"span"),e.TgZ(3,"strong"),e.SDv(4,2),e.qZA(),e._uU(5),e.qZA(),e._UZ(6,"cd-table-actions",3),e.qZA(),e.qZA(),e.TgZ(7,"div",0),e.TgZ(8,"div",4),e.TgZ(9,"legend"),e.SDv(10,5),e.qZA(),e._UZ(11,"cd-mirroring-daemons"),e.qZA(),e.TgZ(12,"div",4),e.TgZ(13,"legend"),e.SDv(14,6),e.qZA(),e._UZ(15,"cd-mirroring-pools"),e.qZA(),e.qZA(),e.TgZ(16,"div",0),e.TgZ(17,"div",1),e.TgZ(18,"legend"),e.SDv(19,7),e.qZA(),e._UZ(20,"cd-mirroring-images"),e.qZA(),e.qZA()),2&t&&(e.xp6(5),e.hij(" ",o.siteName,""),e.xp6(1),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[Re.K,Ti,Li,Hi],styles:[""]}),n})();var Tt=p(80226),ki=p(28049),Xi=p(43190),Ke=p(80842),et=p(30633),Fe=p(47557),Qi=p(28211);class zi{}var Pe=(()=>{return(n=Pe||(Pe={}))[n.V1=1]="V1",n[n.V2=2]="V2",Pe;var n})();class Ji{constructor(){this.features=[]}}class Yi{constructor(){this.features=[]}}class Ui extends 
class{}{constructor(){super(...arguments),this.features=[]}}class ji{constructor(){this.features=[]}}var ke=(()=>{return(n=ke||(ke={})).editing="editing",n.cloning="cloning",n.copying="copying",ke;var n})(),Wi=p(17932),e_=p(54555),t_=p(18372);function o_(n,_){if(1&n&&(e.TgZ(0,"div",9),e.TgZ(1,"label",56),e.SDv(2,57),e.ALo(3,"titlecase"),e.qZA(),e.TgZ(4,"div",12),e._UZ(5,"input",58),e._UZ(6,"hr"),e.qZA(),e.qZA()),2&n){const t=e.oxw(2);e.xp6(3),e.pQV(e.lcZ(3,1,t.action)),e.QtT(2)}}function n_(n,_){1&n&&(e.TgZ(0,"span",59),e.ynx(1),e.SDv(2,60),e.BQk(),e.qZA())}function i_(n,_){1&n&&(e.TgZ(0,"span",59),e.ynx(1),e.SDv(2,61),e.BQk(),e.qZA())}function __(n,_){1&n&&e._UZ(0,"input",62)}function s_(n,_){1&n&&(e.TgZ(0,"option",48),e.SDv(1,65),e.qZA()),2&n&&e.Q6J("ngValue",null)}function a_(n,_){1&n&&(e.TgZ(0,"option",48),e.SDv(1,66),e.qZA()),2&n&&e.Q6J("ngValue",null)}function r_(n,_){1&n&&(e.TgZ(0,"option",48),e.SDv(1,67),e.qZA()),2&n&&e.Q6J("ngValue",null)}function l_(n,_){if(1&n&&(e.TgZ(0,"option",68),e._uU(1),e.qZA()),2&n){const t=_.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function c_(n,_){if(1&n&&(e.TgZ(0,"select",63),e.YNc(1,s_,2,1,"option",64),e.YNc(2,a_,2,1,"option",64),e.YNc(3,r_,2,1,"option",64),e.YNc(4,l_,2,2,"option",44),e.qZA()),2&n){const t=e.oxw(2);e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.pools)}}function d_(n,_){1&n&&(e.TgZ(0,"span",59),e.SDv(1,69),e.qZA())}const p_=function(n,_){return[n,_]};function g_(n,_){if(1&n&&(e.TgZ(0,"div",9),e.TgZ(1,"div",20),e._UZ(2,"i",70),e.qZA(),e.qZA()),2&n){const t=e.oxw(2);e.xp6(2),e.Q6J("ngClass",e.WLB(1,p_,t.icons.spinner,t.icons.spin))}}function u_(n,_){1&n&&e._UZ(0,"input",74)}function m_(n,_){1&n&&(e.TgZ(0,"option",48),e.SDv(1,76),e.qZA()),2&n&&e.Q6J("ngValue",null)}function 
T_(n,_){1&n&&(e.TgZ(0,"option",48),e.SDv(1,77),e.qZA()),2&n&&e.Q6J("ngValue",null)}function f_(n,_){1&n&&(e.TgZ(0,"option",48),e.SDv(1,78),e.qZA()),2&n&&e.Q6J("ngValue",null)}function C_(n,_){if(1&n&&(e.TgZ(0,"option",68),e._uU(1),e.qZA()),2&n){const t=_.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function S_(n,_){if(1&n&&(e.TgZ(0,"select",75),e.YNc(1,m_,2,1,"option",64),e.YNc(2,T_,2,1,"option",64),e.YNc(3,f_,2,1,"option",64),e.YNc(4,C_,2,2,"option",44),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.namespaces)}}function E_(n,_){if(1&n&&(e.TgZ(0,"div",9),e.TgZ(1,"label",71),e._uU(2," Namespace "),e.qZA(),e.TgZ(3,"div",12),e.YNc(4,u_,1,0,"input",72),e.YNc(5,S_,5,4,"select",73),e.qZA(),e.qZA()),2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("ngIf","editing"===t.mode||!t.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==t.mode&&t.poolPermission.read)}}function R_(n,_){1&n&&(e.TgZ(0,"cd-helper"),e.TgZ(1,"span"),e.SDv(2,79),e.qZA(),e.qZA())}function M_(n,_){1&n&&e._UZ(0,"input",85)}function O_(n,_){1&n&&(e.TgZ(0,"option",48),e.SDv(1,87),e.qZA()),2&n&&e.Q6J("ngValue",null)}function A_(n,_){1&n&&(e.TgZ(0,"option",48),e.SDv(1,88),e.qZA()),2&n&&e.Q6J("ngValue",null)}function h_(n,_){1&n&&(e.TgZ(0,"option",48),e._uU(1,"-- Select a data pool -- "),e.qZA()),2&n&&e.Q6J("ngValue",null)}function P_(n,_){if(1&n&&(e.TgZ(0,"option",68),e._uU(1),e.qZA()),2&n){const t=_.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function I_(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"select",86),e.NdJ("change",function(i){return e.CHM(t),e.oxw(3).onDataPoolChange(i.target.value)}),e.YNc(1,O_,2,1,"option",64),e.YNc(2,A_,2,1,"option",64),e.YNc(3,h_,2,1,"option",64),e.YNc(4,P_,2,2,"option",44),e.qZA()}if(2&n){const 
t=e.oxw(3);e.xp6(1),e.Q6J("ngIf",null===t.dataPools),e.xp6(1),e.Q6J("ngIf",null!==t.dataPools&&0===t.dataPools.length),e.xp6(1),e.Q6J("ngIf",null!==t.dataPools&&t.dataPools.length>0),e.xp6(1),e.Q6J("ngForOf",t.dataPools)}}function b_(n,_){1&n&&(e.TgZ(0,"span",59),e.SDv(1,89),e.qZA())}const Xe=function(n){return{required:n}};function N_(n,_){if(1&n&&(e.TgZ(0,"div",9),e.TgZ(1,"label",80),e.TgZ(2,"span",70),e.SDv(3,81),e.qZA(),e._UZ(4,"cd-helper",82),e.qZA(),e.TgZ(5,"div",12),e.YNc(6,M_,1,0,"input",83),e.YNc(7,I_,5,4,"select",84),e.YNc(8,b_,2,0,"span",14),e.qZA(),e.qZA()),2&n){e.oxw();const t=e.MAs(2),o=e.oxw();e.xp6(2),e.Q6J("ngClass",e.VKq(4,Xe,"editing"!==o.mode)),e.xp6(4),e.Q6J("ngIf","editing"===o.mode||!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("dataPool",t,"required"))}}function D_(n,_){1&n&&(e.TgZ(0,"span",59),e.SDv(1,90),e.qZA())}function v_(n,_){1&n&&(e.TgZ(0,"span",59),e.SDv(1,91),e.qZA())}function L_(n,_){if(1&n&&e._UZ(0,"cd-helper",95),2&n){const t=e.oxw().$implicit;e.s9C("html",t.helperHtml)}}function F_(n,_){if(1&n&&(e.TgZ(0,"div",21),e._UZ(1,"input",92),e.TgZ(2,"label",93),e._uU(3),e.qZA(),e.YNc(4,L_,1,1,"cd-helper",94),e.qZA()),2&n){const t=_.$implicit;e.xp6(1),e.s9C("id",t.key),e.s9C("name",t.key),e.s9C("formControlName",t.key),e.xp6(1),e.s9C("for",t.key),e.xp6(1),e.Oqu(t.desc),e.xp6(1),e.Q6J("ngIf",t.helperHtml)}}function $_(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"a",96),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).advancedEnabled=!0,!1}),e.SDv(1,97),e.qZA()}}function Z_(n,_){if(1&n&&(e.TgZ(0,"option",68),e._uU(1),e.qZA()),2&n){const t=_.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function B_(n,_){if(1&n&&(e.TgZ(0,"option",68),e._uU(1),e.qZA()),2&n){const t=_.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function G_(n,_){1&n&&(e.TgZ(0,"span",59),e.SDv(1,98),e.qZA())}function y_(n,_){1&n&&(e.TgZ(0,"span",59),e.SDv(1,99),e.qZA())}function 
x_(n,_){1&n&&(e.TgZ(0,"span",59),e.SDv(1,100),e.qZA())}function w_(n,_){1&n&&(e.TgZ(0,"span",59),e.SDv(1,101),e.qZA())}function q_(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"div",1),e.TgZ(1,"form",2,3),e.TgZ(3,"div",4),e.TgZ(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.YNc(9,o_,7,3,"div",8),e.TgZ(10,"div",9),e.TgZ(11,"label",10),e.SDv(12,11),e.qZA(),e.TgZ(13,"div",12),e._UZ(14,"input",13),e.YNc(15,n_,3,0,"span",14),e.YNc(16,i_,3,0,"span",14),e.qZA(),e.qZA(),e.TgZ(17,"div",15),e.NdJ("change",function(i){return e.CHM(t),e.oxw().onPoolChange(i.target.value)}),e.TgZ(18,"label",16),e.SDv(19,17),e.qZA(),e.TgZ(20,"div",12),e.YNc(21,__,1,0,"input",18),e.YNc(22,c_,5,4,"select",19),e.YNc(23,d_,2,0,"span",14),e.qZA(),e.qZA(),e.YNc(24,g_,3,4,"div",8),e.YNc(25,E_,6,2,"div",8),e.TgZ(26,"div",9),e.TgZ(27,"div",20),e.TgZ(28,"div",21),e.TgZ(29,"input",22),e.NdJ("change",function(){return e.CHM(t),e.oxw().onUseDataPoolChange()}),e.qZA(),e.TgZ(30,"label",23),e.SDv(31,24),e.qZA(),e.YNc(32,R_,3,0,"cd-helper",25),e.qZA(),e.qZA(),e.qZA(),e.YNc(33,N_,9,6,"div",8),e.TgZ(34,"div",9),e.TgZ(35,"label",26),e.SDv(36,27),e.qZA(),e.TgZ(37,"div",12),e._UZ(38,"input",28),e.YNc(39,D_,2,0,"span",14),e.YNc(40,v_,2,0,"span",14),e.qZA(),e.qZA(),e.TgZ(41,"div",29),e.TgZ(42,"label",30),e.SDv(43,31),e.qZA(),e.TgZ(44,"div",12),e.YNc(45,F_,5,6,"div",32),e.qZA(),e.qZA(),e.TgZ(46,"div",33),e.TgZ(47,"div",34),e.YNc(48,$_,2,0,"a",35),e.qZA(),e.qZA(),e.TgZ(49,"div",36),e.TgZ(50,"legend",37),e.SDv(51,38),e.qZA(),e.TgZ(52,"div",39),e.TgZ(53,"h4",37),e.SDv(54,40),e.qZA(),e.TgZ(55,"div",9),e.TgZ(56,"label",41),e.SDv(57,42),e.qZA(),e.TgZ(58,"div",12),e.TgZ(59,"select",43),e.YNc(60,Z_,2,2,"option",44),e.qZA(),e.qZA(),e.qZA(),e.TgZ(61,"div",9),e.TgZ(62,"label",45),e.SDv(63,46),e.qZA(),e.TgZ(64,"div",12),e.TgZ(65,"select",47),e.TgZ(66,"option",48),e.SDv(67,49),e.qZA(),e.YNc(68,B_,2,2,"option",44),e.qZA(),e.YNc(69,G_,2,0,"span",14),e.YNc(70,y_,2,0,"span",14),e.qZA(),e.qZA(),e.TgZ(7
1,"div",9),e.TgZ(72,"label",50),e.SDv(73,51),e.qZA(),e.TgZ(74,"div",12),e._UZ(75,"input",52),e.YNc(76,x_,2,0,"span",14),e.YNc(77,w_,2,0,"span",14),e.qZA(),e.qZA(),e.qZA(),e.TgZ(78,"cd-rbd-configuration-form",53),e.NdJ("changes",function(i){return e.CHM(t),e.oxw().getDirtyConfigurationValues=i}),e.qZA(),e.qZA(),e.qZA(),e.TgZ(79,"div",54),e.TgZ(80,"cd-form-button-panel",55),e.NdJ("submitActionEvent",function(){return e.CHM(t),e.oxw().submit()}),e.ALo(81,"titlecase"),e.ALo(82,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&n){const t=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.rbdForm),e.xp6(6),e.pQV(e.lcZ(6,32,o.action))(e.lcZ(7,34,o.resource)),e.QtT(5),e.xp6(2),e.Q6J("ngIf",o.rbdForm.getValue("parent")),e.xp6(6),e.Q6J("ngIf",o.rbdForm.showError("name",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("name",t,"pattern")),e.xp6(2),e.Q6J("ngClass",e.VKq(40,Xe,"editing"!==o.mode)),e.xp6(3),e.Q6J("ngIf","editing"===o.mode||!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("pool",t,"required")),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.rbdForm.getValue("pool")&&null===o.namespaces),e.xp6(1),e.Q6J("ngIf","editing"===o.mode&&o.rbdForm.getValue("namespace")||"editing"!==o.mode&&(o.namespaces&&o.namespaces.length>0||!o.poolPermission.read)),e.xp6(7),e.Q6J("ngIf",o.allDataPools.length<=1),e.xp6(1),e.Q6J("ngIf",o.rbdForm.getValue("useDataPool")),e.xp6(6),e.Q6J("ngIf",o.rbdForm.showError("size",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("size",t,"invalidSizeObject")),e.xp6(5),e.Q6J("ngForOf",o.featuresList),e.xp6(3),e.Q6J("ngIf",!o.advancedEnabled),e.xp6(1),e.Q6J("hidden",!o.advancedEnabled),e.xp6(11),e.Q6J("ngForOf",o.objectSizes),e.xp6(2),e.Q6J("ngClass",e.VKq(42,Xe,o.rbdForm.getValue("stripingCount"))),e.xp6(4),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",o.objectSizes),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("stripingUnit",t,"required")),e.xp6(1),e.Q6J("
ngIf",o.rbdForm.showError("stripingUnit",t,"invalidStripingUnit")),e.xp6(2),e.Q6J("ngClass",e.VKq(44,Xe,o.rbdForm.getValue("stripingUnit"))),e.xp6(4),e.Q6J("ngIf",o.rbdForm.showError("stripingCount",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("stripingCount",t,"min")),e.xp6(1),e.Q6J("form",o.rbdForm)("initializeData",o.initializeConfigData),e.xp6(2),e.Q6J("form",t)("submitText",e.lcZ(81,36,o.action)+" "+e.lcZ(82,38,o.resource))}}let $e=(()=>{class n extends q.E{constructor(t,o,i,s,a,d,c,u,S){super(),this.authStorageService=t,this.route=o,this.poolService=i,this.rbdService=s,this.formatter=a,this.taskWrapper=d,this.dimlessBinaryPipe=c,this.actionLabels=u,this.router=S,this.namespaces=[],this.namespacesByPoolCache={},this.pools=null,this.allPools=null,this.dataPools=null,this.allDataPools=[],this.featuresList=[],this.initializeConfigData=new Tt.t(1),this.advancedEnabled=!1,this.rbdFormMode=ke,this.defaultObjectSize="4 MiB",this.objectSizes=["4 KiB","8 KiB","16 KiB","32 KiB","64 KiB","128 KiB","256 KiB","512 KiB","1 MiB","2 MiB","4 MiB","8 MiB","16 MiB","32 MiB"],this.rbdImage=new Tt.t(1),this.icons=T.P,this.routerUrl=this.router.url,this.poolPermission=this.authStorageService.getPermissions().pool,this.resource="RBD",this.features={"deep-flatten":{desc:"Deep flatten",requires:null,allowEnable:!1,allowDisable:!0},layering:{desc:"Layering",requires:null,allowEnable:!1,allowDisable:!1},"exclusive-lock":{desc:"Exclusive lock",requires:null,allowEnable:!0,allowDisable:!0},"object-map":{desc:"Object map (requires exclusive-lock)",requires:"exclusive-lock",allowEnable:!0,allowDisable:!0,initDisabled:!0},journaling:{desc:"Journaling (requires exclusive-lock)",requires:"exclusive-lock",allowEnable:!0,allowDisable:!0,initDisabled:!0},"fast-diff":{desc:"Fast diff (interlocked with 
object-map)",requires:"object-map",allowEnable:!0,allowDisable:!0,interlockedWith:"object-map",initDisabled:!0}},this.featuresList=this.objToArray(this.features),this.createForm()}objToArray(t){return C().map(t,(o,i)=>Object.assign(o,{key:i}))}createForm(){this.rbdForm=new M.d({parent:new r.NI(""),name:new r.NI("",{validators:[r.kI.required,r.kI.pattern(/^[^@/]+?$/)]}),pool:new r.NI(null,{validators:[r.kI.required]}),namespace:new r.NI(null),useDataPool:new r.NI(!1),dataPool:new r.NI(null),size:new r.NI(null,{updateOn:"blur"}),obj_size:new r.NI(this.defaultObjectSize),features:new M.d(this.featuresList.reduce((t,o)=>(t[o.key]=new r.NI({value:!1,disabled:!!o.initDisabled}),t),{})),stripingUnit:new r.NI(null),stripingCount:new r.NI(null,{updateOn:"blur"})},this.validateRbdForm(this.formatter))}disableForEdit(){this.rbdForm.get("parent").disable(),this.rbdForm.get("pool").disable(),this.rbdForm.get("namespace").disable(),this.rbdForm.get("useDataPool").disable(),this.rbdForm.get("dataPool").disable(),this.rbdForm.get("obj_size").disable(),this.rbdForm.get("stripingUnit").disable(),this.rbdForm.get("stripingCount").disable(),this.rbdImage.subscribe(t=>{t.image_format===Pe.V1&&(this.rbdForm.get("deep-flatten").disable(),this.rbdForm.get("layering").disable(),this.rbdForm.get("exclusive-lock").disable())})}disableForClone(){this.rbdForm.get("parent").disable(),this.rbdForm.get("size").disable()}disableForCopy(){this.rbdForm.get("parent").disable(),this.rbdForm.get("size").disable()}ngOnInit(){this.prepareFormForAction(),this.gatherNeededData().subscribe(this.handleExternalData.bind(this))}prepareFormForAction(){const 
t=this.routerUrl;t.startsWith("/block/rbd/edit")?(this.mode=this.rbdFormMode.editing,this.action=this.actionLabels.EDIT,this.disableForEdit()):t.startsWith("/block/rbd/clone")?(this.mode=this.rbdFormMode.cloning,this.disableForClone(),this.action=this.actionLabels.CLONE):t.startsWith("/block/rbd/copy")?(this.mode=this.rbdFormMode.copying,this.action=this.actionLabels.COPY,this.disableForCopy()):this.action=this.actionLabels.CREATE,C().each(this.features,o=>{this.rbdForm.get("features").get(o.key).valueChanges.subscribe(i=>this.featureFormUpdate(o.key,i))})}gatherNeededData(){const t={};return this.mode?this.route.params.subscribe(o=>{const i=Z.N.fromString(decodeURIComponent(o.image_spec));o.snap&&(this.snapName=decodeURIComponent(o.snap)),t.rbd=this.rbdService.get(i)}):t.defaultFeatures=this.rbdService.defaultFeatures(),this.mode!==this.rbdFormMode.editing&&this.poolPermission.read&&(t.pools=this.poolService.list(["pool_name","type","flags_names","application_metadata"])),(0,W.D)(t)}handleExternalData(t){if(this.handlePoolData(t.pools),t.defaultFeatures&&this.setFeatures(t.defaultFeatures),t.rbd){const o=t.rbd;this.setResponse(o,this.snapName),this.rbdImage.next(o)}this.loadingReady()}handlePoolData(t){if(!t)return;const o=[],i=[];for(const s of t)this.rbdService.isRBDPool(s)&&("replicated"===s.type?(o.push(s),i.push(s)):"erasure"===s.type&&-1!==s.flags_names.indexOf("ec_overwrites")&&i.push(s));if(this.pools=o,this.allPools=o,this.dataPools=i,this.allDataPools=i,1===this.pools.length){const s=this.pools[0].pool_name;this.rbdForm.get("pool").setValue(s),this.onPoolChange(s)}this.allDataPools.length<=1&&this.rbdForm.get("useDataPool").disable()}onPoolChange(t){const o=this.rbdForm.get("dataPool");o.value===t&&o.setValue(null),this.dataPools=this.allDataPools?this.allDataPools.filter(i=>i.pool_name!==t):[],this.namespaces=null,t in 
this.namespacesByPoolCache?this.namespaces=this.namespacesByPoolCache[t]:this.rbdService.listNamespaces(t).subscribe(i=>{i=i.map(s=>s.namespace),this.namespacesByPoolCache[t]=i,this.namespaces=i}),this.rbdForm.get("namespace").setValue(null)}onUseDataPoolChange(){this.rbdForm.getValue("useDataPool")||(this.rbdForm.get("dataPool").setValue(null),this.onDataPoolChange(null))}onDataPoolChange(t){const o=this.allPools.filter(i=>i.pool_name!==t);this.rbdForm.getValue("pool")===t&&this.rbdForm.get("pool").setValue(null),this.pools=o}validateRbdForm(t){return o=>{const i=o.get("useDataPool"),s=o.get("dataPool");let a=null;i.value&&null==s.value&&(a={required:!0}),s.setErrors(a);const d=o.get("size"),c=o.get("obj_size"),u=t.toBytes(null!=c.value?c.value:this.defaultObjectSize),S=o.get("stripingCount"),N=null!=S.value?S.value:1;let P=null;null===d.value?P={required:!0}:N*u>t.toBytes(d.value)&&(P={invalidSizeObject:!0}),d.setErrors(P);const $=o.get("stripingUnit");let G=null;null===$.value&&null!==S.value?G={required:!0}:null!==$.value&&t.toBytes($.value)>u&&(G={invalidStripingUnit:!0}),$.setErrors(G);let X=null;return null===S.value&&null!==$.value?X={required:!0}:N<1&&(X={min:!0}),S.setErrors(X),null}}deepBoxCheck(t,o){this.getDependentChildFeatures(t).forEach(s=>{const a=this.rbdForm.get(s.key);o?a.enable({emitEvent:!1}):(a.disable({emitEvent:!1}),a.setValue(!1,{emitEvent:!1}),this.deepBoxCheck(s.key,o));const d=this.rbdForm.get("features");this.mode===this.rbdFormMode.editing&&d.get(s.key).enabled&&(-1!==this.response.features_name.indexOf(s.key)&&!s.allowDisable||-1===this.response.features_name.indexOf(s.key)&&!s.allowEnable)&&d.get(s.key).disable()})}getDependentChildFeatures(t){return C().filter(this.features,o=>o.requires===t)||[]}interlockCheck(t,o){const i=this.featuresList.find(s=>s.key===t);if(this.response){const 
s=null!=i.interlockedWith,a=this.featuresList.find(c=>c.interlockedWith===i.key),d=!!this.response.features_name.find(c=>c===i.key);if(s){if(d!==!!this.response.features_name.find(u=>u===i.interlockedWith))return}else if(a&&!!this.response.features_name.find(u=>u===a.key)!==d)return}o?C().filter(this.features,s=>s.interlockedWith===t).forEach(s=>this.rbdForm.get(s.key).setValue(!0,{emitEvent:!1})):i.interlockedWith&&this.rbdForm.get("features").get(i.interlockedWith).setValue(!1)}featureFormUpdate(t,o){if(o){const i=this.features[t].requires;if(i&&!this.rbdForm.getValue(i))return void this.rbdForm.get(`features.${t}`).setValue(!1)}this.deepBoxCheck(t,o),this.interlockCheck(t,o)}setFeatures(t){const o=this.rbdForm.get("features");C().forIn(this.features,i=>{-1!==t.indexOf(i.key)&&o.get(i.key).setValue(!0),this.featureFormUpdate(i.key,o.get(i.key).value)})}setResponse(t,o){this.response=t;const i=new Z.N(t.pool_name,t.namespace,t.name).toString();if(this.mode===this.rbdFormMode.cloning)this.rbdForm.get("parent").setValue(`${i}@${o}`);else if(this.mode===this.rbdFormMode.copying)o?this.rbdForm.get("parent").setValue(`${i}@${o}`):this.rbdForm.get("parent").setValue(`${i}`);else if(t.parent){const 
s=t.parent;this.rbdForm.get("parent").setValue(`${s.pool_name}/${s.image_name}@${s.snap_name}`)}this.mode===this.rbdFormMode.editing&&this.rbdForm.get("name").setValue(t.name),this.rbdForm.get("pool").setValue(t.pool_name),this.onPoolChange(t.pool_name),this.rbdForm.get("namespace").setValue(t.namespace),t.data_pool&&(this.rbdForm.get("useDataPool").setValue(!0),this.rbdForm.get("dataPool").setValue(t.data_pool)),this.rbdForm.get("size").setValue(this.dimlessBinaryPipe.transform(t.size)),this.rbdForm.get("obj_size").setValue(this.dimlessBinaryPipe.transform(t.obj_size)),this.setFeatures(t.features_name),this.rbdForm.get("stripingUnit").setValue(this.dimlessBinaryPipe.transform(t.stripe_unit)),this.rbdForm.get("stripingCount").setValue(t.stripe_count),this.initializeConfigData.next({initialData:this.response.configuration,sourceType:et.h.image})}createRequest(){const t=new Ui;return t.pool_name=this.rbdForm.getValue("pool"),t.namespace=this.rbdForm.getValue("namespace"),t.name=this.rbdForm.getValue("name"),t.size=this.formatter.toBytes(this.rbdForm.getValue("size")),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(),t}addObjectSizeAndStripingToRequest(t){t.obj_size=this.formatter.toBytes(this.rbdForm.getValue("obj_size")),C().forIn(this.features,o=>{this.rbdForm.getValue(o.key)&&t.features.push(o.key)}),t.stripe_unit=this.formatter.toBytes(this.rbdForm.getValue("stripingUnit")),t.stripe_count=this.rbdForm.getValue("stripingCount"),t.data_pool=this.rbdForm.getValue("dataPool")}createAction(){const t=this.createRequest();return this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/create",{pool_name:t.pool_name,namespace:t.namespace,image_name:t.name}),call:this.rbdService.create(t)})}editRequest(){const t=new ji;return 
t.name=this.rbdForm.getValue("name"),t.size=this.formatter.toBytes(this.rbdForm.getValue("size")),C().forIn(this.features,o=>{this.rbdForm.getValue(o.key)&&t.features.push(o.key)}),t.configuration=this.getDirtyConfigurationValues(),t}cloneRequest(){const t=new Ji;return t.child_pool_name=this.rbdForm.getValue("pool"),t.child_namespace=this.rbdForm.getValue("namespace"),t.child_image_name=this.rbdForm.getValue("name"),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(!0,et.h.image),t}editAction(){const t=new Z.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/edit",{image_spec:t.toString()}),call:this.rbdService.update(t,this.editRequest())})}cloneAction(){const t=this.cloneRequest(),o=new Z.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/clone",{parent_image_spec:o.toString(),parent_snap_name:this.snapName,child_pool_name:t.child_pool_name,child_namespace:t.child_namespace,child_image_name:t.child_image_name}),call:this.rbdService.cloneSnapshot(o,this.snapName,t)})}copyRequest(){const t=new Yi;return this.snapName&&(t.snapshot_name=this.snapName),t.dest_pool_name=this.rbdForm.getValue("pool"),t.dest_namespace=this.rbdForm.getValue("namespace"),t.dest_image_name=this.rbdForm.getValue("name"),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(!0,et.h.image),t}copyAction(){const t=this.copyRequest(),o=new Z.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new 
F.R("rbd/copy",{src_image_spec:o.toString(),dest_pool_name:t.dest_pool_name,dest_namespace:t.dest_namespace,dest_image_name:t.dest_image_name}),call:this.rbdService.copy(o,t)})}submit(){this.mode||this.rbdImage.next("create"),this.rbdImage.pipe((0,ki.P)(),(0,Xi.w)(()=>this.mode===this.rbdFormMode.editing?this.editAction():this.mode===this.rbdFormMode.cloning?this.cloneAction():this.mode===this.rbdFormMode.copying?this.copyAction():this.createAction())).subscribe(()=>{},()=>this.rbdForm.setErrors({cdSubmitButton:!0}),()=>this.router.navigate(["/block/rbd"]))}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(m.gz),e.Y36(Ke.q),e.Y36(x),e.Y36(Qi.H),e.Y36(Q.P),e.Y36(Fe.$),e.Y36(D.p4),e.Y36(m.F0))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let _,t,o,i,s,a,d,c,u,S,N,P,$,G,X,J,te,A,w,de,pe,ge,ue,me,Te,fe,Ce,Se,y,Ze,Be,Ge,ye,xe,we,qe;return _="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Name",o="Pool",i="Use a dedicated data pool",s="Size",a="e.g., 10GiB",d="Features",c="Advanced",u="Striping",S="Object size",N="Stripe unit",P="-- Select stripe unit --",$="Stripe count",G="" + "\ufffd0\ufffd" + " from",X="This field is required.",J="'/' and '@' are not allowed.",te="Loading...",A="-- No rbd pools available --",w="-- Select a pool --",de="This field is required.",pe="Loading...",ge="-- No namespaces available --",ue="-- Select a namespace --",me="You need more than one pool with the rbd application label use to use a dedicated data pool.",Te="Data pool",fe="Dedicated pool that stores the object-data of the RBD.",Ce="Loading...",Se="-- No data pools available --",y="This field is required.",Ze="This field is required.",Be="You have to increase the size.",Ge="Advanced...",ye="This field is required because stripe count is defined!",xe="Stripe unit is greater than object size.",we="This field is required because stripe unit is defined!",qe="Stripe count must be greater than 
0.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","rbdForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],_,[1,"card-body"],["class","form-group row",4,"ngIf"],[1,"form-group","row"],["for","name",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["type","text","placeholder","Name...","id","name","name","name","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"form-group","row",3,"change"],["for","pool",1,"cd-col-form-label",3,"ngClass"],o,["class","form-control","type","text","placeholder","Pool name...","id","pool","name","pool","formControlName","pool",4,"ngIf"],["id","pool","name","pool","class","form-control","formControlName","pool",4,"ngIf"],[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["type","checkbox","id","useDataPool","name","useDataPool","formControlName","useDataPool",1,"custom-control-input",3,"change"],["for","useDataPool",1,"custom-control-label"],i,[4,"ngIf"],["for","size",1,"cd-col-form-label","required"],s,["id","size","name","size","type","text","formControlName","size","placeholder",a,"defaultUnit","GiB","cdDimlessBinary","",1,"form-control"],["formGroupName","features",1,"form-group","row"],["for","features",1,"cd-col-form-label"],d,["class","custom-control custom-checkbox",4,"ngFor","ngForOf"],[1,"row"],[1,"col-sm-12"],["class","float-right 
margin-right-md","href","",3,"click",4,"ngIf"],[3,"hidden"],[1,"cd-header"],c,[1,"col-md-12"],u,["for","size",1,"cd-col-form-label"],S,["id","obj_size","name","obj_size","formControlName","obj_size",1,"form-control"],[3,"value",4,"ngFor","ngForOf"],["for","stripingUnit",1,"cd-col-form-label",3,"ngClass"],N,["id","stripingUnit","name","stripingUnit","formControlName","stripingUnit",1,"form-control"],[3,"ngValue"],P,["for","stripingCount",1,"cd-col-form-label",3,"ngClass"],$,["id","stripingCount","name","stripingCount","formControlName","stripingCount","type","number",1,"form-control"],[3,"form","initializeData","changes"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],["for","name",1,"cd-col-form-label"],G,["type","text","id","parent","name","parent","formControlName","parent",1,"form-control"],[1,"invalid-feedback"],X,J,["type","text","placeholder","Pool name...","id","pool","name","pool","formControlName","pool",1,"form-control"],["id","pool","name","pool","formControlName","pool",1,"form-control"],[3,"ngValue",4,"ngIf"],te,A,w,[3,"value"],de,[3,"ngClass"],["for","pool",1,"cd-col-form-label"],["class","form-control","type","text","placeholder","Namespace...","id","namespace","name","namespace","formControlName","namespace",4,"ngIf"],["id","namespace","name","namespace","class","form-control","formControlName","namespace",4,"ngIf"],["type","text","placeholder","Namespace...","id","namespace","name","namespace","formControlName","namespace",1,"form-control"],["id","namespace","name","namespace","formControlName","namespace",1,"form-control"],pe,ge,ue,me,["for","dataPool",1,"cd-col-form-label"],Te,["html",fe],["class","form-control","type","text","placeholder","Data pool name...","id","dataPool","name","dataPool","formControlName","dataPool",4,"ngIf"],["id","dataPool","name","dataPool","class","form-control","formControlName","dataPool",3,"change",4,"ngIf"],["type","text","placeholder","Data pool 
name...","id","dataPool","name","dataPool","formControlName","dataPool",1,"form-control"],["id","dataPool","name","dataPool","formControlName","dataPool",1,"form-control",3,"change"],Ce,Se,y,Ze,Be,["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],[3,"html",4,"ngIf"],[3,"html"],["href","",1,"float-right","margin-right-md",3,"click"],Ge,ye,xe,we,qe]},template:function(t,o){1&t&&e.YNc(0,q_,83,46,"div",0),2&t&&e.Q6J("cdFormLoading",o.loading)},directives:[st.y,r._Y,r.JL,r.sg,v.V,l.O5,g.P,f.o,r.Fj,h.b,r.JJ,r.u,le.U,l.mk,r.Wl,Wi.Q,r.x0,l.sg,r.EJ,r.YN,r.Kr,r.wV,e_.d,O.p,t_.S],pipes:[l.rS,Ye.m],styles:[""]}),n})();var ft=p(36169),ce=p(91801),tt=p(51847),H_=p(16738),Me=p.n(H_),ot=p(62862),K_=p(52266);function k_(n,_){1&n&&(e.TgZ(0,"div",18),e.TgZ(1,"span"),e.SDv(2,19),e.qZA(),e.qZA())}function X_(n,_){1&n&&(e.TgZ(0,"span",20),e.SDv(1,21),e.qZA())}function Q_(n,_){1&n&&(e.TgZ(0,"span",20),e.SDv(1,22),e.qZA())}function z_(n,_){if(1&n&&e._UZ(0,"cd-date-time-picker",23),2&n){const t=e.oxw();e.Q6J("control",t.moveForm.get("expiresAt"))}}let J_=(()=>{class n{constructor(t,o,i,s,a){this.rbdService=t,this.activeModal=o,this.actionLabels=i,this.fb=s,this.taskWrapper=a,this.createForm()}createForm(){this.moveForm=this.fb.group({expiresAt:["",[B.h.custom("format",t=>!(""===t||Me()(t,"YYYY-MM-DD HH:mm:ss").isValid())),B.h.custom("expired",t=>Me()().isAfter(t))]]})}ngOnInit(){this.imageSpec=new Z.N(this.poolName,this.namespace,this.imageName),this.imageSpecStr=this.imageSpec.toString(),this.pattern=`${this.poolName}/${this.imageName}`}moveImage(){let t=0;const o=this.moveForm.getValue("expiresAt");o&&(t=Me()(o,"YYYY-MM-DD HH:mm:ss").diff(Me()(),"seconds",!0)),t<0&&(t=0),this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/trash/move",{image_spec:this.imageSpecStr}),call:this.rbdService.moveTrash(this.imageSpec,t)}).subscribe({complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(x),e.Y36(I.Kz),e.Y36(D.p4),e.Y36(ot.O),e.Y36(Q.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-move-modal"]],decls:23,vars:9,consts:function(){let _,t,o,i,s,a,d;return _="Move an image to trash",t="To move " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd0\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + " to trash, click " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Move" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ". Optionally, you can pick an expiration date.",t=e.Zx4(t),o="Protection expires at",i="NOT PROTECTED",s="This image contains snapshot(s), which will prevent it from being removed after moved to trash.",a="Wrong date format. Please use \"YYYY-MM-DD HH:mm:ss\".",d="Protection has already expired. Please pick a future date or leave it empty.",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","moveForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],["class","alert alert-warning","role","alert",4,"ngIf"],t,[1,"form-group"],["for","expiresAt",1,"col-form-label"],o,["type","text","placeholder",i,"formControlName","expiresAt","triggers","manual",1,"form-control",3,"ngbPopover","click","keypress"],["p","ngbPopover"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["popContent",""],["role","alert",1,"alert","alert-warning"],s,[1,"invalid-feedback"],a,d,[3,"control"]]},template:function(t,o){if(1&t){const i=e.EpF();e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.YNc(7,k_,3,0,"div",7),e.TgZ(8,"p"),e.tHW(9,8),e._UZ(10,"kbd"),e._UZ(11,"kbd"),e.N_p(),e.qZA(),e.TgZ(12,"div",9),e.TgZ(13,"label",10),e.SDv(14,11),e.qZA(),e.TgZ(15,"input",12,13),e.NdJ("click",function(){return e.CHM(i),e.MAs(16).open()})("keypress",function(){return 
e.CHM(i),e.MAs(16).close()}),e.qZA(),e.YNc(17,X_,2,0,"span",14),e.YNc(18,Q_,2,0,"span",14),e.qZA(),e.qZA(),e.TgZ(19,"div",15),e.TgZ(20,"cd-form-button-panel",16),e.NdJ("submitActionEvent",function(){return o.moveImage()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA(),e.YNc(21,z_,1,1,"ng-template",null,17,e.W1O)}if(2&t){const i=e.MAs(5),s=e.MAs(22);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.moveForm),e.xp6(3),e.Q6J("ngIf",o.hasSnapshots),e.xp6(4),e.pQV(o.imageSpecStr),e.QtT(9),e.xp6(4),e.Q6J("ngbPopover",s),e.xp6(2),e.Q6J("ngIf",o.moveForm.showError("expiresAt",i,"format")),e.xp6(1),e.Q6J("ngIf",o.moveForm.showError("expiresAt",i,"expired")),e.xp6(2),e.Q6J("form",o.moveForm)("submitText",o.actionLabels.MOVE)}},directives:[R.z,r._Y,r.JL,r.sg,v.V,l.O5,g.P,f.o,r.Fj,h.b,r.JJ,r.u,I.o8,O.p,K_.J],styles:[""]}),n})();function Y_(n,_){1&n&&(e.TgZ(0,"li",10),e.TgZ(1,"a",3),e.SDv(2,11),e.qZA(),e.qZA())}let Qe=(()=>{class n{constructor(t,o){this.authStorageService=t,this.router=o,this.grafanaPermission=this.authStorageService.getPermissions().grafana}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(m.F0))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-tabs"]],decls:12,vars:2,consts:function(){let _,t,o,i;return _="Images",t="Namespaces",o="Trash",i="Overall Performance",[["ngbNav","",1,"nav-tabs",3,"activeId","navChange"],["nav","ngbNav"],["ngbNavItem","/block/rbd"],["ngbNavLink",""],_,["ngbNavItem","/block/rbd/namespaces"],t,["ngbNavItem","/block/rbd/trash"],o,["ngbNavItem","/block/rbd/performance",4,"ngIf"],["ngbNavItem","/block/rbd/performance"],i]},template:function(t,o){1&t&&(e.TgZ(0,"ul",0,1),e.NdJ("navChange",function(s){return 
o.router.navigate([s.nextId])}),e.TgZ(2,"li",2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.qZA(),e.TgZ(5,"li",5),e.TgZ(6,"a",3),e.SDv(7,6),e.qZA(),e.qZA(),e.TgZ(8,"li",7),e.TgZ(9,"a",3),e.SDv(10,8),e.qZA(),e.qZA(),e.YNc(11,Y_,3,0,"li",9),e.qZA()),2&t&&(e.Q6J("activeId",o.router.url),e.xp6(11),e.Q6J("ngIf",o.grafanaPermission.read))},directives:[I.Pz,I.nv,I.Vx,l.O5],styles:[""]}),n})();var V_=p(25917),Ct=p(51295),nt=p(60737),U_=p(74255),St=p(71099),Et=p(79765);function j_(n,_){1&n&&(e.TgZ(0,"span",15),e.SDv(1,16),e.qZA())}let W_=(()=>{class n{constructor(t,o,i,s,a){this.activeModal=t,this.rbdService=o,this.taskManagerService=i,this.notificationService=s,this.actionLabels=a,this.editing=!1,this.onSubmit=new Et.xQ,this.action=this.actionLabels.CREATE,this.resource="RBD Snapshot",this.createForm()}createForm(){this.snapshotForm=new M.d({snapshotName:new r.NI("",{validators:[r.kI.required]})})}setSnapName(t){this.snapName=t,this.snapshotForm.get("snapshotName").setValue(t)}setEditing(t=!0){this.editing=t,this.action=this.editing?this.actionLabels.RENAME:this.actionLabels.CREATE}editAction(){const t=this.snapshotForm.getValue("snapshotName"),o=new Z.N(this.poolName,this.namespace,this.imageName),i=new F.R;i.name="rbd/snap/edit",i.metadata={image_spec:o.toString(),snapshot_name:t},this.rbdService.renameSnapshot(o,this.snapName,t).toPromise().then(()=>{this.taskManagerService.subscribe(i.name,i.metadata,s=>{this.notificationService.notifyTask(s)}),this.activeModal.close(),this.onSubmit.next(this.snapName)}).catch(()=>{this.snapshotForm.setErrors({cdSubmitButton:!0})})}createAction(){const t=this.snapshotForm.getValue("snapshotName"),o=new Z.N(this.poolName,this.namespace,this.imageName),i=new 
F.R;i.name="rbd/snap/create",i.metadata={image_spec:o.toString(),snapshot_name:t},this.rbdService.createSnapshot(o,t).toPromise().then(()=>{this.taskManagerService.subscribe(i.name,i.metadata,s=>{this.notificationService.notifyTask(s)}),this.activeModal.close(),this.onSubmit.next(t)}).catch(()=>{this.snapshotForm.setErrors({cdSubmitButton:!0})})}submit(){this.editing?this.editAction():this.createAction()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(I.Kz),e.Y36(x),e.Y36(St.k),e.Y36(Le.g),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-snapshot-form-modal"]],decls:19,vars:15,consts:function(){let _,t,o;return _="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Name",o="This field is required.",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","snapshotForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","snapshotName",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["type","text","placeholder","Snapshot name...","id","snapshotName","name","snapshotName","formControlName","snapshotName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],o]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.YNc(14,j_,2,0,"span",12),e.qZA(),e.qZA(),e.qZA(),e.TgZ(15,"div",13),e.TgZ(16,"cd-form-button-panel",14),e.NdJ("submitActionEvent",function(){return o.submit()}),e.ALo(17,"titlecase"),e.ALo(18,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const 
i=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,7,o.action))(e.lcZ(4,9,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.snapshotForm),e.xp6(8),e.Q6J("ngIf",o.snapshotForm.showError("snapshotName",i,"required")),e.xp6(2),e.Q6J("form",o.snapshotForm)("submitText",e.lcZ(17,11,o.action)+" "+e.lcZ(18,13,o.resource))}},directives:[R.z,r._Y,r.JL,r.sg,v.V,g.P,f.o,r.Fj,h.b,r.JJ,r.u,le.U,l.O5,O.p],pipes:[l.rS,Ye.m],styles:[""]}),n})();class es{constructor(_,t,o){this.featuresName=t,this.cloneFormatVersion=1,o.cloneFormatVersion().subscribe(i=>{this.cloneFormatVersion=i}),this.create={permission:"create",icon:T.P.add,name:_.CREATE},this.rename={permission:"update",icon:T.P.edit,name:_.RENAME},this.protect={permission:"update",icon:T.P.lock,visible:i=>i.hasSingleSelection&&!i.first().is_protected,name:_.PROTECT},this.unprotect={permission:"update",icon:T.P.unlock,visible:i=>i.hasSingleSelection&&i.first().is_protected,name:_.UNPROTECT},this.clone={permission:"create",canBePrimary:i=>i.hasSingleSelection,disable:i=>this.getCloneDisableDesc(i,this.featuresName),icon:T.P.clone,name:_.CLONE},this.copy={permission:"create",canBePrimary:i=>i.hasSingleSelection,disable:i=>!i.hasSingleSelection||i.first().cdExecuting,icon:T.P.copy,name:_.COPY},this.rollback={permission:"update",icon:T.P.undo,name:_.ROLLBACK},this.deleteSnap={permission:"delete",icon:T.P.destroy,disable:i=>{const s=i.first();return!i.hasSingleSelection||s.cdExecuting||s.is_protected},name:_.DELETE},this.ordering=[this.create,this.rename,this.protect,this.unprotect,this.clone,this.copy,this.rollback,this.deleteSnap]}getCloneDisableDesc(_,t){return!(_.hasSingleSelection&&!_.first().cdExecuting)||((null==t?void 0:t.includes("layering"))?1===this.cloneFormatVersion&&!_.first().is_protected&&"Snapshot must be protected in order to clone.":"Parent image must support Layering")}}class ts{}var ze=p(96102);const os=["nameTpl"],ns=["rollbackTpl"];function 
is(n,_){if(1&n&&(e.ynx(0),e.SDv(1,3),e.BQk(),e.TgZ(2,"strong"),e._uU(3),e.qZA(),e._uU(4,".\n")),2&n){const t=_.$implicit;e.xp6(3),e.hij(" ",t.snapName,"")}}let _s=(()=>{class n{constructor(t,o,i,s,a,d,c,u,S,N,P){this.authStorageService=t,this.modalService=o,this.dimlessBinaryPipe=i,this.cdDatePipe=s,this.rbdService=a,this.taskManagerService=d,this.notificationService=c,this.summaryService=u,this.taskListService=S,this.actionLabels=N,this.cdr=P,this.snapshots=[],this.selection=new Ee.r,this.builders={"rbd/snap/create":$=>{const G=new ts;return G.name=$.snapshot_name,G}},this.permission=this.authStorageService.getPermissions().rbdImage}ngOnInit(){this.columns=[{name:"Name",prop:"name",cellTransformation:ve.e.executing,flexGrow:2},{name:"Size",prop:"size",flexGrow:1,cellClass:"text-right",pipe:this.dimlessBinaryPipe},{name:"Provisioned",prop:"disk_usage",flexGrow:1,cellClass:"text-right",pipe:this.dimlessBinaryPipe},{name:"State",prop:"is_protected",flexGrow:1,cellTransformation:ve.e.badge,customTemplateConfig:{map:{true:{value:"PROTECTED",class:"badge-success"},false:{value:"UNPROTECTED",class:"badge-info"}}}},{name:"Created",prop:"timestamp",flexGrow:1,pipe:this.cdDatePipe}],this.imageSpec=new Z.N(this.poolName,this.namespace,this.rbdName),this.rbdTableActions=new es(this.actionLabels,this.featuresName,this.rbdService),this.rbdTableActions.create.click=()=>this.openCreateSnapshotModal(),this.rbdTableActions.rename.click=()=>this.openEditSnapshotModal(),this.rbdTableActions.protect.click=()=>this.toggleProtection(),this.rbdTableActions.unprotect.click=()=>this.toggleProtection();const 
t=()=>this.selection.first()&&`${this.imageSpec.toStringEncoded()}/${encodeURIComponent(this.selection.first().name)}`;this.rbdTableActions.clone.routerLink=()=>`/block/rbd/clone/${t()}`,this.rbdTableActions.copy.routerLink=()=>`/block/rbd/copy/${t()}`,this.rbdTableActions.rollback.click=()=>this.rollbackModal(),this.rbdTableActions.deleteSnap.click=()=>this.deleteSnapshotModal(),this.tableActions=this.rbdTableActions.ordering,this.taskListService.init(()=>(0,V_.of)(this.snapshots),null,s=>{Ct.T.updateChanged(this,{data:s})&&(this.cdr.detectChanges(),this.data=[...this.data])},()=>{Ct.T.updateChanged(this,{data:this.snapshots})&&(this.cdr.detectChanges(),this.data=[...this.data])},s=>["rbd/snap/create","rbd/snap/delete","rbd/snap/edit","rbd/snap/rollback"].includes(s.name)&&this.imageSpec.toString()===s.metadata.image_spec,(s,a)=>s.name===a.metadata.snapshot_name,this.builders)}ngOnChanges(){this.columns&&(this.imageSpec=new Z.N(this.poolName,this.namespace,this.rbdName),this.rbdTableActions&&(this.rbdTableActions.featuresName=this.featuresName),this.taskListService.fetch())}openSnapshotModal(t,o=null){this.modalRef=this.modalService.show(W_),this.modalRef.componentInstance.poolName=this.poolName,this.modalRef.componentInstance.imageName=this.rbdName,this.modalRef.componentInstance.namespace=this.namespace,o?this.modalRef.componentInstance.setEditing():o=`${this.rbdName}_${Me()().toISOString(!0)}`,this.modalRef.componentInstance.setSnapName(o),this.modalRef.componentInstance.onSubmit.subscribe(i=>{const s=new nt.o;s.name=t,s.metadata={image_spec:this.imageSpec.toString(),snapshot_name:i},this.summaryService.addRunningTask(s)})}openCreateSnapshotModal(){this.openSnapshotModal("rbd/snap/create")}openEditSnapshotModal(){this.openSnapshotModal("rbd/snap/edit",this.selection.first().name)}toggleProtection(){const t=this.selection.first().name,o=this.selection.first().is_protected,i=new F.R;i.name="rbd/snap/edit";const s=new 
Z.N(this.poolName,this.namespace,this.rbdName);i.metadata={image_spec:s.toString(),snapshot_name:t},this.rbdService.protectSnapshot(s,t,!o).toPromise().then(()=>{const a=new nt.o;a.name=i.name,a.metadata=i.metadata,this.summaryService.addRunningTask(a),this.taskManagerService.subscribe(i.name,i.metadata,d=>{this.notificationService.notifyTask(d)})})}_asyncTask(t,o,i){const s=new F.R;s.name=o,s.metadata={image_spec:new Z.N(this.poolName,this.namespace,this.rbdName).toString(),snapshot_name:i};const a=new Z.N(this.poolName,this.namespace,this.rbdName);this.rbdService[t](a,i).toPromise().then(()=>{const d=new nt.o;d.name=s.name,d.metadata=s.metadata,this.summaryService.addRunningTask(d),this.modalRef.close(),this.taskManagerService.subscribe(d.name,d.metadata,c=>{this.notificationService.notifyTask(c)})}).catch(()=>{this.modalRef.componentInstance.stopLoadingSpinner()})}rollbackModal(){const t=this.selection.selected[0].name,o=new Z.N(this.poolName,this.namespace,this.rbdName).toString(),i={titleText:"RBD snapshot rollback",buttonText:"Rollback",bodyTpl:this.rollbackTpl,bodyData:{snapName:`${o}@${t}`},onSubmit:()=>{this._asyncTask("rollbackSnapshot","rbd/snap/rollback",t)}};this.modalRef=this.modalService.show(ft.Y,i)}deleteSnapshotModal(){const t=this.selection.selected[0].name;this.modalRef=this.modalService.show(he.M,{itemDescription:"RBD snapshot",itemNames:[t],submitAction:()=>this._asyncTask("deleteSnapshot","rbd/snap/delete",t)})}updateSelection(t){this.selection=t}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(re.Z),e.Y36(Fe.$),e.Y36(ze.N),e.Y36(x),e.Y36(St.k),e.Y36(Le.g),e.Y36(U_.J),e.Y36(se.j),e.Y36(D.p4),e.Y36(e.sBO))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-snapshot-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(os,5),e.Gf(ns,7)),2&t){let 
i;e.iGM(i=e.CRH())&&(o.nameTpl=i.first),e.iGM(i=e.CRH())&&(o.rollbackTpl=i.first)}},inputs:{snapshots:"snapshots",featuresName:"featuresName",poolName:"poolName",namespace:"namespace",rbdName:"rbdName"},features:[e._Bn([se.j]),e.TTD],decls:4,vars:5,consts:function(){let _;return _="You are about to rollback",[["columnMode","flex","selectionType","single",3,"data","columns","updateSelection"],[1,"table-actions",3,"permission","selection","tableActions"],["rollbackTpl",""],_]},template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("updateSelection",function(s){return o.updateSelection(s)}),e._UZ(1,"cd-table-actions",1),e.qZA(),e.YNc(2,is,5,1,"ng-template",null,2,e.W1O)),2&t&&(e.Q6J("data",o.data)("columns",o.columns),e.xp6(1),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[ee.a,Re.K],styles:[""],changeDetection:0}),n})();var ss=p(71752),Rt=p(76317),as=p(41039);const rs=["poolConfigurationSourceTpl"];function ls(n,_){1&n&&(e.ynx(0),e.tHW(1,3),e._UZ(2,"strong"),e.N_p(),e.BQk())}function cs(n,_){if(1&n&&(e.TgZ(0,"span"),e.TgZ(1,"span",38),e._uU(2),e.qZA(),e.qZA()),2&n){const t=_.$implicit;e.xp6(2),e.Oqu(t)}}function ds(n,_){if(1&n&&(e.TgZ(0,"span"),e.TgZ(1,"span",39),e.SDv(2,40),e.qZA(),e.qZA()),2&n){e.oxw(3);const t=e.MAs(1);e.xp6(1),e.Q6J("ngbTooltip",t)}}function ps(n,_){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,t.selection.disk_usage)," ")}}function gs(n,_){if(1&n&&(e.TgZ(0,"span"),e.TgZ(1,"span",39),e.SDv(2,41),e.qZA(),e.qZA()),2&n){e.oxw(3);const t=e.MAs(1);e.xp6(1),e.Q6J("ngbTooltip",t)}}function us(n,_){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,t.selection.total_disk_usage)," ")}}function ms(n,_){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw(4);e.xp6(1),e.hij("/",t.selection.parent.pool_namespace,"")}}function 
Ts(n,_){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.YNc(2,ms,2,1,"span",1),e._uU(3),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.Oqu(t.selection.parent.pool_name),e.xp6(1),e.Q6J("ngIf",t.selection.parent.pool_namespace),e.xp6(1),e.AsE("/",t.selection.parent.image_name,"@",t.selection.parent.snap_name,"")}}function fs(n,_){1&n&&(e.TgZ(0,"span"),e._uU(1,"-"),e.qZA())}function Cs(n,_){if(1&n&&(e.TgZ(0,"table",17),e.TgZ(1,"tbody"),e.TgZ(2,"tr"),e.TgZ(3,"td",18),e.SDv(4,19),e.qZA(),e.TgZ(5,"td",20),e._uU(6),e.qZA(),e.qZA(),e.TgZ(7,"tr"),e.TgZ(8,"td",21),e.SDv(9,22),e.qZA(),e.TgZ(10,"td"),e._uU(11),e.qZA(),e.qZA(),e.TgZ(12,"tr"),e.TgZ(13,"td",21),e.SDv(14,23),e.qZA(),e.TgZ(15,"td"),e._uU(16),e.ALo(17,"empty"),e.qZA(),e.qZA(),e.TgZ(18,"tr"),e.TgZ(19,"td",21),e.SDv(20,24),e.qZA(),e.TgZ(21,"td"),e._uU(22),e.ALo(23,"cdDate"),e.qZA(),e.qZA(),e.TgZ(24,"tr"),e.TgZ(25,"td",21),e.SDv(26,25),e.qZA(),e.TgZ(27,"td"),e._uU(28),e.ALo(29,"dimlessBinary"),e.qZA(),e.qZA(),e.TgZ(30,"tr"),e.TgZ(31,"td",21),e.SDv(32,26),e.qZA(),e.TgZ(33,"td"),e._uU(34),e.ALo(35,"dimless"),e.qZA(),e.qZA(),e.TgZ(36,"tr"),e.TgZ(37,"td",21),e.SDv(38,27),e.qZA(),e.TgZ(39,"td"),e._uU(40),e.ALo(41,"dimlessBinary"),e.qZA(),e.qZA(),e.TgZ(42,"tr"),e.TgZ(43,"td",21),e.SDv(44,28),e.qZA(),e.TgZ(45,"td"),e.YNc(46,cs,3,1,"span",29),e.qZA(),e.qZA(),e.TgZ(47,"tr"),e.TgZ(48,"td",21),e.SDv(49,30),e.qZA(),e.TgZ(50,"td"),e.YNc(51,ds,3,1,"span",1),e.YNc(52,ps,3,3,"span",1),e.qZA(),e.qZA(),e.TgZ(53,"tr"),e.TgZ(54,"td",21),e.SDv(55,31),e.qZA(),e.TgZ(56,"td"),e.YNc(57,gs,3,1,"span",1),e.YNc(58,us,3,3,"span",1),e.qZA(),e.qZA(),e.TgZ(59,"tr"),e.TgZ(60,"td",21),e.SDv(61,32),e.qZA(),e.TgZ(62,"td"),e._uU(63),e.ALo(64,"dimlessBinary"),e.qZA(),e.qZA(),e.TgZ(65,"tr"),e.TgZ(66,"td",21),e.SDv(67,33),e.qZA(),e.TgZ(68,"td"),e._uU(69),e.qZA(),e.qZA(),e.TgZ(70,"tr"),e.TgZ(71,"td",21),e.SDv(72,34),e.qZA(),e.TgZ(73,"td"),e.YNc(74,Ts,4,4,"span",1),e.YNc(75,fs,2,0,"span",1),e.qZA(),e.qZA(),e.TgZ(76,"tr"),e.TgZ(77,"td",21),e.SDv(78,35),e.qZA(),e.TgZ(79,"td"),e._
uU(80),e.qZA(),e.qZA(),e.TgZ(81,"tr"),e.TgZ(82,"td",21),e.SDv(83,36),e.qZA(),e.TgZ(84,"td"),e._uU(85),e.qZA(),e.qZA(),e.TgZ(86,"tr"),e.TgZ(87,"td",21),e.SDv(88,37),e.qZA(),e.TgZ(89,"td"),e._uU(90),e.qZA(),e.qZA(),e.qZA(),e.qZA()),2&n){const t=e.oxw(2);e.xp6(6),e.Oqu(t.selection.name),e.xp6(5),e.Oqu(t.selection.pool_name),e.xp6(5),e.Oqu(e.lcZ(17,19,t.selection.data_pool)),e.xp6(6),e.Oqu(e.lcZ(23,21,t.selection.timestamp)),e.xp6(6),e.Oqu(e.lcZ(29,23,t.selection.size)),e.xp6(6),e.Oqu(e.lcZ(35,25,t.selection.num_objs)),e.xp6(6),e.Oqu(e.lcZ(41,27,t.selection.obj_size)),e.xp6(6),e.Q6J("ngForOf",t.selection.features_name),e.xp6(5),e.Q6J("ngIf",-1===(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(1),e.Q6J("ngIf",-1!==(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(5),e.Q6J("ngIf",-1===(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(1),e.Q6J("ngIf",-1!==(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(5),e.Oqu(e.lcZ(64,29,t.selection.stripe_unit)),e.xp6(6),e.Oqu(t.selection.stripe_count),e.xp6(5),e.Q6J("ngIf",t.selection.parent),e.xp6(1),e.Q6J("ngIf",!t.selection.parent),e.xp6(5),e.Oqu(t.selection.block_name_prefix),e.xp6(5),e.Oqu(t.selection.order),e.xp6(5),e.Oqu(t.selection.image_format)}}function Ss(n,_){if(1&n&&e._UZ(0,"cd-rbd-snapshot-list",42),2&n){const t=e.oxw(2);e.Q6J("snapshots",t.selection.snapshots)("featuresName",t.selection.features_name)("poolName",t.selection.pool_name)("namespace",t.selection.namespace)("rbdName",t.selection.name)}}function Es(n,_){if(1&n&&e._UZ(0,"cd-rbd-configuration-table",43),2&n){const t=e.oxw(2);e.Q6J("data",t.selection.configuration)}}function Rs(n,_){if(1&n&&e._UZ(0,"cd-grafana",44),2&n){const t=e.oxw(2);e.Q6J("grafanaPath",t.rbdDashboardUrl)}}function 
Ms(n,_){if(1&n&&(e.ynx(0),e.TgZ(1,"ul",4,5),e.TgZ(3,"li",6),e.TgZ(4,"a",7),e.SDv(5,8),e.qZA(),e.YNc(6,Cs,91,31,"ng-template",9),e.qZA(),e.TgZ(7,"li",10),e.TgZ(8,"a",7),e.SDv(9,11),e.qZA(),e.YNc(10,Ss,1,5,"ng-template",9),e.qZA(),e.TgZ(11,"li",12),e.TgZ(12,"a",7),e.SDv(13,13),e.qZA(),e.YNc(14,Es,1,1,"ng-template",9),e.qZA(),e.TgZ(15,"li",14),e.TgZ(16,"a",7),e.SDv(17,15),e.qZA(),e.YNc(18,Rs,1,1,"ng-template",9),e.qZA(),e.qZA(),e._UZ(19,"div",16),e.BQk()),2&n){const t=e.MAs(2);e.xp6(19),e.Q6J("ngbNavOutlet",t)}}function Os(n,_){1&n&&(e.ynx(0),e.TgZ(1,"cd-alert-panel",45),e.SDv(2,46),e.qZA(),e.BQk())}function As(n,_){1&n&&(e.ynx(0),e.TgZ(1,"strong",49),e.SDv(2,50),e.qZA(),e.BQk())}function hs(n,_){1&n&&(e.TgZ(0,"span",51),e.SDv(1,52),e.qZA())}function Ps(n,_){if(1&n&&(e.YNc(0,As,3,0,"ng-container",47),e.YNc(1,hs,2,0,"ng-template",null,48,e.W1O)),2&n){const t=_.value,o=e.MAs(2);e.Q6J("ngIf",+t)("ngIfElse",o)}}let Is=(()=>{class n{ngOnChanges(){this.selection&&(this.rbdDashboardUrl=`rbd-details?var-Pool=${this.selection.pool_name}&var-Image=${this.selection.name}`)}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-details"]],viewQuery:function(t,o){if(1&t&&(e.Gf(rs,7),e.Gf(I.Pz,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.poolConfigurationSourceTpl=i.first),e.iGM(i=e.CRH())&&(o.nav=i.first)}},inputs:{selection:"selection",images:"images"},features:[e.TTD],decls:6,vars:2,consts:function(){let _,t,o,i,s,a,d,c,u,S,N,P,$,G,X,J,te,A,w,de,pe,ge,ue,me,Te,fe,Ce,Se;return _="Only available for RBD images with " + "\ufffd#2\ufffd" + "fast-diff" + "\ufffd/#2\ufffd" + " enabled",t="Details",o="Snapshots",i="Configuration",s="Performance",a="Name",d="Pool",c="Data Pool",u="Created",S="Size",N="Objects",P="Object size",$="Features",G="Provisioned",X="Total provisioned",J="Striping unit",te="Striping count",A="Parent",w="Block name prefix",de="Order",pe="Format Version",ge="N/A",ue="N/A",me="Information can not be displayed for RBD in status 
'Removing'.",Te="This setting overrides the global value",fe="Image",Ce="This is the global value. No value for this option has been set for this image.",Se="Global",[["usageNotAvailableTooltipTpl",""],[4,"ngIf"],["poolConfigurationSourceTpl",""],_,["ngbNav","","cdStatefulTab","rbd-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],t,["ngbNavContent",""],["ngbNavItem","snapshots"],o,["ngbNavItem","configuration"],i,["ngbNavItem","performance"],s,[3,"ngbNavOutlet"],[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],a,[1,"w-75"],[1,"bold"],d,c,u,S,N,P,$,[4,"ngFor","ngForOf"],G,X,J,te,A,w,de,pe,[1,"badge","badge-dark","mr-2"],["placement","top",1,"form-text","text-muted",3,"ngbTooltip"],ge,ue,[3,"snapshots","featuresName","poolName","namespace","rbdName"],[3,"data"],["uid","YhCYGcuZz","grafanaStyle","one",3,"grafanaPath"],["type","warning"],me,[4,"ngIf","ngIfElse"],["global",""],["ngbTooltip",Te],fe,["ngbTooltip",Ce],Se]},template:function(t,o){1&t&&(e.YNc(0,ls,3,0,"ng-template",null,0,e.W1O),e.YNc(2,Ms,20,1,"ng-container",1),e.YNc(3,Os,3,0,"ng-container",1),e.YNc(4,Ps,3,2,"ng-template",null,2,e.W1O)),2&t&&(e.xp6(2),e.Q6J("ngIf",o.selection&&"REMOVING"!==o.selection.source),e.xp6(1),e.Q6J("ngIf",o.selection&&"REMOVING"===o.selection.source))},directives:[l.O5,I.Pz,mt.m,I.nv,I.Vx,I.uN,I.tO,l.sg,I._L,_s,ss.P,Rt.F,pt.G],pipes:[as.W,ze.N,Fe.$,Ue.n],styles:[""]}),n})();const bs=["usageTpl"],Ns=["parentTpl"],Ds=["nameTpl"],vs=["flattenTpl"],Ls=["deleteTpl"],Fs=["removingStatTpl"],$s=["provisionedNotAvailableTooltipTpl"],Zs=["totalProvisionedNotAvailableTooltipTpl"];function Bs(n,_){1&n&&e._UZ(0,"div",11),2&n&&e.Q6J("innerHtml","Only available for RBD images with fast-diff enabled",e.oJD)}function Gs(n,_){if(1&n&&(e.TgZ(0,"span",14),e.SDv(1,15),e.qZA()),2&n){e.oxw(2);const t=e.MAs(6);e.Q6J("ngbTooltip",t)}}function ys(n,_){if(1&n&&(e.SDv(0,16),e.ALo(1,"dimlessBinary")),2&n){const 
t=e.oxw().row;e.xp6(1),e.pQV(e.lcZ(1,1,t.disk_usage)),e.QtT(0)}}function xs(n,_){if(1&n&&(e.YNc(0,Gs,2,1,"span",12),e.YNc(1,ys,2,3,"ng-template",null,13,e.W1O)),2&n){const t=_.row,o=e.MAs(2);e.Q6J("ngIf",null===t.disk_usage&&!t.features_name.includes("fast-diff"))("ngIfElse",o)}}function ws(n,_){if(1&n&&(e.TgZ(0,"span",14),e.SDv(1,18),e.qZA()),2&n){e.oxw(2);const t=e.MAs(6);e.Q6J("ngbTooltip",t)}}function qs(n,_){if(1&n&&(e.SDv(0,19),e.ALo(1,"dimlessBinary")),2&n){const t=e.oxw().row;e.xp6(1),e.pQV(e.lcZ(1,1,t.total_disk_usage)),e.QtT(0)}}function Hs(n,_){if(1&n&&(e.YNc(0,ws,2,1,"span",12),e.YNc(1,qs,2,3,"ng-template",null,17,e.W1O)),2&n){const t=_.row,o=e.MAs(2);e.Q6J("ngIf",null===t.total_disk_usage&&!t.features_name.includes("fast-diff"))("ngIfElse",o)}}function Ks(n,_){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw(2).value;e.xp6(1),e.hij("/",t.pool_namespace,"")}}function ks(n,_){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.YNc(2,Ks,2,1,"span",20),e._uU(3),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t.pool_name),e.xp6(1),e.Q6J("ngIf",t.pool_namespace),e.xp6(1),e.AsE("/",t.image_name,"@",t.snap_name,"")}}function Xs(n,_){1&n&&(e.TgZ(0,"span"),e._uU(1,"-"),e.qZA())}function Qs(n,_){if(1&n&&(e.YNc(0,ks,4,4,"span",20),e.YNc(1,Xs,2,0,"span",20)),2&n){const t=_.value;e.Q6J("ngIf",t),e.xp6(1),e.Q6J("ngIf",!t)}}function zs(n,_){if(1&n&&(e._uU(0," You are about to flatten "),e.TgZ(1,"strong"),e._uU(2),e.qZA(),e._uU(3,". 
"),e._UZ(4,"br"),e._UZ(5,"br"),e._uU(6," All blocks will be copied from parent "),e.TgZ(7,"strong"),e._uU(8),e.qZA(),e._uU(9," to child "),e.TgZ(10,"strong"),e._uU(11),e.qZA(),e._uU(12,".\n")),2&n){const t=_.$implicit;e.xp6(2),e.Oqu(t.child),e.xp6(6),e.Oqu(t.parent),e.xp6(3),e.Oqu(t.child)}}function Js(n,_){if(1&n&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&n){const t=_.$implicit;e.xp6(1),e.Oqu(t)}}function Ys(n,_){if(1&n&&(e.ynx(0),e.TgZ(1,"span"),e.SDv(2,24),e.qZA(),e.TgZ(3,"ul"),e.YNc(4,Js,2,1,"li",25),e.qZA(),e.BQk()),2&n){const t=e.oxw(2).snapshots;e.xp6(4),e.Q6J("ngForOf",t)}}function Vs(n,_){if(1&n&&(e.TgZ(0,"div",22),e.TgZ(1,"span"),e.SDv(2,23),e.qZA(),e._UZ(3,"br"),e.YNc(4,Ys,5,1,"ng-container",20),e.qZA()),2&n){const t=e.oxw().snapshots;e.xp6(4),e.Q6J("ngIf",t.length>0)}}function Us(n,_){1&n&&e.YNc(0,Vs,5,1,"div",21),2&n&&e.Q6J("ngIf",_.hasSnapshots)}const js=function(n,_){return[n,_]};function Ws(n,_){if(1&n&&e._UZ(0,"i",27),2&n){const t=e.oxw(2);e.Q6J("ngClass",e.WLB(1,js,t.icons.spinner,t.icons.spin))}}function ea(n,_){if(1&n&&(e.TgZ(0,"span",27),e._uU(1),e.qZA()),2&n){const t=e.oxw(),o=t.column,i=t.row;e.Q6J("ngClass",null!=o&&null!=o.customTemplateConfig&&o.customTemplateConfig.executingClass?o.customTemplateConfig.executingClass:"text-muted italic"),e.xp6(1),e.hij(" (",i.cdExecuting,") ")}}function ta(n,_){if(1&n&&e._UZ(0,"i",29),2&n){const t=e.oxw(2);e.Gre("",t.icons.warning," warn")}}function oa(n,_){if(1&n&&(e.YNc(0,Ws,1,4,"i",26),e.TgZ(1,"span",27),e._uU(2),e.qZA(),e.YNc(3,ea,2,2,"span",26),e.YNc(4,ta,1,3,"i",28)),2&n){const t=_.column,o=_.value,i=_.row;e.Q6J("ngIf",i.cdExecuting),e.xp6(1),e.Q6J("ngClass",null==t||null==t.customTemplateConfig?null:t.customTemplateConfig.valueClass),e.xp6(1),e.hij(" ",o," "),e.xp6(1),e.Q6J("ngIf",i.cdExecuting),e.xp6(1),e.Q6J("ngIf",i.source&&"REMOVING"===i.source)}}let ia=(()=>{class n extends 
lt.o{constructor(t,o,i,s,a,d,c,u,S){super(),this.authStorageService=t,this.rbdService=o,this.dimlessBinaryPipe=i,this.dimlessPipe=s,this.modalService=a,this.taskWrapper=d,this.taskListService=c,this.urlBuilder=u,this.actionLabels=S,this.tableStatus=new U.E,this.selection=new Ee.r,this.icons=T.P,this.builders={"rbd/create":A=>this.createRbdFromTask(A.pool_name,A.namespace,A.image_name),"rbd/delete":A=>this.createRbdFromTaskImageSpec(A.image_spec),"rbd/clone":A=>this.createRbdFromTask(A.child_pool_name,A.child_namespace,A.child_image_name),"rbd/copy":A=>this.createRbdFromTask(A.dest_pool_name,A.dest_namespace,A.dest_image_name)},this.permission=this.authStorageService.getPermissions().rbdImage;const N=()=>this.selection.first()&&new Z.N(this.selection.first().pool_name,this.selection.first().namespace,this.selection.first().name).toStringEncoded();this.tableActions=[{permission:"create",icon:T.P.add,routerLink:()=>this.urlBuilder.getCreate(),canBePrimary:A=>!A.hasSingleSelection,name:this.actionLabels.CREATE},{permission:"update",icon:T.P.edit,routerLink:()=>this.urlBuilder.getEdit(N()),name:this.actionLabels.EDIT,disable:A=>this.getRemovingStatusDesc(A)||this.getInvalidNameDisable(A)},{permission:"create",canBePrimary:A=>A.hasSingleSelection,disable:A=>this.getRemovingStatusDesc(A)||this.getInvalidNameDisable(A)||!!A.first().cdExecuting,icon:T.P.copy,routerLink:()=>`/block/rbd/copy/${N()}`,name:this.actionLabels.COPY},{permission:"update",disable:A=>this.getRemovingStatusDesc(A)||this.getInvalidNameDisable(A)||A.first().cdExecuting||!A.first().parent,icon:T.P.flatten,click:()=>this.flattenRbdModal(),name:this.actionLabels.FLATTEN},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteRbdModal(),name:this.actionLabels.DELETE,disable:A=>this.getDeleteDisableDesc(A)},{permission:"delete",icon:T.P.trash,click:()=>this.trashRbdModal(),name:this.actionLabels.TRASH,disable:A=>this.getRemovingStatusDesc(A)||this.getInvalidNameDisable(A)||A.first().image_format===Pe.V1}]
}createRbdFromTaskImageSpec(t){const o=Z.N.fromString(t);return this.createRbdFromTask(o.poolName,o.namespace,o.imageName)}createRbdFromTask(t,o,i){const s=new zi;return s.id="-1",s.unique_id="-1",s.name=i,s.namespace=o,s.pool_name=t,s.image_format=Pe.V2,s}ngOnInit(){this.columns=[{name:"Name",prop:"name",flexGrow:2,cellTemplate:this.removingStatTpl},{name:"Pool",prop:"pool_name",flexGrow:2},{name:"Namespace",prop:"namespace",flexGrow:2},{name:"Size",prop:"size",flexGrow:1,cellClass:"text-right",pipe:this.dimlessBinaryPipe},{name:"Objects",prop:"num_objs",flexGrow:1,cellClass:"text-right",pipe:this.dimlessPipe},{name:"Object size",prop:"obj_size",flexGrow:1,cellClass:"text-right",pipe:this.dimlessBinaryPipe},{name:"Provisioned",prop:"disk_usage",cellClass:"text-center",flexGrow:1,pipe:this.dimlessBinaryPipe,cellTemplate:this.provisionedNotAvailableTooltipTpl},{name:"Total provisioned",prop:"total_disk_usage",cellClass:"text-center",flexGrow:1,pipe:this.dimlessBinaryPipe,cellTemplate:this.totalProvisionedNotAvailableTooltipTpl},{name:"Parent",prop:"parent",flexGrow:2,cellTemplate:this.parentTpl}],this.taskListService.init(()=>this.rbdService.list(),i=>this.prepareResponse(i),i=>this.images=i,()=>this.onFetchError(),i=>["rbd/clone","rbd/copy","rbd/create","rbd/delete","rbd/edit","rbd/flatten","rbd/trash/move"].includes(i.name),(i,s)=>{let a;switch(s.name){case"rbd/copy":a=new Z.N(s.metadata.dest_pool_name,s.metadata.dest_namespace,s.metadata.dest_image_name).toString();break;case"rbd/clone":a=new Z.N(s.metadata.child_pool_name,s.metadata.child_namespace,s.metadata.child_image_name).toString();break;case"rbd/create":a=new Z.N(s.metadata.pool_name,s.metadata.namespace,s.metadata.image_name).toString();break;default:a=s.metadata.image_spec}return a===new Z.N(i.pool_name,i.namespace,i.name).toString()},this.builders)}onFetchError(){this.table.reset(),this.tableStatus=new U.E(ce.T.ValueException)}prepareResponse(t){let o=[];const i={};let 
s;if(t.forEach(a=>{C().isUndefined(i[a.status])&&(i[a.status]=[]),i[a.status].push(a.pool_name),o=o.concat(a.value)}),i[ce.T.ValueException]?s=ce.T.ValueException:i[ce.T.ValueStale]?s=ce.T.ValueStale:i[ce.T.ValueNone]&&(s=ce.T.ValueNone),s){const a=(i[s].length>1?"pools ":"pool ")+i[s].join();this.tableStatus=new U.E(s,a)}else this.tableStatus=new U.E;return o}updateSelection(t){this.selection=t}deleteRbdModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().name,s=new Z.N(t,o,i);this.modalRef=this.modalService.show(he.M,{itemDescription:"RBD",itemNames:[s],bodyTemplate:this.deleteTpl,bodyContext:{hasSnapshots:this.hasSnapshots(),snapshots:this.listProtectedSnapshots()},submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/delete",{image_spec:s.toString()}),call:this.rbdService.delete(s)})})}trashRbdModal(){const t={poolName:this.selection.first().pool_name,namespace:this.selection.first().namespace,imageName:this.selection.first().name,hasSnapshots:this.hasSnapshots()};this.modalRef=this.modalService.show(J_,t)}flattenRbd(t){this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/flatten",{image_spec:t.toString()}),call:this.rbdService.flatten(t)}).subscribe({complete:()=>{this.modalRef.close()}})}flattenRbdModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().name,s=this.selection.first().parent,a=new Z.N(s.pool_name,s.pool_namespace,s.image_name),d=new Z.N(t,o,i),c={titleText:"RBD flatten",buttonText:"Flatten",bodyTpl:this.flattenTpl,bodyData:{parent:`${a}@${s.snap_name}`,child:d.toString()},onSubmit:()=>{this.flattenRbd(d)}};this.modalRef=this.modalService.show(ft.Y,c)}hasSnapshots(){return(this.selection.first().snapshots||[]).length>0}hasClonedSnapshots(t){return(t.snapshots||[]).some(i=>i.children&&i.children.length>0)}listProtectedSnapshots(){return 
this.selection.first().snapshots.reduce((i,s)=>(s.is_protected&&i.push(s.name),i),[])}getDeleteDisableDesc(t){const o=t.first();return o&&this.hasClonedSnapshots(o)?"This RBD has cloned snapshots. Please delete related RBDs before deleting this RBD.":this.getInvalidNameDisable(t)||this.hasClonedSnapshots(t.first())}getInvalidNameDisable(t){var o;const i=t.first();return(null===(o=null==i?void 0:i.name)||void 0===o?void 0:o.match(/[@/]/))?"This RBD image has an invalid name and can't be managed by ceph.":!t.first()||!t.hasSingleSelection}getRemovingStatusDesc(t){const o=t.first();return"REMOVING"===(null==o?void 0:o.source)&&"Action not possible for an RBD in status 'Removing'"}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(x),e.Y36(Fe.$),e.Y36(Ue.n),e.Y36(re.Z),e.Y36(Q.P),e.Y36(se.j),e.Y36(tt.F),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(ee.a,7),e.Gf(bs,5),e.Gf(Ns,7),e.Gf(Ds,5),e.Gf(vs,7),e.Gf(Ls,7),e.Gf(Fs,7),e.Gf($s,7),e.Gf(Zs,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.table=i.first),e.iGM(i=e.CRH())&&(o.usageTpl=i.first),e.iGM(i=e.CRH())&&(o.parentTpl=i.first),e.iGM(i=e.CRH())&&(o.nameTpl=i.first),e.iGM(i=e.CRH())&&(o.flattenTpl=i.first),e.iGM(i=e.CRH())&&(o.deleteTpl=i.first),e.iGM(i=e.CRH())&&(o.removingStatTpl=i.first),e.iGM(i=e.CRH())&&(o.provisionedNotAvailableTooltipTpl=i.first),e.iGM(i=e.CRH())&&(o.totalProvisionedNotAvailableTooltipTpl=i.first)}},features:[e._Bn([se.j,{provide:tt.F,useValue:new tt.F("block/rbd")}]),e.qOj],decls:19,vars:10,consts:function(){let _,t,o,i,s,a,d;return _="N/A",t="" + "\ufffd0\ufffd" + "",o="N/A",i="" + "\ufffd0\ufffd" + "",s="Deleting this image will also delete all its snapshots.",a="The following snapshots are currently protected and will be removed:",d="RBD in status 
'Removing'",[["columnMode","flex","identifier","unique_id","forceIdentifier","true","selectionType","single",3,"data","columns","searchableObjects","hasDetails","status","autoReload","fetchData","setExpandedRow","updateSelection"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["usageNotAvailableTooltipTpl",""],["provisionedNotAvailableTooltipTpl",""],["totalProvisionedNotAvailableTooltipTpl",""],["parentTpl",""],["flattenTpl",""],["deleteTpl",""],["removingStatTpl",""],[3,"innerHtml"],["placement","top",3,"ngbTooltip",4,"ngIf","ngIfElse"],["provisioned",""],["placement","top",3,"ngbTooltip"],_,t,["totalProvisioned",""],o,i,[4,"ngIf"],["class","alert alert-warning","role","alert",4,"ngIf"],["role","alert",1,"alert","alert-warning"],s,a,[4,"ngFor","ngForOf"],[3,"ngClass",4,"ngIf"],[3,"ngClass"],["title",d,3,"class",4,"ngIf"],["title",d]]},template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0,1),e.NdJ("fetchData",function(){return o.taskListService.fetch()})("setExpandedRow",function(s){return o.setExpandedRow(s)})("updateSelection",function(s){return o.updateSelection(s)}),e._UZ(3,"cd-table-actions",2),e._UZ(4,"cd-rbd-details",3),e.qZA(),e.YNc(5,Bs,1,1,"ng-template",null,4,e.W1O),e.YNc(7,xs,3,2,"ng-template",null,5,e.W1O),e.YNc(9,Hs,3,2,"ng-template",null,6,e.W1O),e.YNc(11,Qs,2,2,"ng-template",null,7,e.W1O),e.YNc(13,zs,13,3,"ng-template",null,8,e.W1O),e.YNc(15,Us,1,1,"ng-template",null,9,e.W1O),e.YNc(17,oa,5,5,"ng-template",null,10,e.W1O)),2&t&&(e.xp6(1),e.Q6J("data",o.images)("columns",o.columns)("searchableObjects",!0)("hasDetails",!0)("status",o.tableStatus)("autoReload",-1),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions),e.xp6(1),e.Q6J("selection",o.expandedRow))},directives:[Qe,ee.a,Re.K,Is,l.O5,I._L,l.sg,l.mk],pipes:[Fe.$],styles:[".warn[_ngcontent-%COMP%]{color:#ffc200}"]}),n})();function _a(n,_){1&n&&e._UZ(0,"input",19)}function 
sa(n,_){1&n&&(e.TgZ(0,"option",23),e.SDv(1,24),e.qZA()),2&n&&e.Q6J("ngValue",null)}function aa(n,_){1&n&&(e.TgZ(0,"option",23),e.SDv(1,25),e.qZA()),2&n&&e.Q6J("ngValue",null)}function ra(n,_){1&n&&(e.TgZ(0,"option",23),e.SDv(1,26),e.qZA()),2&n&&e.Q6J("ngValue",null)}function la(n,_){if(1&n&&(e.TgZ(0,"option",27),e._uU(1),e.qZA()),2&n){const t=_.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function ca(n,_){if(1&n&&(e.TgZ(0,"select",20),e.YNc(1,sa,2,1,"option",21),e.YNc(2,aa,2,1,"option",21),e.YNc(3,ra,2,1,"option",21),e.YNc(4,la,2,2,"option",22),e.qZA()),2&n){const t=e.oxw();e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.pools)}}function da(n,_){1&n&&(e.TgZ(0,"span",28),e.SDv(1,29),e.qZA())}function pa(n,_){1&n&&(e.TgZ(0,"span",28),e.SDv(1,30),e.qZA())}function ga(n,_){1&n&&(e.TgZ(0,"span",28),e.SDv(1,31),e.qZA())}let ua=(()=>{class n{constructor(t,o,i,s,a,d){this.activeModal=t,this.actionLabels=o,this.authStorageService=i,this.notificationService=s,this.poolService=a,this.rbdService=d,this.pools=null,this.editing=!1,this.poolPermission=this.authStorageService.getPermissions().pool,this.createForm()}createForm(){this.namespaceForm=new M.d({pool:new r.NI(""),namespace:new r.NI("")},this.validator(),this.asyncValidator())}validator(){return t=>{const o=t.get("pool"),i=t.get("namespace");let s=null;o.value||(s={required:!0}),o.setErrors(s);let a=null;return i.value||(a={required:!0}),i.setErrors(a),null}}asyncValidator(){return t=>new Promise(o=>{const i=t.get("pool"),s=t.get("namespace");this.rbdService.listNamespaces(i.value).subscribe(a=>{if(a.some(d=>d.namespace===s.value)){const d={namespaceExists:!0};s.setErrors(d),o(d)}else o(null)})})}ngOnInit(){this.onSubmit=new Et.xQ,this.poolPermission.read&&this.poolService.list(["pool_name","type","application_metadata"]).then(t=>{const o=[];for(const i of 
t)this.rbdService.isRBDPool(i)&&"replicated"===i.type&&o.push(i);if(this.pools=o,1===this.pools.length){const i=this.pools[0].pool_name;this.namespaceForm.get("pool").setValue(i)}})}submit(){const t=this.namespaceForm.getValue("pool"),o=this.namespaceForm.getValue("namespace"),i=new F.R;i.name="rbd/namespace/create",i.metadata={pool:t,namespace:o},this.rbdService.createNamespace(t,o).toPromise().then(()=>{this.notificationService.show(Ve.k.success,"Created namespace '" + t + "/" + o + "'"),this.activeModal.close(),this.onSubmit.next()}).catch(()=>{this.namespaceForm.setErrors({cdSubmitButton:!0})})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(I.Kz),e.Y36(D.p4),e.Y36(oe.j),e.Y36(Le.g),e.Y36(Ke.q),e.Y36(x))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-namespace-form-modal"]],decls:23,vars:9,consts:function(){let _,t,o,i,s,a,d,c,u;return _="Create Namespace",t="Pool",o="Name",i="Loading...",s="-- No rbd pools available --",a="-- Select a pool --",d="This field is required.",c="This field is required.",u="Namespace already exists.",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","namespaceForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","pool",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["class","form-control","type","text","placeholder","Pool name...","id","pool","name","pool","formControlName","pool",4,"ngIf"],["id","pool","name","pool","class","form-control","formControlName","pool",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["for","namespace",1,"cd-col-form-label","required"],o,["type","text","placeholder","Namespace name...","id","namespace","name","namespace","formControlName","namespace","autofocus","",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["type","text","placeholder","Pool 
name...","id","pool","name","pool","formControlName","pool",1,"form-control"],["id","pool","name","pool","formControlName","pool",1,"form-control"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],[3,"ngValue"],i,s,a,[3,"value"],[1,"invalid-feedback"],d,c,u]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"div",7),e.TgZ(8,"label",8),e.SDv(9,9),e.qZA(),e.TgZ(10,"div",10),e.YNc(11,_a,1,0,"input",11),e.YNc(12,ca,5,4,"select",12),e.YNc(13,da,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(14,"div",7),e.TgZ(15,"label",14),e.SDv(16,15),e.qZA(),e.TgZ(17,"div",10),e._UZ(18,"input",16),e.YNc(19,pa,2,0,"span",13),e.YNc(20,ga,2,0,"span",13),e.qZA(),e.qZA(),e.qZA(),e.TgZ(21,"div",17),e.TgZ(22,"cd-form-button-panel",18),e.NdJ("submitActionEvent",function(){return o.submit()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.namespaceForm),e.xp6(7),e.Q6J("ngIf",!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.namespaceForm.showError("pool",i,"required")),e.xp6(6),e.Q6J("ngIf",o.namespaceForm.showError("namespace",i,"required")),e.xp6(1),e.Q6J("ngIf",o.namespaceForm.showError("namespace",i,"namespaceExists")),e.xp6(2),e.Q6J("form",o.namespaceForm)("submitText",o.actionLabels.CREATE)}},directives:[R.z,r._Y,r.JL,r.sg,v.V,g.P,l.O5,f.o,r.Fj,h.b,r.JJ,r.u,le.U,O.p,r.EJ,l.sg,r.YN,r.Kr],styles:[""]}),n})(),ma=(()=>{class n{constructor(t,o,i,s,a,d){this.authStorageService=t,this.rbdService=o,this.poolService=i,this.modalService=s,this.notificationService=a,this.actionLabels=d,this.selection=new 
Ee.r,this.permission=this.authStorageService.getPermissions().rbdImage,this.tableActions=[{permission:"create",icon:T.P.add,click:()=>this.createModal(),name:this.actionLabels.CREATE},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteModal(),name:this.actionLabels.DELETE,disable:()=>this.getDeleteDisableDesc()}]}ngOnInit(){this.columns=[{name:"Namespace",prop:"namespace",flexGrow:1},{name:"Pool",prop:"pool",flexGrow:1},{name:"Total images",prop:"num_images",flexGrow:1}],this.refresh()}refresh(){this.poolService.list(["pool_name","type","application_metadata"]).then(t=>{t=t.filter(i=>this.rbdService.isRBDPool(i)&&"replicated"===i.type);const o=[];t.forEach(i=>{o.push(this.rbdService.listNamespaces(i.pool_name))}),o.length>0?(0,W.D)(o).subscribe(i=>{const s=[];for(let a=0;a{s.push({id:`${c}/${u.namespace}`,pool:c,namespace:u.namespace,num_images:u.num_images})})}this.namespaces=s}):this.namespaces=[]})}updateSelection(t){this.selection=t}createModal(){this.modalRef=this.modalService.show(ua),this.modalRef.componentInstance.onSubmit.subscribe(()=>{this.refresh()})}deleteModal(){const t=this.selection.first().pool,o=this.selection.first().namespace;this.modalRef=this.modalService.show(he.M,{itemDescription:"Namespace",itemNames:[`${t}/${o}`],submitAction:()=>this.rbdService.deleteNamespace(t,o).subscribe(()=>{this.notificationService.show(Ve.k.success,"Deleted namespace '" + t + "/" + o + "'"),this.modalRef.close(),this.refresh()},()=>{this.modalRef.componentInstance.stopLoadingSpinner()})})}getDeleteDisableDesc(){var t;const o=this.selection.first();return(null==o?void 0:o.num_images)>0?"Namespace contains images":!(null===(t=this.selection)||void 0===t?void 0:t.first())}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(oe.j),e.Y36(x),e.Y36(Ke.q),e.Y36(re.Z),e.Y36(Le.g),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-namespace-list"]],features:[e._Bn([se.j])],decls:4,vars:5,consts:[["columnMode","flex","identifier","id","forceIdentifier","true","selectionType","single",3,"data","columns","fetchData","updateSelection"],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"]],template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()})("updateSelection",function(s){return o.updateSelection(s)}),e.TgZ(2,"div",1),e._UZ(3,"cd-table-actions",2),e.qZA(),e.qZA()),2&t&&(e.xp6(1),e.Q6J("data",o.namespaces)("columns",o.columns),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[Qe,ee.a,Re.K],styles:[""]}),n})(),Ta=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-performance"]],decls:2,vars:1,consts:[["uid","41FrpeUiz","grafanaStyle","two",3,"grafanaPath"]],template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e._UZ(1,"cd-grafana",0)),2&t&&(e.xp6(1),e.Q6J("grafanaPath","rbd-overview?"))},directives:[Qe,Rt.F],styles:[""]}),n})();function fa(n,_){1&n&&e._UZ(0,"input",15)}function Ca(n,_){if(1&n&&(e.TgZ(0,"option",20),e._uU(1),e.qZA()),2&n){const t=_.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function Sa(n,_){if(1&n&&(e.TgZ(0,"select",16),e.TgZ(1,"option",17),e.SDv(2,18),e.qZA(),e.YNc(3,Ca,2,2,"option",19),e.qZA()),2&n){const t=e.oxw();e.xp6(3),e.Q6J("ngForOf",t.pools)}}let Ea=(()=>{class 
n{constructor(t,o,i,s,a,d,c){this.authStorageService=t,this.rbdService=o,this.activeModal=i,this.actionLabels=s,this.fb=a,this.poolService=d,this.taskWrapper=c,this.poolPermission=this.authStorageService.getPermissions().pool}createForm(){this.purgeForm=this.fb.group({poolName:""})}ngOnInit(){this.poolPermission.read&&this.poolService.list(["pool_name","application_metadata"]).then(t=>{this.pools=t.filter(o=>o.application_metadata.includes("rbd")).map(o=>o.pool_name)}),this.createForm()}purge(){const t=this.purgeForm.getValue("poolName")||"";this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/trash/purge",{pool_name:t}),call:this.rbdService.purgeTrash(t)}).subscribe({error:()=>{this.purgeForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(x),e.Y36(I.Kz),e.Y36(D.p4),e.Y36(ot.O),e.Y36(Ke.q),e.Y36(Q.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-purge-modal"]],decls:18,vars:6,consts:function(){let _,t,o,i,s;return _="Purge Trash",t="To purge, select\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "All" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + "\xA0 or one pool and click\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "Purge" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ".\xA0",t=e.Zx4(t),o="Pool:",i="Pool 
name...",s="All",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","purgeForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],[1,"col-form-label","mx-auto"],o,["class","form-control","type","text","placeholder",i,"formControlName","poolName",4,"ngIf"],["id","poolName","name","poolName","class","form-control","formControlName","poolName",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["type","text","placeholder",i,"formControlName","poolName",1,"form-control"],["id","poolName","name","poolName","formControlName","poolName",1,"form-control"],["value",""],s,[3,"value",4,"ngFor","ngForOf"],[3,"value"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.tHW(8,7),e._UZ(9,"kbd"),e._UZ(10,"kbd"),e.N_p(),e.qZA(),e.TgZ(11,"div",8),e.TgZ(12,"label",9),e.SDv(13,10),e.qZA(),e.YNc(14,fa,1,0,"input",11),e.YNc(15,Sa,4,1,"select",12),e.qZA(),e.qZA(),e.TgZ(16,"div",13),e.TgZ(17,"cd-form-button-panel",14),e.NdJ("submitActionEvent",function(){return o.purge()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.purgeForm),e.xp6(10),e.Q6J("ngIf",!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.poolPermission.read),e.xp6(2),e.Q6J("form",o.purgeForm)("submitText",o.actionLabels.PURGE))},directives:[R.z,r._Y,r.JL,r.sg,v.V,g.P,l.O5,O.p,f.o,r.Fj,h.b,r.JJ,r.u,r.EJ,r.YN,r.Kr,l.sg],styles:[""]}),n})();function Ra(n,_){1&n&&(e.TgZ(0,"span",15),e.SDv(1,16),e.qZA())}let Ma=(()=>{class n{constructor(t,o,i,s,a){this.rbdService=t,this.activeModal=o,this.actionLabels=i,this.fb=s,this.taskWrapper=a}ngOnInit(){this.imageSpec=new Z.N(this.poolName,this.namespace,this.imageName).toString(),this.restoreForm=this.fb.group({name:this.imageName})}restore(){const t=this.restoreForm.getValue("name"),o=new 
Z.N(this.poolName,this.namespace,this.imageId);this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/trash/restore",{image_id_spec:o.toString(),new_image_name:t}),call:this.rbdService.restoreTrash(o,t)}).subscribe({error:()=>{this.restoreForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(x),e.Y36(I.Kz),e.Y36(D.p4),e.Y36(ot.O),e.Y36(Q.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-restore-modal"]],decls:18,vars:7,consts:function(){let _,t,o,i;return _="Restore Image",t="To restore\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "" + "\ufffd0\ufffd" + "@" + "\ufffd1\ufffd" + "" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ",\xA0 type the image's new name and click\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "Restore" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ".",t=e.Zx4(t),o="New Name",i="This field is required.",[[3,"modalRef"],[1,"modal-title"],_,[1,"modal-content"],["name","restoreForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","name",1,"col-form-label"],o,["type","text","name","name","id","name","autocomplete","off","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],i]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.tHW(8,7),e._UZ(9,"kbd"),e._UZ(10,"kbd"),e.N_p(),e.qZA(),e.TgZ(11,"div",8),e.TgZ(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,Ra,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(16,"div",13),e.TgZ(17,"cd-form-button-panel",14),e.NdJ("submitActionEvent",function(){return o.restore()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const 
i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.restoreForm),e.xp6(6),e.pQV(o.imageSpec)(o.imageId),e.QtT(8),e.xp6(5),e.Q6J("ngIf",o.restoreForm.showError("name",i,"required")),e.xp6(2),e.Q6J("form",o.restoreForm)("submitText",o.actionLabels.RESTORE)}},directives:[R.z,r._Y,r.JL,r.sg,v.V,g.P,f.o,r.Fj,h.b,r.JJ,r.u,le.U,l.O5,O.p],styles:[""]}),n})();const Oa=["expiresTpl"],Aa=["deleteTpl"],ha=function(n){return[n]};function Pa(n,_){if(1&n){const t=e.EpF();e.TgZ(0,"button",6),e.NdJ("click",function(){return e.CHM(t),e.oxw().purgeModal()}),e._UZ(1,"i",7),e.ynx(2),e.SDv(3,8),e.BQk(),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("disabled",t.disablePurgeBtn),e.xp6(1),e.Q6J("ngClass",e.VKq(2,ha,t.icons.destroy))}}function Ia(n,_){1&n&&(e.ynx(0),e.SDv(1,10),e.BQk())}function ba(n,_){1&n&&(e.ynx(0),e.SDv(1,11),e.BQk())}function Na(n,_){if(1&n&&(e.YNc(0,Ia,2,0,"ng-container",9),e.YNc(1,ba,2,0,"ng-container",9),e._uU(2),e.ALo(3,"cdDate")),2&n){const t=_.row,o=_.value;e.Q6J("ngIf",t.cdIsExpired),e.xp6(1),e.Q6J("ngIf",!t.cdIsExpired),e.xp6(1),e.hij(" ",e.lcZ(3,3,o),"\n")}}function Da(n,_){if(1&n&&(e.TgZ(0,"p",13),e.TgZ(1,"strong"),e.ynx(2),e.SDv(3,14),e.ALo(4,"cdDate"),e.BQk(),e.qZA(),e.qZA()),2&n){const t=e.oxw().expiresAt;e.xp6(4),e.pQV(e.lcZ(4,1,t)),e.QtT(3)}}function va(n,_){1&n&&e.YNc(0,Da,5,3,"p",12),2&n&&e.Q6J("ngIf",!_.isExpired)}let La=(()=>{class n{constructor(t,o,i,s,a,d,c){this.authStorageService=t,this.rbdService=o,this.modalService=i,this.cdDatePipe=s,this.taskListService=a,this.taskWrapper=d,this.actionLabels=c,this.icons=T.P,this.executingTasks=[],this.selection=new Ee.r,this.tableStatus=new 
U.E,this.disablePurgeBtn=!0,this.permission=this.authStorageService.getPermissions().rbdImage,this.tableActions=[{permission:"update",icon:T.P.undo,click:()=>this.restoreModal(),name:this.actionLabels.RESTORE},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteModal(),name:this.actionLabels.DELETE}]}ngOnInit(){this.columns=[{name:"ID",prop:"id",flexGrow:1,cellTransformation:ve.e.executing},{name:"Name",prop:"name",flexGrow:1},{name:"Pool",prop:"pool_name",flexGrow:1},{name:"Namespace",prop:"namespace",flexGrow:1},{name:"Status",prop:"deferment_end_time",flexGrow:1,cellTemplate:this.expiresTpl},{name:"Deleted At",prop:"deletion_time",flexGrow:1,pipe:this.cdDatePipe}],this.taskListService.init(()=>this.rbdService.listTrash(),i=>this.prepareResponse(i),i=>this.images=i,()=>this.onFetchError(),i=>["rbd/trash/remove","rbd/trash/restore"].includes(i.name),(i,s)=>new Z.N(i.pool_name,i.namespace,i.id).toString()===s.metadata.image_id_spec,void 0)}prepareResponse(t){let o=[];const i={};let s;if(t.forEach(a=>{C().isUndefined(i[a.status])&&(i[a.status]=[]),i[a.status].push(a.pool_name),o=o.concat(a.value),this.disablePurgeBtn=!o.length}),i[3]?s=3:i[1]?s=1:i[2]&&(s=2),s){const a=(i[s].length>1?"pools ":"pool ")+i[s].join();this.tableStatus=new U.E(s,a)}else this.tableStatus=new U.E;return o.forEach(a=>{a.cdIsExpired=Me()().isAfter(a.deferment_end_time)}),o}onFetchError(){this.table.reset(),this.tableStatus=new U.E(ce.T.ValueException)}updateSelection(t){this.selection=t}restoreModal(){const t={poolName:this.selection.first().pool_name,namespace:this.selection.first().namespace,imageName:this.selection.first().name,imageId:this.selection.first().id};this.modalRef=this.modalService.show(Ma,t)}deleteModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().id,s=this.selection.first().deferment_end_time,a=Me()().isAfter(s),d=new 
Z.N(t,o,i);this.modalRef=this.modalService.show(he.M,{itemDescription:"RBD",itemNames:[d],bodyTemplate:this.deleteTpl,bodyContext:{expiresAt:s,isExpired:a},submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new F.R("rbd/trash/remove",{image_id_spec:d.toString()}),call:this.rbdService.removeTrash(d,!0)})})}purgeModal(){this.modalService.show(Ea)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(x),e.Y36(re.Z),e.Y36(ze.N),e.Y36(se.j),e.Y36(Q.P),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(ee.a,7),e.Gf(Oa,7),e.Gf(Aa,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.table=i.first),e.iGM(i=e.CRH())&&(o.expiresTpl=i.first),e.iGM(i=e.CRH())&&(o.deleteTpl=i.first)}},features:[e._Bn([se.j])],decls:9,vars:8,consts:function(){let _,t,o,i;return _="Purge Trash",t="Expired at",o="Protected until",i="This image is protected until " + "\ufffd0\ufffd" + ".",[["columnMode","flex","identifier","id","forceIdentifier","true","selectionType","single",3,"data","columns","status","autoReload","fetchData","updateSelection"],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"],["class","btn btn-light","type","button",3,"disabled","click",4,"ngIf"],["expiresTpl",""],["deleteTpl",""],["type","button",1,"btn","btn-light",3,"disabled","click"],["aria-hidden","true",3,"ngClass"],_,[4,"ngIf"],t,o,["class","text-danger",4,"ngIf"],[1,"text-danger"],i]},template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0),e.NdJ("fetchData",function(){return o.taskListService.fetch()})("updateSelection",function(s){return 
o.updateSelection(s)}),e.TgZ(2,"div",1),e._UZ(3,"cd-table-actions",2),e.YNc(4,Pa,4,4,"button",3),e.qZA(),e.qZA(),e.YNc(5,Na,4,5,"ng-template",null,4,e.W1O),e.YNc(7,va,1,1,"ng-template",null,5,e.W1O)),2&t&&(e.xp6(1),e.Q6J("data",o.images)("columns",o.columns)("status",o.tableStatus)("autoReload",-1),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions),e.xp6(1),e.Q6J("ngIf",o.permission.delete))},directives:[Qe,ee.a,Re.K,l.O5,f.o,l.mk],pipes:[ze.N],styles:[""]}),n})(),Mt=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[[l.ez,Jn,r.u5,r.UX,I.Oz,I.dT,I.HK,Ie.b,_e.m,m.Bz,ne.xc]]}),n})();const Fa=[{path:"",redirectTo:"rbd",pathMatch:"full"},{path:"rbd",canActivate:[j.T],data:{breadcrumbs:"Images"},children:[{path:"",component:ia},{path:"namespaces",component:ma,data:{breadcrumbs:"Namespaces"}},{path:"trash",component:La,data:{breadcrumbs:"Trash"}},{path:"performance",component:Ta,data:{breadcrumbs:"Overall Performance"}},{path:D.MQ.CREATE,component:$e,data:{breadcrumbs:D.Qn.CREATE}},{path:`${D.MQ.EDIT}/:image_spec`,component:$e,data:{breadcrumbs:D.Qn.EDIT}},{path:`${D.MQ.CLONE}/:image_spec/:snap`,component:$e,data:{breadcrumbs:D.Qn.CLONE}},{path:`${D.MQ.COPY}/:image_spec`,component:$e,data:{breadcrumbs:D.Qn.COPY}},{path:`${D.MQ.COPY}/:image_spec/:snap`,component:$e,data:{breadcrumbs:D.Qn.COPY}}]},{path:"mirroring",component:Ki,canActivate:[j.T],data:{breadcrumbs:"Mirroring"}},{path:"iscsi",canActivate:[j.T],data:{breadcrumbs:"iSCSI"},children:[{path:"",redirectTo:"overview",pathMatch:"full"},{path:"overview",component:zn,data:{breadcrumbs:"Overview"}},{path:"targets",data:{breadcrumbs:"Targets"},children:[{path:"",component:Ln},{path:D.MQ.CREATE,component:rt,data:{breadcrumbs:D.Qn.CREATE}},{path:`${D.MQ.EDIT}/:target_iqn`,component:rt,data:{breadcrumbs:D.Qn.EDIT}}]}]}];let $a=(()=>{class n{}return n.\u0275fac=function(t){return 
new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[[Mt,m.Bz.forChild(Fa)]]}),n})()},54555:(it,Oe,p)=>{p.d(Oe,{d:()=>Q});var l=p(74788),r=p(24751),m=p(23815),ne=p.n(m),I=p(80226),Ie=p(65862),D=p(95463),j=p(30633),_e=p(28211),be=p(34089),C=p(41582),W=p(12057),b=p(56310),ie=p(18372),Y=p(87925),e=p(94276);let k=(()=>{class R{constructor(g,f){this.control=g,this.formatter=f}setValue(g){const f=this.formatter.toMilliseconds(g);this.control.control.setValue(`${f} ms`)}ngOnInit(){this.setValue(this.control.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.control.value))}onUpdate(g){this.setValue(g)}}return R.\u0275fac=function(g){return new(g||R)(l.Y36(r.a5),l.Y36(_e.H))},R.\u0275dir=l.lG2({type:R,selectors:[["","cdMilliseconds",""]],hostBindings:function(g,f){1&g&&l.NdJ("blur",function(O){return f.onUpdate(O.target.value)})},inputs:{ngDataReady:"ngDataReady"}}),R})();var Ne=p(20044);let Z=(()=>{class R{constructor(g,f,h,O){this.elementRef=g,this.control=f,this.dimlessBinaryPerSecondPipe=h,this.formatter=O,this.ngModelChange=new l.vpe,this.el=this.elementRef.nativeElement}ngOnInit(){this.setValue(this.el.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.el.value))}setValue(g){/^[\d.]+$/.test(g)&&(g+=this.defaultUnit||"m");const f=this.formatter.toBytes(g,0),h=this.round(f);this.el.value=this.dimlessBinaryPerSecondPipe.transform(h),null!==f?(this.ngModelChange.emit(this.el.value),this.control.control.setValue(this.el.value)):(this.ngModelChange.emit(null),this.control.control.setValue(null))}round(g){if(null!==g&&0!==g){if(!ne().isUndefined(this.minBytes)&&gthis.maxBytes)return this.maxBytes;if(!ne().isUndefined(this.roundPower)){const f=Math.round(Math.log(g)/Math.log(this.roundPower));return Math.pow(this.roundPower,f)}}return g}onBlur(g){this.setValue(g)}}return R.\u0275fac=function(g){return 
new(g||R)(l.Y36(l.SBq),l.Y36(r.a5),l.Y36(Ne.O),l.Y36(_e.H))},R.\u0275dir=l.lG2({type:R,selectors:[["","cdDimlessBinaryPerSecond",""]],hostBindings:function(g,f){1&g&&l.NdJ("blur",function(O){return f.onBlur(O.target.value)})},inputs:{ngDataReady:"ngDataReady",minBytes:"minBytes",maxBytes:"maxBytes",roundPower:"roundPower",defaultUnit:"defaultUnit"},outputs:{ngModelChange:"ngModelChange"}}),R})(),Ae=(()=>{class R{constructor(g,f){this.formatter=g,this.ngControl=f}setValue(g){const f=this.formatter.toIops(g);this.ngControl.control.setValue(`${f} IOPS`)}ngOnInit(){this.setValue(this.ngControl.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.ngControl.value))}onUpdate(g){this.setValue(g)}}return R.\u0275fac=function(g){return new(g||R)(l.Y36(_e.H),l.Y36(r.a5))},R.\u0275dir=l.lG2({type:R,selectors:[["","cdIops",""]],hostBindings:function(g,f){1&g&&l.NdJ("blur",function(O){return f.onUpdate(O.target.value)})},inputs:{ngDataReady:"ngDataReady"}}),R})();function x(R,v){if(1&R&&(l.ynx(0),l._UZ(1,"input",18),l.BQk()),2&R){const g=l.oxw().$implicit,f=l.oxw(2);l.xp6(1),l.Q6J("id",g.name)("name",g.name)("formControlName",g.name)("ngDataReady",f.ngDataReady)}}function ae(R,v){if(1&R&&(l.ynx(0),l._UZ(1,"input",19),l.BQk()),2&R){const g=l.oxw().$implicit,f=l.oxw(2);l.xp6(1),l.Q6J("id",g.name)("name",g.name)("formControlName",g.name)("ngDataReady",f.ngDataReady)}}function V(R,v){if(1&R&&(l.ynx(0),l._UZ(1,"input",20),l.BQk()),2&R){const g=l.oxw().$implicit,f=l.oxw(2);l.xp6(1),l.Q6J("id",g.name)("name",g.name)("formControlName",g.name)("ngDataReady",f.ngDataReady)}}function T(R,v){1&R&&(l.TgZ(0,"span",21),l.SDv(1,22),l.qZA())}const q=function(R){return{active:R}},M=function(R){return[R]};function B(R,v){if(1&R){const 
g=l.EpF();l.TgZ(0,"div",10),l.TgZ(1,"label",11),l._uU(2),l.TgZ(3,"cd-helper"),l._uU(4),l.qZA(),l.qZA(),l.TgZ(5,"div"),l.TgZ(6,"div",12),l.ynx(7,13),l.YNc(8,x,2,4,"ng-container",14),l.YNc(9,ae,2,4,"ng-container",14),l.YNc(10,V,2,4,"ng-container",14),l.BQk(),l.TgZ(11,"span",15),l.TgZ(12,"button",16),l.NdJ("click",function(){const O=l.CHM(g).$implicit;return l.oxw(2).reset(O.name)}),l._UZ(13,"i",7),l.qZA(),l.qZA(),l.qZA(),l.YNc(14,T,2,0,"span",17),l.qZA(),l.qZA()}if(2&R){const g=v.$implicit,f=l.oxw().$implicit,h=l.oxw(),O=l.MAs(1);l.xp6(1),l.Q6J("for",g.name),l.xp6(1),l.Oqu(g.displayName),l.xp6(2),l.Oqu(g.description),l.xp6(1),l.Gre("cd-col-form-input ",f.heading,""),l.xp6(2),l.Q6J("ngSwitch",g.type),l.xp6(1),l.Q6J("ngSwitchCase",h.configurationType.milliseconds),l.xp6(1),l.Q6J("ngSwitchCase",h.configurationType.bps),l.xp6(1),l.Q6J("ngSwitchCase",h.configurationType.iops),l.xp6(2),l.Q6J("ngClass",l.VKq(13,q,h.isDisabled(g.name))),l.xp6(1),l.Q6J("ngClass",l.VKq(15,M,h.icons.erase)),l.xp6(1),l.Q6J("ngIf",h.form.showError("configuration."+g.name,O,"min"))}}function F(R,v){if(1&R){const g=l.EpF();l.TgZ(0,"div",4),l.TgZ(1,"h4",5),l.TgZ(2,"span",6),l.NdJ("click",function(){const O=l.CHM(g).$implicit;return l.oxw().toggleSectionVisibility(O.class)}),l._uU(3),l._UZ(4,"i",7),l.qZA(),l.qZA(),l.TgZ(5,"div",8),l.YNc(6,B,15,17,"div",9),l.qZA(),l.qZA()}if(2&R){const g=v.$implicit,f=l.oxw();l.xp6(3),l.hij(" ",g.heading," "),l.xp6(1),l.Q6J("ngClass",f.sectionVisibility[g.class]?f.icons.minusCircle:f.icons.addCircle),l.xp6(1),l.Tol(g.class),l.Q6J("hidden",!f.sectionVisibility[g.class]),l.xp6(1),l.Q6J("ngForOf",g.options)}}let Q=(()=>{class R{constructor(g,f){this.formatterService=g,this.rbdConfigurationService=f,this.initializeData=new I.t(1),this.changes=new l.vpe,this.icons=Ie.P,this.ngDataReady=new l.vpe,this.configurationType=j.r,this.sectionVisibility={}}ngOnInit(){const 
g=this.createConfigurationFormGroup();this.form.addControl("configuration",g),g.valueChanges.subscribe(()=>{this.changes.emit(this.getDirtyValues.bind(this))}),this.initializeData&&this.initializeData.subscribe(f=>{this.initialData=f.initialData;const h=f.sourceType;this.rbdConfigurationService.getWritableOptionFields().forEach(O=>{const H=f.initialData.filter(De=>De.name===O.name).pop();H&&H.source===h&&this.form.get(`configuration.${O.name}`).setValue(H.value)}),this.ngDataReady.emit()}),this.rbdConfigurationService.getWritableSections().forEach(f=>this.sectionVisibility[f.class]=!1)}getDirtyValues(g=!1,f){if(g&&!f)throw new Error("ProgrammingError: If local values shall be included, a proper localFieldType argument has to be provided, too");const h={};return this.rbdConfigurationService.getWritableOptionFields().forEach(O=>{const H=this.form.get("configuration").get(O.name);this.initialData&&this.initialData[O.name]===H.value||(H.dirty||g&&H.source===f)&&(h[O.name]=null===H.value?H.value:O.type===j.r.bps?this.formatterService.toBytes(H.value):O.type===j.r.milliseconds?this.formatterService.toMilliseconds(H.value):O.type===j.r.iops?this.formatterService.toIops(H.value):H.value)}),h}createConfigurationFormGroup(){const g=new D.d({});return this.rbdConfigurationService.getWritableOptionFields().forEach(f=>{let h;if(f.type!==j.r.milliseconds&&f.type!==j.r.iops&&f.type!==j.r.bps)throw new Error(`Type ${f.type} is unknown, you may need to add it to RbdConfiguration class`);{let O=0;ne().forEach(this.initialData,H=>{H.name===f.name&&(O=H.value)}),h=new r.NI(O,r.kI.min(0))}g.addControl(f.name,h)}),g}reset(g){const f=this.form.get("configuration").get(g);f.disabled?(f.setValue(f.previousValue||0),f.enable(),f.previousValue||f.markAsPristine()):(f.previousValue=f.value,f.setValue(null),f.markAsDirty(),f.disable())}isDisabled(g){return this.form.get("configuration").get(g).disabled}toggleSectionVisibility(g){this.sectionVisibility[g]=!this.sectionVisibility[g]}}return 
R.\u0275fac=function(g){return new(g||R)(l.Y36(_e.H),l.Y36(be.n))},R.\u0275cmp=l.Xpm({type:R,selectors:[["cd-rbd-configuration-form"]],inputs:{form:"form",initializeData:"initializeData"},outputs:{changes:"changes"},decls:5,vars:2,consts:function(){let v,g,f;return v="RBD Configuration",g="Remove the local configuration value. The parent configuration value will be inherited and used instead.",f="The minimum value is 0",[[3,"formGroup"],["cfgFormGroup",""],v,["class","col-12",4,"ngFor","ngForOf"],[1,"col-12"],[1,"cd-header"],[1,"collapsible",3,"click"],["aria-hidden","true",3,"ngClass"],[3,"hidden"],["class","form-group row",4,"ngFor","ngForOf"],[1,"form-group","row"],[1,"cd-col-form-label",3,"for"],[1,"input-group"],[3,"ngSwitch"],[4,"ngSwitchCase"],[1,"input-group-append"],["type","button","data-toggle","button","title",g,1,"btn","btn-light",3,"ngClass","click"],["class","invalid-feedback",4,"ngIf"],["type","text","cdMilliseconds","",1,"form-control",3,"id","name","formControlName","ngDataReady"],["type","text","defaultUnit","b","cdDimlessBinaryPerSecond","",1,"form-control",3,"id","name","formControlName","ngDataReady"],["type","text","cdIops","",1,"form-control",3,"id","name","formControlName","ngDataReady"],[1,"invalid-feedback"],f]},template:function(g,f){1&g&&(l.TgZ(0,"fieldset",0,1),l.TgZ(2,"legend"),l.SDv(3,2),l.qZA(),l.YNc(4,F,7,7,"div",3),l.qZA()),2&g&&(l.Q6J("formGroup",f.form.get("configuration")),l.xp6(4),l.Q6J("ngForOf",f.rbdConfigurationService.sections))},directives:[r.JL,r.sg,C.V,W.sg,W.mk,b.P,ie.S,W.RF,W.n9,Y.o,W.O5,r.Fj,e.b,k,r.JJ,r.u,Z,Ae],styles:[".collapsible[_ngcontent-%COMP%]{cursor:pointer;user-select:none}"]}),R})()},71752:(it,Oe,p)=>{p.d(Oe,{P:()=>V});var l=p(64337),r=p(30633),m=p(74788);let ne=(()=>{class T{transform(M){return{0:"global",1:"pool",2:"image"}[M]}}return T.\u0275fac=function(M){return new(M||T)},T.\u0275pipe=m.Yjl({name:"rbdConfigurationSource",type:T,pure:!0}),T})();var 
I=p(28211),Ie=p(34089),D=p(12057),j=p(20044),_e=p(48537),be=p(21766);const C=["configurationSourceTpl"],W=["configurationValueTpl"],b=["poolConfTable"];function ie(T,q){1&T&&(m.TgZ(0,"span"),m.SDv(1,6),m.qZA())}function Y(T,q){1&T&&(m.TgZ(0,"strong"),m.SDv(1,7),m.qZA())}function e(T,q){1&T&&(m.TgZ(0,"strong"),m.SDv(1,8),m.qZA())}function k(T,q){1&T&&(m.TgZ(0,"div",4),m.YNc(1,ie,2,0,"span",5),m.YNc(2,Y,2,0,"strong",5),m.YNc(3,e,2,0,"strong",5),m.qZA()),2&T&&(m.Q6J("ngSwitch",q.value),m.xp6(1),m.Q6J("ngSwitchCase","global"),m.xp6(1),m.Q6J("ngSwitchCase","image"),m.xp6(1),m.Q6J("ngSwitchCase","pool"))}function Ne(T,q){if(1&T&&(m.TgZ(0,"span"),m._uU(1),m.ALo(2,"dimlessBinaryPerSecond"),m.qZA()),2&T){const M=m.oxw().value;m.xp6(1),m.Oqu(m.lcZ(2,1,M))}}function Z(T,q){if(1&T&&(m.TgZ(0,"span"),m._uU(1),m.ALo(2,"milliseconds"),m.qZA()),2&T){const M=m.oxw().value;m.xp6(1),m.Oqu(m.lcZ(2,1,M))}}function Ae(T,q){if(1&T&&(m.TgZ(0,"span"),m._uU(1),m.ALo(2,"iops"),m.qZA()),2&T){const M=m.oxw().value;m.xp6(1),m.Oqu(m.lcZ(2,1,M))}}function x(T,q){if(1&T&&(m.TgZ(0,"span"),m._uU(1),m.qZA()),2&T){const M=m.oxw().value;m.xp6(1),m.Oqu(M)}}function ae(T,q){if(1&T&&(m.TgZ(0,"div",4),m.YNc(1,Ne,3,3,"span",5),m.YNc(2,Z,3,3,"span",5),m.YNc(3,Ae,3,3,"span",5),m.YNc(4,x,2,1,"span",9),m.qZA()),2&T){const M=q.row,B=m.oxw();m.Q6J("ngSwitch",M.type),m.xp6(1),m.Q6J("ngSwitchCase",B.typeField.bps),m.xp6(1),m.Q6J("ngSwitchCase",B.typeField.milliseconds),m.xp6(1),m.Q6J("ngSwitchCase",B.typeField.iops)}}let V=(()=>{class T{constructor(M,B){this.formatterService=M,this.rbdConfigurationService=B,this.sourceField=r.h,this.typeField=r.r}ngOnInit(){this.poolConfigurationColumns=[{prop:"displayName",name:"Name"},{prop:"description",name:"Description"},{prop:"name",name:"Key"},{prop:"source",name:"Source",cellTemplate:this.configurationSourceTpl,pipe:new 
ne},{prop:"value",name:"Value",cellTemplate:this.configurationValueTpl}]}ngOnChanges(){!this.data||(this.data=this.data.filter(M=>this.rbdConfigurationService.getOptionFields().map(B=>B.name).includes(M.name)))}}return T.\u0275fac=function(M){return new(M||T)(m.Y36(I.H),m.Y36(Ie.n))},T.\u0275cmp=m.Xpm({type:T,selectors:[["cd-rbd-configuration-table"]],viewQuery:function(M,B){if(1&M&&(m.Gf(C,7),m.Gf(W,7),m.Gf(b,7)),2&M){let F;m.iGM(F=m.CRH())&&(B.configurationSourceTpl=F.first),m.iGM(F=m.CRH())&&(B.configurationValueTpl=F.first),m.iGM(F=m.CRH())&&(B.poolConfTable=F.first)}},inputs:{data:"data"},features:[m.TTD],decls:6,vars:2,consts:function(){let q,M,B;return q="Global",M="Image",B="Pool",[["identifier","name",3,"data","columns"],["poolConfTable",""],["configurationSourceTpl",""],["configurationValueTpl",""],[3,"ngSwitch"],[4,"ngSwitchCase"],q,M,B,[4,"ngSwitchDefault"]]},template:function(M,B){1&M&&(m._UZ(0,"cd-table",0,1),m.YNc(2,k,4,4,"ng-template",null,2,m.W1O),m.YNc(4,ae,5,4,"ng-template",null,3,m.W1O)),2&M&&m.Q6J("data",B.data)("columns",B.poolConfigurationColumns)},directives:[l.a,D.RF,D.n9,D.ED],pipes:[j.O,_e.J,be.A],styles:[""]}),T})()}}]); \ No newline at end of file diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/3rdpartylicenses.txt b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/3rdpartylicenses.txt index edf330098..0f40b833c 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/3rdpartylicenses.txt +++ b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/3rdpartylicenses.txt @@ -16,6 +16,83 @@ MIT @angular/router MIT +@babel/runtime +MIT +MIT License + +Copyright (c) 2014-present Sebastian McKenzie and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or 
sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +@babel/runtime-corejs3 +MIT +MIT License + +Copyright (c) 2014-present Sebastian McKenzie and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +@braintree/sanitize-url +MIT +MIT License + +Copyright (c) 2017 Braintree + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + @circlon/angular-tree-component MIT The MIT License (MIT) @@ -322,6 +399,31 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +autolinker +MIT +The MIT License (MIT) + +Copyright (c) 2014 Gregory Jacobs (http://greg-jacobs.com) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + balanced-match MIT (MIT) @@ -347,6 +449,31 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +base64-js +MIT +The MIT License (MIT) + +Copyright (c) 2014 Jameson Little + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + brace-expansion MIT MIT License @@ -372,6 +499,31 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+call-bind +MIT +MIT License + +Copyright (c) 2020 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + can-use-dom MIT The MIT License (MIT) @@ -411,6 +563,31 @@ The above copyright notice and this permission notice shall be included in all c THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+classnames +MIT +The MIT License (MIT) + +Copyright (c) 2018 Jed Watson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + concat-map MIT This software is released under the MIT license: @@ -433,6 +610,59 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+cookie +MIT +(The MIT License) + +Copyright (c) 2012-2014 Roman Shtylman +Copyright (c) 2015 Douglas Christopher Wilson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + +copy-to-clipboard +MIT +MIT License + +Copyright (c) 2017 sudodoki + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + core-js MIT Copyright (c) 2014-2022 Denis Pushkarev @@ -456,11 +686,34 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -detect-browser +core-js-pure +MIT +Copyright (c) 2014-2022 Denis Pushkarev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ + +cross-fetch MIT The MIT License (MIT) -Copyright (c) 2019 Damon Oehlman +Copyright (c) 2017 Leonardo Quixadá Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -481,281 +734,467 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -file-saver +css.escape MIT -The MIT License +Copyright Mathias Bynens -Copyright © 2016 [Eli Grey][1]. +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - [1]: http://eligrey.com +deepmerge +MIT +The MIT License (MIT) +Copyright (c) 2012 James Halliday, Josh Duff, and other contributors -lodash +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ + +detect-browser MIT -Copyright OpenJS Foundation and other contributors +The MIT License (MIT) -Based on Underscore.js, copyright Jeremy Ashkenas, -DocumentCloud and Investigative Reporters & Editors +Copyright (c) 2019 Damon Oehlman -This software consists of voluntary contributions made by many -individuals. For exact contribution history, see the revision history -available at https://github.com/lodash/lodash +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -The following license applies to all parts of this software except as -documented below: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -==== +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
-Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. +dompurify +(MPL-2.0 OR Apache-2.0) +DOMPurify +Copyright 2015 Mario Heiderich -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +DOMPurify is free software; you can redistribute it and/or modify it under the +terms of either: -==== +a) the Apache License Version 2.0, or +b) the Mozilla Public License Version 2.0 -Copyright and related rights for sample code are waived via CC0. Sample -code is defined as all source code displayed within the prose of the -documentation. +----------------------------------------------------------------------------- -CC0: http://creativecommons.org/publicdomain/zero/1.0/ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at -==== + http://www.apache.org/licenses/LICENSE-2.0 -Files located in the node_modules and vendor directories are externally -maintained libraries used by this software which have their own -licenses; we recommend you read them, as their terms may differ from the -terms above. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +----------------------------------------------------------------------------- +Mozilla Public License, version 2.0 -lodash-es -MIT -Copyright OpenJS Foundation and other contributors +1. Definitions -Based on Underscore.js, copyright Jeremy Ashkenas, -DocumentCloud and Investigative Reporters & Editors +1.1. “Contributor” -This software consists of voluntary contributions made by many -individuals. For exact contribution history, see the revision history -available at https://github.com/lodash/lodash + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. -The following license applies to all parts of this software except as -documented below: +1.2. “Contributor Version” -==== + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: +1.3. 
“Contribution” -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. + means Covered Software of a particular Contributor. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +1.4. “Covered Software” -==== + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. -Copyright and related rights for sample code are waived via CC0. Sample -code is defined as all source code displayed within the prose of the -documentation. +1.5. “Incompatible With Secondary Licenses” + means -CC0: http://creativecommons.org/publicdomain/zero/1.0/ + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or -==== + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. -Files located in the node_modules and vendor directories are externally -maintained libraries used by this software which have their own -licenses; we recommend you read them, as their terms may differ from the -terms above. +1.6. “Executable Form” + means any form of the work other than Source Code Form. -lodash.debounce -MIT -Copyright jQuery Foundation and other contributors +1.7. 
“Larger Work” -Based on Underscore.js, copyright Jeremy Ashkenas, -DocumentCloud and Investigative Reporters & Editors + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. -This software consists of voluntary contributions made by many -individuals. For exact contribution history, see the revision history -available at https://github.com/lodash/lodash +1.8. “License” -The following license applies to all parts of this software except as -documented below: + means this document. -==== +1.9. “Licensable” -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. +1.10. “Modifications” -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + means any of the following: -==== + a. 
any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or -Copyright and related rights for sample code are waived via CC0. Sample -code is defined as all source code displayed within the prose of the -documentation. + b. any new file in Source Code Form that contains any Covered Software. -CC0: http://creativecommons.org/publicdomain/zero/1.0/ +1.11. “Patent Claims” of a Contributor -==== + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. -Files located in the node_modules and vendor directories are externally -maintained libraries used by this software which have their own -licenses; we recommend you read them, as their terms may differ from the -terms above. +1.12. “Secondary License” + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. -lodash.memoize -MIT -Copyright jQuery Foundation and other contributors +1.13. “Source Code Form” -Based on Underscore.js, copyright Jeremy Ashkenas, -DocumentCloud and Investigative Reporters & Editors + means the form of the work preferred for making modifications. -This software consists of voluntary contributions made by many -individuals. For exact contribution history, see the revision history -available at https://github.com/lodash/lodash +1.14. “You” (or “Your”) -The following license applies to all parts of this software except as -documented below: + means an individual or a legal entity exercising rights under this + License. 
For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. -==== -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: +2. License Grants and Conditions -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. +2.1. Grants -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: -==== + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and -Copyright and related rights for sample code are waived via CC0. 
Sample -code is defined as all source code displayed within the prose of the -documentation. + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. -CC0: http://creativecommons.org/publicdomain/zero/1.0/ +2.2. Effective Date -==== + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. -Files located in the node_modules and vendor directories are externally -maintained libraries used by this software which have their own -licenses; we recommend you read them, as their terms may differ from the -terms above. +2.3. Limitations on Grant Scope + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: -lodash.throttle -MIT -Copyright jQuery Foundation and other contributors + a. for any code that a Contributor has removed from Covered Software; or -Based on Underscore.js, copyright Jeremy Ashkenas, -DocumentCloud and Investigative Reporters & Editors + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or -This software consists of voluntary contributions made by many -individuals. For exact contribution history, see the revision history -available at https://github.com/lodash/lodash + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. 
-The following license applies to all parts of this software except as -documented below: + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). -==== +2.4. Subsequent Licenses -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. +2.5. Representation -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. -==== +2.6. Fair Use -Copyright and related rights for sample code are waived via CC0. 
Sample -code is defined as all source code displayed within the prose of the -documentation. + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. -CC0: http://creativecommons.org/publicdomain/zero/1.0/ +2.7. Conditions -==== + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. -Files located in the node_modules and vendor directories are externally -maintained libraries used by this software which have their own -licenses; we recommend you read them, as their terms may differ from the -terms above. +3. Responsibilities -mobx +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. 
Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. 
In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + + + +drange MIT The MIT License (MIT) -Copyright (c) 2015 Michel Weststrate +Copyright (c) 2014 David Tudury Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -776,18 +1215,162 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-moment +fast-json-patch MIT -Copyright (c) JS Foundation and other contributors +(The MIT License) -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: +Copyright (c) 2013, 2014, 2020 Joachim Wester + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +fault +MIT +(The MIT License) + +Copyright (c) 2015 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +file-saver +MIT +The MIT License + +Copyright © 2016 [Eli Grey][1]. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + [1]: http://eligrey.com + + +format +MIT + +formdata-node +MIT +The MIT License (MIT) + +Copyright (c) 2017-present Nick K. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +function-bind +MIT +Copyright (c) 2013 Raynos. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +get-intrinsic +MIT +MIT License + +Copyright (c) 2020 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +has +MIT +Copyright (c) 2013 Thiago de Arruda + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. @@ -802,11 +1385,1125 @@ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -ng-block-ui +has-symbols +MIT +MIT License + +Copyright (c) 2016 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +highlight.js +BSD-3-Clause +BSD 3-Clause License + +Copyright (c) 2006, Ivan Sagalaev. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +hoist-non-react-statics +BSD-3-Clause +Software License Agreement (BSD License) +======================================== + +Copyright (c) 2015, Yahoo! Inc. 
All rights reserved. +---------------------------------------------------- + +Redistribution and use of this software in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Yahoo! Inc. nor the names of YUI's contributors may be + used to endorse or promote products derived from this software without + specific prior written permission of Yahoo! Inc. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +ieee754 +BSD-3-Clause +Copyright 2008 Fair Oaks Labs, Inc. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +immutable +MIT +MIT License + +Copyright (c) 2014-present, Facebook, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +is-plain-object +MIT +The MIT License (MIT) + +Copyright (c) 2014-2017, Jon Schlinkert. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ + +js-file-download +MIT +Copyright 2017 Kenneth Jiang + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE + + +js-yaml +MIT +(The MIT License) + +Copyright (C) 2011-2015 by Vitaly Puzrin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +lodash +MIT +Copyright OpenJS Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. 
+ +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. + + +lodash-es +MIT +Copyright OpenJS Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. 
+ +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. + + +lodash.debounce +MIT +Copyright jQuery Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. 
+ +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. + + +lodash.memoize +MIT +Copyright jQuery Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. 
+ +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. + + +lodash.throttle +MIT +Copyright jQuery Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. 
+ +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. + + +lowlight +MIT +(The MIT License) + +Copyright (c) 2016 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +mobx +MIT +The MIT License (MIT) + +Copyright (c) 2015 Michel Weststrate + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +moment +MIT +Copyright (c) JS Foundation and other contributors + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + + +ng-block-ui +MIT +MIT License + +Copyright (c) 2017 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +ng-click-outside +MIT +The MIT License (MIT) + +Copyright (c) 2016 Eugene Cheung + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +ng2-charts +ISC + +ngx-pipe-function +MIT License + +Copyright (c) 2019 Artem Lanovyy + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +ngx-toastr +MIT +The MIT License (MIT) + +Copyright (c) Scott Cooper + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +object-assign +MIT +The MIT License (MIT) + +Copyright (c) Sindre Sorhus (sindresorhus.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +object-inspect +MIT +MIT License + +Copyright (c) 2013 James Halliday + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +prop-types +MIT +MIT License + +Copyright (c) 2013-present, Facebook, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +punycode +MIT +Copyright Mathias Bynens + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +qs +BSD-3-Clause +BSD 3-Clause License + +Copyright (c) 2014, Nathan LaFreniere and other [contributors](https://github.com/ljharb/qs/graphs/contributors) +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +querystring +MIT + +Copyright 2012 Irakli Gozalishvili. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. 
+ + +querystringify +MIT +The MIT License (MIT) + +Copyright (c) 2015 Unshift.io, Arnout Kazemier, the Contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +randexp +MIT +MIT License + +Copyright (C) 2011 by fent + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +react +MIT +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +react-copy-to-clipboard +MIT +The MIT License (MIT) + +Copyright (c) 2016 Nik Butenko + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +react-debounce-input +MIT +The MIT License (MIT) + +Copyright (c) 2016 Nik Butenko + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +react-dom +MIT +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +react-immutable-proptypes +MIT +The MIT License (MIT) + +Copyright (c) 2015 James Burnett + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +react-immutable-pure-component +MIT +The MIT License (MIT) + +Copyright (c) 2017 Piotr Tomasz Monarski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +react-is +MIT +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +react-redux +MIT +The MIT License (MIT) + +Copyright (c) 2015-present Dan Abramov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +react-syntax-highlighter +MIT +MIT License + +Copyright (c) 2019 Conor Hastings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +redux +MIT +The MIT License (MIT) + +Copyright (c) 2015-present Dan Abramov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +redux-immutable +BSD-3-Clause +Copyright (c) 2016, Gajus Kuizinas (http://gajus.com/) +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of the Gajus Kuizinas (http://gajus.com/) nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANUARY BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +remarkable MIT -MIT License +The MIT License (MIT) -Copyright (c) 2017 +Copyright (c) 2014-2016, Jon Schlinkert +Copyright (c) 2014 Jon Schlinkert, Vitaly Puzrin. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -815,23 +2512,23 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. -ng-click-outside +repeat-string MIT The MIT License (MIT) -Copyright (c) 2016 Eugene Cheung +Copyright (c) 2014-2016, Jon Schlinkert. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -852,13 +2549,11 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -ng2-charts -ISC - -ngx-pipe-function -MIT License +requires-port +MIT +The MIT License (MIT) -Copyright (c) 2019 Artem Lanovyy +Copyright (c) 2015 Unshift.io, Arnout Kazemier, the Contributors. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -879,11 +2574,12 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -ngx-toastr + +reselect MIT The MIT License (MIT) -Copyright (c) Scott Cooper +Copyright (c) 2015-2018 Reselect Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -904,6 +2600,31 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ret +MIT +MIT License + +Copyright (C) 2011 by fent + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + rxjs Apache-2.0 Apache License @@ -1081,81 +2802,329 @@ Apache-2.0 incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2015-2018 Google, Inc., Netflix, Inc., Microsoft Corp. 
and contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +scheduler +MIT +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +serialize-error +MIT +MIT License + +Copyright (c) Sindre Sorhus (https://sindresorhus.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +side-channel +MIT +MIT License + +Copyright (c) 2019 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +simplebar +MIT + +simplebar-angular +The MIT License (MIT) + +Copyright (c) 2015 Jonathan Nicol + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +swagger-client +Apache-2.0 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and - APPENDIX: How to apply the Apache License to your work. + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
+ (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and - Copyright (c) 2015-2018 Google, Inc., Netflix, Inc., Microsoft Corp. and contributors + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. - http://www.apache.org/licenses/LICENSE-2.0 + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -simplebar -MIT -The MIT License (MIT) + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. -Copyright (c) 2015 Jonathan Nicol + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + END OF TERMS AND CONDITIONS -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
+ APPENDIX: How to apply the Apache License to your work. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -simplebar-angular -The MIT License (MIT) + Copyright [yyyy] [name of copyright owner] -Copyright (c) 2015 Jonathan Nicol + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + http://www.apache.org/licenses/LICENSE-2.0 -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. swagger-ui Apache-2.0 @@ -1363,6 +3332,37 @@ Apache-2.0 limitations under the License. +toggle-selection +MIT + +traverse +MIT +Copyright 2010 James Halliday (mail@substack.net) + +This project is free software released under the MIT/X11 license: +http://www.opensource.org/licenses/mit-license.php + +Copyright 2010 James Halliday (mail@substack.net) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + tslib 0BSD Copyright (c) Microsoft Corporation. @@ -1378,6 +3378,111 @@ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +url +MIT +The MIT License (MIT) + +Copyright Joyent, Inc. and other Node contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +url-parse +MIT +The MIT License (MIT) + +Copyright (c) 2015 Unshift.io, Arnout Kazemier, the Contributors. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +xml-but-prettier +MIT +The MIT License (MIT) + +Copyright (c) 2015 Jonathan Persson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +zenscroll +Unlicense +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +For more information, please refer to + + + zone.js MIT The MIT License diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/483.43ef92bcd845cb24eae3.js b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/483.43ef92bcd845cb24eae3.js new file mode 100644 index 000000000..1f9b92973 --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/483.43ef92bcd845cb24eae3.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[483],{70483:(ct,Oe,p)=>{p.r(Oe),p.d(Oe,{BlockModule:()=>$t,RoutedBlockModule:()=>sr});var l=p(12057),a=p(24751),m=p(6283),ne=p(19723),N=p(38549),Ie=p(37496),v=p(79512),U=p(4222),re=p(54462),Ae=p(44466),be=p(23815),S=p.n(be),le=p(35758),D=p(64762),ie=p(58497),V=p(93523),e=p(74788);let X=class{constructor(s){this.http=s}listTargets(){return this.http.get("api/iscsi/target")}getTarget(s){return this.http.get(`api/iscsi/target/${s}`)}updateTarget(s,t){return this.http.put(`api/iscsi/target/${s}`,t,{observe:"response"})}status(){return this.http.get("ui-api/iscsi/status")}settings(){return this.http.get("ui-api/iscsi/settings")}version(){return this.http.get("ui-api/iscsi/version")}portals(){return this.http.get("ui-api/iscsi/portals")}createTarget(s){return this.http.post("api/iscsi/target",s,{observe:"response"})}deleteTarget(s){return this.http.delete(`api/iscsi/target/${s}`,{observe:"response"})}getDiscovery(){return this.http.get("api/iscsi/discoveryauth")}updateDiscovery(s){return this.http.put("api/iscsi/discoveryauth",s)}overview(){return this.http.get("ui-api/iscsi/overview")}};X.\u0275fac=function(s){return new(s||X)(e.LFG(ie.eN))},X.\u0275prov=e.Yz7({token:X,factory:X.\u0275fac,providedIn:"root"}),X=(0,D.gn)([V.o,(0,D.w6)("design:paramtypes",[ie.eN])],X);var Ne=p(88002),Fe=p(76189),L=p(19358),he=p(34089);let q=class extends 
Fe.S{constructor(s,t){super(),this.http=s,this.rbdConfigurationService=t}isRBDPool(s){return-1!==S().indexOf(s.application_metadata,"rbd")&&!s.pool_name.includes("/")}create(s){return this.http.post("api/block/image",s,{observe:"response"})}delete(s){return this.http.delete(`api/block/image/${s.toStringEncoded()}`,{observe:"response"})}update(s,t){return this.http.put(`api/block/image/${s.toStringEncoded()}`,t,{observe:"response"})}get(s){return this.http.get(`api/block/image/${s.toStringEncoded()}`)}list(s){return this.http.get("api/block/image",{params:s,headers:{Accept:this.getVersionHeaderValue(2,0)},observe:"response"}).pipe((0,Ne.U)(t=>t.body.map(o=>(o.value.map(i=>(i.configuration&&i.configuration.map(_=>Object.assign(_,this.rbdConfigurationService.getOptionByName(_.name))),i)),o.headers=t.headers,o))))}copy(s,t){return this.http.post(`api/block/image/${s.toStringEncoded()}/copy`,t,{observe:"response"})}flatten(s){return this.http.post(`api/block/image/${s.toStringEncoded()}/flatten`,null,{observe:"response"})}defaultFeatures(){return this.http.get("api/block/image/default_features")}cloneFormatVersion(){return this.http.get("api/block/image/clone_format_version")}createSnapshot(s,t){const o={snapshot_name:t};return this.http.post(`api/block/image/${s.toStringEncoded()}/snap`,o,{observe:"response"})}renameSnapshot(s,t,o){const i={new_snap_name:o};return this.http.put(`api/block/image/${s.toStringEncoded()}/snap/${t}`,i,{observe:"response"})}protectSnapshot(s,t,o){const i={is_protected:o};return this.http.put(`api/block/image/${s.toStringEncoded()}/snap/${t}`,i,{observe:"response"})}rollbackSnapshot(s,t){return this.http.post(`api/block/image/${s.toStringEncoded()}/snap/${t}/rollback`,null,{observe:"response"})}cloneSnapshot(s,t,o){return this.http.post(`api/block/image/${s.toStringEncoded()}/snap/${t}/clone`,o,{observe:"response"})}deleteSnapshot(s,t){return 
this.http.delete(`api/block/image/${s.toStringEncoded()}/snap/${t}`,{observe:"response"})}listTrash(){return this.http.get("api/block/image/trash/")}createNamespace(s,t){return this.http.post(`api/block/pool/${s}/namespace`,{namespace:t},{observe:"response"})}listNamespaces(s){return this.http.get(`api/block/pool/${s}/namespace/`)}deleteNamespace(s,t){return this.http.delete(`api/block/pool/${s}/namespace/${t}`,{observe:"response"})}moveTrash(s,t){return this.http.post(`api/block/image/${s.toStringEncoded()}/move_trash`,{delay:t},{observe:"response"})}purgeTrash(s){return this.http.post(`api/block/image/trash/purge/?pool_name=${s}`,null,{observe:"response"})}restoreTrash(s,t){return this.http.post(`api/block/image/trash/${s.toStringEncoded()}/restore`,{new_image_name:t},{observe:"response"})}removeTrash(s,t=!1){return this.http.delete(`api/block/image/trash/${s.toStringEncoded()}/?force=${t}`,{observe:"response"})}};q.\u0275fac=function(s){return new(s||q)(e.LFG(ie.eN),e.LFG(he.n))},q.\u0275prov=e.Yz7({token:q,factory:q.\u0275fac,providedIn:"root"}),(0,D.gn)([(0,D.fM)(1,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[L.N,String]),(0,D.w6)("design:returntype",void 0)],q.prototype,"createSnapshot",null),(0,D.gn)([(0,D.fM)(2,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[L.N,String,String]),(0,D.w6)("design:returntype",void 0)],q.prototype,"renameSnapshot",null),(0,D.gn)([(0,D.fM)(2,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[L.N,String,Boolean]),(0,D.w6)("design:returntype",void 0)],q.prototype,"protectSnapshot",null),(0,D.gn)([(0,D.fM)(1,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[L.N,String]),(0,D.w6)("design:returntype",void 0)],q.prototype,"restoreTrash",null),q=(0,D.gn)([V.o,(0,D.w6)("design:paramtypes",[ie.eN,he.n])],q);var 
F=p(7022),y=p(14745),T=p(65862),H=p(93614),x=p(95463),z=p(77205),A=p(86919),R=p(76111),u=p(32337),C=p(60312),h=p(41582),O=p(56310),$=p(87925),k=p(94276),j=p(30839);function to(n,s){if(1&n&&(e.TgZ(0,"option",6),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("ngValue",t),e.xp6(1),e.Oqu(t)}}function oo(n,s){if(1&n&&(e.TgZ(0,"select",5),e._UZ(1,"option",6),e.YNc(2,to,2,2,"option",7),e.qZA()),2&n){const t=e.oxw();e.s9C("id",t.setting),e.s9C("name",t.setting),e.Q6J("formControlName",t.setting),e.xp6(1),e.Q6J("ngValue",null),e.xp6(1),e.Q6J("ngForOf",t.limits.values)}}function no(n,s){if(1&n&&e._UZ(0,"input",10),2&n){const t=e.oxw(2);e.Q6J("formControlName",t.setting)}}function io(n,s){if(1&n&&e._UZ(0,"input",11),2&n){const t=e.oxw(2);e.Q6J("formControlName",t.setting)}}function so(n,s){if(1&n&&(e.ynx(0),e._UZ(1,"br"),e.TgZ(2,"div",12),e._UZ(3,"input",13),e.TgZ(4,"label",14),e._uU(5,"Yes"),e.qZA(),e.qZA(),e.TgZ(6,"div",12),e._UZ(7,"input",13),e.TgZ(8,"label",14),e._uU(9,"No"),e.qZA(),e.qZA(),e.BQk()),2&n){const t=e.oxw(2);e.xp6(3),e.Q6J("id",t.setting+"True")("value",!0)("formControlName",t.setting),e.xp6(1),e.Q6J("for",t.setting+"True"),e.xp6(3),e.Q6J("id",t.setting+"False")("value",!1)("formControlName",t.setting),e.xp6(1),e.Q6J("for",t.setting+"False")}}function _o(n,s){if(1&n&&(e.TgZ(0,"span"),e.YNc(1,no,1,1,"input",8),e.YNc(2,io,1,1,"input",9),e.YNc(3,so,10,8,"ng-container",3),e.qZA()),2&n){const t=e.oxw();e.xp6(1),e.Q6J("ngIf","int"===t.limits.type),e.xp6(1),e.Q6J("ngIf","str"===t.limits.type),e.xp6(1),e.Q6J("ngIf","bool"===t.limits.type)}}function ao(n,s){if(1&n&&(e.TgZ(0,"span",15),e.ynx(1),e.SDv(2,16),e.BQk(),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.pQV(t.limits.min),e.QtT(2)}}function ro(n,s){if(1&n&&(e.TgZ(0,"span",15),e.ynx(1),e.SDv(2,17),e.BQk(),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.pQV(t.limits.max),e.QtT(2)}}let dt=(()=>{class n{ngOnInit(){const t=[];"min"in this.limits&&t.push(a.kI.min(this.limits.min)),"max"in 
this.limits&&t.push(a.kI.max(this.limits.max)),this.settingsForm.get(this.setting).setValidators(t)}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-setting"]],inputs:{settingsForm:"settingsForm",formDir:"formDir",setting:"setting",limits:"limits"},decls:7,vars:7,consts:function(){let s,t;return s="Must be greater than or equal to " + "\ufffd0\ufffd" + ".",t="Must be less than or equal to " + "\ufffd0\ufffd" + ".",[[1,"form-group",3,"formGroup"],[1,"col-form-label",3,"for"],["class","form-control",3,"id","name","formControlName",4,"ngIf"],[4,"ngIf"],["class","invalid-feedback",4,"ngIf"],[1,"form-control",3,"id","name","formControlName"],[3,"ngValue"],[3,"ngValue",4,"ngFor","ngForOf"],["type","number","class","form-control",3,"formControlName",4,"ngIf"],["type","text","class","form-control",3,"formControlName",4,"ngIf"],["type","number",1,"form-control",3,"formControlName"],["type","text",1,"form-control",3,"formControlName"],[1,"custom-control","custom-radio","custom-control-inline"],["type","radio",1,"custom-control-input",3,"id","value","formControlName"],[1,"custom-control-label",3,"for"],[1,"invalid-feedback"],s,t]},template:function(t,o){1&t&&(e.TgZ(0,"div",0),e.TgZ(1,"label",1),e._uU(2),e.qZA(),e.YNc(3,oo,3,5,"select",2),e.YNc(4,_o,4,3,"span",3),e.YNc(5,ao,3,1,"span",4),e.YNc(6,ro,3,1,"span",4),e.qZA()),2&t&&(e.Q6J("formGroup",o.settingsForm),e.xp6(1),e.s9C("for",o.setting),e.xp6(1),e.Oqu(o.setting),e.xp6(1),e.Q6J("ngIf","enum"===o.limits.type),e.xp6(1),e.Q6J("ngIf","enum"!==o.limits.type),e.xp6(1),e.Q6J("ngIf",o.settingsForm.showError(o.setting,o.formDir,"min")),e.xp6(1),e.Q6J("ngIf",o.settingsForm.showError(o.setting,o.formDir,"max")))},directives:[O.P,a.JL,a.sg,h.V,l.O5,$.o,a.EJ,k.b,a.JJ,a.u,a.YN,a.Kr,l.sg,a.wV,a.Fj,a._],styles:[""]}),n})();var Je=p(88820);function lo(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,30),e.qZA())}function 
co(n,s){if(1&n&&(e.TgZ(0,"span"),e.TgZ(1,"legend",10),e.SDv(2,21),e.qZA(),e.TgZ(3,"div",12),e.TgZ(4,"div",13),e.TgZ(5,"label",22),e.SDv(6,23),e.qZA(),e._UZ(7,"input",24),e.YNc(8,lo,2,0,"span",25),e.qZA(),e.qZA(),e.TgZ(9,"div",12),e.TgZ(10,"div",13),e.TgZ(11,"label",26),e.SDv(12,27),e.qZA(),e._UZ(13,"input",28),e.qZA(),e.qZA(),e.qZA()),2&n){const t=e.oxw(),o=e.MAs(9);e.xp6(8),e.Q6J("ngIf",t.settingsForm.showError("lun",o,"required"))}}function po(n,s){if(1&n&&(e.TgZ(0,"option",31),e._uU(1),e.ALo(2,"iscsiBackstore"),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(e.lcZ(2,2,t))}}function go(n,s){if(1&n&&(e.TgZ(0,"div",12),e.TgZ(1,"div",13),e._UZ(2,"cd-iscsi-setting",33),e.qZA(),e.qZA()),2&n){const t=s.$implicit,o=e.oxw(2).$implicit,i=e.oxw(),_=e.MAs(9);e.xp6(2),e.Q6J("settingsForm",i.settingsForm)("formDir",_)("setting",t.key)("limits",i.getDiskControlLimits(o,t.key))}}function uo(n,s){if(1&n&&(e.ynx(0),e.YNc(1,go,3,4,"div",32),e.ALo(2,"keyvalue"),e.BQk()),2&n){const t=e.oxw().$implicit,o=e.oxw();e.xp6(1),e.Q6J("ngForOf",e.lcZ(2,1,o.disk_default_controls[t]))}}function mo(n,s){if(1&n&&(e.ynx(0),e.YNc(1,uo,3,3,"ng-container",9),e.BQk()),2&n){const t=s.$implicit,o=e.oxw();e.xp6(1),e.Q6J("ngIf",o.settingsForm.value.backstore===t)}}let To=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.iscsiService=o,this.actionLabels=i}ngOnInit(){const t={backstore:new a.NI(this.imagesSettings[this.image].backstore),lun:new a.NI(this.imagesSettings[this.image].lun),wwn:new a.NI(this.imagesSettings[this.image].wwn)};S().forEach(this.backstores,o=>{const i=this.imagesSettings[this.image][o]||{};S().forIn(this.disk_default_controls[o],(_,r)=>{t[r]=new a.NI(i[r])})}),this.settingsForm=new x.d(t)}getDiskControlLimits(t,o){return this.disk_controls_limits?this.disk_controls_limits[t][o]:{type:"int"}}save(){const 
t=this.settingsForm.controls.backstore.value,o=this.settingsForm.controls.lun.value,i=this.settingsForm.controls.wwn.value,_={};S().forIn(this.settingsForm.controls,(r,c)=>{""!==r.value&&null!==r.value&&c in this.disk_default_controls[this.settingsForm.value.backstore]&&(_[c]=r.value,S().forEach(this.backstores,d=>{d!==t&&c in(this.imagesSettings[this.image][d]||{})&&(this.imagesSettings[this.image][d][c]=r.value)}))}),this.imagesSettings[this.image].backstore=t,this.imagesSettings[this.image].lun=o,this.imagesSettings[this.image].wwn=i,this.imagesSettings[this.image][t]=_,this.imagesSettings=Object.assign({},this.imagesSettings),this.control.updateValueAndValidity({emitEvent:!1}),this.activeModal.close()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(N.Kz),e.Y36(X),e.Y36(v.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-image-settings-modal"]],decls:25,vars:8,consts:function(){let s,t,o,i,_,r,c,d;return s="Configure",t="Changing these parameters from their default values is usually not necessary.",o="Settings",i="Backstore",_="Identifier",r="lun",c="wwn",d="This field is required.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","settingsForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"alert-warning"],t,[4,"ngIf"],[1,"cd-header"],o,[1,"form-group","row"],[1,"col-sm-12"],[1,"col-form-label"],i,["id","backstore","name","backstore","formControlName","backstore",1,"form-control"],[3,"value",4,"ngFor","ngForOf"],[4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],_,["for","lun",1,"col-form-label","required"],r,["type","number","id","lun","name","lun","formControlName","lun",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","wwn",1,"col-form-label"],c,["type","text","id","wwn","name","wwn","formControlName","wwn",1,"form-control"],[1,"invalid-feedback"],d,[3,"value"],["class","form-group 
row",4,"ngFor","ngForOf"],[3,"settingsForm","formDir","setting","limits"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.ynx(2),e.SDv(3,2),e.BQk(),e._uU(4,"\xa0 "),e.TgZ(5,"small"),e._uU(6),e.qZA(),e.BQk(),e.ynx(7,3),e.TgZ(8,"form",4,5),e.TgZ(10,"div",6),e.TgZ(11,"p",7),e.SDv(12,8),e.qZA(),e.YNc(13,co,14,1,"span",9),e.TgZ(14,"legend",10),e.SDv(15,11),e.qZA(),e.TgZ(16,"div",12),e.TgZ(17,"div",13),e.TgZ(18,"label",14),e.SDv(19,15),e.qZA(),e.TgZ(20,"select",16),e.YNc(21,po,3,4,"option",17),e.qZA(),e.qZA(),e.qZA(),e.YNc(22,mo,2,1,"ng-container",18),e.qZA(),e.TgZ(23,"div",19),e.TgZ(24,"cd-form-button-panel",20),e.NdJ("submitActionEvent",function(){return o.save()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(6),e.Oqu(o.image),e.xp6(2),e.Q6J("formGroup",o.settingsForm),e.xp6(5),e.Q6J("ngIf",o.api_version>=1),e.xp6(8),e.Q6J("ngForOf",o.backstores),e.xp6(1),e.Q6J("ngForOf",o.backstores),e.xp6(2),e.Q6J("form",o.settingsForm)("submitText",o.actionLabels.UPDATE))},directives:[C.z,a._Y,a.JL,a.sg,h.V,l.O5,O.P,$.o,a.EJ,k.b,a.JJ,a.u,l.sg,j.p,a.wV,a.Fj,a.YN,a.Kr,dt],pipes:[Je.V,l.Nd],styles:[""]}),n})();function fo(n,s){if(1&n&&(e.TgZ(0,"div",12),e.TgZ(1,"div",13),e._UZ(2,"cd-iscsi-setting",14),e.qZA(),e.qZA()),2&n){const t=s.$implicit,o=e.oxw(),i=e.MAs(5);e.xp6(2),e.Q6J("settingsForm",o.settingsForm)("formDir",i)("setting",t.key)("limits",o.getTargetControlLimits(t.key))}}let Co=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.iscsiService=o,this.actionLabels=i}ngOnInit(){const t={};S().forIn(this.target_default_controls,(o,i)=>{t[i]=new a.NI(this.target_controls.value[i])}),this.settingsForm=new x.d(t)}save(){const t={};S().forIn(this.settingsForm.controls,(o,i)=>{""===o.value||null===o.value||(t[i]=o.value)}),this.target_controls.setValue(t),this.activeModal.close()}getTargetControlLimits(t){return 
this.target_controls_limits?this.target_controls_limits[t]:["Yes","No"].includes(this.target_default_controls[t])?{type:"bool"}:{type:"int"}}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(N.Kz),e.Y36(X),e.Y36(v.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-iqn-settings-modal"]],decls:13,vars:7,consts:function(){let s,t;return s="Advanced Settings",t="Changing these parameters from their default values is usually not necessary.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","settingsForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"alert-warning"],t,["class","form-group row",4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"form-group","row"],[1,"col-sm-12"],[3,"settingsForm","formDir","setting","limits"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p",7),e.SDv(8,8),e.qZA(),e.YNc(9,fo,3,4,"div",9),e.ALo(10,"keyvalue"),e.qZA(),e.TgZ(11,"div",10),e.TgZ(12,"cd-form-button-panel",11),e.NdJ("submitActionEvent",function(){return o.save()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.settingsForm),e.xp6(5),e.Q6J("ngForOf",e.lcZ(10,5,o.settingsForm.controls)),e.xp6(3),e.Q6J("form",o.settingsForm)("submitText",o.actionLabels.UPDATE))},directives:[C.z,a._Y,a.JL,a.sg,h.V,l.sg,j.p,O.P,dt],pipes:[l.Nd],styles:[""]}),n})();var de=p(63285),pt=p(63622);let So=(()=>{class n{constructor(t){this.ngControl=t}onInput(t){this.setValue(t)}setValue(t){t=S().isString(t)?t.trim():t,this.ngControl.control.setValue(t)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(a.a5))},n.\u0275dir=e.lG2({type:n,selectors:[["","cdTrim",""]],hostBindings:function(t,o){1&t&&e.NdJ("input",function(_){return o.onInput(_.target.value)})}}),n})();var Eo=p(39092),gt=p(4416),Ye=p(58039),et=p(10545);function 
Ro(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,42),e.qZA())}function Mo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,43),e.qZA())}function Oo(n,s){1&n&&(e.TgZ(0,"span",41),e.ynx(1),e.SDv(2,44),e.BQk(),e._UZ(3,"br"),e.ynx(4),e.SDv(5,45),e.BQk(),e._UZ(6,"br"),e.TgZ(7,"a",46),e.SDv(8,47),e.qZA(),e.qZA())}function Ao(n,s){1&n&&(e.TgZ(0,"span",48),e.SDv(1,49),e.qZA())}const J=function(n){return[n]};function ho(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",50),e._UZ(2,"input",51),e.TgZ(3,"span",14),e.TgZ(4,"button",52),e.NdJ("click",function(){const i=e.CHM(t),_=i.index,r=i.$implicit;return e.oxw(2).removePortal(_,r)}),e._UZ(5,"i",16),e.qZA(),e.qZA(),e.qZA(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(2);e.xp6(2),e.Q6J("value",t),e.xp6(3),e.Q6J("ngClass",e.VKq(2,J,o.icons.destroy))}}function Po(n,s){if(1&n&&(e.TgZ(0,"span",41),e.SDv(1,53),e.qZA()),2&n){const t=e.oxw(2);e.xp6(1),e.pQV(t.minimum_gateways),e.QtT(1)}}function Io(n,s){if(1&n&&(e.TgZ(0,"div",56),e._uU(1),e.qZA()),2&n){const t=e.oxw().$implicit,o=e.oxw(2);e.xp6(1),e.hij("lun: ",o.imagesSettings[t].lun,"")}}function bo(n,s){if(1&n&&(e.ynx(0),e.SDv(1,57),e.ALo(2,"iscsiBackstore"),e.BQk()),2&n){const t=e.oxw().$implicit,o=e.oxw(2);e.xp6(2),e.pQV(e.lcZ(2,1,o.imagesSettings[t].backstore)),e.QtT(1)}}function No(n,s){1&n&&(e.ynx(0),e.SDv(1,58),e.BQk())}function Fo(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",50),e._UZ(2,"input",51),e.TgZ(3,"span",14),e.YNc(4,Io,2,1,"div",54),e.TgZ(5,"button",52),e.NdJ("click",function(){const _=e.CHM(t).$implicit;return e.oxw(2).imageSettingsModal(_)}),e._UZ(6,"i",16),e.qZA(),e.TgZ(7,"button",52),e.NdJ("click",function(){const i=e.CHM(t),_=i.index,r=i.$implicit;return e.oxw(2).removeImage(_,r)}),e._UZ(8,"i",16),e.qZA(),e.qZA(),e.qZA(),e.TgZ(9,"span",48),e.YNc(10,bo,3,3,"ng-container",55),e.YNc(11,No,2,0,"ng-container",55),e.qZA(),e.BQk()}if(2&n){const 
t=s.$implicit,o=e.oxw(2);e.xp6(2),e.Q6J("value",t),e.xp6(2),e.Q6J("ngIf",o.api_version>=1),e.xp6(2),e.Q6J("ngClass",e.VKq(6,J,o.icons.deepCheck)),e.xp6(2),e.Q6J("ngClass",e.VKq(8,J,o.icons.destroy)),e.xp6(2),e.Q6J("ngIf",o.backstores.length>1),e.xp6(1),e.Q6J("ngIf",o.hasAdvancedSettings(o.imagesSettings[t][o.imagesSettings[t].backstore]))}}function Do(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,59),e.qZA())}function vo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,60),e.qZA())}function Lo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,78),e.qZA())}function $o(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,79),e.qZA())}function Zo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,80),e.qZA())}function Bo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,81),e.qZA())}function Go(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,82),e.qZA())}function yo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,83),e.qZA())}function xo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,84),e.qZA())}function wo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,85),e.qZA())}function qo(n,s){if(1&n&&(e.TgZ(0,"div",61),e.TgZ(1,"div",8),e.TgZ(2,"label",62),e.ynx(3),e.SDv(4,63),e.BQk(),e.qZA(),e.TgZ(5,"div",11),e._UZ(6,"input",64),e.YNc(7,Lo,2,0,"span",17),e.YNc(8,$o,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(9,"div",8),e.TgZ(10,"label",65),e.ynx(11),e.SDv(12,66),e.BQk(),e.qZA(),e.TgZ(13,"div",11),e.TgZ(14,"div",12),e._UZ(15,"input",67),e.TgZ(16,"span",14),e._UZ(17,"button",68),e._UZ(18,"cd-copy-2-clipboard-button",69),e.qZA(),e.qZA(),e.YNc(19,Zo,2,0,"span",17),e.YNc(20,Bo,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(21,"div",8),e.TgZ(22,"label",70),e.ynx(23),e.SDv(24,71),e.BQk(),e.qZA(),e.TgZ(25,"div",11),e._UZ(26,"input",72),e.YNc(27,Go,2,0,"span",17),e.YNc(28,yo,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(29,"div",8),e.TgZ(30,"label",73),e.ynx(31),e.SDv(32,74),e.BQk(),e.qZA(),e.TgZ(33,"div",11),e.TgZ(34,"div",12),e._UZ(35,"input",75),e.TgZ(36,"span",14),e._UZ(37,"button",76),e._UZ(38,"cd-copy-2-clipboard-button",77),e.qZA(),e.qZA(),e.YNc(39,xo,2,0,"span",17),e.YNc(40,wo,2,0,"span",17),e.qZA(),e.qZA(),e.q
ZA()),2&n){e.oxw();const t=e.MAs(2),o=e.oxw();e.xp6(7),e.Q6J("ngIf",o.targetForm.showError("user",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("user",t,"pattern")),e.xp6(11),e.Q6J("ngIf",o.targetForm.showError("password",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("password",t,"pattern")),e.xp6(7),e.Q6J("ngIf",o.targetForm.showError("mutual_user",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("mutual_user",t,"pattern")),e.xp6(11),e.Q6J("ngIf",o.targetForm.showError("mutual_password",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("mutual_password",t,"pattern"))}}function Ho(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,115),e.qZA())}function ko(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,116),e.qZA())}function Ko(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,117),e.qZA())}function Xo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,118),e.qZA())}function Qo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,119),e.qZA())}function zo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,120),e.qZA())}function Jo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,121),e.qZA())}function Yo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,122),e.qZA())}function Vo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,123),e.qZA())}function Uo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,124),e.qZA())}function jo(n,s){1&n&&(e.TgZ(0,"span",41),e.SDv(1,125),e.qZA())}function Wo(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",50),e._UZ(2,"input",51),e.TgZ(3,"span",14),e.TgZ(4,"button",52),e.NdJ("click",function(){const i=e.CHM(t),_=i.index,r=i.$implicit,c=e.oxw(),d=c.$implicit,g=c.index;return e.oxw(3).removeInitiatorImage(d,_,g,r)}),e._UZ(5,"i",16),e.qZA(),e.qZA(),e.qZA(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(3),e.Q6J("ngClass",e.VKq(2,J,o.icons.destroy))}}function en(n,s){1&n&&(e.TgZ(0,"span"),e.SDv(1,126),e.qZA())}function 
tn(n,s){if(1&n&&(e.TgZ(0,"div",22),e.TgZ(1,"div",23),e.TgZ(2,"cd-select",127),e._UZ(3,"i",25),e.ynx(4),e.SDv(5,128),e.BQk(),e.qZA(),e.qZA(),e.qZA()),2&n){const t=e.oxw(),o=t.$implicit,i=t.index,_=e.oxw(3);e.xp6(2),e.Q6J("data",o.getValue("luns"))("options",_.imagesInitiatorSelections[i])("messages",_.messages.initiatorImage),e.xp6(1),e.Q6J("ngClass",e.VKq(4,J,_.icons.add))}}function on(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",92),e.TgZ(1,"div",5),e.ynx(2),e.SDv(3,93),e.BQk(),e._uU(4),e.TgZ(5,"button",94),e.NdJ("click",function(){const _=e.CHM(t).index;return e.oxw(3).removeInitiator(_)}),e._UZ(6,"i",25),e.qZA(),e.qZA(),e.TgZ(7,"div",7),e.TgZ(8,"div",8),e.TgZ(9,"label",95),e.SDv(10,96),e.qZA(),e.TgZ(11,"div",11),e.TgZ(12,"input",97),e.NdJ("blur",function(){return e.CHM(t),e.oxw(3).updatedInitiatorSelector()}),e.qZA(),e.YNc(13,Ho,2,0,"span",17),e.YNc(14,ko,2,0,"span",17),e.YNc(15,Ko,2,0,"span",17),e.qZA(),e.qZA(),e.ynx(16,61),e.TgZ(17,"div",8),e.TgZ(18,"label",98),e.SDv(19,99),e.qZA(),e.TgZ(20,"div",11),e._UZ(21,"input",100),e.YNc(22,Xo,2,0,"span",17),e.YNc(23,Qo,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(24,"div",8),e.TgZ(25,"label",101),e.SDv(26,102),e.qZA(),e.TgZ(27,"div",11),e.TgZ(28,"div",12),e._UZ(29,"input",103),e.TgZ(30,"span",14),e._UZ(31,"button",104),e._UZ(32,"cd-copy-2-clipboard-button",105),e.qZA(),e.qZA(),e.YNc(33,zo,2,0,"span",17),e.YNc(34,Jo,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(35,"div",8),e.TgZ(36,"label",106),e.ynx(37),e.SDv(38,107),e.BQk(),e.qZA(),e.TgZ(39,"div",11),e._UZ(40,"input",108),e.YNc(41,Yo,2,0,"span",17),e.YNc(42,Vo,2,0,"span",17),e.qZA(),e.qZA(),e.TgZ(43,"div",8),e.TgZ(44,"label",109),e.SDv(45,110),e.qZA(),e.TgZ(46,"div",11),e.TgZ(47,"div",12),e._UZ(48,"input",111),e.TgZ(49,"span",14),e._UZ(50,"button",104),e._UZ(51,"cd-copy-2-clipboard-button",105),e.qZA(),e.qZA(),e.YNc(52,Uo,2,0,"span",17),e.YNc(53,jo,2,0,"span",17),e.qZA(),e.qZA(),e.BQk(),e.TgZ(54,"div",8),e.TgZ(55,"label",112),e.SDv(56,113),e.qZA(),e.TgZ(57,"div",11),e.YNc(58,Wo,6,4,"ng-c
ontainer",21),e.YNc(59,en,2,0,"span",55),e.YNc(60,tn,6,6,"div",114),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&n){const t=s.$implicit,o=s.index;e.oxw(2);const i=e.MAs(2),_=e.oxw();e.Q6J("formGroup",t),e.xp6(4),e.hij(": ",t.getValue("client_iqn")," "),e.xp6(2),e.Q6J("ngClass",e.VKq(25,J,_.icons.destroy)),e.xp6(7),e.Q6J("ngIf",t.showError("client_iqn",i,"notUnique")),e.xp6(1),e.Q6J("ngIf",t.showError("client_iqn",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("client_iqn",i,"pattern")),e.xp6(6),e.Q6J("id","user"+o),e.xp6(1),e.Q6J("ngIf",t.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("user",i,"pattern")),e.xp6(6),e.Q6J("id","password"+o),e.xp6(2),e.Q6J("cdPasswordButton","password"+o),e.xp6(1),e.Q6J("source","password"+o),e.xp6(1),e.Q6J("ngIf",t.showError("password",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("password",i,"pattern")),e.xp6(6),e.Q6J("id","mutual_user"+o),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_user",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_user",i,"pattern")),e.xp6(6),e.Q6J("id","mutual_password"+o),e.xp6(2),e.Q6J("cdPasswordButton","mutual_password"+o),e.xp6(1),e.Q6J("source","mutual_password"+o),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_password",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_password",i,"pattern")),e.xp6(5),e.Q6J("ngForOf",t.getValue("luns")),e.xp6(1),e.Q6J("ngIf",t.getValue("cdIsInGroup")),e.xp6(1),e.Q6J("ngIf",!t.getValue("cdIsInGroup"))}}function nn(n,s){1&n&&(e.TgZ(0,"span",48),e.SDv(1,129),e.qZA())}function sn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",8),e.TgZ(1,"label",86),e.SDv(2,87),e.qZA(),e.TgZ(3,"div",88),e.YNc(4,on,61,27,"div",89),e.TgZ(5,"div",22),e.TgZ(6,"div",23),e.YNc(7,nn,2,0,"span",18),e.TgZ(8,"button",90),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).addInitiator(),!1}),e._UZ(9,"i",25),e.ynx(10),e.SDv(11,91),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(12,"hr"),e.qZA(),e.qZA()}if(2&n){const 
t=e.oxw(2);e.xp6(4),e.Q6J("ngForOf",t.initiators.controls),e.xp6(3),e.Q6J("ngIf",0===t.initiators.controls.length),e.xp6(2),e.Q6J("ngClass",e.VKq(3,J,t.icons.add))}}function _n(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",50),e._UZ(2,"input",51),e.TgZ(3,"span",14),e.TgZ(4,"button",52),e.NdJ("click",function(){const _=e.CHM(t).index,r=e.oxw(),c=r.$implicit,d=r.index;return e.oxw(3).removeGroupInitiator(c,_,d)}),e._UZ(5,"i",16),e.qZA(),e.qZA(),e.qZA(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(3),e.Q6J("ngClass",e.VKq(2,J,o.icons.destroy))}}function an(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",50),e._UZ(2,"input",51),e.TgZ(3,"span",14),e.TgZ(4,"button",52),e.NdJ("click",function(){const _=e.CHM(t).index,r=e.oxw(),c=r.$implicit,d=r.index;return e.oxw(3).removeGroupDisk(c,_,d)}),e._UZ(5,"i",16),e.qZA(),e.qZA(),e.qZA(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(3),e.Q6J("ngClass",e.VKq(2,J,o.icons.destroy))}}function rn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",92),e.TgZ(1,"div",5),e.ynx(2),e.SDv(3,133),e.BQk(),e._uU(4),e.TgZ(5,"button",94),e.NdJ("click",function(){const _=e.CHM(t).index;return e.oxw(3).removeGroup(_)}),e._UZ(6,"i",25),e.qZA(),e.qZA(),e.TgZ(7,"div",7),e.TgZ(8,"div",8),e.TgZ(9,"label",134),e.SDv(10,135),e.qZA(),e.TgZ(11,"div",11),e._UZ(12,"input",136),e.qZA(),e.qZA(),e.TgZ(13,"div",8),e.TgZ(14,"label",137),e.ynx(15),e.SDv(16,138),e.BQk(),e.qZA(),e.TgZ(17,"div",11),e.YNc(18,_n,6,4,"ng-container",21),e.TgZ(19,"div",22),e.TgZ(20,"div",23),e.TgZ(21,"cd-select",24),e.NdJ("selection",function(i){const r=e.CHM(t).index;return 
e.oxw(3).onGroupMemberSelection(i,r)}),e._UZ(22,"i",25),e.ynx(23),e.SDv(24,139),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(25,"hr"),e.qZA(),e.qZA(),e.TgZ(26,"div",8),e.TgZ(27,"label",28),e.ynx(28),e.SDv(29,140),e.BQk(),e.qZA(),e.TgZ(30,"div",11),e.YNc(31,an,6,4,"ng-container",21),e.TgZ(32,"div",22),e.TgZ(33,"div",23),e.TgZ(34,"cd-select",127),e._UZ(35,"i",25),e.ynx(36),e.SDv(37,141),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(38,"hr"),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&n){const t=s.$implicit,o=s.index,i=e.oxw(3);e.Q6J("formGroup",t),e.xp6(4),e.hij(": ",t.getValue("group_id")," "),e.xp6(2),e.Q6J("ngClass",e.VKq(13,J,i.icons.destroy)),e.xp6(12),e.Q6J("ngForOf",t.getValue("members")),e.xp6(3),e.Q6J("data",t.getValue("members"))("options",i.groupMembersSelections[o])("messages",i.messages.groupInitiator),e.xp6(1),e.Q6J("ngClass",e.VKq(15,J,i.icons.add)),e.xp6(9),e.Q6J("ngForOf",t.getValue("disks")),e.xp6(3),e.Q6J("data",t.getValue("disks"))("options",i.groupDiskSelections[o])("messages",i.messages.initiatorImage),e.xp6(1),e.Q6J("ngClass",e.VKq(17,J,i.icons.add))}}function ln(n,s){1&n&&(e.TgZ(0,"span",48),e.SDv(1,142),e.qZA())}function cn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",8),e.TgZ(1,"label",86),e.SDv(2,130),e.qZA(),e.TgZ(3,"div",131),e.YNc(4,rn,39,19,"div",89),e.TgZ(5,"div",22),e.TgZ(6,"div",23),e.YNc(7,ln,2,0,"span",18),e.TgZ(8,"button",90),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).addGroup(),!1}),e._UZ(9,"i",25),e.ynx(10),e.SDv(11,132),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("ngForOf",t.groups.controls),e.xp6(3),e.Q6J("ngIf",0===t.groups.controls.length),e.xp6(2),e.Q6J("ngClass",e.VKq(3,J,t.icons.add))}}function dn(n,s){if(1&n){const 
t=e.EpF();e.TgZ(0,"div",1),e.TgZ(1,"form",2,3),e.TgZ(3,"div",4),e.TgZ(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.TgZ(9,"div",8),e.TgZ(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11),e.TgZ(13,"div",12),e._UZ(14,"input",13),e.TgZ(15,"span",14),e.TgZ(16,"button",15),e.NdJ("click",function(){return e.CHM(t),e.oxw().targetSettingsModal()}),e._UZ(17,"i",16),e.qZA(),e.qZA(),e.qZA(),e.YNc(18,Ro,2,0,"span",17),e.YNc(19,Mo,2,0,"span",17),e.YNc(20,Oo,9,0,"span",17),e.YNc(21,Ao,2,0,"span",18),e._UZ(22,"hr"),e.qZA(),e.qZA(),e.TgZ(23,"div",8),e.TgZ(24,"label",19),e.SDv(25,20),e.qZA(),e.TgZ(26,"div",11),e.YNc(27,ho,6,4,"ng-container",21),e.TgZ(28,"div",22),e.TgZ(29,"div",23),e.TgZ(30,"cd-select",24),e.NdJ("selection",function(i){return e.CHM(t),e.oxw().onPortalSelection(i)}),e._UZ(31,"i",25),e.ynx(32),e.SDv(33,26),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(34,"input",27),e.YNc(35,Po,2,1,"span",17),e._UZ(36,"hr"),e.qZA(),e.qZA(),e.TgZ(37,"div",8),e.TgZ(38,"label",28),e.SDv(39,29),e.qZA(),e.TgZ(40,"div",11),e.YNc(41,Fo,12,10,"ng-container",21),e._UZ(42,"input",30),e.YNc(43,Do,2,0,"span",17),e.YNc(44,vo,2,0,"span",17),e.TgZ(45,"div",22),e.TgZ(46,"div",23),e.TgZ(47,"cd-select",24),e.NdJ("selection",function(i){return e.CHM(t),e.oxw().onImageSelection(i)}),e._UZ(48,"i",25),e.ynx(49),e.SDv(50,31),e.BQk(),e.qZA(),e.qZA(),e.qZA(),e._UZ(51,"hr"),e.qZA(),e.qZA(),e.TgZ(52,"div",8),e.TgZ(53,"div",32),e.TgZ(54,"div",33),e._UZ(55,"input",34),e.TgZ(56,"label",35),e.SDv(57,36),e.qZA(),e.qZA(),e._UZ(58,"hr"),e.qZA(),e.qZA(),e.YNc(59,qo,41,8,"div",37),e.YNc(60,sn,13,5,"div",38),e.YNc(61,cn,12,5,"div",38),e.qZA(),e.TgZ(62,"div",39),e.TgZ(63,"cd-form-button-panel",40),e.NdJ("submitActionEvent",function(){return e.CHM(t),e.oxw().submit()}),e.ALo(64,"titlecase"),e.ALo(65,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&n){const 
t=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.targetForm),e.xp6(6),e.pQV(e.lcZ(6,26,o.action))(e.lcZ(7,28,o.resource)),e.QtT(5),e.xp6(10),e.Q6J("ngClass",e.VKq(34,J,o.icons.deepCheck)),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"pattern")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"iqn")),e.xp6(1),e.Q6J("ngIf",o.hasAdvancedSettings(o.targetForm.getValue("target_controls"))),e.xp6(6),e.Q6J("ngForOf",o.portals.value),e.xp6(3),e.Q6J("data",o.portals.value)("options",o.portalsSelections)("messages",o.messages.portals),e.xp6(1),e.Q6J("ngClass",e.VKq(36,J,o.icons.add)),e.xp6(4),e.Q6J("ngIf",o.targetForm.showError("portals",t,"minGateways")),e.xp6(6),e.Q6J("ngForOf",o.targetForm.getValue("disks")),e.xp6(2),e.Q6J("ngIf",o.targetForm.showError("disks",t,"dupLunId")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("disks",t,"dupWwn")),e.xp6(3),e.Q6J("data",o.disks.value)("options",o.imagesSelections)("messages",o.messages.images),e.xp6(1),e.Q6J("ngClass",e.VKq(38,J,o.icons.add)),e.xp6(11),e.Q6J("ngIf",o.cephIscsiConfigVersion>10&&!o.targetForm.getValue("acl_enabled")),e.xp6(1),e.Q6J("ngIf",o.targetForm.getValue("acl_enabled")),e.xp6(1),e.Q6J("ngIf",o.targetForm.getValue("acl_enabled")),e.xp6(2),e.Q6J("form",o.targetForm)("submitText",e.lcZ(64,30,o.action)+" "+e.lcZ(65,32,o.resource))}}let ut=(()=>{class n extends H.E{constructor(t,o,i,_,r,c,d){super(),this.iscsiService=t,this.modalService=o,this.rbdService=i,this.router=_,this.route=r,this.taskWrapper=c,this.actionLabels=d,this.api_version=0,this.minimum_gateways=1,this.icons=T.P,this.isEdit=!1,this.portalsSelections=[],this.imagesInitiatorSelections=[],this.groupDiskSelections=[],this.groupMembersSelections=[],this.imagesSettings={},this.messages={portals:new F.a({noOptions:"There are no portals available."}),images:new F.a({noOptions:"There are no images available."}),initiatorImage:new F.a({noOptions:"There are no 
images available. Please make sure you add an image to the target."}),groupInitiator:new F.a({noOptions:"There are no initiators available. Please make sure you add an initiator to the target."})},this.IQN_REGEX=/^iqn\.(19|20)\d\d-(0[1-9]|1[0-2])\.\D{2,3}(\.[A-Za-z0-9-]+)+(:[A-Za-z0-9-\.]+)*$/,this.USER_REGEX=/^[\w\.:@_-]{8,64}$/,this.PASSWORD_REGEX=/^[\w@\-_\/]{12,16}$/,this.resource="target"}ngOnInit(){const t=new A.E(()=>{});t.pageInfo.limit=-1;const o=[this.iscsiService.listTargets(),this.rbdService.list(t.toParams()),this.iscsiService.portals(),this.iscsiService.settings(),this.iscsiService.version()];this.router.url.startsWith("/block/iscsi/targets/edit")&&(this.isEdit=!0,this.route.params.subscribe(i=>{this.target_iqn=decodeURIComponent(i.target_iqn),o.push(this.iscsiService.getTarget(this.target_iqn))})),this.action=this.isEdit?this.actionLabels.EDIT:this.actionLabels.CREATE,(0,le.D)(o).subscribe(i=>{const _=S()(i[0]).filter(c=>c.target_iqn!==this.target_iqn).flatMap(c=>c.disks).map(c=>`${c.pool}/${c.image}`).value();"api_version"in i[3]&&(this.api_version=i[3].api_version),this.minimum_gateways=i[3].config.minimum_gateways,this.target_default_controls=i[3].target_default_controls,this.target_controls_limits=i[3].target_controls_limits,this.disk_default_controls=i[3].disk_default_controls,this.disk_controls_limits=i[3].disk_controls_limits,this.backstores=i[3].backstores,this.default_backstore=i[3].default_backstore,this.unsupported_rbd_features=i[3].unsupported_rbd_features,this.required_rbd_features=i[3].required_rbd_features,this.imagesAll=S()(i[1]).flatMap(c=>c.value).filter(c=>!c.namespace&&!(-1!==_.indexOf(`${c.pool_name}/${c.name}`)||0===this.getValidBackstores(c).length)).value(),this.imagesSelections=this.imagesAll.map(c=>new y.$(!1,`${c.pool_name}/${c.name}`,""));const r=[];i[2].forEach(c=>{c.ip_addresses.forEach(d=>{r.push(new 
y.$(!1,c.name+":"+d,""))})}),this.portalsSelections=[...r],this.cephIscsiConfigVersion=i[4].ceph_iscsi_config_version,this.createForm(),i[5]&&this.resolveModel(i[5]),this.loadingReady()})}createForm(){if(this.targetForm=new x.d({target_iqn:new a.NI("iqn.2001-07.com.ceph:"+Date.now(),{validators:[a.kI.required,a.kI.pattern(this.IQN_REGEX)]}),target_controls:new a.NI({}),portals:new a.NI([],{validators:[z.h.custom("minGateways",t=>S().uniq(t.map(i=>i.split(":")[0])).length{const o=this.getLunIds(t);return o.length!==S().uniq(o).length}),z.h.custom("dupWwn",t=>{const o=this.getWwns(t);return o.length!==S().uniq(o).length})]}),initiators:new a.Oe([]),groups:new a.Oe([]),acl_enabled:new a.NI(!1)}),this.cephIscsiConfigVersion>10){const t=new x.d({user:new a.NI(""),password:new a.NI(""),mutual_user:new a.NI(""),mutual_password:new a.NI("")});this.setAuthValidator(t),this.targetForm.addControl("auth",t)}}resolveModel(t){this.targetForm.patchValue({target_iqn:t.target_iqn,target_controls:t.target_controls,acl_enabled:t.acl_enabled}),this.cephIscsiConfigVersion>10&&this.targetForm.patchValue({auth:t.auth});const o=[];S().forEach(t.portals,_=>{o.push(`${_.host}:${_.ip}`)}),this.targetForm.patchValue({portals:o});const i=[];S().forEach(t.disks,_=>{const r=`${_.pool}/${_.image}`;i.push(r),this.imagesSettings[r]={backstore:_.backstore},this.imagesSettings[r][_.backstore]=_.controls,"lun"in _&&(this.imagesSettings[r].lun=_.lun),"wwn"in _&&(this.imagesSettings[r].wwn=_.wwn),this.onImageSelection({option:{name:r,selected:!0}})}),this.targetForm.patchValue({disks:i}),S().forEach(t.clients,_=>{const r=this.addInitiator();_.luns=S().map(_.luns,c=>`${c.pool}/${c.image}`),r.patchValue(_)}),t.groups.forEach((_,r)=>{const c=this.addGroup();_.disks=S().map(_.disks,d=>`${d.pool}/${d.image}`),c.patchValue(_),S().forEach(_.members,d=>{this.onGroupMemberSelection({option:new y.$(!0,d,"")},r)})})}hasAdvancedSettings(t){return Object.values(t).length>0}get portals(){return 
this.targetForm.get("portals")}onPortalSelection(){this.portals.setValue(this.portals.value)}removePortal(t,o){return this.portalsSelections.forEach(i=>{i.name===o&&(i.selected=!1)}),this.portals.value.splice(t,1),this.portals.setValue(this.portals.value),!1}get disks(){return this.targetForm.get("disks")}removeImage(t,o){return this.imagesSelections.forEach(i=>{i.name===o&&(i.selected=!1)}),this.disks.value.splice(t,1),this.removeImageRefs(o),this.targetForm.get("disks").updateValueAndValidity({emitEvent:!1}),!1}removeImageRefs(t){this.initiators.controls.forEach(o=>{const i=o.value.luns.filter(_=>_!==t);o.get("luns").setValue(i)}),this.groups.controls.forEach(o=>{const i=o.value.disks.filter(_=>_!==t);o.get("disks").setValue(i)}),S().forEach(this.imagesInitiatorSelections,(o,i)=>{this.imagesInitiatorSelections[i]=o.filter(_=>_.name!==t)}),S().forEach(this.groupDiskSelections,(o,i)=>{this.groupDiskSelections[i]=o.filter(_=>_.name!==t)})}getDefaultBackstore(t){let o=this.default_backstore;const i=this.getImageById(t);return this.validFeatures(i,this.default_backstore)||this.backstores.forEach(_=>{_!==this.default_backstore&&this.validFeatures(i,_)&&(o=_)}),o}isLunIdInUse(t,o){const i=this.disks.value.filter(_=>_!==o);return this.getLunIds(i).includes(t)}getLunIds(t){return S().map(t,o=>this.imagesSettings[o].lun)}nextLunId(t){const o=this.disks.value.filter(r=>r!==t),i=this.getLunIds(o);let _=0;for(;i.includes(_);)_++;return _}getWwns(t){return S().map(t,i=>this.imagesSettings[i].wwn).filter(i=>S().isString(i)&&""!==i)}onImageSelection(t){const o=t.option;if(o.selected){if(this.imagesSettings[o.name])this.isLunIdInUse(this.imagesSettings[o.name].lun,o.name)&&(this.imagesSettings[o.name].lun=this.nextLunId(o.name));else{const i=this.getDefaultBackstore(o.name);this.imagesSettings[o.name]={backstore:i,lun:this.nextLunId(o.name)},this.imagesSettings[o.name][i]={}}S().forEach(this.imagesInitiatorSelections,(i,_)=>{i.push(new 
y.$(!1,o.name,"")),this.imagesInitiatorSelections[_]=[...i]}),S().forEach(this.groupDiskSelections,(i,_)=>{i.push(new y.$(!1,o.name,"")),this.groupDiskSelections[_]=[...i]})}else this.removeImageRefs(o.name);this.targetForm.get("disks").updateValueAndValidity({emitEvent:!1})}get initiators(){return this.targetForm.get("initiators")}addInitiator(){const t=new x.d({client_iqn:new a.NI("",{validators:[a.kI.required,z.h.custom("notUnique",i=>{const _=this.initiators.controls.reduce(function(r,c){return r.concat(c.value.client_iqn)},[]);return _.indexOf(i)!==_.lastIndexOf(i)}),a.kI.pattern(this.IQN_REGEX)]}),auth:new x.d({user:new a.NI(""),password:new a.NI(""),mutual_user:new a.NI(""),mutual_password:new a.NI("")}),luns:new a.NI([]),cdIsInGroup:new a.NI(!1)});this.setAuthValidator(t),this.initiators.push(t),S().forEach(this.groupMembersSelections,(i,_)=>{i.push(new y.$(!1,"","")),this.groupMembersSelections[_]=[...i]});const o=S().map(this.targetForm.getValue("disks"),i=>new y.$(!1,i,""));return this.imagesInitiatorSelections.push(o),t}setAuthValidator(t){z.h.validateIf(t.get("user"),()=>t.getValue("password")||t.getValue("mutual_user")||t.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.USER_REGEX)],[t.get("password"),t.get("mutual_user"),t.get("mutual_password")]),z.h.validateIf(t.get("password"),()=>t.getValue("user")||t.getValue("mutual_user")||t.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.PASSWORD_REGEX)],[t.get("user"),t.get("mutual_user"),t.get("mutual_password")]),z.h.validateIf(t.get("mutual_user"),()=>t.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.USER_REGEX)],[t.get("user"),t.get("password"),t.get("mutual_password")]),z.h.validateIf(t.get("mutual_password"),()=>t.getValue("mutual_user"),[a.kI.required],[a.kI.pattern(this.PASSWORD_REGEX)],[t.get("user"),t.get("password"),t.get("mutual_user")])}removeInitiator(t){const 
o=this.initiators.value[t];this.initiators.removeAt(t),S().forEach(this.groupMembersSelections,(i,_)=>{i.splice(t,1),this.groupMembersSelections[_]=[...i]}),this.groups.controls.forEach(i=>{const _=i.value.members.filter(r=>r!==o.client_iqn);i.get("members").setValue(_)}),this.imagesInitiatorSelections.splice(t,1)}updatedInitiatorSelector(){this.initiators.controls.forEach(t=>{t.get("client_iqn").updateValueAndValidity({emitEvent:!1})}),S().forEach(this.groupMembersSelections,(t,o)=>{S().forEach(t,(i,_)=>{const r=i.name;i.name=this.initiators.controls[_].value.client_iqn,this.groups.controls.forEach(c=>{const d=c.value.members,g=d.indexOf(r);-1!==g&&(d[g]=i.name),c.get("members").setValue(d)})}),this.groupMembersSelections[o]=[...this.groupMembersSelections[o]]})}removeInitiatorImage(t,o,i,_){const r=t.getValue("luns");return r.splice(o,1),t.patchValue({luns:r}),this.imagesInitiatorSelections[i].forEach(c=>{c.name===_&&(c.selected=!1)}),!1}get groups(){return this.targetForm.get("groups")}addGroup(){const t=new x.d({group_id:new a.NI("",{validators:[a.kI.required]}),members:new a.NI([]),disks:new a.NI([])});this.groups.push(t);const o=S().map(this.targetForm.getValue("disks"),_=>new y.$(!1,_,""));this.groupDiskSelections.push(o);const i=S().map(this.initiators.value,_=>new y.$(!1,_.client_iqn,"",!_.cdIsInGroup));return this.groupMembersSelections.push(i),t}removeGroup(t){this.groups.removeAt(t),this.groupMembersSelections[t].filter(i=>i.selected).forEach(i=>{i.selected=!1,this.onGroupMemberSelection({option:i},t)}),this.groupMembersSelections.splice(t,1),this.groupDiskSelections.splice(t,1)}onGroupMemberSelection(t,o){const i=t.option;let 
_=[];i.selected||(_=this.groupDiskSelections[o].filter(c=>c.selected).map(c=>c.name)),this.initiators.controls.forEach((r,c)=>{r.value.client_iqn===i.name&&(r.patchValue({luns:_}),r.get("cdIsInGroup").setValue(i.selected),S().forEach(this.groupMembersSelections,d=>{d[c].enabled=!i.selected}),this.imagesInitiatorSelections[c].forEach(d=>{d.selected=_.includes(d.name)}))})}removeGroupInitiator(t,o,i){const _=t.getValue("members")[o];t.getValue("members").splice(o,1),this.onGroupMemberSelection({option:new y.$(!1,_,"")},i)}removeGroupDisk(t,o,i){const _=t.getValue("disks")[o];t.getValue("disks").splice(o,1),this.groupDiskSelections[i].forEach(r=>{r.name===_&&(r.selected=!1)}),this.groupDiskSelections[i]=[...this.groupDiskSelections[i]]}submit(){const t=S().cloneDeep(this.targetForm.value),o={target_iqn:this.targetForm.getValue("target_iqn"),target_controls:this.targetForm.getValue("target_controls"),acl_enabled:this.targetForm.getValue("acl_enabled"),portals:[],disks:[],clients:[],groups:[]};if(this.cephIscsiConfigVersion>10){const _=this.targetForm.get("auth");_.getValue("user")||_.get("user").setValue(""),_.getValue("password")||_.get("password").setValue(""),_.getValue("mutual_user")||_.get("mutual_user").setValue(""),_.getValue("mutual_password")||_.get("mutual_password").setValue("");const r=this.targetForm.getValue("acl_enabled");o.auth={user:r?"":_.getValue("user"),password:r?"":_.getValue("password"),mutual_user:r?"":_.getValue("mutual_user"),mutual_password:r?"":_.getValue("mutual_password")}}let i;t.disks.forEach(_=>{const r=_.split("/"),c=this.imagesSettings[_].backstore;o.disks.push({pool:r[0],image:r[1],backstore:c,controls:this.imagesSettings[_][c],lun:this.imagesSettings[_].lun,wwn:this.imagesSettings[_].wwn})}),t.portals.forEach(_=>{const 
r=_.indexOf(":");o.portals.push({host:_.substring(0,r),ip:_.substring(r+1)})}),o.acl_enabled&&(t.initiators.forEach(_=>{_.auth.user||(_.auth.user=""),_.auth.password||(_.auth.password=""),_.auth.mutual_user||(_.auth.mutual_user=""),_.auth.mutual_password||(_.auth.mutual_password=""),delete _.cdIsInGroup;const r=[];_.luns.forEach(c=>{const d=c.split("/");r.push({pool:d[0],image:d[1]})}),_.luns=r}),o.clients=t.initiators),o.acl_enabled&&(t.groups.forEach(_=>{const r=[];_.disks.forEach(c=>{const d=c.split("/");r.push({pool:d[0],image:d[1]})}),_.disks=r}),o.groups=t.groups),this.isEdit?(o.new_target_iqn=o.target_iqn,o.target_iqn=this.target_iqn,i=this.taskWrapper.wrapTaskAroundCall({task:new R.R("iscsi/target/edit",{target_iqn:o.target_iqn}),call:this.iscsiService.updateTarget(this.target_iqn,o)})):i=this.taskWrapper.wrapTaskAroundCall({task:new R.R("iscsi/target/create",{target_iqn:o.target_iqn}),call:this.iscsiService.createTarget(o)}),i.subscribe({error:()=>{this.targetForm.setErrors({cdSubmitButton:!0})},complete:()=>this.router.navigate(["/block/iscsi/targets"])})}targetSettingsModal(){const t={target_controls:this.targetForm.get("target_controls"),target_default_controls:this.target_default_controls,target_controls_limits:this.target_controls_limits};this.modalRef=this.modalService.show(Co,t)}imageSettingsModal(t){const o={imagesSettings:this.imagesSettings,image:t,api_version:this.api_version,disk_default_controls:this.disk_default_controls,disk_controls_limits:this.disk_controls_limits,backstores:this.getValidBackstores(this.getImageById(t)),control:this.targetForm.get("disks")};this.modalRef=this.modalService.show(To,o)}validFeatures(t,o){const i=t.features,_=this.required_rbd_features[o];return(i&_)===_&&0==(i&this.unsupported_rbd_features[o])}getImageById(t){return this.imagesAll.find(o=>t===`${o.pool_name}/${o.name}`)}getValidBackstores(t){return this.backstores.filter(o=>this.validFeatures(t,o))}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(X),e.Y36(de.Z),e.Y36(q),e.Y36(m.F0),e.Y36(m.gz),e.Y36(u.P),e.Y36(v.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let s,t,o,i,_,r,c,d,g,f,I,P,Z,B,Q,Y,ee,te,w,_e,ae,M,ge,ue,me,Te,fe,Ce,G,Be,Ge,ye,xe,we,qe,He,ke,Ke,Xe,Qe,ze,b,Zt,Bt,Gt,yt,xt,wt,qt,Ht,kt,Kt,Xt,Qt,zt,Jt,Yt,Vt,Ut,jt,Wt,eo;return s="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Target IQN",o="Portals",i="Add portal",_="Images",r="Add image",c="ACL authentication",d="This field is required.",g="IQN has wrong pattern.",f="An IQN has the following notation 'iqn.$year-$month.$reversedAddress:$definedName'",I="For example: iqn.2016-06.org.dashboard:storage:disk.sn-a8675309",P="More information",Z="This target has modified advanced settings.",B="At least " + "\ufffd0\ufffd" + " gateways are required.",Q="Backstore: " + "\ufffd0\ufffd" + ".\xA0",Y="This image has modified settings.",ee="Duplicated LUN numbers.",te="Duplicated WWN.",w="User",_e="Password",ae="Mutual User",M="Mutual Password",ge="This field is required.",ue="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",me="This field is required.",Te="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",fe="This field is required.",Ce="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",G="This field is required.",Be="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",Ge="Initiators",ye="Add initiator",xe="Initiator",we="Client IQN",qe="User",He="Password",ke="Mutual User",Ke="Mutual Password",Xe="Images",Qe="Initiator IQN needs to be unique.",ze="This field is required.",b="IQN has wrong pattern.",Zt="This field is required.",Bt="User names must have a length of 8 to 64 characters and can contain 
alphanumeric characters, '.', '@', '-', '_' or ':'.",Gt="This field is required.",yt="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",xt="This field is required.",wt="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",qt="This field is required.",Ht="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",kt="Initiator belongs to a group. Images will be configure in the group.",Kt="Add image",Xt="No items added.",Qt="Groups",zt="Add group",Jt="Group",Yt="Name",Vt="Initiators",Ut="Add initiator",jt="Images",Wt="Add image",eo="No items added.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","targetForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],s,[1,"card-body"],[1,"form-group","row"],["for","target_iqn",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],[1,"input-group"],["type","text","id","target_iqn","name","target_iqn","formControlName","target_iqn","cdTrim","",1,"form-control"],[1,"input-group-append"],["id","ecp-info-button","type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],["class","invalid-feedback",4,"ngIf"],["class","form-text text-muted",4,"ngIf"],["for","portals",1,"cd-col-form-label","required"],o,[4,"ngFor","ngForOf"],[1,"row"],[1,"col-md-12"],["elemClass","btn btn-light 
float-right",3,"data","options","messages","selection"],[3,"ngClass"],i,["type","hidden","id","portals","name","portals","formControlName","portals",1,"form-control"],["for","disks",1,"cd-col-form-label"],_,["type","hidden","id","disks","name","disks","formControlName","disks",1,"form-control"],r,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["type","checkbox","formControlName","acl_enabled","name","acl_enabled","id","acl_enabled",1,"custom-control-input"],["for","acl_enabled",1,"custom-control-label"],c,["formGroupName","auth",4,"ngIf"],["class","form-group row",4,"ngIf"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],d,g,f,I,["target","_blank","href","https://en.wikipedia.org/wiki/ISCSI#Addressing"],P,[1,"form-text","text-muted"],Z,[1,"input-group","cd-mb"],["type","text","disabled","",1,"cd-form-control",3,"value"],["type","button",1,"btn","btn-light",3,"click"],B,["class","input-group-text",4,"ngIf"],[4,"ngIf"],[1,"input-group-text"],Q,Y,ee,te,["formGroupName","auth"],["for","target_user",1,"cd-col-form-label"],w,["type","text","autocomplete","off","id","target_user","name","target_user","formControlName","user",1,"form-control"],["for","target_password",1,"cd-col-form-label"],_e,["type","password","autocomplete","new-password","id","target_password","name","target_password","formControlName","password",1,"form-control"],["type","button","cdPasswordButton","target_password",1,"btn","btn-light"],["source","target_password"],["for","target_mutual_user",1,"cd-col-form-label"],ae,["type","text","autocomplete","off","id","target_mutual_user","name","target_mutual_user","formControlName","mutual_user",1,"form-control"],["for","target_mutual_password",1,"cd-col-form-label"],M,["type","password","autocomplete","new-password","id","target_mutual_password","name","target_mutual_password","formControlName","mutual_password",1,"form-control"],["type","button","cdPasswordButton","target_mutu
al_password",1,"btn","btn-light"],["source","target_mutual_password"],ge,ue,me,Te,fe,Ce,G,Be,["for","initiators",1,"cd-col-form-label"],Ge,["formArrayName","initiators",1,"cd-col-form-input"],["class","card mb-2",3,"formGroup",4,"ngFor","ngForOf"],[1,"btn","btn-light","float-right",3,"click"],ye,[1,"card","mb-2",3,"formGroup"],xe,["type","button",1,"close",3,"click"],["for","client_iqn",1,"cd-col-form-label","required"],we,["type","text","formControlName","client_iqn","cdTrim","",1,"form-control",3,"blur"],["for","user",1,"cd-col-form-label"],qe,["formControlName","user","autocomplete","off","type","text",1,"form-control",3,"id"],["for","password",1,"cd-col-form-label"],He,["formControlName","password","autocomplete","new-password","type","password",1,"form-control",3,"id"],["type","button",1,"btn","btn-light",3,"cdPasswordButton"],[3,"source"],["for","mutual_user",1,"cd-col-form-label"],ke,["formControlName","mutual_user","autocomplete","off","type","text",1,"form-control",3,"id"],["for","mutual_password",1,"cd-col-form-label"],Ke,["formControlName","mutual_password","autocomplete","new-password","type","password",1,"form-control",3,"id"],["for","luns",1,"cd-col-form-label"],Xe,["class","row",4,"ngIf"],Qe,ze,b,Zt,Bt,Gt,yt,xt,wt,qt,Ht,kt,["elemClass","btn btn-light float-right",3,"data","options","messages"],Kt,Xt,Qt,["formArrayName","groups",1,"cd-col-form-input"],zt,Jt,["for","group_id",1,"cd-col-form-label","required"],Yt,["type","text","formControlName","group_id",1,"form-control"],["for","members",1,"cd-col-form-label"],Vt,Ut,jt,Wt,eo]},template:function(t,o){1&t&&e.YNc(0,dn,66,40,"div",0),2&t&&e.Q6J("cdFormLoading",o.loading)},directives:[pt.y,a._Y,a.JL,a.sg,h.V,O.P,$.o,a.Fj,k.b,a.JJ,a.u,So,l.mk,l.O5,l.sg,Eo.H,a.Wl,j.p,a.x0,gt.C,Ye.s,a.CE],pipes:[l.rS,et.m,Je.V],styles:[".cd-mb[_ngcontent-%COMP%]{margin-bottom:10px}"]}),n})();var mt=p(68136),pe=p(30982),W=p(35905),De=p(99466),Se=p(68774),Tt=p(55657),ce=p(38047),tt=p(18001),ve=p(97161),oe=p(74937);function 
pn(n,s){1&n&&(e.TgZ(0,"span",30),e.SDv(1,31),e.qZA())}function gn(n,s){1&n&&(e.TgZ(0,"span",30),e.SDv(1,32),e.qZA())}function un(n,s){1&n&&(e.TgZ(0,"span",30),e.SDv(1,33),e.qZA())}function mn(n,s){1&n&&(e.TgZ(0,"span",30),e.SDv(1,34),e.qZA())}function Tn(n,s){1&n&&(e.TgZ(0,"span",30),e.SDv(1,35),e.qZA())}function fn(n,s){1&n&&(e.TgZ(0,"span",30),e.SDv(1,36),e.qZA())}function Cn(n,s){1&n&&(e.TgZ(0,"span",30),e.SDv(1,37),e.qZA())}function Sn(n,s){1&n&&(e.TgZ(0,"span",30),e.SDv(1,38),e.qZA())}let En=(()=>{class n{constructor(t,o,i,_,r){this.authStorageService=t,this.activeModal=o,this.actionLabels=i,this.iscsiService=_,this.notificationService=r,this.USER_REGEX=/^[\w\.:@_-]{8,64}$/,this.PASSWORD_REGEX=/^[\w@\-_\/]{12,16}$/,this.permission=this.authStorageService.getPermissions().iscsi}ngOnInit(){this.hasPermission=this.permission.update,this.createForm(),this.iscsiService.getDiscovery().subscribe(t=>{this.discoveryForm.patchValue(t)})}createForm(){this.discoveryForm=new x.d({user:new a.NI({value:"",disabled:!this.hasPermission}),password:new a.NI({value:"",disabled:!this.hasPermission}),mutual_user:new a.NI({value:"",disabled:!this.hasPermission}),mutual_password:new 
a.NI({value:"",disabled:!this.hasPermission})}),z.h.validateIf(this.discoveryForm.get("user"),()=>this.discoveryForm.getValue("password")||this.discoveryForm.getValue("mutual_user")||this.discoveryForm.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.USER_REGEX)],[this.discoveryForm.get("password"),this.discoveryForm.get("mutual_user"),this.discoveryForm.get("mutual_password")]),z.h.validateIf(this.discoveryForm.get("password"),()=>this.discoveryForm.getValue("user")||this.discoveryForm.getValue("mutual_user")||this.discoveryForm.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.PASSWORD_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("mutual_user"),this.discoveryForm.get("mutual_password")]),z.h.validateIf(this.discoveryForm.get("mutual_user"),()=>this.discoveryForm.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.USER_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("password"),this.discoveryForm.get("mutual_password")]),z.h.validateIf(this.discoveryForm.get("mutual_password"),()=>this.discoveryForm.getValue("mutual_user"),[a.kI.required],[a.kI.pattern(this.PASSWORD_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("password"),this.discoveryForm.get("mutual_user")])}submitAction(){this.iscsiService.updateDiscovery(this.discoveryForm.value).subscribe(()=>{this.notificationService.show(tt.k.success,"Updated discovery authentication"),this.activeModal.close()},()=>{this.discoveryForm.setErrors({cdSubmitButton:!0})})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(N.Kz),e.Y36(v.p4),e.Y36(X),e.Y36(ve.g))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-discovery-modal"]],decls:46,vars:13,consts:function(){let s,t,o,i,_,r,c,d,g,f,I,P,Z;return s="Discovery Authentication",t="User",o="Password",i="Mutual User",_="Mutual Password",r="This field is required.",c="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', 
'-', '_' or ':'.",d="This field is required.",g="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",f="This field is required.",I="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",P="This field is required.",Z="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","discoveryForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","user",1,"cd-col-form-label"],t,[1,"cd-col-form-input"],["id","user","formControlName","user","type","text","autocomplete","off",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","password",1,"cd-col-form-label"],o,[1,"input-group"],["id","password","formControlName","password","type","password","autocomplete","new-password",1,"form-control"],[1,"input-group-append"],["type","button","cdPasswordButton","password",1,"btn","btn-light"],["source","password"],["for","mutual_user",1,"cd-col-form-label"],i,["id","mutual_user","formControlName","mutual_user","type","text","autocomplete","off",1,"form-control"],["for","mutual_password",1,"cd-col-form-label"],_,["id","mutual_password","formControlName","mutual_password","type","password","autocomplete","new-password",1,"form-control"],["type","button","cdPasswordButton","mutual_password",1,"btn","btn-light"],["source","mutual_password"],[1,"modal-footer"],[3,"form","showSubmit","submitText","submitActionEvent"],[1,"invalid-feedback"],r,c,d,g,f,I,P,Z]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"div",7),e.TgZ(8,"label",8),e.SDv(9,9),e.qZA(),e.TgZ(10,"div",10),e._UZ(11,"input",11),e.YNc(12,pn,2,0,"span",12),e.YNc(13,gn,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(14,"div",7),e.TgZ(15,"label",13),
e.SDv(16,14),e.qZA(),e.TgZ(17,"div",10),e.TgZ(18,"div",15),e._UZ(19,"input",16),e.TgZ(20,"span",17),e._UZ(21,"button",18),e._UZ(22,"cd-copy-2-clipboard-button",19),e.qZA(),e.qZA(),e.YNc(23,un,2,0,"span",12),e.YNc(24,mn,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(25,"div",7),e.TgZ(26,"label",20),e.ynx(27),e.SDv(28,21),e.BQk(),e.qZA(),e.TgZ(29,"div",10),e._UZ(30,"input",22),e.YNc(31,Tn,2,0,"span",12),e.YNc(32,fn,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(33,"div",7),e.TgZ(34,"label",23),e.SDv(35,24),e.qZA(),e.TgZ(36,"div",10),e.TgZ(37,"div",15),e._UZ(38,"input",25),e.TgZ(39,"span",17),e._UZ(40,"button",26),e._UZ(41,"cd-copy-2-clipboard-button",27),e.qZA(),e.qZA(),e.YNc(42,Cn,2,0,"span",12),e.YNc(43,Sn,2,0,"span",12),e.qZA(),e.qZA(),e.qZA(),e.TgZ(44,"div",28),e.TgZ(45,"cd-form-button-panel",29),e.NdJ("submitActionEvent",function(){return o.submitAction()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.discoveryForm),e.xp6(8),e.Q6J("ngIf",o.discoveryForm.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("user",i,"pattern")),e.xp6(10),e.Q6J("ngIf",o.discoveryForm.showError("password",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("password",i,"pattern")),e.xp6(7),e.Q6J("ngIf",o.discoveryForm.showError("mutual_user",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("mutual_user",i,"pattern")),e.xp6(10),e.Q6J("ngIf",o.discoveryForm.showError("mutual_password",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("mutual_password",i,"pattern")),e.xp6(2),e.Q6J("form",o.discoveryForm)("showSubmit",o.hasPermission)("submitText",o.actionLabels.SUBMIT)}},directives:[C.z,a._Y,a.JL,a.sg,h.V,O.P,$.o,a.Fj,k.b,a.JJ,a.u,l.O5,gt.C,Ye.s,j.p],styles:[""]}),n})();var Rn=p(86969);let ft=(()=>{class n{constructor(t){this.router=t}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(m.F0))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-tabs"]],decls:8,vars:1,consts:function(){let s,t;return s="Overview",t="Targets",[["ngbNav","",1,"nav-tabs",3,"activeId","navChange"],["nav","ngbNav"],["ngbNavItem","/block/iscsi/overview"],["ngbNavLink",""],s,["ngbNavItem","/block/iscsi/targets"],t]},template:function(t,o){1&t&&(e.TgZ(0,"ul",0,1),e.NdJ("navChange",function(_){return o.router.navigate([_.nextId])}),e.TgZ(2,"li",2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.qZA(),e.TgZ(5,"li",5),e.TgZ(6,"a",3),e.SDv(7,6),e.qZA(),e.qZA(),e.qZA()),2&t&&e.Q6J("activeId",o.router.url)},directives:[N.Pz,N.nv,N.Vx],styles:[""]}),n})();var Ct=p(34501),Mn=p(30490),Ee=p(94928),On=p(68962);const An=["highlightTpl"],hn=["detailTable"],Pn=["tree"],In=function(){return["logged_in"]},bn=function(){return["logged_out"]},Nn=function(n,s){return{"badge-success":n,"badge-danger":s}};function Fn(n,s){if(1&n&&(e._UZ(0,"i"),e.TgZ(1,"span"),e._uU(2),e.qZA(),e._uU(3," \xa0 "),e.TgZ(4,"span",8),e._uU(5),e.qZA()),2&n){const t=s.$implicit;e.Tol(t.data.cdIcon),e.xp6(2),e.Oqu(t.data.name),e.xp6(2),e.Q6J("ngClass",e.WLB(7,Nn,e.DdM(5,In).includes(t.data.status),e.DdM(6,bn).includes(t.data.status))),e.xp6(1),e.hij(" ",t.data.status," ")}}function Dn(n,s){if(1&n&&(e.TgZ(0,"div",9),e.TgZ(1,"legend"),e._uU(2),e.qZA(),e._UZ(3,"cd-table",10,11),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.Oqu(t.title),e.xp6(1),e.Q6J("data",t.data)("columns",t.columns)("limit",0)}}function vn(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t)}}function Ln(n,s){if(1&n&&(e.TgZ(0,"strong"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t)}}function $n(n,s){if(1&n&&(e.YNc(0,vn,2,1,"span",12),e.YNc(1,Ln,2,1,"strong",12)),2&n){const t=s.row;e.Q6J("ngIf",void 0===t.default||t.default===t.current),e.xp6(1),e.Q6J("ngIf",void 0!==t.default&&t.default!==t.current)}}let Zn=(()=>{class 
n{constructor(t,o){this.iscsiBackstorePipe=t,this.booleanTextPipe=o,this.icons=T.P,this.metadata={},this.nodes=[],this.treeOptions={useVirtualScroll:!0,actionMapping:{mouse:{click:this.onNodeSelected.bind(this)}}}}set content(t){this.detailTable=t,t&&t.updateColumns()}ngOnInit(){this.columns=[{prop:"displayName",name:"Name",flexGrow:1,cellTemplate:this.highlightTpl},{prop:"current",name:"Current",flexGrow:1,cellTemplate:this.highlightTpl},{prop:"default",name:"Default",flexGrow:1,cellTemplate:this.highlightTpl}]}ngOnChanges(){this.selection&&(this.selectedItem=this.selection,this.generateTree()),this.data=void 0}generateTree(){const t=S().cloneDeep(this.selectedItem.target_controls);this.cephIscsiConfigVersion>10&&S().extend(t,S().cloneDeep(this.selectedItem.auth)),this.metadata={root:t};const o={target:{expanded:S().join(this.selectedItem.cdExecuting?[T.P.large,T.P.spinner,T.P.spin]:[T.P.large,T.P.bullseye]," ")},initiators:{expanded:S().join([T.P.large,T.P.user]," "),leaf:S().join([T.P.user]," ")},groups:{expanded:S().join([T.P.large,T.P.users]," "),leaf:S().join([T.P.users]," ")},disks:{expanded:S().join([T.P.large,T.P.disk]," "),leaf:S().join([T.P.disk]," ")},portals:{expanded:S().join([T.P.large,T.P.server]," "),leaf:S().join([T.P.server]," ")}},i=[];S().forEach(this.selectedItem.disks,d=>{const g="disk_"+d.pool+"_"+d.image;this.metadata[g]={controls:d.controls,backstore:d.backstore},["wwn","lun"].forEach(f=>{f in d&&(this.metadata[g][f]=d[f])}),i.push({name:`${d.pool}/${d.image}`,cdId:g,cdIcon:o.disks.leaf})});const _=[];S().forEach(this.selectedItem.portals,d=>{_.push({name:`${d.host}:${d.ip}`,cdIcon:o.portals.leaf})});const r=[];S().forEach(this.selectedItem.clients,d=>{const g=S().cloneDeep(d.auth);d.info&&(S().extend(g,d.info),delete g.state,S().forEach(Object.keys(d.info.state),P=>{g[P.toLowerCase()]=d.info.state[P]})),this.metadata["client_"+d.client_iqn]=g;const 
f=[];d.luns.forEach(P=>{f.push({name:`${P.pool}/${P.image}`,cdId:"disk_"+P.pool+"_"+P.image,cdIcon:o.disks.leaf})});let I="";d.info&&(I=Object.keys(d.info.state).includes("LOGGED_IN")?"logged_in":"logged_out"),r.push({name:d.client_iqn,status:I,cdId:"client_"+d.client_iqn,children:f,cdIcon:o.initiators.leaf})});const c=[];S().forEach(this.selectedItem.groups,d=>{const g=[];d.disks.forEach(I=>{g.push({name:`${I.pool}/${I.image}`,cdId:"disk_"+I.pool+"_"+I.image,cdIcon:o.disks.leaf})});const f=[];d.members.forEach(I=>{f.push({name:I,cdId:"client_"+I})}),c.push({name:d.group_id,cdIcon:o.groups.leaf,children:[{name:"Disks",children:g,cdIcon:o.disks.expanded},{name:"Initiators",children:f,cdIcon:o.initiators.expanded}]})}),this.nodes=[{name:this.selectedItem.target_iqn,cdId:"root",isExpanded:!0,cdIcon:o.target.expanded,children:[{name:"Disks",isExpanded:!0,children:i,cdIcon:o.disks.expanded},{name:"Portals",isExpanded:!0,children:_,cdIcon:o.portals.expanded},{name:"Initiators",isExpanded:!0,children:r,cdIcon:o.initiators.expanded},{name:"Groups",isExpanded:!0,children:c,cdIcon:o.groups.expanded}]}]}format(t){return"boolean"==typeof t?this.booleanTextPipe.transform(t):t}onNodeSelected(t,o){var i,_,r,c;if(ne.iM.ACTIVATE(t,o,!0),o.data.cdId){this.title=o.data.name;const d=this.metadata[o.data.cdId]||{};"root"===o.data.cdId?(null===(i=this.detailTable)||void 0===i||i.toggleColumn({prop:"default",isHidden:!0}),this.data=S().map(this.settings.target_default_controls,(g,f)=>({displayName:f,default:g=this.format(g),current:S().isUndefined(d[f])?g:this.format(d[f])})),this.cephIscsiConfigVersion>10&&["user","password","mutual_user","mutual_password"].forEach(g=>{this.data.push({displayName:g,default:null,current:d[g]})})):o.data.cdId.toString().startsWith("disk_")?(null===(_=this.detailTable)||void 
0===_||_.toggleColumn({prop:"default",isHidden:!0}),this.data=S().map(this.settings.disk_default_controls[d.backstore],(g,f)=>({displayName:f,default:g=this.format(g),current:S().isUndefined(d.controls[f])?g:this.format(d.controls[f])})),this.data.push({displayName:"backstore",default:this.iscsiBackstorePipe.transform(this.settings.default_backstore),current:this.iscsiBackstorePipe.transform(d.backstore)}),["wwn","lun"].forEach(g=>{g in d&&this.data.push({displayName:g,default:void 0,current:d[g]})})):(null===(r=this.detailTable)||void 0===r||r.toggleColumn({prop:"default",isHidden:!1}),this.data=S().map(d,(g,f)=>({displayName:f,default:void 0,current:this.format(g)})))}else this.data=void 0;null===(c=this.detailTable)||void 0===c||c.updateColumns()}onUpdateData(){this.tree.treeModel.expandAll()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(Je.V),e.Y36(On.T))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-details"]],viewQuery:function(t,o){if(1&t&&(e.Gf(An,7),e.Gf(hn,5),e.Gf(Pn,5)),2&t){let i;e.iGM(i=e.CRH())&&(o.highlightTpl=i.first),e.iGM(i=e.CRH())&&(o.content=i.first),e.iGM(i=e.CRH())&&(o.tree=i.first)}},inputs:{selection:"selection",settings:"settings",cephIscsiConfigVersion:"cephIscsiConfigVersion"},features:[e.TTD],decls:11,vars:3,consts:function(){let s;return s="iSCSI Topology",[[1,"row"],[1,"col-6"],s,[3,"nodes","options","updateData"],["tree",""],["treeNodeTemplate",""],["class","col-6 metadata",4,"ngIf"],["highlightTpl",""],[1,"badge",3,"ngClass"],[1,"col-6","metadata"],["columnMode","flex",3,"data","columns","limit"],["detailTable",""],[4,"ngIf"]]},template:function(t,o){1&t&&(e.TgZ(0,"div",0),e.TgZ(1,"div",1),e.TgZ(2,"legend"),e.SDv(3,2),e.qZA(),e.TgZ(4,"tree-root",3,4),e.NdJ("updateData",function(){return 
o.onUpdateData()}),e.YNc(6,Fn,6,10,"ng-template",null,5,e.W1O),e.qZA(),e.qZA(),e.YNc(8,Dn,5,4,"div",6),e.qZA(),e.YNc(9,$n,2,2,"ng-template",null,7,e.W1O)),2&t&&(e.xp6(4),e.Q6J("nodes",o.nodes)("options",o.treeOptions),e.xp6(4),e.Q6J("ngIf",o.data))},directives:[ne.qr,l.O5,l.mk,W.a],styles:[""]}),n})();function Bn(n,s){if(1&n&&(e.ynx(0),e._UZ(1,"br"),e.TgZ(2,"span"),e.SDv(3,6),e.qZA(),e.TgZ(4,"pre"),e._uU(5),e.qZA(),e.BQk()),2&n){const t=e.oxw(2);e.xp6(5),e.Oqu(t.status)}}function Gn(n,s){if(1&n&&(e.TgZ(0,"cd-alert-panel",2),e.ynx(1),e.tHW(2,3),e._UZ(3,"cd-doc",4),e.N_p(),e.BQk(),e.YNc(4,Bn,6,1,"ng-container",5),e.qZA()),2&n){const t=e.oxw();e.xp6(4),e.Q6J("ngIf",t.status)}}function yn(n,s){if(1&n&&e._UZ(0,"cd-iscsi-target-details",15),2&n){const t=e.oxw(2);e.Q6J("cephIscsiConfigVersion",t.cephIscsiConfigVersion)("selection",t.expandedRow)("settings",t.settings)}}const xn=function(n){return[n]};function wn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",7,8),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().getTargets()})("setExpandedRow",function(i){return e.CHM(t),e.oxw().setExpandedRow(i)})("updateSelection",function(i){return e.CHM(t),e.oxw().updateSelection(i)}),e.TgZ(2,"div",9),e._UZ(3,"cd-table-actions",10),e.TgZ(4,"button",11),e.NdJ("click",function(){return e.CHM(t),e.oxw().configureDiscoveryAuth()}),e._UZ(5,"i",12),e.ynx(6),e.SDv(7,13),e.BQk(),e.qZA(),e.qZA(),e.YNc(8,yn,1,3,"cd-iscsi-target-details",14),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.targets)("columns",t.columns)("hasDetails",!0)("autoReload",!1)("status",t.tableStatus),e.xp6(3),e.Q6J("permission",t.permission)("selection",t.selection)("tableActions",t.tableActions),e.xp6(2),e.Q6J("ngClass",e.VKq(10,xn,t.icons.key)),e.xp6(3),e.Q6J("ngIf",t.expandedRow)}}let qn=(()=>{class n extends 
mt.o{constructor(t,o,i,_,r,c,d,g,f){super(f),this.authStorageService=t,this.iscsiService=o,this.joinPipe=i,this.taskListService=_,this.notAvailablePipe=r,this.modalService=c,this.taskWrapper=d,this.actionLabels=g,this.ngZone=f,this.available=void 0,this.selection=new Se.r,this.targets=[],this.icons=T.P,this.builders={"iscsi/target/create":I=>({target_iqn:I.target_iqn})},this.permission=this.authStorageService.getPermissions().iscsi,this.tableActions=[{permission:"create",icon:T.P.add,routerLink:()=>"/block/iscsi/targets/create",name:this.actionLabels.CREATE},{permission:"update",icon:T.P.edit,routerLink:()=>`/block/iscsi/targets/edit/${this.selection.first().target_iqn}`,name:this.actionLabels.EDIT,disable:()=>this.getEditDisableDesc()},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteIscsiTargetModal(),name:this.actionLabels.DELETE,disable:()=>this.getDeleteDisableDesc()}]}ngOnInit(){this.columns=[{name:"Target",prop:"target_iqn",flexGrow:2,cellTransformation:De.e.executing},{name:"Portals",prop:"cdPortals",pipe:this.joinPipe,flexGrow:2},{name:"Images",prop:"cdImages",pipe:this.joinPipe,flexGrow:2},{name:"# Sessions",prop:"info.num_sessions",pipe:this.notAvailablePipe,flexGrow:1}],this.iscsiService.status().subscribe(t=>{this.available=t.available,t.available||(this.status=t.message)})}getTargets(){this.available&&(this.setTableRefreshTimeout(),this.iscsiService.version().subscribe(t=>{this.cephIscsiConfigVersion=t.ceph_iscsi_config_version}),this.taskListService.init(()=>this.iscsiService.listTargets(),t=>this.prepareResponse(t),t=>this.targets=t,()=>this.onFetchError(),this.taskFilter,this.itemFilter,this.builders),this.iscsiService.settings().subscribe(t=>{this.settings=t}))}ngOnDestroy(){this.summaryDataSubscription&&this.summaryDataSubscription.unsubscribe()}getEditDisableDesc(){const t=this.selection.first();return t&&(null==t?void 0:t.cdExecuting)?t.cdExecuting:t&&S().isUndefined(null==t?void 0:t.info)?"Unavailable 
gateway(s)":!t}getDeleteDisableDesc(){var t;const o=this.selection.first();return(null==o?void 0:o.cdExecuting)?o.cdExecuting:o&&S().isUndefined(null==o?void 0:o.info)?"Unavailable gateway(s)":o&&(null===(t=null==o?void 0:o.info)||void 0===t?void 0:t.num_sessions)?"Target has active sessions":!o}prepareResponse(t){return t.forEach(o=>{o.cdPortals=o.portals.map(i=>`${i.host}:${i.ip}`),o.cdImages=o.disks.map(i=>`${i.pool}/${i.image}`)}),t}onFetchError(){this.table.reset()}itemFilter(t,o){return t.target_iqn===o.metadata.target_iqn}taskFilter(t){return["iscsi/target/create","iscsi/target/edit","iscsi/target/delete"].includes(t.name)}updateSelection(t){this.selection=t}deleteIscsiTargetModal(){const t=this.selection.first().target_iqn;this.modalRef=this.modalService.show(pe.M,{itemDescription:"iSCSI target",itemNames:[t],submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new R.R("iscsi/target/delete",{target_iqn:t}),call:this.iscsiService.deleteTarget(t)})})}configureDiscoveryAuth(){this.modalService.show(En)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(X),e.Y36(Rn.A),e.Y36(ce.j),e.Y36(Tt.g),e.Y36(de.Z),e.Y36(u.P),e.Y36(v.p4),e.Y36(e.R0b))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-list"]],viewQuery:function(t,o){if(1&t&&e.Gf(W.a,5),2&t){let i;e.iGM(i=e.CRH())&&(o.table=i.first)}},features:[e._Bn([ce.j]),e.qOj],decls:3,vars:2,consts:function(){let s,t,o,i;return s="iSCSI Targets not available",t="Please consult the " + "\ufffd#3\ufffd" + "" + "\ufffd/#3\ufffd" + " on how to configure and enable the iSCSI Targets management functionality.",o="Available information:",i="Discovery 
authentication",[["type","info","title",s,4,"ngIf"],["columnMode","flex","identifier","target_iqn","forceIdentifier","true","selectionType","single",3,"data","columns","hasDetails","autoReload","status","fetchData","setExpandedRow","updateSelection",4,"ngIf"],["type","info","title",s],t,["section","iscsi"],[4,"ngIf"],o,["columnMode","flex","identifier","target_iqn","forceIdentifier","true","selectionType","single",3,"data","columns","hasDetails","autoReload","status","fetchData","setExpandedRow","updateSelection"],["table",""],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"],["type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],i,["cdTableDetail","",3,"cephIscsiConfigVersion","selection","settings",4,"ngIf"],["cdTableDetail","",3,"cephIscsiConfigVersion","selection","settings"]]},template:function(t,o){1&t&&(e._UZ(0,"cd-iscsi-tabs"),e.YNc(1,Gn,5,1,"cd-alert-panel",0),e.YNc(2,wn,9,12,"cd-table",1)),2&t&&(e.xp6(1),e.Q6J("ngIf",!1===o.available),e.xp6(1),e.Q6J("ngIf",!0===o.available))},directives:[ft,l.O5,Ct.G,Mn.K,W.a,Ee.K,$.o,l.mk,Zn],styles:[""]}),n})();var ot=p(66369),Hn=p(76446),kn=p(90068);const Kn=["iscsiSparklineTpl"],Xn=["iscsiPerSecondTpl"],Qn=["iscsiRelativeDateTpl"];function zn(n,s){if(1&n&&(e.TgZ(0,"span"),e._UZ(1,"cd-sparkline",9),e.qZA()),2&n){const t=e.oxw(),o=t.value,i=t.row;e.xp6(1),e.Q6J("data",o)("isBinary",i.cdIsBinary)}}function Jn(n,s){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function Yn(n,s){if(1&n&&(e.YNc(0,zn,2,2,"span",7),e.YNc(1,Jn,2,0,"span",8)),2&n){const t=s.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}function Vn(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.hij(" ",t," /s ")}}function Un(n,s){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function jn(n,s){if(1&n&&(e.YNc(0,Vn,2,1,"span",7),e.YNc(1,Un,2,0,"span",8)),2&n){const 
t=s.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}function Wn(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"notAvailable"),e.ALo(3,"relativeDate"),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.hij(" ",e.lcZ(2,1,e.lcZ(3,3,t))," ")}}function ei(n,s){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function ti(n,s){if(1&n&&(e.YNc(0,Wn,4,5,"span",7),e.YNc(1,ei,2,0,"span",8)),2&n){const t=s.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}let oi=(()=>{class n{constructor(t,o,i){this.iscsiService=t,this.dimlessPipe=o,this.iscsiBackstorePipe=i,this.gateways=[],this.images=[]}ngOnInit(){this.gatewaysColumns=[{name:"Name",prop:"name"},{name:"State",prop:"state",flexGrow:1,cellTransformation:De.e.badge,customTemplateConfig:{map:{up:{class:"badge-success"},down:{class:"badge-danger"}}}},{name:"# Targets",prop:"num_targets"},{name:"# Sessions",prop:"num_sessions"}],this.imagesColumns=[{name:"Pool",prop:"pool"},{name:"Image",prop:"image"},{name:"Backstore",prop:"backstore",pipe:this.iscsiBackstorePipe},{name:"Read Bytes",prop:"stats_history.rd_bytes",cellTemplate:this.iscsiSparklineTpl},{name:"Write Bytes",prop:"stats_history.wr_bytes",cellTemplate:this.iscsiSparklineTpl},{name:"Read Ops",prop:"stats.rd",pipe:this.dimlessPipe,cellTemplate:this.iscsiPerSecondTpl},{name:"Write Ops",prop:"stats.wr",pipe:this.dimlessPipe,cellTemplate:this.iscsiPerSecondTpl},{name:"A/O Since",prop:"optimized_since",cellTemplate:this.iscsiRelativeDateTpl}]}refresh(){this.iscsiService.overview().subscribe(t=>{this.gateways=t.gateways,this.images=t.images,this.images.map(o=>(o.stats_history&&(o.stats_history.rd_bytes=o.stats_history.rd_bytes.map(i=>i[1]),o.stats_history.wr_bytes=o.stats_history.wr_bytes.map(i=>i[1])),o.cdIsBinary=!0,o))})}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(X),e.Y36(ot.n),e.Y36(Je.V))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi"]],viewQuery:function(t,o){if(1&t&&(e.Gf(Kn,7),e.Gf(Xn,7),e.Gf(Qn,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.iscsiSparklineTpl=i.first),e.iGM(i=e.CRH())&&(o.iscsiPerSecondTpl=i.first),e.iGM(i=e.CRH())&&(o.iscsiRelativeDateTpl=i.first)}},decls:13,vars:4,consts:function(){let s,t;return s="Gateways",t="Images",[s,[3,"data","columns","fetchData"],t,[3,"data","columns"],["iscsiSparklineTpl",""],["iscsiPerSecondTpl",""],["iscsiRelativeDateTpl",""],[4,"ngIf"],["class","text-muted",4,"ngIf"],[3,"data","isBinary"],[1,"text-muted"]]},template:function(t,o){1&t&&(e._UZ(0,"cd-iscsi-tabs"),e.TgZ(1,"legend"),e.SDv(2,0),e.qZA(),e.TgZ(3,"cd-table",1),e.NdJ("fetchData",function(){return o.refresh()}),e.qZA(),e.TgZ(4,"legend"),e.SDv(5,2),e.qZA(),e._UZ(6,"cd-table",3),e.YNc(7,Yn,2,2,"ng-template",null,4,e.W1O),e.YNc(9,jn,2,2,"ng-template",null,5,e.W1O),e.YNc(11,ti,2,2,"ng-template",null,6,e.W1O)),2&t&&(e.xp6(3),e.Q6J("data",o.gateways)("columns",o.gatewaysColumns),e.xp6(3),e.Q6J("data",o.images)("columns",o.imagesColumns))},directives:[ft,W.a,l.O5,Hn.l],pipes:[Tt.g,kn.h],styles:[""]}),n})(),ni=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[[l.ez,Ae.m,N.Oz,m.Bz,a.u5,a.UX,N.ZQ]]}),n})();var ii=p(75319),si=p(26215),_i=p(45435),St=p(46947);let K=class{constructor(s,t){this.http=s,this.timerService=t,this.REFRESH_INTERVAL=3e4,this.summaryDataSource=new si.X(null),this.summaryData$=this.summaryDataSource.asObservable()}startPolling(){return this.timerService.get(()=>this.retrieveSummaryObservable(),this.REFRESH_INTERVAL).subscribe(this.retrieveSummaryObserver())}refresh(){return this.retrieveSummaryObservable().subscribe(this.retrieveSummaryObserver())}retrieveSummaryObservable(){return this.http.get("api/block/mirroring/summary")}retrieveSummaryObserver(){return s=>{this.summaryDataSource.next(s)}}subscribeSummary(s,t){return 
this.summaryData$.pipe((0,_i.h)(o=>!!o)).subscribe(s,t)}getPool(s){return this.http.get(`api/block/mirroring/pool/${s}`)}updatePool(s,t){return this.http.put(`api/block/mirroring/pool/${s}`,t,{observe:"response"})}getSiteName(){return this.http.get("api/block/mirroring/site_name")}setSiteName(s){return this.http.put("api/block/mirroring/site_name",{site_name:s},{observe:"response"})}createBootstrapToken(s){return this.http.post(`api/block/mirroring/pool/${s}/bootstrap/token`,{})}importBootstrapToken(s,t,o){return this.http.post(`api/block/mirroring/pool/${s}/bootstrap/peer`,{direction:t,token:o},{observe:"response"})}getPeer(s,t){return this.http.get(`api/block/mirroring/pool/${s}/peer/${t}`)}addPeer(s,t){return this.http.post(`api/block/mirroring/pool/${s}/peer`,t,{observe:"response"})}updatePeer(s,t,o){return this.http.put(`api/block/mirroring/pool/${s}/peer/${t}`,o,{observe:"response"})}deletePeer(s,t){return this.http.delete(`api/block/mirroring/pool/${s}/peer/${t}`,{observe:"response"})}};K.\u0275fac=function(s){return new(s||K)(e.LFG(ie.eN),e.LFG(St.f))},K.\u0275prov=e.Yz7({token:K,factory:K.\u0275fac,providedIn:"root"}),(0,D.gn)([(0,D.fM)(0,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[String]),(0,D.w6)("design:returntype",void 0)],K.prototype,"setSiteName",null),(0,D.gn)([(0,D.fM)(1,V.G),(0,D.fM)(2,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[String,String,String]),(0,D.w6)("design:returntype",void 0)],K.prototype,"importBootstrapToken",null),K=(0,D.gn)([V.o,(0,D.w6)("design:paramtypes",[ie.eN,St.f])],K);var nt=p(58071),ai=p(68307),Et=p(12627),Re=p(82945),ri=p(39749),li=p(13472);function ci(n,s){1&n&&(e.TgZ(0,"span",25),e.SDv(1,26),e.qZA())}function di(n,s){if(1&n&&(e.TgZ(0,"div",27),e._UZ(1,"input",28),e.TgZ(2,"label",29),e._uU(3),e.qZA(),e.qZA()),2&n){const t=s.$implicit;e.xp6(1),e.s9C("id",t.name),e.s9C("name",t.name),e.s9C("formControlName",t.name),e.xp6(1),e.s9C("for",t.name),e.xp6(1),e.Oqu(t.name)}}function 
pi(n,s){1&n&&(e.TgZ(0,"span",25),e.SDv(1,30),e.qZA())}let gi=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.rbdMirroringService=o,this.taskWrapper=i,this.pools=[],this.createForm()}createForm(){this.createBootstrapForm=new x.d({siteName:new a.NI("",{validators:[a.kI.required]}),pools:new a.cw({},{validators:[this.validatePools()]}),token:new a.NI("",{})})}ngOnInit(){this.createBootstrapForm.get("siteName").setValue(this.siteName),this.rbdMirroringService.getSiteName().subscribe(t=>{this.createBootstrapForm.get("siteName").setValue(t.site_name)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.pools=t.content_data.pools.reduce((_,r)=>(_.push({name:r.name,mirror_mode:r.mirror_mode}),_),[]);const i=this.createBootstrapForm.get("pools");S().each(this.pools,_=>{const r=_.name,c="disabled"===_.mirror_mode,d=i.controls[r];d?c&&d.disabled?d.enable():!c&&d.enabled&&(d.disable(),d.setValue(!0)):i.addControl(r,new a.NI({value:!c,disabled:!c}))})})}ngOnDestroy(){this.subs&&this.subs.unsubscribe()}validatePools(){return t=>{let o=0;return S().each(t.controls,i=>{!0===i.value&&++o}),o>0?null:{requirePool:!0}}}generate(){this.createBootstrapForm.get("token").setValue("");let t="";const o=[],i=this.createBootstrapForm.get("pools");S().each(i.controls,(g,f)=>{!0===g.value&&(t=f,g.disabled||o.push(f))});const _={mirror_mode:"image"},r=(0,nt.z)(this.rbdMirroringService.setSiteName(this.createBootstrapForm.getValue("siteName")),(0,le.D)(o.map(g=>this.rbdMirroringService.updatePool(g,_))),this.rbdMirroringService.createBootstrapToken(t).pipe((0,ai.b)(g=>this.createBootstrapForm.get("token").setValue(g.token)))).pipe((0,Et.Z)()),c=()=>{this.rbdMirroringService.refresh(),this.createBootstrapForm.setErrors({cdSubmitButton:!0})};this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/mirroring/bootstrap/create",{}),call:r}).subscribe({error:c,complete:c})}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(N.Kz),e.Y36(K),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-bootstrap-create-modal"]],decls:32,vars:6,consts:function(){let s,t,o,i,_,r,c,d,g,f,I;return s="Create Bootstrap Token",t="To create a bootstrap token which can be imported by a peer site cluster, provide the local site's name, select which pools will have mirroring enabled, and click\xA0 " + "\ufffd#10\ufffd" + "Generate" + "\ufffd/#10\ufffd" + ".",o="Site Name",i="Name...",_="Pools",r="Generate",c="Token",d="Generated token...",g="Close",f="This field is required.",I="At least one pool is required.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","createBootstrapForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","siteName",1,"col-form-label","required"],o,["type","text","placeholder",i,"id","siteName","name","siteName","formControlName","siteName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["formGroupName","pools",1,"form-group"],["for","pools",1,"col-form-label","required"],_,["class","custom-control 
custom-checkbox",4,"ngFor","ngForOf"],[1,"mb-4","float-right",3,"form","submitAction"],r,["for","token",1,"col-form-label"],c,["placeholder",d,"id","token","formControlName","token","readonly","",1,"form-control","resize-vertical"],["source","token",1,"float-right"],[1,"modal-footer"],["name",g,3,"backAction"],[1,"invalid-feedback"],f,[1,"custom-control","custom-checkbox"],["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],I]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(11,"div",8),e.TgZ(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,ci,2,0,"span",12),e.qZA(),e.TgZ(16,"div",13),e.TgZ(17,"label",14),e.SDv(18,15),e.qZA(),e.YNc(19,di,4,5,"div",16),e.YNc(20,pi,2,0,"span",12),e.qZA(),e.TgZ(21,"cd-submit-button",17),e.NdJ("submitAction",function(){return o.generate()}),e.SDv(22,18),e.qZA(),e.TgZ(23,"div",8),e.TgZ(24,"label",19),e.TgZ(25,"span"),e.SDv(26,20),e.qZA(),e.qZA(),e.TgZ(27,"textarea",21),e._uU(28," "),e.qZA(),e.qZA(),e._UZ(29,"cd-copy-2-clipboard-button",22),e.qZA(),e.TgZ(30,"div",23),e.TgZ(31,"cd-back-button",24),e.NdJ("backAction",function(){return o.activeModal.close()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.createBootstrapForm),e.xp6(11),e.Q6J("ngIf",o.createBootstrapForm.showError("siteName",i,"required")),e.xp6(4),e.Q6J("ngForOf",o.pools),e.xp6(1),e.Q6J("ngIf",o.createBootstrapForm.showError("pools",i,"requirePool")),e.xp6(1),e.Q6J("form",o.createBootstrapForm)}},directives:[C.z,a._Y,a.JL,h.V,a.sg,O.P,$.o,a.Fj,k.b,a.JJ,a.u,Re.U,l.O5,a.x0,l.sg,ri.w,Ye.s,li.W,a.Wl],styles:[".form-group.ng-invalid[_ngcontent-%COMP%] .invalid-feedback[_ngcontent-%COMP%]{display:block}"]}),n})();function 
ui(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,27),e.qZA())}function mi(n,s){if(1&n&&(e.TgZ(0,"option",28),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.key),e.xp6(1),e.Oqu(t.desc)}}function Ti(n,s){if(1&n&&(e.TgZ(0,"div",29),e._UZ(1,"input",30),e.TgZ(2,"label",31),e._uU(3),e.qZA(),e.qZA()),2&n){const t=s.$implicit;e.xp6(1),e.s9C("id",t.name),e.s9C("name",t.name),e.s9C("formControlName",t.name),e.xp6(1),e.s9C("for",t.name),e.xp6(1),e.Oqu(t.name)}}function fi(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,32),e.qZA())}function Ci(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,33),e.qZA())}function Si(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,34),e.qZA())}let Ei=(()=>{class n{constructor(t,o,i,_){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=_,this.pools=[],this.directions=[{key:"rx-tx",desc:"Bidirectional"},{key:"rx",desc:"Unidirectional (receive-only)"}],this.createForm()}createForm(){this.importBootstrapForm=new x.d({siteName:new a.NI("",{validators:[a.kI.required]}),direction:new a.NI("rx-tx",{}),pools:new a.cw({},{validators:[this.validatePools()]}),token:new a.NI("",{validators:[a.kI.required,this.validateToken()]})})}ngOnInit(){this.rbdMirroringService.getSiteName().subscribe(t=>{this.importBootstrapForm.get("siteName").setValue(t.site_name)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.pools=t.content_data.pools.reduce((_,r)=>(_.push({name:r.name,mirror_mode:r.mirror_mode}),_),[]);const i=this.importBootstrapForm.get("pools");S().each(this.pools,_=>{const r=_.name,c="disabled"===_.mirror_mode,d=i.controls[r];d?c&&d.disabled?d.enable():!c&&d.enabled&&(d.disable(),d.setValue(!0)):i.addControl(r,new a.NI({value:!c,disabled:!c}))})})}ngOnDestroy(){this.subs&&this.subs.unsubscribe()}validatePools(){return t=>{let o=0;return S().each(t.controls,i=>{!0===i.value&&++o}),o>0?null:{requirePool:!0}}}validateToken(){return t=>{try{if(JSON.parse(atob(t.value)))return null}catch(o){}return{invalidToken:!0}}}import(){const 
t=[],o=[],i=this.importBootstrapForm.get("pools");S().each(i.controls,(g,f)=>{!0===g.value&&(t.push(f),g.disabled||o.push(f))});const _={mirror_mode:"image"};let r=(0,nt.z)(this.rbdMirroringService.setSiteName(this.importBootstrapForm.getValue("siteName")),(0,le.D)(o.map(g=>this.rbdMirroringService.updatePool(g,_))));r=t.reduce((g,f)=>(0,nt.z)(g,this.rbdMirroringService.importBootstrapToken(f,this.importBootstrapForm.getValue("direction"),this.importBootstrapForm.getValue("token"))),r).pipe((0,Et.Z)());const c=()=>{this.rbdMirroringService.refresh(),this.importBootstrapForm.setErrors({cdSubmitButton:!0})};this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/mirroring/bootstrap/import",{}),call:r}).subscribe({error:c,complete:()=>{c(),this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(N.Kz),e.Y36(v.p4),e.Y36(K),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-bootstrap-import-modal"]],decls:36,vars:10,consts:function(){let s,t,o,i,_,r,c,d,g,f,I,P;return s="Import Bootstrap Token",t="To import a bootstrap token which was created by a peer site cluster, provide the local site's name, select which pools will have mirroring enabled, provide the generated token, and click\xA0" + "\ufffd#10\ufffd" + "Import" + "\ufffd/#10\ufffd" + ".",o="Site Name",i="Name...",_="Direction",r="Pools",c="Token",d="Generated token...",g="This field is required.",f="At least one pool is required.",I="This field is required.",P="The token is 
invalid.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","importBootstrapForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","siteName",1,"col-form-label","required"],o,["type","text","placeholder",i,"id","siteName","name","siteName","formControlName","siteName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","direction",1,"col-form-label"],_,["id","direction","name","direction","formControlName","direction",1,"form-control"],[3,"value",4,"ngFor","ngForOf"],["formGroupName","pools",1,"form-group"],["for","pools",1,"col-form-label","required"],r,["class","custom-control custom-checkbox",4,"ngFor","ngForOf"],["for","token",1,"col-form-label","required"],c,["placeholder",d,"id","token","formControlName","token",1,"form-control","resize-vertical"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],g,[3,"value"],[1,"custom-control","custom-checkbox"],["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],f,I,P]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(11,"div",8),e.TgZ(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,ui,2,0,"span",12),e.qZA(),e.TgZ(16,"div",8),e.TgZ(17,"label",13),e.TgZ(18,"span"),e.SDv(19,14),e.qZA(),e.qZA(),e.TgZ(20,"select",15),e.YNc(21,mi,2,2,"option",16),e.qZA(),e.qZA(),e.TgZ(22,"div",17),e.TgZ(23,"label",18),e.SDv(24,19),e.qZA(),e.YNc(25,Ti,4,5,"div",20),e.YNc(26,fi,2,0,"span",12),e.qZA(),e.TgZ(27,"div",8),e.TgZ(28,"label",21),e.SDv(29,22),e.qZA(),e.TgZ(30,"textarea",23),e._uU(31," "),e.qZA(),e.YNc(32,Ci,2,0,"span",12),e.YNc(33,Si,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(34,"div",24),e.TgZ(35,"cd-form-button-panel",25),e.NdJ("submitActionEvent",function(){return 
o.import()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.importBootstrapForm),e.xp6(11),e.Q6J("ngIf",o.importBootstrapForm.showError("siteName",i,"required")),e.xp6(6),e.Q6J("ngForOf",o.directions),e.xp6(4),e.Q6J("ngForOf",o.pools),e.xp6(1),e.Q6J("ngIf",o.importBootstrapForm.showError("pools",i,"requirePool")),e.xp6(6),e.Q6J("ngIf",o.importBootstrapForm.showError("token",i,"required")),e.xp6(1),e.Q6J("ngIf",o.importBootstrapForm.showError("token",i,"invalidToken")),e.xp6(2),e.Q6J("form",o.importBootstrapForm)("submitText",o.actionLabels.SUBMIT)}},directives:[C.z,a._Y,a.JL,h.V,a.sg,O.P,$.o,a.Fj,k.b,a.JJ,a.u,Re.U,l.O5,a.EJ,l.sg,a.x0,j.p,a.YN,a.Kr,a.Wl],styles:[""]}),n})();var se=p(69158),Ri=p(58111);let it=(()=>{class n{transform(t){return"warning"===t?"badge badge-warning":"error"===t?"badge badge-danger":"success"===t?"badge badge-success":"badge badge-info"}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275pipe=e.Yjl({name:"mirrorHealthColor",type:n,pure:!0}),n})();const Mi=["healthTmpl"];function Oi(n,s){if(1&n&&(e.TgZ(0,"span",2),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=s.value;e.Q6J("ngClass",e.lcZ(1,2,s.row.health_color)),e.xp6(2),e.Oqu(o)}}let Ai=(()=>{class n{constructor(t,o){this.rbdMirroringService=t,this.cephShortVersionPipe=o,this.tableStatus=new se.E}ngOnInit(){this.columns=[{prop:"instance_id",name:"Instance",flexGrow:2},{prop:"id",name:"ID",flexGrow:2},{prop:"server_hostname",name:"Hostname",flexGrow:2},{prop:"version",name:"Version",pipe:this.cephShortVersionPipe,flexGrow:2},{prop:"health",name:"Health",cellTemplate:this.healthTmpl,flexGrow:1}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.data=t.content_data.daemons,this.tableStatus=new se.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(K),e.Y36(Ri.F))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-daemons"]],viewQuery:function(t,o){if(1&t&&e.Gf(Mi,7),2&t){let i;e.iGM(i=e.CRH())&&(o.healthTmpl=i.first)}},decls:3,vars:4,consts:[["columnMode","flex",3,"data","columns","autoReload","status","fetchData"],["healthTmpl",""],[3,"ngClass"]],template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()}),e.qZA(),e.YNc(1,Oi,3,4,"ng-template",null,1,e.W1O)),2&t&&e.Q6J("data",o.data)("columns",o.columns)("autoReload",-1)("status",o.tableStatus)},directives:[W.a,l.mk],pipes:[it],styles:[""]}),n})();var Rt=p(18891);class hi{}function Pi(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,25),e.qZA())}function Ii(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,26),e.qZA())}function bi(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,27),e.qZA())}function Ni(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,28),e.qZA())}function Fi(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,29),e.qZA())}function Di(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,30),e.qZA())}let vi=(()=>{class n{constructor(t,o,i,_){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=_,this.bsConfig={containerClass:"theme-default"},this.createForm()}createForm(){this.editPeerForm=new x.d({clusterName:new a.NI("",{validators:[a.kI.required,this.validateClusterName]}),clientID:new a.NI("",{validators:[a.kI.required,this.validateClientID]}),monAddr:new a.NI("",{validators:[this.validateMonAddr]}),key:new a.NI("",{validators:[this.validateKey]})})}ngOnInit(){this.pattern=`${this.poolName}/${this.peerUUID}`,"edit"===this.mode&&this.rbdMirroringService.getPeer(this.poolName,this.peerUUID).subscribe(t=>{this.setResponse(t)})}validateClusterName(t){if(!t.value.match(/^[\w\-_]*$/))return{invalidClusterName:{value:t.value}}}validateClientID(t){if(!t.value.match(/^(?!client\.)[\w\-_.]*$/))return{invalidClientID:{value:t.value}}}validateMonAddr(t){if(!t.value.match(/^[,; ]*([\w.\-_\[\]]+(:[\d]+)?[,; 
]*)*$/))return{invalidMonAddr:{value:t.value}}}validateKey(t){try{if(""===t.value||atob(t.value))return null}catch(o){}return{invalidKey:{value:t.value}}}setResponse(t){this.response=t,this.editPeerForm.get("clusterName").setValue(t.cluster_name),this.editPeerForm.get("clientID").setValue(t.client_id),this.editPeerForm.get("monAddr").setValue(t.mon_host),this.editPeerForm.get("key").setValue(t.key)}update(){const t=new hi;let o;t.cluster_name=this.editPeerForm.getValue("clusterName"),t.client_id=this.editPeerForm.getValue("clientID"),t.mon_host=this.editPeerForm.getValue("monAddr"),t.key=this.editPeerForm.getValue("key"),o=this.taskWrapper.wrapTaskAroundCall("edit"===this.mode?{task:new R.R("rbd/mirroring/peer/edit",{pool_name:this.poolName}),call:this.rbdMirroringService.updatePeer(this.poolName,this.peerUUID,t)}:{task:new R.R("rbd/mirroring/peer/add",{pool_name:this.poolName}),call:this.rbdMirroringService.addPeer(this.poolName,t)}),o.subscribe({error:()=>this.editPeerForm.setErrors({cdSubmitButton:!0}),complete:()=>{this.rbdMirroringService.refresh(),this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(N.Kz),e.Y36(v.p4),e.Y36(K),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-pool-edit-peer-modal"]],decls:38,vars:13,consts:function(){let s,t,o,i,_,r,c,d,g,f,I,P,Z,B,Q,Y,ee,te;return s="{VAR_SELECT, select, edit {Edit} other {Add}}",s=e.Zx4(s,{VAR_SELECT:"\ufffd0\ufffd"}),t="" + s + " pool mirror peer",o="{VAR_SELECT, select, edit {Edit} other {Add}}",o=e.Zx4(o,{VAR_SELECT:"\ufffd0\ufffd"}),i="" + o + " the pool mirror peer attributes for pool " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd1\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + " and click " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Submit" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ".",i=e.Zx4(i),_="Cluster Name",r="Name...",c="CephX ID",d="CephX ID...",g="Monitor Addresses",f="Comma-delimited addresses...",I="CephX Key",P="Base64-encoded key...",Z="This 
field is required.",B="The cluster name is not valid.",Q="This field is required.",Y="The CephX ID is not valid.",ee="The monitory address is not valid.",te="CephX key must be base64 encoded.",[[3,"modalRef"],[1,"modal-title"],t,[1,"modal-content"],["name","editPeerForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],i,[1,"form-group"],["for","clusterName",1,"col-form-label","required"],_,["type","text","placeholder",r,"id","clusterName","name","clusterName","formControlName","clusterName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","clientID",1,"col-form-label","required"],c,["type","text","placeholder",d,"id","clientID","name","clientID","formControlName","clientID",1,"form-control"],["for","monAddr",1,"col-form-label"],g,["type","text","placeholder",f,"id","monAddr","name","monAddr","formControlName","monAddr",1,"form-control"],["for","key",1,"col-form-label"],I,["type","text","placeholder",P,"id","key","name","key","formControlName","key",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],Z,B,Q,Y,ee,te]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.TgZ(1,"span",1),e.SDv(2,2),e.qZA(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.TgZ(8,"span"),e.tHW(9,7),e._UZ(10,"kbd"),e._UZ(11,"kbd"),e.N_p(),e.qZA(),e.qZA(),e.TgZ(12,"div",8),e.TgZ(13,"label",9),e.SDv(14,10),e.qZA(),e._UZ(15,"input",11),e.YNc(16,Pi,2,0,"span",12),e.YNc(17,Ii,2,0,"span",12),e.qZA(),e.TgZ(18,"div",8),e.TgZ(19,"label",13),e.SDv(20,14),e.qZA(),e._UZ(21,"input",15),e.YNc(22,bi,2,0,"span",12),e.YNc(23,Ni,2,0,"span",12),e.qZA(),e.TgZ(24,"div",8),e.TgZ(25,"label",16),e.TgZ(26,"span"),e.SDv(27,17),e.qZA(),e.qZA(),e._UZ(28,"input",18),e.YNc(29,Fi,2,0,"span",12),e.qZA(),e.TgZ(30,"div",8),e.TgZ(31,"label",19),e.TgZ(32,"span"),e.SDv(33,20),e.qZA(),e.qZA(),e._UZ(34,"input",21),e.YNc(35,Di,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(36,"div",22),e.TgZ(37,"cd-form-button-pane
l",23),e.NdJ("submitActionEvent",function(){return o.update()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(2),e.pQV(o.mode),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.editPeerForm),e.xp6(7),e.pQV(o.mode)(o.poolName),e.QtT(9),e.xp6(5),e.Q6J("ngIf",o.editPeerForm.showError("clusterName",i,"required")),e.xp6(1),e.Q6J("ngIf",o.editPeerForm.showError("clusterName",i,"invalidClusterName")),e.xp6(5),e.Q6J("ngIf",o.editPeerForm.showError("clientID",i,"required")),e.xp6(1),e.Q6J("ngIf",o.editPeerForm.showError("clientID",i,"invalidClientID")),e.xp6(6),e.Q6J("ngIf",o.editPeerForm.showError("monAddr",i,"invalidMonAddr")),e.xp6(6),e.Q6J("ngIf",o.editPeerForm.showError("key",i,"invalidKey")),e.xp6(2),e.Q6J("form",o.editPeerForm)("submitText",o.actionLabels.SUBMIT)}},directives:[C.z,a._Y,a.JL,h.V,a.sg,O.P,$.o,a.Fj,k.b,a.JJ,a.u,Re.U,l.O5,j.p],styles:[""]}),n})();const Li=["healthTmpl"];function $i(n,s){if(1&n&&(e.TgZ(0,"span",4),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=s.value;e.Q6J("ngClass",e.lcZ(1,2,s.row.health_color)),e.xp6(2),e.Oqu(o)}}let Bi=(()=>{class n{constructor(t,o,i,_,r){this.authStorageService=t,this.rbdMirroringService=o,this.modalService=i,this.taskWrapper=_,this.router=r,this.selection=new Se.r,this.tableStatus=new se.E,this.data=[],this.permission=this.authStorageService.getPermissions().rbdMirroring;const c={permission:"update",icon:T.P.edit,click:()=>this.editModeModal(),name:"Edit Mode",canBePrimary:()=>!0},d={permission:"create",icon:T.P.add,name:"Add Peer",click:()=>this.editPeersModal("add"),disable:()=>!this.selection.first()||"disabled"===this.selection.first().mirror_mode,visible:()=>!this.getPeerUUID(),canBePrimary:()=>!1},g={permission:"update",icon:T.P.exchange,name:"Edit Peer",click:()=>this.editPeersModal("edit"),visible:()=>!!this.getPeerUUID()},f={permission:"delete",icon:T.P.destroy,name:"Delete 
Peer",click:()=>this.deletePeersModal(),visible:()=>!!this.getPeerUUID()};this.tableActions=[c,d,g,f]}ngOnInit(){this.columns=[{prop:"name",name:"Name",flexGrow:2},{prop:"mirror_mode",name:"Mode",flexGrow:2},{prop:"leader_id",name:"Leader",flexGrow:2},{prop:"image_local_count",name:"# Local",flexGrow:2},{prop:"image_remote_count",name:"# Remote",flexGrow:2},{prop:"health",name:"Health",cellTemplate:this.healthTmpl,flexGrow:1}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.data=t.content_data.pools,this.tableStatus=new se.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}editModeModal(){this.router.navigate(["/block/mirroring",{outlets:{modal:[v.MQ.EDIT,this.selection.first().name]}}])}editPeersModal(t){const o={poolName:this.selection.first().name,mode:t};"edit"===t&&(o.peerUUID=this.getPeerUUID()),this.modalRef=this.modalService.show(vi,o)}deletePeersModal(){const t=this.selection.first().name,o=this.getPeerUUID();this.modalRef=this.modalService.show(pe.M,{itemDescription:"mirror peer",itemNames:[`${t} (${o})`],submitActionObservable:()=>new Rt.y(i=>{this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/mirroring/peer/delete",{pool_name:t}),call:this.rbdMirroringService.deletePeer(t,o)}).subscribe({error:_=>i.error(_),complete:()=>{this.rbdMirroringService.refresh(),i.complete()}})})})}getPeerUUID(){const t=this.selection.first(),o=this.data.find(i=>t&&t.name===i.name);if(o&&o.peer_uuids)return o.peer_uuids[0]}updateSelection(t){this.selection=t}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(K),e.Y36(de.Z),e.Y36(u.P),e.Y36(m.F0))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-pools"]],viewQuery:function(t,o){if(1&t&&e.Gf(Li,7),2&t){let 
i;e.iGM(i=e.CRH())&&(o.healthTmpl=i.first)}},decls:5,vars:7,consts:[["columnMode","flex","identifier","name","forceIdentifier","true","selectionType","single",3,"data","columns","autoReload","status","fetchData","updateSelection"],[1,"table-actions",3,"permission","selection","tableActions"],["healthTmpl",""],["name","modal"],[3,"ngClass"]],template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()})("updateSelection",function(_){return o.updateSelection(_)}),e._UZ(1,"cd-table-actions",1),e.qZA(),e.YNc(2,$i,3,4,"ng-template",null,2,e.W1O),e._UZ(4,"router-outlet",3)),2&t&&(e.Q6J("data",o.data)("columns",o.columns)("autoReload",-1)("status",o.tableStatus),e.xp6(1),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[W.a,Ee.K,m.lC,l.mk],pipes:[it],styles:[""]}),n})();var Mt=p(59376);const Gi=["stateTmpl"],yi=["syncTmpl"],xi=["progressTmpl"];function wi(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",13),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().refresh()}),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.image_error.data)("columns",t.image_error.columns)("autoReload",-1)("status",t.tableStatus)}}function qi(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",13),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().refresh()}),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.image_syncing.data)("columns",t.image_syncing.columns)("autoReload",-1)("status",t.tableStatus)}}function Hi(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",13),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().refresh()}),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.image_ready.data)("columns",t.image_ready.columns)("autoReload",-1)("status",t.tableStatus)}}function ki(n,s){if(1&n&&(e.TgZ(0,"span",14),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=s.value;e.Q6J("ngClass",e.lcZ(1,2,s.row.state_color)),e.xp6(2),e.Oqu(o)}}function Ki(n,s){1&n&&e._UZ(0,"div")}function 
Xi(n,s){if(1&n&&e._UZ(0,"ngb-progressbar",17),2&n){const t=e.oxw().value;e.Q6J("value",t)("showValue",!0)}}function Qi(n,s){if(1&n&&(e.YNc(0,Ki,1,0,"div",15),e.YNc(1,Xi,1,2,"ngb-progressbar",16)),2&n){const t=s.row;e.Q6J("ngIf","Replaying"===t.state),e.xp6(1),e.Q6J("ngIf","Syncing"===t.state)}}let zi=(()=>{class n{constructor(t){this.rbdMirroringService=t,this.image_error={data:[],columns:{}},this.image_syncing={data:[],columns:{}},this.image_ready={data:[],columns:{}},this.tableStatus=new se.E}ngOnInit(){this.image_error.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"state",name:"State",cellTemplate:this.stateTmpl,flexGrow:1},{prop:"description",name:"Issue",flexGrow:4}],this.image_syncing.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"state",name:"State",cellTemplate:this.stateTmpl,flexGrow:1},{prop:"progress",name:"Progress",cellTemplate:this.progressTmpl,flexGrow:2},{prop:"bytes_per_second",name:"Bytes per second",flexGrow:2},{prop:"entries_behind_primary",name:"Entries behind primary",flexGrow:2}],this.image_ready.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"state",name:"State",cellTemplate:this.stateTmpl,flexGrow:1},{prop:"description",name:"Description",flexGrow:4}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.image_error.data=t.content_data.image_error,this.image_syncing.data=t.content_data.image_syncing,this.image_ready.data=t.content_data.image_ready,this.tableStatus=new se.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(K))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-images"]],viewQuery:function(t,o){if(1&t&&(e.Gf(Gi,7),e.Gf(yi,7),e.Gf(xi,7)),2&t){let 
i;e.iGM(i=e.CRH())&&(o.stateTmpl=i.first),e.iGM(i=e.CRH())&&(o.syncTmpl=i.first),e.iGM(i=e.CRH())&&(o.progressTmpl=i.first)}},decls:19,vars:4,consts:function(){let s,t,o;return s="Issues (" + "\ufffd0\ufffd" + ")",t="Syncing (" + "\ufffd0\ufffd" + ")",o="Ready (" + "\ufffd0\ufffd" + ")",[["ngbNav","","cdStatefulTab","image-list",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","issues"],["ngbNavLink",""],s,["ngbNavContent",""],["ngbNavItem","syncing"],t,["ngbNavItem","ready"],o,[3,"ngbNavOutlet"],["stateTmpl",""],["progressTmpl",""],["columnMode","flex",3,"data","columns","autoReload","status","fetchData"],[3,"ngClass"],[4,"ngIf"],["type","info",3,"value","showValue",4,"ngIf"],["type","info",3,"value","showValue"]]},template:function(t,o){if(1&t&&(e.TgZ(0,"ul",0,1),e.TgZ(2,"li",2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,wi,1,4,"ng-template",5),e.qZA(),e.TgZ(6,"li",6),e.TgZ(7,"a",3),e.SDv(8,7),e.qZA(),e.YNc(9,qi,1,4,"ng-template",5),e.qZA(),e.TgZ(10,"li",8),e.TgZ(11,"a",3),e.SDv(12,9),e.qZA(),e.YNc(13,Hi,1,4,"ng-template",5),e.qZA(),e.qZA(),e._UZ(14,"div",10),e.YNc(15,ki,3,4,"ng-template",null,11,e.W1O),e.YNc(17,Qi,2,2,"ng-template",null,12,e.W1O)),2&t){const i=e.MAs(1);e.xp6(4),e.pQV(o.image_error.data.length),e.QtT(4),e.xp6(4),e.pQV(o.image_syncing.data.length),e.QtT(8),e.xp6(4),e.pQV(o.image_ready.data.length),e.QtT(12),e.xp6(2),e.Q6J("ngbNavOutlet",i)}},directives:[N.Pz,Mt.m,N.nv,N.Vx,N.uN,N.tO,W.a,l.mk,l.O5,N.Ly],pipes:[it],styles:[""]}),n})();function Ji(n,s){if(1&n&&e._UZ(0,"i",18),2&n){const t=e.oxw();e.Q6J("ngClass",t.icons.edit)}}function Yi(n,s){if(1&n&&e._UZ(0,"i",18),2&n){const t=e.oxw();e.Q6J("ngClass",t.icons.check)}}let Vi=(()=>{class n{constructor(t,o,i,_){this.authStorageService=t,this.rbdMirroringService=o,this.modalService=i,this.taskWrapper=_,this.selection=new Se.r,this.peersExist=!0,this.subs=new ii.w,this.editing=!1,this.icons=T.P,this.permission=this.authStorageService.getPermissions().rbdMirroring;const 
r={permission:"update",icon:T.P.upload,click:()=>this.createBootstrapModal(),name:"Create Bootstrap Token",canBePrimary:()=>!0,disable:()=>!1},c={permission:"update",icon:T.P.download,click:()=>this.importBootstrapModal(),name:"Import Bootstrap Token",disable:()=>this.peersExist};this.tableActions=[r,c]}ngOnInit(){this.createForm(),this.subs.add(this.rbdMirroringService.startPolling()),this.subs.add(this.rbdMirroringService.subscribeSummary(t=>{this.status=t.content_data.status,this.peersExist=!!t.content_data.pools.find(o=>o.peer_uuids.length>0)})),this.rbdMirroringService.getSiteName().subscribe(t=>{this.siteName=t.site_name,this.rbdmirroringForm.get("siteName").setValue(this.siteName)})}createForm(){this.rbdmirroringForm=new x.d({siteName:new a.NI({value:"",disabled:!0})})}ngOnDestroy(){this.subs.unsubscribe()}updateSiteName(){this.editing&&this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/mirroring/site_name/edit",{}),call:this.rbdMirroringService.setSiteName(this.rbdmirroringForm.getValue("siteName"))}).subscribe({complete:()=>{this.rbdMirroringService.refresh()}}),this.editing=!this.editing}createBootstrapModal(){this.modalRef=this.modalService.show(gi,{siteName:this.siteName})}importBootstrapModal(){this.modalRef=this.modalService.show(Ei,{siteName:this.siteName})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(K),e.Y36(de.Z),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring"]],decls:29,vars:9,consts:function(){let s,t,o,i;return s="Site 
Name",t="Daemons",o="Pools",i="Images",[[1,"row"],[1,"col-md-12"],["name","rbdmirroringForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"d-flex","flex-row"],["for","siteName",1,"col-form-label"],s,[1,"col-md-4","input-group","mb-3","mr-auto"],["type","text","id","siteName","name","siteName","formControlName","siteName",1,"form-control"],[1,"input-group-append"],["id","editSiteName",1,"btn","btn-light",3,"click"],[3,"ngClass",4,"ngIf"],[3,"source","byId"],[1,"table-actions",3,"permission","selection","tableActions"],[1,"col-sm-6"],t,o,i,[3,"ngClass"]]},template:function(t,o){1&t&&(e.TgZ(0,"div",0),e.TgZ(1,"div",1),e.TgZ(2,"form",2,3),e.TgZ(4,"div",4),e.TgZ(5,"label",5),e.SDv(6,6),e.qZA(),e.TgZ(7,"div",7),e._UZ(8,"input",8),e.TgZ(9,"div",9),e.TgZ(10,"button",10),e.NdJ("click",function(){return o.updateSiteName()}),e.YNc(11,Ji,1,1,"i",11),e.YNc(12,Yi,1,1,"i",11),e.qZA(),e._UZ(13,"cd-copy-2-clipboard-button",12),e.qZA(),e.qZA(),e._UZ(14,"cd-table-actions",13),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.TgZ(15,"div",0),e.TgZ(16,"div",14),e.TgZ(17,"legend"),e.SDv(18,15),e.qZA(),e._UZ(19,"cd-mirroring-daemons"),e.qZA(),e.TgZ(20,"div",14),e.TgZ(21,"legend"),e.SDv(22,16),e.qZA(),e._UZ(23,"cd-mirroring-pools"),e.qZA(),e.qZA(),e.TgZ(24,"div",0),e.TgZ(25,"div",1),e.TgZ(26,"legend"),e.SDv(27,17),e.qZA(),e._UZ(28,"cd-mirroring-images"),e.qZA(),e.qZA()),2&t&&(e.xp6(2),e.Q6J("formGroup",o.rbdmirroringForm),e.xp6(6),e.uIk("disabled",!o.editing||null),e.xp6(3),e.Q6J("ngIf",!o.editing),e.xp6(1),e.Q6J("ngIf",o.editing),e.xp6(1),e.Q6J("source",o.siteName)("byId",!1),e.xp6(1),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[a._Y,a.JL,h.V,a.sg,$.o,a.Fj,k.b,a.JJ,a.u,l.O5,Ye.s,Ee.K,Ai,Bi,zi,l.mk],styles:[""]}),n})();class Ui{}function ji(n,s){if(1&n&&(e.TgZ(0,"option",16),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.id),e.xp6(1),e.Oqu(t.name)}}function Wi(n,s){1&n&&(e.TgZ(0,"span",17),e.SDv(1,18),e.qZA())}let 
es=(()=>{class n{constructor(t,o,i,_,r,c){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=_,this.route=r,this.location=c,this.bsConfig={containerClass:"theme-default"},this.peerExists=!1,this.mirrorModes=[{id:"disabled",name:"Disabled"},{id:"pool",name:"Pool"},{id:"image",name:"Image"}],this.createForm()}createForm(){this.editModeForm=new x.d({mirrorMode:new a.NI("",{validators:[a.kI.required,this.validateMode.bind(this)]})})}ngOnInit(){this.route.params.subscribe(t=>{this.poolName=t.pool_name}),this.pattern=`${this.poolName}`,this.rbdMirroringService.getPool(this.poolName).subscribe(t=>{this.setResponse(t)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.peerExists=!1;const i=t.content_data.pools.find(_=>this.poolName===_.name);this.peerExists=i&&i.peer_uuids.length})}ngOnDestroy(){this.subs.unsubscribe()}validateMode(t){return"disabled"===t.value&&this.peerExists?{cannotDisable:{value:t.value}}:null}setResponse(t){this.editModeForm.get("mirrorMode").setValue(t.mirror_mode)}update(){const t=new Ui;t.mirror_mode=this.editModeForm.getValue("mirrorMode"),this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/mirroring/pool/edit",{pool_name:this.poolName}),call:this.rbdMirroringService.updatePool(this.poolName,t)}).subscribe({error:()=>this.editModeForm.setErrors({cdSubmitButton:!0}),complete:()=>{this.rbdMirroringService.refresh(),this.location.back()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(N.Kz),e.Y36(v.p4),e.Y36(K),e.Y36(u.P),e.Y36(m.gz),e.Y36(l.Ye))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-pool-edit-mode-modal"]],decls:21,vars:7,consts:function(){let s,t,o,i;return s="Edit pool mirror mode",t="To edit the mirror mode for pool\xA0 " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd0\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ", select a new mode from the list and click\xA0 " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Update" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + 
".",t=e.Zx4(t),o="Mode",i="Peer clusters must be removed prior to disabling mirror.",[["pageURL","mirroring",3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","editModeForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","mirrorMode",1,"col-form-label"],o,["id","mirrorMode","name","mirrorMode","formControlName","mirrorMode",1,"form-control"],[3,"value",4,"ngFor","ngForOf"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[3,"value"],[1,"invalid-feedback"],i]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd"),e._UZ(11,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(12,"div",8),e.TgZ(13,"label",9),e.TgZ(14,"span"),e.SDv(15,10),e.qZA(),e.qZA(),e.TgZ(16,"select",11),e.YNc(17,ji,2,2,"option",12),e.qZA(),e.YNc(18,Wi,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(19,"div",14),e.TgZ(20,"cd-form-button-panel",15),e.NdJ("submitActionEvent",function(){return o.update()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.editModeForm),e.xp6(7),e.pQV(o.poolName),e.QtT(9),e.xp6(6),e.Q6J("ngForOf",o.mirrorModes),e.xp6(1),e.Q6J("ngIf",o.editModeForm.showError("mirrorMode",i,"cannotDisable")),e.xp6(2),e.Q6J("form",o.editModeForm)("submitText",o.actionLabels.UPDATE)}},directives:[C.z,a._Y,a.JL,h.V,a.sg,O.P,$.o,a.EJ,k.b,a.JJ,a.u,l.sg,l.O5,j.p,a.YN,a.Kr],styles:[""]}),n})();var Ot=p(80226),ts=p(28049),os=p(43190),Ve=p(80842),st=p(30633),Le=p(47557),ns=p(28211);class is{}var Pe=(()=>{return(n=Pe||(Pe={}))[n.V1=1]="V1",n[n.V2=2]="V2",Pe;var n})();class ss{constructor(){this.features=[]}}class _s{constructor(){this.features=[]}}class rs extends class{}{constructor(){super(...arguments),this.features=[]}}class _t{constructor(){this.features=[],this.remove_scheduling=!1}}var 
Ue=(()=>{return(n=Ue||(Ue={})).editing="editing",n.cloning="cloning",n.copying="copying",Ue;var n})(),ls=p(17932),cs=p(18372),ds=p(54555);function ps(n,s){if(1&n&&(e.TgZ(0,"div",9),e.TgZ(1,"label",58),e.SDv(2,59),e.ALo(3,"titlecase"),e.qZA(),e.TgZ(4,"div",12),e._UZ(5,"input",60),e._UZ(6,"hr"),e.qZA(),e.qZA()),2&n){const t=e.oxw(2);e.xp6(3),e.pQV(e.lcZ(3,1,t.action)),e.QtT(2)}}function gs(n,s){1&n&&(e.TgZ(0,"span",61),e.ynx(1),e.SDv(2,62),e.BQk(),e.qZA())}function us(n,s){1&n&&(e.TgZ(0,"span",61),e.ynx(1),e.SDv(2,63),e.BQk(),e.qZA())}function ms(n,s){1&n&&e._UZ(0,"input",64)}function Ts(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,67),e.qZA()),2&n&&e.Q6J("ngValue",null)}function fs(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,68),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Cs(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,69),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Ss(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function Es(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"select",65),e.NdJ("change",function(){return e.CHM(t),e.oxw(2).setPoolMirrorMode()}),e.YNc(1,Ts,2,1,"option",66),e.YNc(2,fs,2,1,"option",66),e.YNc(3,Cs,2,1,"option",66),e.YNc(4,Ss,2,2,"option",46),e.qZA()}if(2&n){const t=e.oxw(2);e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.pools)}}function Rs(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,71),e.qZA())}const Ms=function(n,s){return[n,s]};function Os(n,s){if(1&n&&(e.TgZ(0,"div",9),e.TgZ(1,"div",20),e._UZ(2,"i",72),e.qZA(),e.qZA()),2&n){const t=e.oxw(2);e.xp6(2),e.Q6J("ngClass",e.WLB(1,Ms,t.icons.spinner,t.icons.spin))}}function As(n,s){1&n&&e._UZ(0,"input",76)}function hs(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,78),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Ps(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,79),e.qZA()),2&n&&e.Q6J("ngValue",null)}function 
Is(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,80),e.qZA()),2&n&&e.Q6J("ngValue",null)}function bs(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function Ns(n,s){if(1&n&&(e.TgZ(0,"select",77),e.YNc(1,hs,2,1,"option",66),e.YNc(2,Ps,2,1,"option",66),e.YNc(3,Is,2,1,"option",66),e.YNc(4,bs,2,2,"option",46),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.namespaces)}}function Fs(n,s){if(1&n&&(e.TgZ(0,"div",9),e.TgZ(1,"label",73),e._uU(2," Namespace "),e.qZA(),e.TgZ(3,"div",12),e.YNc(4,As,1,0,"input",74),e.YNc(5,Ns,5,4,"select",75),e.qZA(),e.qZA()),2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("ngIf","editing"===t.mode||!t.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==t.mode&&t.poolPermission.read)}}function Ds(n,s){1&n&&(e.TgZ(0,"cd-helper"),e.TgZ(1,"span"),e.SDv(2,81),e.qZA(),e.qZA())}function vs(n,s){1&n&&e._UZ(0,"input",87)}function Ls(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,89),e.qZA()),2&n&&e.Q6J("ngValue",null)}function $s(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,90),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Zs(n,s){1&n&&(e.TgZ(0,"option",50),e._uU(1,"-- Select a data pool -- "),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Bs(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function Gs(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"select",88),e.NdJ("change",function(i){return e.CHM(t),e.oxw(3).onDataPoolChange(i.target.value)}),e.YNc(1,Ls,2,1,"option",66),e.YNc(2,$s,2,1,"option",66),e.YNc(3,Zs,2,1,"option",66),e.YNc(4,Bs,2,2,"option",46),e.qZA()}if(2&n){const 
t=e.oxw(3);e.xp6(1),e.Q6J("ngIf",null===t.dataPools),e.xp6(1),e.Q6J("ngIf",null!==t.dataPools&&0===t.dataPools.length),e.xp6(1),e.Q6J("ngIf",null!==t.dataPools&&t.dataPools.length>0),e.xp6(1),e.Q6J("ngForOf",t.dataPools)}}function ys(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,91),e.qZA())}const je=function(n){return{required:n}};function xs(n,s){if(1&n&&(e.TgZ(0,"div",9),e.TgZ(1,"label",82),e.TgZ(2,"span",72),e.SDv(3,83),e.qZA(),e._UZ(4,"cd-helper",84),e.qZA(),e.TgZ(5,"div",12),e.YNc(6,vs,1,0,"input",85),e.YNc(7,Gs,5,4,"select",86),e.YNc(8,ys,2,0,"span",14),e.qZA(),e.qZA()),2&n){e.oxw();const t=e.MAs(2),o=e.oxw();e.xp6(2),e.Q6J("ngClass",e.VKq(4,je,"editing"!==o.mode)),e.xp6(4),e.Q6J("ngIf","editing"===o.mode||!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("dataPool",t,"required"))}}function ws(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,92),e.qZA())}function qs(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,93),e.qZA())}function Hs(n,s){if(1&n&&e._UZ(0,"cd-helper",97),2&n){const t=e.oxw().$implicit;e.s9C("html",t.helperHtml)}}function ks(n,s){if(1&n&&(e.TgZ(0,"div",21),e._UZ(1,"input",94),e.TgZ(2,"label",95),e._uU(3),e.qZA(),e.YNc(4,Hs,1,1,"cd-helper",96),e.qZA()),2&n){const t=s.$implicit;e.xp6(1),e.s9C("id",t.key),e.s9C("name",t.key),e.s9C("formControlName",t.key),e.xp6(1),e.s9C("for",t.key),e.xp6(1),e.Oqu(t.desc),e.xp6(1),e.Q6J("ngIf",t.helperHtml)}}const At=function(n){return["edit",n]},ht=function(n){return{modal:n}},Pt=function(n){return{outlets:n}},It=function(n){return["/block/mirroring",n]};function Ks(n,s){if(1&n&&(e.TgZ(0,"cd-helper"),e.TgZ(1,"span"),e.tHW(2,98),e._UZ(3,"b"),e._UZ(4,"a",99),e.N_p(),e.qZA(),e.qZA()),2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("routerLink",e.VKq(7,It,e.VKq(5,Pt,e.VKq(3,ht,e.VKq(1,At,t.currentPoolName)))))}}function Xs(n,s){if(1&n&&(e.TgZ(0,"cd-helper"),e.TgZ(1,"span"),e.tHW(2,103),e._UZ(3,"b"),e._UZ(4,"a",99),e.N_p(),e.qZA(),e.qZA()),2&n){const 
t=e.oxw(4);e.xp6(4),e.Q6J("routerLink",e.VKq(7,It,e.VKq(5,Pt,e.VKq(3,ht,e.VKq(1,At,t.currentPoolName)))))}}function Qs(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",101),e.TgZ(1,"input",102),e.NdJ("change",function(){return e.CHM(t),e.oxw(3).setExclusiveLock()}),e.qZA(),e.TgZ(2,"label",95),e._uU(3),e.ALo(4,"titlecase"),e.qZA(),e.YNc(5,Xs,5,9,"cd-helper",25),e.qZA()}if(2&n){const t=s.$implicit,o=e.oxw(3);e.xp6(1),e.Q6J("id",t)("value",t),e.uIk("disabled","pool"===o.poolMirrorMode&&"snapshot"===t||null),e.xp6(1),e.Q6J("for",t),e.xp6(1),e.Oqu(e.lcZ(4,6,t)),e.xp6(2),e.Q6J("ngIf","pool"===o.poolMirrorMode&&"snapshot"===t)}}function zs(n,s){if(1&n&&(e.TgZ(0,"div"),e.YNc(1,Qs,6,8,"div",100),e.qZA()),2&n){const t=e.oxw(2);e.xp6(1),e.Q6J("ngForOf",t.mirroringOptions)}}function Js(n,s){if(1&n&&(e.TgZ(0,"div",9),e.TgZ(1,"label",104),e.tHW(2,105),e._UZ(3,"cd-helper",106),e.N_p(),e.qZA(),e.TgZ(4,"div",12),e._UZ(5,"input",107),e.qZA(),e.qZA()),2&n){const t=e.oxw(2);e.xp6(5),e.uIk("disabled",t.mode===t.rbdFormMode.editing||null)}}function Ys(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"a",108),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).advancedEnabled=!0,!1}),e.SDv(1,109),e.qZA()}}function Vs(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function Us(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function js(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,110),e.qZA())}function Ws(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,111),e.qZA())}function e_(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,112),e.qZA())}function t_(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,113),e.qZA())}function o_(n,s){if(1&n){const 
t=e.EpF();e.TgZ(0,"div",1),e.TgZ(1,"form",2,3),e.TgZ(3,"div",4),e.TgZ(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.YNc(9,ps,7,3,"div",8),e.TgZ(10,"div",9),e.TgZ(11,"label",10),e.SDv(12,11),e.qZA(),e.TgZ(13,"div",12),e._UZ(14,"input",13),e.YNc(15,gs,3,0,"span",14),e.YNc(16,us,3,0,"span",14),e.qZA(),e.qZA(),e.TgZ(17,"div",15),e.NdJ("change",function(i){return e.CHM(t),e.oxw().onPoolChange(i.target.value)}),e.TgZ(18,"label",16),e.SDv(19,17),e.qZA(),e.TgZ(20,"div",12),e.YNc(21,ms,1,0,"input",18),e.YNc(22,Es,5,4,"select",19),e.YNc(23,Rs,2,0,"span",14),e.qZA(),e.qZA(),e.YNc(24,Os,3,4,"div",8),e.YNc(25,Fs,6,2,"div",8),e.TgZ(26,"div",9),e.TgZ(27,"div",20),e.TgZ(28,"div",21),e.TgZ(29,"input",22),e.NdJ("change",function(){return e.CHM(t),e.oxw().onUseDataPoolChange()}),e.qZA(),e.TgZ(30,"label",23),e.SDv(31,24),e.qZA(),e.YNc(32,Ds,3,0,"cd-helper",25),e.qZA(),e.qZA(),e.qZA(),e.YNc(33,xs,9,6,"div",8),e.TgZ(34,"div",9),e.TgZ(35,"label",26),e.SDv(36,27),e.qZA(),e.TgZ(37,"div",12),e._UZ(38,"input",28),e.YNc(39,ws,2,0,"span",14),e.YNc(40,qs,2,0,"span",14),e.qZA(),e.qZA(),e.TgZ(41,"div",29),e.TgZ(42,"label",30),e.SDv(43,31),e.qZA(),e.TgZ(44,"div",12),e.YNc(45,ks,5,6,"div",32),e.qZA(),e.qZA(),e.TgZ(46,"div",9),e.TgZ(47,"div",20),e.TgZ(48,"div",21),e.TgZ(49,"input",33),e.NdJ("change",function(){return 
e.CHM(t),e.oxw().setMirrorMode()}),e.qZA(),e.TgZ(50,"label",34),e._uU(51,"Mirroring"),e.qZA(),e.YNc(52,Ks,5,9,"cd-helper",25),e.qZA(),e.YNc(53,zs,2,1,"div",25),e.qZA(),e.qZA(),e.YNc(54,Js,6,1,"div",8),e.TgZ(55,"div",35),e.TgZ(56,"div",36),e.YNc(57,Ys,2,0,"a",37),e.qZA(),e.qZA(),e.TgZ(58,"div",38),e.TgZ(59,"legend",39),e.SDv(60,40),e.qZA(),e.TgZ(61,"div",41),e.TgZ(62,"h4",39),e.SDv(63,42),e.qZA(),e.TgZ(64,"div",9),e.TgZ(65,"label",43),e.tHW(66,44),e._UZ(67,"cd-helper"),e.N_p(),e.qZA(),e.TgZ(68,"div",12),e.TgZ(69,"select",45),e.YNc(70,Vs,2,2,"option",46),e.qZA(),e.qZA(),e.qZA(),e.TgZ(71,"div",9),e.TgZ(72,"label",47),e.tHW(73,48),e._UZ(74,"cd-helper"),e.N_p(),e.qZA(),e.TgZ(75,"div",12),e.TgZ(76,"select",49),e.TgZ(77,"option",50),e.SDv(78,51),e.qZA(),e.YNc(79,Us,2,2,"option",46),e.qZA(),e.YNc(80,js,2,0,"span",14),e.YNc(81,Ws,2,0,"span",14),e.qZA(),e.qZA(),e.TgZ(82,"div",9),e.TgZ(83,"label",52),e.tHW(84,53),e._UZ(85,"cd-helper"),e.N_p(),e.qZA(),e.TgZ(86,"div",12),e._UZ(87,"input",54),e.YNc(88,e_,2,0,"span",14),e.YNc(89,t_,2,0,"span",14),e.qZA(),e.qZA(),e.qZA(),e.TgZ(90,"cd-rbd-configuration-form",55),e.NdJ("changes",function(i){return e.CHM(t),e.oxw().getDirtyConfigurationValues=i}),e.qZA(),e.qZA(),e.qZA(),e.TgZ(91,"div",56),e.TgZ(92,"cd-form-button-panel",57),e.NdJ("submitActionEvent",function(){return e.CHM(t),e.oxw().submit()}),e.ALo(93,"titlecase"),e.ALo(94,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&n){const 
t=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.rbdForm),e.xp6(6),e.pQV(e.lcZ(6,35,o.action))(e.lcZ(7,37,o.resource)),e.QtT(5),e.xp6(2),e.Q6J("ngIf",o.rbdForm.getValue("parent")),e.xp6(6),e.Q6J("ngIf",o.rbdForm.showError("name",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("name",t,"pattern")),e.xp6(2),e.Q6J("ngClass",e.VKq(43,je,"editing"!==o.mode)),e.xp6(3),e.Q6J("ngIf","editing"===o.mode||!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("pool",t,"required")),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.rbdForm.getValue("pool")&&null===o.namespaces),e.xp6(1),e.Q6J("ngIf","editing"===o.mode&&o.rbdForm.getValue("namespace")||"editing"!==o.mode&&(o.namespaces&&o.namespaces.length>0||!o.poolPermission.read)),e.xp6(7),e.Q6J("ngIf",o.allDataPools.length<=1),e.xp6(1),e.Q6J("ngIf",o.rbdForm.getValue("useDataPool")),e.xp6(6),e.Q6J("ngIf",o.rbdForm.showError("size",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("size",t,"invalidSizeObject")),e.xp6(5),e.Q6J("ngForOf",o.featuresList),e.xp6(7),e.Q6J("ngIf",!1===o.mirroring&&o.currentPoolName),e.xp6(1),e.Q6J("ngIf",o.mirroring),e.xp6(1),e.Q6J("ngIf","snapshot"===o.rbdForm.getValue("mirroringMode")&&o.mirroring),e.xp6(3),e.Q6J("ngIf",!o.advancedEnabled),e.xp6(1),e.Q6J("hidden",!o.advancedEnabled),e.xp6(12),e.Q6J("ngForOf",o.objectSizes),e.xp6(2),e.Q6J("ngClass",e.VKq(45,je,o.rbdForm.getValue("stripingCount"))),e.xp6(5),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",o.objectSizes),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("stripingUnit",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("stripingUnit",t,"invalidStripingUnit")),e.xp6(2),e.Q6J("ngClass",e.VKq(47,je,o.rbdForm.getValue("stripingUnit"))),e.xp6(5),e.Q6J("ngIf",o.rbdForm.showError("stripingCount",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("stripingCount",t,"min")),e.xp6(1),e.Q6J("form",o.rbdForm)("initializeData",o.initializeConfigData),e.xp6(2),e.
Q6J("form",t)("submitText",e.lcZ(93,39,o.action)+" "+e.lcZ(94,41,o.resource))}}let $e=(()=>{class n extends H.E{constructor(t,o,i,_,r,c,d,g,f,I){super(),this.authStorageService=t,this.route=o,this.poolService=i,this.rbdService=_,this.formatter=r,this.taskWrapper=c,this.dimlessBinaryPipe=d,this.actionLabels=g,this.router=f,this.rbdMirroringService=I,this.namespaces=[],this.namespacesByPoolCache={},this.pools=null,this.allPools=null,this.dataPools=null,this.allDataPools=[],this.featuresList=[],this.initializeConfigData=new Ot.t(1),this.advancedEnabled=!1,this.rbdFormMode=Ue,this.defaultObjectSize="4 MiB",this.mirroringOptions=["journal","snapshot"],this.mirroring=!1,this.currentPoolName="",this.objectSizes=["4 KiB","8 KiB","16 KiB","32 KiB","64 KiB","128 KiB","256 KiB","512 KiB","1 MiB","2 MiB","4 MiB","8 MiB","16 MiB","32 MiB"],this.defaultStripingUnit="4 MiB",this.defaultStripingCount=1,this.rbdImage=new Ot.t(1),this.icons=T.P,this.routerUrl=this.router.url,this.poolPermission=this.authStorageService.getPermissions().pool,this.resource="RBD",this.features={"deep-flatten":{desc:"Deep flatten",requires:null,allowEnable:!1,allowDisable:!0},layering:{desc:"Layering",requires:null,allowEnable:!1,allowDisable:!1},"exclusive-lock":{desc:"Exclusive lock",requires:null,allowEnable:!0,allowDisable:!0},"object-map":{desc:"Object map (requires exclusive-lock)",requires:"exclusive-lock",allowEnable:!0,allowDisable:!0,initDisabled:!0},"fast-diff":{desc:"Fast diff (interlocked with object-map)",requires:"object-map",allowEnable:!0,allowDisable:!0,interlockedWith:"object-map",initDisabled:!0}},this.featuresList=this.objToArray(this.features),this.createForm()}objToArray(t){return S().map(t,(o,i)=>Object.assign(o,{key:i}))}createForm(){this.rbdForm=new x.d({parent:new a.NI(""),name:new a.NI("",{validators:[a.kI.required,a.kI.pattern(/^[^@/]+?$/)]}),pool:new a.NI(null,{validators:[a.kI.required]}),namespace:new a.NI(null),useDataPool:new a.NI(!1),dataPool:new a.NI(null),size:new 
a.NI(null,{updateOn:"blur"}),obj_size:new a.NI(this.defaultObjectSize),features:new x.d(this.featuresList.reduce((t,o)=>(t[o.key]=new a.NI({value:!1,disabled:!!o.initDisabled}),t),{})),mirroring:new a.NI(!1),schedule:new a.NI("",{validators:[a.kI.pattern(/^([0-9]+)d|([0-9]+)h|([0-9]+)m$/)]}),mirroringMode:new a.NI(this.mirroringOptions[0]),stripingUnit:new a.NI(this.defaultStripingUnit),stripingCount:new a.NI(this.defaultStripingCount,{updateOn:"blur"})},this.validateRbdForm(this.formatter))}disableForEdit(){this.rbdForm.get("parent").disable(),this.rbdForm.get("pool").disable(),this.rbdForm.get("namespace").disable(),this.rbdForm.get("useDataPool").disable(),this.rbdForm.get("dataPool").disable(),this.rbdForm.get("obj_size").disable(),this.rbdForm.get("stripingUnit").disable(),this.rbdForm.get("stripingCount").disable(),this.rbdImage.subscribe(t=>{t.image_format===Pe.V1&&(this.rbdForm.get("deep-flatten").disable(),this.rbdForm.get("layering").disable(),this.rbdForm.get("exclusive-lock").disable())})}disableForClone(){this.rbdForm.get("parent").disable(),this.rbdForm.get("size").disable()}disableForCopy(){this.rbdForm.get("parent").disable(),this.rbdForm.get("size").disable()}ngOnInit(){this.prepareFormForAction(),this.gatherNeededData().subscribe(this.handleExternalData.bind(this))}setExclusiveLock(){this.mirroring&&"journal"===this.rbdForm.get("mirroringMode").value?(this.rbdForm.get("exclusive-lock").setValue(!0),this.rbdForm.get("exclusive-lock").disable()):(this.rbdForm.get("exclusive-lock").enable(),"pool"===this.poolMirrorMode&&this.rbdForm.get("mirroringMode").setValue(this.mirroringOptions[0]))}setMirrorMode(){this.mirroring=!this.mirroring,this.setExclusiveLock()}setPoolMirrorMode(){var t;this.currentPoolName=this.mode===this.rbdFormMode.editing?null===(t=this.response)||void 0===t?void 0:t.pool_name:this.rbdForm.getValue("pool"),this.currentPoolName&&(this.rbdMirroringService.refresh(),this.rbdMirroringService.subscribeSummary(o=>{const 
i=o.content_data.pools.find(_=>_.name===this.currentPoolName);this.poolMirrorMode=i.mirror_mode,"disabled"===i.mirror_mode?(this.mirroring=!1,this.rbdForm.get("mirroring").setValue(this.mirroring),this.rbdForm.get("mirroring").disable()):this.mode!==this.rbdFormMode.editing&&(this.rbdForm.get("mirroring").enable(),this.mirroring=!0,this.rbdForm.get("mirroring").setValue(this.mirroring))})),this.setExclusiveLock()}prepareFormForAction(){const t=this.routerUrl;t.startsWith("/block/rbd/edit")?(this.mode=this.rbdFormMode.editing,this.action=this.actionLabels.EDIT,this.disableForEdit()):t.startsWith("/block/rbd/clone")?(this.mode=this.rbdFormMode.cloning,this.disableForClone(),this.action=this.actionLabels.CLONE):t.startsWith("/block/rbd/copy")?(this.mode=this.rbdFormMode.copying,this.action=this.actionLabels.COPY,this.disableForCopy()):this.action=this.actionLabels.CREATE,S().each(this.features,o=>{this.rbdForm.get("features").get(o.key).valueChanges.subscribe(i=>this.featureFormUpdate(o.key,i))})}gatherNeededData(){const t={};return this.mode?this.route.params.subscribe(o=>{const i=L.N.fromString(decodeURIComponent(o.image_spec));o.snap&&(this.snapName=decodeURIComponent(o.snap)),t.rbd=this.rbdService.get(i)}):t.defaultFeatures=this.rbdService.defaultFeatures(),this.mode!==this.rbdFormMode.editing&&this.poolPermission.read&&(t.pools=this.poolService.list(["pool_name","type","flags_names","application_metadata"])),(0,le.D)(t)}handleExternalData(t){if(this.handlePoolData(t.pools),this.setPoolMirrorMode(),t.defaultFeatures&&this.setFeatures(t.defaultFeatures),t.rbd){const o=t.rbd;this.setResponse(o,this.snapName),this.rbdImage.next(o)}this.loadingReady()}handlePoolData(t){if(!t)return;const o=[],i=[];for(const _ of t)this.rbdService.isRBDPool(_)&&("replicated"===_.type?(o.push(_),i.push(_)):"erasure"===_.type&&-1!==_.flags_names.indexOf("ec_overwrites")&&i.push(_));if(this.pools=o,this.allPools=o,this.dataPools=i,this.allDataPools=i,1===this.pools.length){const 
_=this.pools[0].pool_name;this.rbdForm.get("pool").setValue(_),this.onPoolChange(_)}this.allDataPools.length<=1&&this.rbdForm.get("useDataPool").disable()}onPoolChange(t){const o=this.rbdForm.get("dataPool");o.value===t&&o.setValue(null),this.dataPools=this.allDataPools?this.allDataPools.filter(i=>i.pool_name!==t):[],this.namespaces=null,t in this.namespacesByPoolCache?this.namespaces=this.namespacesByPoolCache[t]:this.rbdService.listNamespaces(t).subscribe(i=>{i=i.map(_=>_.namespace),this.namespacesByPoolCache[t]=i,this.namespaces=i}),this.rbdForm.get("namespace").setValue(null)}onUseDataPoolChange(){this.rbdForm.getValue("useDataPool")||(this.rbdForm.get("dataPool").setValue(null),this.onDataPoolChange(null))}onDataPoolChange(t){const o=this.allPools.filter(i=>i.pool_name!==t);this.rbdForm.getValue("pool")===t&&this.rbdForm.get("pool").setValue(null),this.pools=o}validateRbdForm(t){return o=>{const i=o.get("useDataPool"),_=o.get("dataPool");let r=null;i.value&&null==_.value&&(r={required:!0}),_.setErrors(r);const c=o.get("size"),d=o.get("obj_size"),g=t.toBytes(null!=d.value?d.value:this.defaultObjectSize),f=o.get("stripingCount"),I=null!=f.value?f.value:this.defaultStripingCount;let P=null;null===c.value?P={required:!0}:I*g>t.toBytes(c.value)&&(P={invalidSizeObject:!0}),c.setErrors(P);const Z=o.get("stripingUnit");let B=null;null===Z.value&&null!==f.value?B={required:!0}:null!==Z.value&&t.toBytes(Z.value)>g&&(B={invalidStripingUnit:!0}),Z.setErrors(B);let Q=null;return null===f.value&&null!==Z.value?Q={required:!0}:I<1&&(Q={min:!0}),f.setErrors(Q),null}}deepBoxCheck(t,o){this.getDependentChildFeatures(t).forEach(_=>{const r=this.rbdForm.get(_.key);o?r.enable({emitEvent:!1}):(r.disable({emitEvent:!1}),r.setValue(!1,{emitEvent:!1}),this.deepBoxCheck(_.key,o));const 
c=this.rbdForm.get("features");this.mode===this.rbdFormMode.editing&&c.get(_.key).enabled&&(-1!==this.response.features_name.indexOf(_.key)&&!_.allowDisable||-1===this.response.features_name.indexOf(_.key)&&!_.allowEnable)&&c.get(_.key).disable()})}getDependentChildFeatures(t){return S().filter(this.features,o=>o.requires===t)||[]}interlockCheck(t,o){const i=this.featuresList.find(_=>_.key===t);if(this.response){const _=null!=i.interlockedWith,r=this.featuresList.find(d=>d.interlockedWith===i.key),c=!!this.response.features_name.find(d=>d===i.key);if(_){if(c!==!!this.response.features_name.find(g=>g===i.interlockedWith))return}else if(r&&!!this.response.features_name.find(g=>g===r.key)!==c)return}o?S().filter(this.features,_=>_.interlockedWith===t).forEach(_=>this.rbdForm.get(_.key).setValue(!0,{emitEvent:!1})):i.interlockedWith&&this.rbdForm.get("features").get(i.interlockedWith).setValue(!1)}featureFormUpdate(t,o){if(o){const i=this.features[t].requires;if(i&&!this.rbdForm.getValue(i))return void this.rbdForm.get(`features.${t}`).setValue(!1)}this.deepBoxCheck(t,o),this.interlockCheck(t,o)}setFeatures(t){const o=this.rbdForm.get("features");S().forIn(this.features,i=>{-1!==t.indexOf(i.key)&&o.get(i.key).setValue(!0),this.featureFormUpdate(i.key,o.get(i.key).value)})}setResponse(t,o){this.response=t;const i=new L.N(t.pool_name,t.namespace,t.name).toString();if(this.mode===this.rbdFormMode.cloning)this.rbdForm.get("parent").setValue(`${i}@${o}`);else if(this.mode===this.rbdFormMode.copying)o?this.rbdForm.get("parent").setValue(`${i}@${o}`):this.rbdForm.get("parent").setValue(`${i}`);else if(t.parent){const _=t.parent;this.rbdForm.get("parent").setValue(`${_.pool_name}/${_.image_name}@${_.snap_name}`)}this.mode===this.rbdFormMode.editing&&(this.rbdForm.get("name").setValue(t.name),"snapshot"===(null==t?void 
0:t.mirror_mode)||t.features_name.includes("journaling")?(this.mirroring=!0,this.rbdForm.get("mirroring").setValue(this.mirroring),this.rbdForm.get("mirroringMode").setValue(null==t?void 0:t.mirror_mode),this.rbdForm.get("schedule").setValue(null==t?void 0:t.schedule_interval)):(this.mirroring=!1,this.rbdForm.get("mirroring").setValue(this.mirroring)),this.setPoolMirrorMode()),this.rbdForm.get("pool").setValue(t.pool_name),this.onPoolChange(t.pool_name),this.rbdForm.get("namespace").setValue(t.namespace),t.data_pool&&(this.rbdForm.get("useDataPool").setValue(!0),this.rbdForm.get("dataPool").setValue(t.data_pool)),this.rbdForm.get("size").setValue(this.dimlessBinaryPipe.transform(t.size)),this.rbdForm.get("obj_size").setValue(this.dimlessBinaryPipe.transform(t.obj_size)),this.setFeatures(t.features_name),this.rbdForm.get("stripingUnit").setValue(this.dimlessBinaryPipe.transform(t.stripe_unit)),this.rbdForm.get("stripingCount").setValue(t.stripe_count),this.initializeConfigData.next({initialData:this.response.configuration,sourceType:st.h.image})}createRequest(){const t=new rs;return 
t.pool_name=this.rbdForm.getValue("pool"),t.namespace=this.rbdForm.getValue("namespace"),t.name=this.rbdForm.getValue("name"),t.schedule_interval=this.rbdForm.getValue("schedule"),t.size=this.formatter.toBytes(this.rbdForm.getValue("size")),"image"===this.poolMirrorMode&&(t.mirror_mode=this.rbdForm.getValue("mirroringMode")),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(),t}addObjectSizeAndStripingToRequest(t){t.obj_size=this.formatter.toBytes(this.rbdForm.getValue("obj_size")),S().forIn(this.features,o=>{this.rbdForm.getValue(o.key)&&t.features.push(o.key)}),this.mirroring&&"journal"===this.rbdForm.getValue("mirroringMode")&&t.features.push("journaling"),t.stripe_unit=this.formatter.toBytes(this.rbdForm.getValue("stripingUnit")),t.stripe_count=this.rbdForm.getValue("stripingCount"),t.data_pool=this.rbdForm.getValue("dataPool")}createAction(){const t=this.createRequest();return this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/create",{pool_name:t.pool_name,namespace:t.namespace,image_name:t.name,schedule_interval:t.schedule_interval,start_time:t.start_time}),call:this.rbdService.create(t)})}editRequest(){const t=new _t;if(t.name=this.rbdForm.getValue("name"),t.schedule_interval=this.rbdForm.getValue("schedule"),t.name=this.rbdForm.getValue("name"),t.size=this.formatter.toBytes(this.rbdForm.getValue("size")),S().forIn(this.features,o=>{this.rbdForm.getValue(o.key)&&t.features.push(o.key)}),t.enable_mirror=this.rbdForm.getValue("mirroring"),"image"===this.poolMirrorMode)t.enable_mirror&&(t.mirror_mode=this.rbdForm.getValue("mirroringMode"));else if(t.enable_mirror)t.features.push("journaling");else{const o=t.features.indexOf("journaling",0);o>-1&&t.features.splice(o,1)}return t.configuration=this.getDirtyConfigurationValues(),t}cloneRequest(){const t=new ss;return 
t.child_pool_name=this.rbdForm.getValue("pool"),t.child_namespace=this.rbdForm.getValue("namespace"),t.child_image_name=this.rbdForm.getValue("name"),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(!0,st.h.image),t}editAction(){const t=new L.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/edit",{image_spec:t.toString()}),call:this.rbdService.update(t,this.editRequest())})}cloneAction(){const t=this.cloneRequest(),o=new L.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/clone",{parent_image_spec:o.toString(),parent_snap_name:this.snapName,child_pool_name:t.child_pool_name,child_namespace:t.child_namespace,child_image_name:t.child_image_name}),call:this.rbdService.cloneSnapshot(o,this.snapName,t)})}copyRequest(){const t=new _s;return this.snapName&&(t.snapshot_name=this.snapName),t.dest_pool_name=this.rbdForm.getValue("pool"),t.dest_namespace=this.rbdForm.getValue("namespace"),t.dest_image_name=this.rbdForm.getValue("name"),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(!0,st.h.image),t}copyAction(){const t=this.copyRequest(),o=new L.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/copy",{src_image_spec:o.toString(),dest_pool_name:t.dest_pool_name,dest_namespace:t.dest_namespace,dest_image_name:t.dest_image_name}),call:this.rbdService.copy(o,t)})}submit(){this.mode||this.rbdImage.next("create"),this.rbdImage.pipe((0,ts.P)(),(0,os.w)(()=>this.mode===this.rbdFormMode.editing?this.editAction():this.mode===this.rbdFormMode.cloning?this.cloneAction():this.mode===this.rbdFormMode.copying?this.copyAction():this.createAction())).subscribe(()=>{},()=>this.rbdForm.setErrors({cdSubmitButton:!0}),()=>this.router.navigate(["/block/rbd"]))}}return 
n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(m.gz),e.Y36(Ve.q),e.Y36(q),e.Y36(ns.H),e.Y36(u.P),e.Y36(Le.$),e.Y36(v.p4),e.Y36(m.F0),e.Y36(K))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let s,t,o,i,_,r,c,d,g,f,I,P,Z,B,Q,Y,ee,te,w,_e,ae,M,ge,ue,me,Te,fe,Ce,G,Be,Ge,ye,xe,we,qe,He,ke,Ke,Xe,Qe,ze;return s="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Name",o="Pool",i="Use a dedicated data pool",_="Size",r="e.g., 10GiB",c="Features",d="Advanced",g="Striping",f="Object size" + "\ufffd#67\ufffd" + "Objects in the Ceph Storage Cluster have a maximum configurable size (e.g., 2MB, 4MB, etc.). The object size should be large enough to accommodate many stripe units, and should be a multiple of the stripe unit." + "\ufffd/#67\ufffd" + "",I="Stripe unit" + "\ufffd#74\ufffd" + "Stripes have a configurable unit size (e.g., 64kb). The Ceph Client divides the data it will write to objects into equally sized stripe units, except for the last stripe unit. A stripe width, should be a fraction of the Object Size so that an object may contain many stripe units." + "\ufffd/#74\ufffd" + "",P="-- Select stripe unit --",Z="Stripe count" + "\ufffd#85\ufffd" + "The Ceph Client writes a sequence of stripe units over a series of objects determined by the stripe count. The series of objects is called an object set. After the Ceph Client writes to the last object in the object set, it returns to the first object in the object set." 
+ "\ufffd/#85\ufffd" + "",B="" + "\ufffd0\ufffd" + " from",Q="This field is required.",Y="'/' and '@' are not allowed.",ee="Loading...",te="-- No rbd pools available --",w="-- Select a pool --",_e="This field is required.",ae="Loading...",M="-- No namespaces available --",ge="-- Select a namespace --",ue="You need more than one pool with the rbd application label use to use a dedicated data pool.",me="Data pool",Te="Dedicated pool that stores the object-data of the RBD.",fe="Loading...",Ce="-- No data pools available --",G="This field is required.",Be="This field is required.",Ge="You have to increase the size.",ye="You need to enable a " + "\ufffd#3\ufffd" + "mirror mode" + "\ufffd/#3\ufffd" + " in the selected pool. Please " + "\ufffd#4\ufffd" + "click here to select a mode and enable it in this pool." + "\ufffd/#4\ufffd" + "",xe="You need to enable " + "\ufffd#3\ufffd" + "image mirror mode" + "\ufffd/#3\ufffd" + " in the selected pool. Please " + "\ufffd#4\ufffd" + "click here to select a mode and enable it in this pool." + "\ufffd/#4\ufffd" + "",we="Create Mirror-Snapshots automatically on a periodic basis. 
The interval can be specified in days, hours, or minutes using d, h, m suffix respectively.",qe="Schedule Interval " + "\ufffd#3\ufffd" + "" + "\ufffd/#3\ufffd" + "",He="e.g., 12h or 1d or 10m",ke="Advanced...",Ke="This field is required because stripe count is defined!",Xe="Stripe unit is greater than object size.",Qe="This field is required because stripe unit is defined!",ze="Stripe count must be greater than 0.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","rbdForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],s,[1,"card-body"],["class","form-group row",4,"ngIf"],[1,"form-group","row"],["for","name",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["type","text","placeholder","Name...","id","name","name","name","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"form-group","row",3,"change"],["for","pool",1,"cd-col-form-label",3,"ngClass"],o,["class","form-control","type","text","placeholder","Pool name...","id","pool","name","pool","formControlName","pool",4,"ngIf"],["id","pool","name","pool","class","form-control","formControlName","pool",3,"change",4,"ngIf"],[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["type","checkbox","id","useDataPool","name","useDataPool","formControlName","useDataPool",1,"custom-control-input",3,"change"],["for","useDataPool",1,"custom-control-label"],i,[4,"ngIf"],["for","size",1,"cd-col-form-label","required"],_,["id","size","name","size","type","text","formControlName","size","placeholder",r,"defaultUnit","GiB","cdDimlessBinary","",1,"form-control"],["formGroupName","features",1,"form-group","row"],["for","features",1,"cd-col-form-label"],c,["class","custom-control 
custom-checkbox",4,"ngFor","ngForOf"],["type","checkbox","id","mirroring","name","mirroring","formControlName","mirroring",1,"custom-control-input",3,"change"],["for","mirroring",1,"custom-control-label"],[1,"row"],[1,"col-sm-12"],["class","float-right margin-right-md","href","",3,"click",4,"ngIf"],[3,"hidden"],[1,"cd-header"],d,[1,"col-md-12"],g,["for","size",1,"cd-col-form-label"],f,["id","obj_size","name","obj_size","formControlName","obj_size",1,"form-control"],[3,"value",4,"ngFor","ngForOf"],["for","stripingUnit",1,"cd-col-form-label",3,"ngClass"],I,["id","stripingUnit","name","stripingUnit","formControlName","stripingUnit",1,"form-control"],[3,"ngValue"],P,["for","stripingCount",1,"cd-col-form-label",3,"ngClass"],Z,["id","stripingCount","name","stripingCount","formControlName","stripingCount","type","number",1,"form-control"],[3,"form","initializeData","changes"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],["for","name",1,"cd-col-form-label"],B,["type","text","id","parent","name","parent","formControlName","parent",1,"form-control"],[1,"invalid-feedback"],Q,Y,["type","text","placeholder","Pool 
name...","id","pool","name","pool","formControlName","pool",1,"form-control"],["id","pool","name","pool","formControlName","pool",1,"form-control",3,"change"],[3,"ngValue",4,"ngIf"],ee,te,w,[3,"value"],_e,[3,"ngClass"],["for","pool",1,"cd-col-form-label"],["class","form-control","type","text","placeholder","Namespace...","id","namespace","name","namespace","formControlName","namespace",4,"ngIf"],["id","namespace","name","namespace","class","form-control","formControlName","namespace",4,"ngIf"],["type","text","placeholder","Namespace...","id","namespace","name","namespace","formControlName","namespace",1,"form-control"],["id","namespace","name","namespace","formControlName","namespace",1,"form-control"],ae,M,ge,ue,["for","dataPool",1,"cd-col-form-label"],me,["html",Te],["class","form-control","type","text","placeholder","Data pool name...","id","dataPool","name","dataPool","formControlName","dataPool",4,"ngIf"],["id","dataPool","name","dataPool","class","form-control","formControlName","dataPool",3,"change",4,"ngIf"],["type","text","placeholder","Data pool name...","id","dataPool","name","dataPool","formControlName","dataPool",1,"form-control"],["id","dataPool","name","dataPool","formControlName","dataPool",1,"form-control",3,"change"],fe,Ce,G,Be,Ge,["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],[3,"html",4,"ngIf"],[3,"html"],ye,[3,"routerLink"],["class","custom-control custom-radio 
ml-2",4,"ngFor","ngForOf"],[1,"custom-control","custom-radio","ml-2"],["type","radio","name","mirroringMode","formControlName","mirroringMode",1,"custom-control-input",3,"id","value","change"],xe,[1,"cd-col-form-label"],qe,["html",we],["id","schedule","name","schedule","type","text","formControlName","schedule","placeholder",He,1,"form-control"],["href","",1,"float-right","margin-right-md",3,"click"],ke,Ke,Xe,Qe,ze]},template:function(t,o){1&t&&e.YNc(0,o_,95,49,"div",0),2&t&&e.Q6J("cdFormLoading",o.loading)},directives:[pt.y,a._Y,a.JL,a.sg,h.V,l.O5,O.P,$.o,a.Fj,k.b,a.JJ,a.u,Re.U,l.mk,a.Wl,ls.Q,a.x0,l.sg,cs.S,a.EJ,a.YN,a.Kr,a.wV,ds.d,j.p,m.yS,a._],pipes:[l.rS,et.m],styles:[""]}),n})();var bt=p(71225),Nt=p(36169);let n_=(()=>{class n{constructor(){}static getCount(t){var o;return Number(null===(o=t.headers)||void 0===o?void 0:o.get("X-Total-Count"))}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275prov=e.Yz7({token:n,factory:n.\u0275fac,providedIn:"root"}),n})();var at=p(51847),i_=p(16738),Me=p.n(i_),rt=p(62862),s_=p(52266);function __(n,s){1&n&&(e.TgZ(0,"div",18),e.TgZ(1,"span"),e.SDv(2,19),e.qZA(),e.qZA())}function a_(n,s){1&n&&(e.TgZ(0,"span",20),e.SDv(1,21),e.qZA())}function r_(n,s){1&n&&(e.TgZ(0,"span",20),e.SDv(1,22),e.qZA())}function l_(n,s){if(1&n&&e._UZ(0,"cd-date-time-picker",23),2&n){const t=e.oxw();e.Q6J("control",t.moveForm.get("expiresAt"))}}let c_=(()=>{class n{constructor(t,o,i,_,r){this.rbdService=t,this.activeModal=o,this.actionLabels=i,this.fb=_,this.taskWrapper=r,this.createForm()}createForm(){this.moveForm=this.fb.group({expiresAt:["",[z.h.custom("format",t=>!(""===t||Me()(t,"YYYY-MM-DD HH:mm:ss").isValid())),z.h.custom("expired",t=>Me()().isAfter(t))]]})}ngOnInit(){this.imageSpec=new L.N(this.poolName,this.namespace,this.imageName),this.imageSpecStr=this.imageSpec.toString(),this.pattern=`${this.poolName}/${this.imageName}`}moveImage(){let t=0;const o=this.moveForm.getValue("expiresAt");o&&(t=Me()(o,"YYYY-MM-DD 
HH:mm:ss").diff(Me()(),"seconds",!0)),t<0&&(t=0),this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/trash/move",{image_spec:this.imageSpecStr}),call:this.rbdService.moveTrash(this.imageSpec,t)}).subscribe({complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(q),e.Y36(N.Kz),e.Y36(v.p4),e.Y36(rt.O),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-move-modal"]],decls:23,vars:9,consts:function(){let s,t,o,i,_,r,c;return s="Move an image to trash",t="To move " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd0\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + " to trash, click " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Move" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ". Optionally, you can pick an expiration date.",t=e.Zx4(t),o="Protection expires at",i="NOT PROTECTED",_="This image contains snapshot(s), which will prevent it from being removed after moved to trash.",r="Wrong date format. Please use \"YYYY-MM-DD HH:mm:ss\".",c="Protection has already expired. 
Please pick a future date or leave it empty.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","moveForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],["class","alert alert-warning","role","alert",4,"ngIf"],t,[1,"form-group"],["for","expiresAt",1,"col-form-label"],o,["type","text","placeholder",i,"formControlName","expiresAt","triggers","manual",1,"form-control",3,"ngbPopover","click","keypress"],["p","ngbPopover"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["popContent",""],["role","alert",1,"alert","alert-warning"],_,[1,"invalid-feedback"],r,c,[3,"control"]]},template:function(t,o){if(1&t){const i=e.EpF();e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.YNc(7,__,3,0,"div",7),e.TgZ(8,"p"),e.tHW(9,8),e._UZ(10,"kbd"),e._UZ(11,"kbd"),e.N_p(),e.qZA(),e.TgZ(12,"div",9),e.TgZ(13,"label",10),e.SDv(14,11),e.qZA(),e.TgZ(15,"input",12,13),e.NdJ("click",function(){return e.CHM(i),e.MAs(16).open()})("keypress",function(){return e.CHM(i),e.MAs(16).close()}),e.qZA(),e.YNc(17,a_,2,0,"span",14),e.YNc(18,r_,2,0,"span",14),e.qZA(),e.qZA(),e.TgZ(19,"div",15),e.TgZ(20,"cd-form-button-panel",16),e.NdJ("submitActionEvent",function(){return o.moveImage()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA(),e.YNc(21,l_,1,1,"ng-template",null,17,e.W1O)}if(2&t){const i=e.MAs(5),_=e.MAs(22);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.moveForm),e.xp6(3),e.Q6J("ngIf",o.hasSnapshots),e.xp6(4),e.pQV(o.imageSpecStr),e.QtT(9),e.xp6(4),e.Q6J("ngbPopover",_),e.xp6(2),e.Q6J("ngIf",o.moveForm.showError("expiresAt",i,"format")),e.xp6(1),e.Q6J("ngIf",o.moveForm.showError("expiresAt",i,"expired")),e.xp6(2),e.Q6J("form",o.moveForm)("submitText",o.actionLabels.MOVE)}},directives:[C.z,a._Y,a.JL,a.sg,h.V,l.O5,O.P,$.o,a.Fj,k.b,a.JJ,a.u,N.o8,j.p,s_.J],styles:[""]}),n})();function 
d_(n,s){1&n&&(e.TgZ(0,"li",10),e.TgZ(1,"a",3),e.SDv(2,11),e.qZA(),e.qZA())}let We=(()=>{class n{constructor(t,o){this.authStorageService=t,this.router=o,this.grafanaPermission=this.authStorageService.getPermissions().grafana}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(m.F0))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-tabs"]],decls:12,vars:2,consts:function(){let s,t,o,i;return s="Images",t="Namespaces",o="Trash",i="Overall Performance",[["ngbNav","",1,"nav-tabs",3,"activeId","navChange"],["nav","ngbNav"],["ngbNavItem","/block/rbd"],["ngbNavLink",""],s,["ngbNavItem","/block/rbd/namespaces"],t,["ngbNavItem","/block/rbd/trash"],o,["ngbNavItem","/block/rbd/performance",4,"ngIf"],["ngbNavItem","/block/rbd/performance"],i]},template:function(t,o){1&t&&(e.TgZ(0,"ul",0,1),e.NdJ("navChange",function(_){return o.router.navigate([_.nextId])}),e.TgZ(2,"li",2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.qZA(),e.TgZ(5,"li",5),e.TgZ(6,"a",3),e.SDv(7,6),e.qZA(),e.qZA(),e.TgZ(8,"li",7),e.TgZ(9,"a",3),e.SDv(10,8),e.qZA(),e.qZA(),e.YNc(11,d_,3,0,"li",9),e.qZA()),2&t&&(e.Q6J("activeId",o.router.url),e.xp6(11),e.Q6J("ngIf",o.grafanaPermission.read))},directives:[N.Pz,N.nv,N.Vx,l.O5],styles:[""]}),n})();var p_=p(25917),Ft=p(51295),lt=p(60737),g_=p(74255),Dt=p(71099),vt=p(79765);function u_(n,s){1&n&&(e.TgZ(0,"span",16),e.SDv(1,17),e.qZA())}function m_(n,s){if(1&n&&(e.TgZ(0,"span"),e.tHW(1,18),e._UZ(2,"b"),e.N_p(),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.pQV(t.imageName),e.QtT(1)}}let T_=(()=>{class n{constructor(t,o,i,_,r){this.activeModal=t,this.rbdService=o,this.taskManagerService=i,this.notificationService=_,this.actionLabels=r,this.editing=!1,this.onSubmit=new vt.xQ,this.action=this.actionLabels.CREATE,this.resource="RBD Snapshot",this.createForm()}createForm(){this.snapshotForm=new x.d({snapshotName:new 
a.NI("",{validators:[a.kI.required]})})}setSnapName(t){this.snapName=t,"snapshot"!==this.mirroring?this.snapshotForm.get("snapshotName").setValue(t):this.snapshotForm.get("snapshotName").clearValidators()}setEditing(t=!0){this.editing=t,this.action=this.editing?this.actionLabels.RENAME:this.actionLabels.CREATE}editAction(){const t=this.snapshotForm.getValue("snapshotName"),o=new L.N(this.poolName,this.namespace,this.imageName),i=new R.R;i.name="rbd/snap/edit",i.metadata={image_spec:o.toString(),snapshot_name:t},this.rbdService.renameSnapshot(o,this.snapName,t).toPromise().then(()=>{this.taskManagerService.subscribe(i.name,i.metadata,_=>{this.notificationService.notifyTask(_)}),this.activeModal.close(),this.onSubmit.next(this.snapName)}).catch(()=>{this.snapshotForm.setErrors({cdSubmitButton:!0})})}createAction(){const t=this.snapshotForm.getValue("snapshotName"),o=new L.N(this.poolName,this.namespace,this.imageName),i=new R.R;i.name="rbd/snap/create",i.metadata={image_spec:o.toString(),snapshot_name:t},this.rbdService.createSnapshot(o,t).toPromise().then(()=>{this.taskManagerService.subscribe(i.name,i.metadata,_=>{this.notificationService.notifyTask(_)}),this.activeModal.close(),this.onSubmit.next(t)}).catch(()=>{this.snapshotForm.setErrors({cdSubmitButton:!0})})}submit(){this.editing?this.editAction():this.createAction()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(N.Kz),e.Y36(q),e.Y36(Dt.k),e.Y36(ve.g),e.Y36(v.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-snapshot-form-modal"]],decls:22,vars:17,consts:function(){let s,t,o,i;return s="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Name",o="This field is required.",i="Snapshot mode is enabled on image " + "\ufffd#2\ufffd" + "" + "\ufffd0\ufffd" + "" + "\ufffd/#2\ufffd" + ": snapshot names are auto 
generated",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","snapshotForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","snapshotName",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["type","text","placeholder","Snapshot name...","id","snapshotName","name","snapshotName","formControlName","snapshotName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],o,i]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.YNc(14,u_,2,0,"span",12),e._UZ(15,"br"),e._UZ(16,"br"),e.YNc(17,m_,3,1,"span",13),e.qZA(),e.qZA(),e.qZA(),e.TgZ(18,"div",14),e.TgZ(19,"cd-form-button-panel",15),e.NdJ("submitActionEvent",function(){return o.submit()}),e.ALo(20,"titlecase"),e.ALo(21,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,9,o.action))(e.lcZ(4,11,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.snapshotForm),e.xp6(7),e.uIk("disabled","snapshot"===o.mirroring||null),e.xp6(1),e.Q6J("ngIf",o.snapshotForm.showError("snapshotName",i,"required")),e.xp6(3),e.Q6J("ngIf","snapshot"===o.mirroring),e.xp6(2),e.Q6J("form",o.snapshotForm)("submitText",e.lcZ(20,13,o.action)+" "+e.lcZ(21,15,o.resource))}},directives:[C.z,a._Y,a.JL,a.sg,h.V,O.P,$.o,a.Fj,k.b,a.JJ,a.u,Re.U,l.O5,j.p],pipes:[l.rS,et.m],styles:[""]}),n})();class 
f_{constructor(s,t,o){this.featuresName=t,this.cloneFormatVersion=1,o.cloneFormatVersion().subscribe(i=>{this.cloneFormatVersion=i}),this.create={permission:"create",icon:T.P.add,name:s.CREATE},this.rename={permission:"update",icon:T.P.edit,name:s.RENAME,disable:i=>this.disableForMirrorSnapshot(i)},this.protect={permission:"update",icon:T.P.lock,visible:i=>i.hasSingleSelection&&!i.first().is_protected,name:s.PROTECT,disable:i=>this.disableForMirrorSnapshot(i)},this.unprotect={permission:"update",icon:T.P.unlock,visible:i=>i.hasSingleSelection&&i.first().is_protected,name:s.UNPROTECT,disable:i=>this.disableForMirrorSnapshot(i)},this.clone={permission:"create",canBePrimary:i=>i.hasSingleSelection,disable:i=>this.getCloneDisableDesc(i,this.featuresName)||this.disableForMirrorSnapshot(i),icon:T.P.clone,name:s.CLONE},this.copy={permission:"create",canBePrimary:i=>i.hasSingleSelection,disable:i=>!i.hasSingleSelection||i.first().cdExecuting||this.disableForMirrorSnapshot(i),icon:T.P.copy,name:s.COPY},this.rollback={permission:"update",icon:T.P.undo,name:s.ROLLBACK,disable:i=>this.disableForMirrorSnapshot(i)},this.deleteSnap={permission:"delete",icon:T.P.destroy,disable:i=>{const _=i.first();return!i.hasSingleSelection||_.cdExecuting||_.is_protected||this.disableForMirrorSnapshot(i)},name:s.DELETE},this.ordering=[this.create,this.rename,this.protect,this.unprotect,this.clone,this.copy,this.rollback,this.deleteSnap]}getCloneDisableDesc(s,t){return!(s.hasSingleSelection&&!s.first().cdExecuting)||((null==t?void 0:t.includes("layering"))?1===this.cloneFormatVersion&&!s.first().is_protected&&"Snapshot must be protected in order to clone.":"Parent image must support Layering")}disableForMirrorSnapshot(s){return s.hasSingleSelection&&"snapshot"===s.first().mirror_mode&&s.first().name.includes(".mirror.")}}class C_{}var Ze=p(96102);const S_=["nameTpl"],E_=["rollbackTpl"];function 
R_(n,s){if(1&n&&(e.ynx(0),e.SDv(1,3),e.BQk(),e.TgZ(2,"strong"),e._uU(3),e.qZA(),e._uU(4,".\n")),2&n){const t=s.$implicit;e.xp6(3),e.hij(" ",t.snapName,"")}}let M_=(()=>{class n{constructor(t,o,i,_,r,c,d,g,f,I,P){this.authStorageService=t,this.modalService=o,this.dimlessBinaryPipe=i,this.cdDatePipe=_,this.rbdService=r,this.taskManagerService=c,this.notificationService=d,this.summaryService=g,this.taskListService=f,this.actionLabels=I,this.cdr=P,this.snapshots=[],this.selection=new Se.r,this.builders={"rbd/snap/create":Z=>{const B=new C_;return B.name=Z.snapshot_name,B}},this.permission=this.authStorageService.getPermissions().rbdImage}ngOnInit(){this.columns=[{name:"Name",prop:"name",cellTransformation:De.e.executing,flexGrow:2},{name:"Size",prop:"size",flexGrow:1,cellClass:"text-right",pipe:this.dimlessBinaryPipe},{name:"Provisioned",prop:"disk_usage",flexGrow:1,cellClass:"text-right",pipe:this.dimlessBinaryPipe},{name:"State",prop:"is_protected",flexGrow:1,cellTransformation:De.e.badge,customTemplateConfig:{map:{true:{value:"PROTECTED",class:"badge-success"},false:{value:"UNPROTECTED",class:"badge-info"}}}},{name:"Created",prop:"timestamp",flexGrow:1,pipe:this.cdDatePipe}],this.imageSpec=new L.N(this.poolName,this.namespace,this.rbdName),this.rbdTableActions=new f_(this.actionLabels,this.featuresName,this.rbdService),this.rbdTableActions.create.click=()=>this.openCreateSnapshotModal(),this.rbdTableActions.rename.click=()=>this.openEditSnapshotModal(),this.rbdTableActions.protect.click=()=>this.toggleProtection(),this.rbdTableActions.unprotect.click=()=>this.toggleProtection();const 
t=()=>this.selection.first()&&`${this.imageSpec.toStringEncoded()}/${encodeURIComponent(this.selection.first().name)}`;this.rbdTableActions.clone.routerLink=()=>`/block/rbd/clone/${t()}`,this.rbdTableActions.copy.routerLink=()=>`/block/rbd/copy/${t()}`,this.rbdTableActions.rollback.click=()=>this.rollbackModal(),this.rbdTableActions.deleteSnap.click=()=>this.deleteSnapshotModal(),this.tableActions=this.rbdTableActions.ordering,this.taskListService.init(()=>(0,p_.of)(this.snapshots),null,_=>{Ft.T.updateChanged(this,{data:_})&&(this.cdr.detectChanges(),this.data=[...this.data])},()=>{Ft.T.updateChanged(this,{data:this.snapshots})&&(this.cdr.detectChanges(),this.data=[...this.data])},_=>["rbd/snap/create","rbd/snap/delete","rbd/snap/edit","rbd/snap/rollback"].includes(_.name)&&this.imageSpec.toString()===_.metadata.image_spec,(_,r)=>_.name===r.metadata.snapshot_name,this.builders)}ngOnChanges(){this.columns&&(this.imageSpec=new L.N(this.poolName,this.namespace,this.rbdName),this.rbdTableActions&&(this.rbdTableActions.featuresName=this.featuresName),this.taskListService.fetch())}openSnapshotModal(t,o=null){this.modalRef=this.modalService.show(T_,{mirroring:this.mirroring}),this.modalRef.componentInstance.poolName=this.poolName,this.modalRef.componentInstance.imageName=this.rbdName,this.modalRef.componentInstance.namespace=this.namespace,o?this.modalRef.componentInstance.setEditing():o=`${this.rbdName}_${Me()().toISOString(!0)}`,this.modalRef.componentInstance.setSnapName(o),this.modalRef.componentInstance.onSubmit.subscribe(_=>{const r=new lt.o;r.name=t,r.metadata={image_spec:this.imageSpec.toString(),snapshot_name:_},this.summaryService.addRunningTask(r)})}openCreateSnapshotModal(){this.openSnapshotModal("rbd/snap/create")}openEditSnapshotModal(){this.openSnapshotModal("rbd/snap/edit",this.selection.first().name)}toggleProtection(){const t=this.selection.first().name,o=this.selection.first().is_protected,i=new R.R;i.name="rbd/snap/edit";const _=new 
L.N(this.poolName,this.namespace,this.rbdName);i.metadata={image_spec:_.toString(),snapshot_name:t},this.rbdService.protectSnapshot(_,t,!o).toPromise().then(()=>{const r=new lt.o;r.name=i.name,r.metadata=i.metadata,this.summaryService.addRunningTask(r),this.taskManagerService.subscribe(i.name,i.metadata,c=>{this.notificationService.notifyTask(c)})})}_asyncTask(t,o,i){const _=new R.R;_.name=o,_.metadata={image_spec:new L.N(this.poolName,this.namespace,this.rbdName).toString(),snapshot_name:i};const r=new L.N(this.poolName,this.namespace,this.rbdName);this.rbdService[t](r,i).toPromise().then(()=>{const c=new lt.o;c.name=_.name,c.metadata=_.metadata,this.summaryService.addRunningTask(c),this.modalRef.close(),this.taskManagerService.subscribe(c.name,c.metadata,d=>{this.notificationService.notifyTask(d)})}).catch(()=>{this.modalRef.componentInstance.stopLoadingSpinner()})}rollbackModal(){const t=this.selection.selected[0].name,o=new L.N(this.poolName,this.namespace,this.rbdName).toString(),i={titleText:"RBD snapshot rollback",buttonText:"Rollback",bodyTpl:this.rollbackTpl,bodyData:{snapName:`${o}@${t}`},onSubmit:()=>{this._asyncTask("rollbackSnapshot","rbd/snap/rollback",t)}};this.modalRef=this.modalService.show(Nt.Y,i)}deleteSnapshotModal(){const t=this.selection.selected[0].name;this.modalRef=this.modalService.show(pe.M,{itemDescription:"RBD snapshot",itemNames:[t],submitAction:()=>this._asyncTask("deleteSnapshot","rbd/snap/delete",t)})}updateSelection(t){this.selection=t}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(de.Z),e.Y36(Le.$),e.Y36(Ze.N),e.Y36(q),e.Y36(Dt.k),e.Y36(ve.g),e.Y36(g_.J),e.Y36(ce.j),e.Y36(v.p4),e.Y36(e.sBO))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-snapshot-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(S_,5),e.Gf(E_,7)),2&t){let 
i;e.iGM(i=e.CRH())&&(o.nameTpl=i.first),e.iGM(i=e.CRH())&&(o.rollbackTpl=i.first)}},inputs:{snapshots:"snapshots",featuresName:"featuresName",poolName:"poolName",namespace:"namespace",mirroring:"mirroring",rbdName:"rbdName"},features:[e._Bn([ce.j]),e.TTD],decls:4,vars:5,consts:function(){let s;return s="You are about to rollback",[["columnMode","flex","selectionType","single",3,"data","columns","updateSelection"],[1,"table-actions",3,"permission","selection","tableActions"],["rollbackTpl",""],s]},template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("updateSelection",function(_){return o.updateSelection(_)}),e._UZ(1,"cd-table-actions",1),e.qZA(),e.YNc(2,R_,5,1,"ng-template",null,2,e.W1O)),2&t&&(e.Q6J("data",o.data)("columns",o.columns),e.xp6(1),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[W.a,Ee.K],styles:[""],changeDetection:0}),n})();var O_=p(71752),Lt=p(76317),A_=p(41039);const h_=["poolConfigurationSourceTpl"];function P_(n,s){1&n&&(e.ynx(0),e.tHW(1,3),e._UZ(2,"strong"),e.N_p(),e.BQk())}function I_(n,s){if(1&n&&(e.TgZ(0,"span"),e.TgZ(1,"span",38),e._uU(2),e.qZA(),e.qZA()),2&n){const t=s.$implicit;e.xp6(2),e.Oqu(t)}}function b_(n,s){if(1&n&&(e.TgZ(0,"span"),e.TgZ(1,"span",39),e.SDv(2,40),e.qZA(),e.qZA()),2&n){e.oxw(3);const t=e.MAs(1);e.xp6(1),e.Q6J("ngbTooltip",t)}}function N_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,t.selection.disk_usage)," ")}}function F_(n,s){if(1&n&&(e.TgZ(0,"span"),e.TgZ(1,"span",39),e.SDv(2,41),e.qZA(),e.qZA()),2&n){e.oxw(3);const t=e.MAs(1);e.xp6(1),e.Q6J("ngbTooltip",t)}}function D_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,t.selection.total_disk_usage)," ")}}function v_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw(4);e.xp6(1),e.hij("/",t.selection.parent.pool_namespace,"")}}function 
L_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.YNc(2,v_,2,1,"span",1),e._uU(3),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.Oqu(t.selection.parent.pool_name),e.xp6(1),e.Q6J("ngIf",t.selection.parent.pool_namespace),e.xp6(1),e.AsE("/",t.selection.parent.image_name,"@",t.selection.parent.snap_name,"")}}function $_(n,s){1&n&&(e.TgZ(0,"span"),e._uU(1,"-"),e.qZA())}function Z_(n,s){if(1&n&&(e.TgZ(0,"table",17),e.TgZ(1,"tbody"),e.TgZ(2,"tr"),e.TgZ(3,"td",18),e.SDv(4,19),e.qZA(),e.TgZ(5,"td",20),e._uU(6),e.qZA(),e.qZA(),e.TgZ(7,"tr"),e.TgZ(8,"td",21),e.SDv(9,22),e.qZA(),e.TgZ(10,"td"),e._uU(11),e.qZA(),e.qZA(),e.TgZ(12,"tr"),e.TgZ(13,"td",21),e.SDv(14,23),e.qZA(),e.TgZ(15,"td"),e._uU(16),e.ALo(17,"empty"),e.qZA(),e.qZA(),e.TgZ(18,"tr"),e.TgZ(19,"td",21),e.SDv(20,24),e.qZA(),e.TgZ(21,"td"),e._uU(22),e.ALo(23,"cdDate"),e.qZA(),e.qZA(),e.TgZ(24,"tr"),e.TgZ(25,"td",21),e.SDv(26,25),e.qZA(),e.TgZ(27,"td"),e._uU(28),e.ALo(29,"dimlessBinary"),e.qZA(),e.qZA(),e.TgZ(30,"tr"),e.TgZ(31,"td",21),e.SDv(32,26),e.qZA(),e.TgZ(33,"td"),e._uU(34),e.ALo(35,"dimless"),e.qZA(),e.qZA(),e.TgZ(36,"tr"),e.TgZ(37,"td",21),e.SDv(38,27),e.qZA(),e.TgZ(39,"td"),e._uU(40),e.ALo(41,"dimlessBinary"),e.qZA(),e.qZA(),e.TgZ(42,"tr"),e.TgZ(43,"td",21),e.SDv(44,28),e.qZA(),e.TgZ(45,"td"),e.YNc(46,I_,3,1,"span",29),e.qZA(),e.qZA(),e.TgZ(47,"tr"),e.TgZ(48,"td",21),e.SDv(49,30),e.qZA(),e.TgZ(50,"td"),e.YNc(51,b_,3,1,"span",1),e.YNc(52,N_,3,3,"span",1),e.qZA(),e.qZA(),e.TgZ(53,"tr"),e.TgZ(54,"td",21),e.SDv(55,31),e.qZA(),e.TgZ(56,"td"),e.YNc(57,F_,3,1,"span",1),e.YNc(58,D_,3,3,"span",1),e.qZA(),e.qZA(),e.TgZ(59,"tr"),e.TgZ(60,"td",21),e.SDv(61,32),e.qZA(),e.TgZ(62,"td"),e._uU(63),e.ALo(64,"dimlessBinary"),e.qZA(),e.qZA(),e.TgZ(65,"tr"),e.TgZ(66,"td",21),e.SDv(67,33),e.qZA(),e.TgZ(68,"td"),e._uU(69),e.qZA(),e.qZA(),e.TgZ(70,"tr"),e.TgZ(71,"td",21),e.SDv(72,34),e.qZA(),e.TgZ(73,"td"),e.YNc(74,L_,4,4,"span",1),e.YNc(75,$_,2,0,"span",1),e.qZA(),e.qZA(),e.TgZ(76,"tr"),e.TgZ(77,"td",21),e.SDv(78,35),e.qZA(),e.TgZ(79,"td"),e._
uU(80),e.qZA(),e.qZA(),e.TgZ(81,"tr"),e.TgZ(82,"td",21),e.SDv(83,36),e.qZA(),e.TgZ(84,"td"),e._uU(85),e.qZA(),e.qZA(),e.TgZ(86,"tr"),e.TgZ(87,"td",21),e.SDv(88,37),e.qZA(),e.TgZ(89,"td"),e._uU(90),e.qZA(),e.qZA(),e.qZA(),e.qZA()),2&n){const t=e.oxw(2);e.xp6(6),e.Oqu(t.selection.name),e.xp6(5),e.Oqu(t.selection.pool_name),e.xp6(5),e.Oqu(e.lcZ(17,19,t.selection.data_pool)),e.xp6(6),e.Oqu(e.lcZ(23,21,t.selection.timestamp)),e.xp6(6),e.Oqu(e.lcZ(29,23,t.selection.size)),e.xp6(6),e.Oqu(e.lcZ(35,25,t.selection.num_objs)),e.xp6(6),e.Oqu(e.lcZ(41,27,t.selection.obj_size)),e.xp6(6),e.Q6J("ngForOf",t.selection.features_name),e.xp6(5),e.Q6J("ngIf",-1===(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(1),e.Q6J("ngIf",-1!==(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(5),e.Q6J("ngIf",-1===(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(1),e.Q6J("ngIf",-1!==(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(5),e.Oqu(e.lcZ(64,29,t.selection.stripe_unit)),e.xp6(6),e.Oqu(t.selection.stripe_count),e.xp6(5),e.Q6J("ngIf",t.selection.parent),e.xp6(1),e.Q6J("ngIf",!t.selection.parent),e.xp6(5),e.Oqu(t.selection.block_name_prefix),e.xp6(5),e.Oqu(t.selection.order),e.xp6(5),e.Oqu(t.selection.image_format)}}function B_(n,s){if(1&n&&e._UZ(0,"cd-rbd-snapshot-list",42),2&n){const t=e.oxw(2);e.Q6J("snapshots",t.selection.snapshots)("featuresName",t.selection.features_name)("poolName",t.selection.pool_name)("namespace",t.selection.namespace)("mirroring",t.selection.mirror_mode)("rbdName",t.selection.name)}}function G_(n,s){if(1&n&&e._UZ(0,"cd-rbd-configuration-table",43),2&n){const t=e.oxw(2);e.Q6J("data",t.selection.configuration)}}function y_(n,s){if(1&n&&e._UZ(0,"cd-grafana",44),2&n){const t=e.oxw(2);e.Q6J("grafanaPath",t.rbdDashboardUrl)("type","metrics")}}function 
x_(n,s){if(1&n&&(e.ynx(0),e.TgZ(1,"ul",4,5),e.TgZ(3,"li",6),e.TgZ(4,"a",7),e.SDv(5,8),e.qZA(),e.YNc(6,Z_,91,31,"ng-template",9),e.qZA(),e.TgZ(7,"li",10),e.TgZ(8,"a",7),e.SDv(9,11),e.qZA(),e.YNc(10,B_,1,6,"ng-template",9),e.qZA(),e.TgZ(11,"li",12),e.TgZ(12,"a",7),e.SDv(13,13),e.qZA(),e.YNc(14,G_,1,1,"ng-template",9),e.qZA(),e.TgZ(15,"li",14),e.TgZ(16,"a",7),e.SDv(17,15),e.qZA(),e.YNc(18,y_,1,2,"ng-template",9),e.qZA(),e.qZA(),e._UZ(19,"div",16),e.BQk()),2&n){const t=e.MAs(2);e.xp6(19),e.Q6J("ngbNavOutlet",t)}}function w_(n,s){1&n&&(e.ynx(0),e.TgZ(1,"cd-alert-panel",45),e.SDv(2,46),e.qZA(),e.BQk())}function q_(n,s){1&n&&(e.ynx(0),e.TgZ(1,"strong",49),e.SDv(2,50),e.qZA(),e.BQk())}function H_(n,s){1&n&&(e.TgZ(0,"span",51),e.SDv(1,52),e.qZA())}function k_(n,s){if(1&n&&(e.YNc(0,q_,3,0,"ng-container",47),e.YNc(1,H_,2,0,"ng-template",null,48,e.W1O)),2&n){const t=s.value,o=e.MAs(2);e.Q6J("ngIf",+t)("ngIfElse",o)}}let K_=(()=>{class n{ngOnChanges(){this.selection&&(this.rbdDashboardUrl=`rbd-details?var-Pool=${this.selection.pool_name}&var-Image=${this.selection.name}`)}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-details"]],viewQuery:function(t,o){if(1&t&&(e.Gf(h_,7),e.Gf(N.Pz,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.poolConfigurationSourceTpl=i.first),e.iGM(i=e.CRH())&&(o.nav=i.first)}},inputs:{selection:"selection",images:"images"},features:[e.TTD],decls:6,vars:2,consts:function(){let s,t,o,i,_,r,c,d,g,f,I,P,Z,B,Q,Y,ee,te,w,_e,ae,M,ge,ue,me,Te,fe,Ce;return s="Only available for RBD images with " + "\ufffd#2\ufffd" + "fast-diff" + "\ufffd/#2\ufffd" + " enabled",t="Details",o="Snapshots",i="Configuration",_="Performance",r="Name",c="Pool",d="Data Pool",g="Created",f="Size",I="Objects",P="Object size",Z="Features",B="Provisioned",Q="Total provisioned",Y="Striping unit",ee="Striping count",te="Parent",w="Block name prefix",_e="Order",ae="Format Version",M="N/A",ge="N/A",ue="Information can not be displayed for RBD in status 
'Removing'.",me="This setting overrides the global value",Te="Image",fe="This is the global value. No value for this option has been set for this image.",Ce="Global",[["usageNotAvailableTooltipTpl",""],[4,"ngIf"],["poolConfigurationSourceTpl",""],s,["ngbNav","","cdStatefulTab","rbd-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],t,["ngbNavContent",""],["ngbNavItem","snapshots"],o,["ngbNavItem","configuration"],i,["ngbNavItem","performance"],_,[3,"ngbNavOutlet"],[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],r,[1,"w-75"],[1,"bold"],c,d,g,f,I,P,Z,[4,"ngFor","ngForOf"],B,Q,Y,ee,te,w,_e,ae,[1,"badge","badge-dark","mr-2"],["placement","top",1,"form-text","text-muted",3,"ngbTooltip"],M,ge,[3,"snapshots","featuresName","poolName","namespace","mirroring","rbdName"],[3,"data"],["uid","YhCYGcuZz","grafanaStyle","one",3,"grafanaPath","type"],["type","warning"],ue,[4,"ngIf","ngIfElse"],["global",""],["ngbTooltip",me],Te,["ngbTooltip",fe],Ce]},template:function(t,o){1&t&&(e.YNc(0,P_,3,0,"ng-template",null,0,e.W1O),e.YNc(2,x_,20,1,"ng-container",1),e.YNc(3,w_,3,0,"ng-container",1),e.YNc(4,k_,3,2,"ng-template",null,2,e.W1O)),2&t&&(e.xp6(2),e.Q6J("ngIf",o.selection&&"REMOVING"!==o.selection.source),e.xp6(1),e.Q6J("ngIf",o.selection&&"REMOVING"===o.selection.source))},directives:[l.O5,N.Pz,Mt.m,N.nv,N.Vx,N.uN,N.tO,l.sg,N._L,M_,O_.P,Lt.F,Ct.G],pipes:[A_.W,Ze.N,Le.$,ot.n],styles:[""]}),n})();const X_=["usageTpl"],Q_=["parentTpl"],z_=["nameTpl"],J_=["mirroringTpl"],Y_=["flattenTpl"],V_=["deleteTpl"],U_=["removingStatTpl"],j_=["provisionedNotAvailableTooltipTpl"],W_=["totalProvisionedNotAvailableTooltipTpl"];function ea(n,s){1&n&&e._UZ(0,"div",12),2&n&&e.Q6J("innerHtml","Only available for RBD images with fast-diff enabled",e.oJD)}function ta(n,s){if(1&n&&(e.TgZ(0,"span",15),e.SDv(1,16),e.qZA()),2&n){const t=e.oxw(2);e.Q6J("ngbTooltip",t.usageNotAvailableTooltipTpl)}}function 
oa(n,s){if(1&n&&(e.SDv(0,17),e.ALo(1,"dimlessBinary")),2&n){const t=e.oxw().row;e.xp6(1),e.pQV(e.lcZ(1,1,t.disk_usage)),e.QtT(0)}}function na(n,s){if(1&n&&(e.YNc(0,ta,2,1,"span",13),e.YNc(1,oa,2,3,"ng-template",null,14,e.W1O)),2&n){const t=s.row,o=e.MAs(2);e.Q6J("ngIf",null===t.disk_usage&&!t.features_name.includes("fast-diff"))("ngIfElse",o)}}function ia(n,s){if(1&n&&(e.TgZ(0,"span",15),e.SDv(1,19),e.qZA()),2&n){const t=e.oxw(2);e.Q6J("ngbTooltip",t.usageNotAvailableTooltipTpl)}}function sa(n,s){if(1&n&&(e.SDv(0,20),e.ALo(1,"dimlessBinary")),2&n){const t=e.oxw().row;e.xp6(1),e.pQV(e.lcZ(1,1,t.total_disk_usage)),e.QtT(0)}}function _a(n,s){if(1&n&&(e.YNc(0,ia,2,1,"span",13),e.YNc(1,sa,2,3,"ng-template",null,18,e.W1O)),2&n){const t=s.row,o=e.MAs(2);e.Q6J("ngIf",null===t.total_disk_usage&&!t.features_name.includes("fast-diff"))("ngIfElse",o)}}function aa(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw(2).value;e.xp6(1),e.hij("/",t.pool_namespace,"")}}function ra(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.YNc(2,aa,2,1,"span",21),e._uU(3),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t.pool_name),e.xp6(1),e.Q6J("ngIf",t.pool_namespace),e.xp6(1),e.AsE("/",t.image_name,"@",t.snap_name,"")}}function la(n,s){1&n&&(e.TgZ(0,"span"),e._uU(1,"-"),e.qZA())}function ca(n,s){if(1&n&&(e.YNc(0,ra,4,4,"span",21),e.YNc(1,la,2,0,"span",21)),2&n){const t=s.value;e.Q6J("ngIf",t),e.xp6(1),e.Q6J("ngIf",!t)}}function da(n,s){if(1&n&&(e.TgZ(0,"span",26),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t[0])}}function pa(n,s){if(1&n&&(e.TgZ(0,"span",27),e.ALo(1,"cdDate"),e._uU(2),e.qZA()),2&n){const t=e.oxw().value;e.Q6J("ngbTooltip","Next scheduled snapshot on "+e.lcZ(1,2,t[2])),e.xp6(2),e.Oqu(t[1])}}function ga(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,28),e.qZA())}function ua(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,29),e.qZA())}function ma(n,s){if(1&n&&(e.TgZ(0,"span",26),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t)}}function 
Ta(n,s){if(1&n&&(e.YNc(0,da,2,1,"span",22),e._uU(1,"\xa0 "),e.YNc(2,pa,3,4,"span",23),e.YNc(3,ga,2,0,"span",24),e.YNc(4,ua,2,0,"span",24),e.YNc(5,ma,2,1,"ng-template",null,25,e.W1O)),2&n){const t=s.value,o=s.row,i=e.MAs(6);e.Q6J("ngIf",3===t.length)("ngIfElse",i),e.xp6(2),e.Q6J("ngIf",3===t.length),e.xp6(1),e.Q6J("ngIf",!0===o.primary),e.xp6(1),e.Q6J("ngIf",!1===o.primary)}}function fa(n,s){if(1&n&&(e._uU(0," You are about to flatten "),e.TgZ(1,"strong"),e._uU(2),e.qZA(),e._uU(3,". "),e._UZ(4,"br"),e._UZ(5,"br"),e._uU(6," All blocks will be copied from parent "),e.TgZ(7,"strong"),e._uU(8),e.qZA(),e._uU(9," to child "),e.TgZ(10,"strong"),e._uU(11),e.qZA(),e._uU(12,".\n")),2&n){const t=s.$implicit;e.xp6(2),e.Oqu(t.child),e.xp6(6),e.Oqu(t.parent),e.xp6(3),e.Oqu(t.child)}}function Ca(n,s){if(1&n&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.xp6(1),e.Oqu(t)}}function Sa(n,s){if(1&n&&(e.ynx(0),e.TgZ(1,"span"),e.SDv(2,33),e.qZA(),e.TgZ(3,"ul"),e.YNc(4,Ca,2,1,"li",34),e.qZA(),e.BQk()),2&n){const t=e.oxw(2).snapshots;e.xp6(4),e.Q6J("ngForOf",t)}}function Ea(n,s){if(1&n&&(e.TgZ(0,"div",31),e.TgZ(1,"span"),e.SDv(2,32),e.qZA(),e._UZ(3,"br"),e.YNc(4,Sa,5,1,"ng-container",21),e.qZA()),2&n){const t=e.oxw().snapshots;e.xp6(4),e.Q6J("ngIf",t.length>0)}}function Ra(n,s){1&n&&e.YNc(0,Ea,5,1,"div",30),2&n&&e.Q6J("ngIf",s.hasSnapshots)}const Ma=function(n,s){return[n,s]};function Oa(n,s){if(1&n&&e._UZ(0,"i",36),2&n){const t=e.oxw(2);e.Q6J("ngClass",e.WLB(1,Ma,t.icons.spinner,t.icons.spin))}}function Aa(n,s){if(1&n&&(e.TgZ(0,"span",36),e._uU(1),e.qZA()),2&n){const t=e.oxw(),o=t.column,i=t.row;e.Q6J("ngClass",null!=o&&null!=o.customTemplateConfig&&o.customTemplateConfig.executingClass?o.customTemplateConfig.executingClass:"text-muted italic"),e.xp6(1),e.hij(" (",i.cdExecuting,") ")}}function ha(n,s){if(1&n&&e._UZ(0,"i",38),2&n){const t=e.oxw(2);e.Gre("",t.icons.warning," warn")}}function 
Pa(n,s){if(1&n&&(e.YNc(0,Oa,1,4,"i",35),e.TgZ(1,"span",36),e._uU(2),e.qZA(),e.YNc(3,Aa,2,2,"span",35),e.YNc(4,ha,1,3,"i",37)),2&n){const t=s.column,o=s.value,i=s.row;e.Q6J("ngIf",i.cdExecuting),e.xp6(1),e.Q6J("ngClass",null==t||null==t.customTemplateConfig?null:t.customTemplateConfig.valueClass),e.xp6(1),e.hij(" ",o," "),e.xp6(1),e.Q6J("ngIf",i.cdExecuting),e.xp6(1),e.Q6J("ngIf",i.source&&"REMOVING"===i.source)}}let ba=(()=>{class n extends mt.o{constructor(t,o,i,_,r,c,d,g,f){super(),this.authStorageService=t,this.rbdService=o,this.dimlessBinaryPipe=i,this.dimlessPipe=_,this.modalService=r,this.taskWrapper=c,this.taskListService=d,this.urlBuilder=g,this.actionLabels=f,this.tableStatus=new bt.c("light"),this.selection=new Se.r,this.icons=T.P,this.count=0,this.tableContext=null,this.builders={"rbd/create":M=>this.createRbdFromTask(M.pool_name,M.namespace,M.image_name),"rbd/delete":M=>this.createRbdFromTaskImageSpec(M.image_spec),"rbd/clone":M=>this.createRbdFromTask(M.child_pool_name,M.child_namespace,M.child_image_name),"rbd/copy":M=>this.createRbdFromTask(M.dest_pool_name,M.dest_namespace,M.dest_image_name)},this.permission=this.authStorageService.getPermissions().rbdImage;const I=()=>this.selection.first()&&new 
L.N(this.selection.first().pool_name,this.selection.first().namespace,this.selection.first().name).toStringEncoded();this.tableActions=[{permission:"create",icon:T.P.add,routerLink:()=>this.urlBuilder.getCreate(),canBePrimary:M=>!M.hasSingleSelection,name:this.actionLabels.CREATE},{permission:"update",icon:T.P.edit,routerLink:()=>this.urlBuilder.getEdit(I()),name:this.actionLabels.EDIT,disable:M=>this.getRemovingStatusDesc(M)||this.getInvalidNameDisable(M)},{permission:"create",canBePrimary:M=>M.hasSingleSelection,disable:M=>this.getRemovingStatusDesc(M)||this.getInvalidNameDisable(M)||!!M.first().cdExecuting,icon:T.P.copy,routerLink:()=>`/block/rbd/copy/${I()}`,name:this.actionLabels.COPY},{permission:"update",disable:M=>this.getRemovingStatusDesc(M)||this.getInvalidNameDisable(M)||M.first().cdExecuting||!M.first().parent,icon:T.P.flatten,click:()=>this.flattenRbdModal(),name:this.actionLabels.FLATTEN},{permission:"update",icon:T.P.refresh,click:()=>this.resyncRbdModal(),name:this.actionLabels.RESYNC,disable:M=>this.getResyncDisableDesc(M)},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteRbdModal(),name:this.actionLabels.DELETE,disable:M=>this.getDeleteDisableDesc(M)},{permission:"delete",icon:T.P.trash,click:()=>this.trashRbdModal(),name:this.actionLabels.TRASH,disable:M=>this.getRemovingStatusDesc(M)||this.getInvalidNameDisable(M)||M.first().image_format===Pe.V1},{permission:"update",icon:T.P.edit,click:()=>this.removeSchedulingModal(),name:this.actionLabels.REMOVE_SCHEDULING,disable:M=>this.getRemovingStatusDesc(M)||this.getInvalidNameDisable(M)||void 
0===M.first().schedule_info},{permission:"update",icon:T.P.edit,click:()=>this.actionPrimary(!0),name:this.actionLabels.PROMOTE,visible:()=>null!=this.selection.first()&&!this.selection.first().primary},{permission:"update",icon:T.P.edit,click:()=>this.actionPrimary(!1),name:this.actionLabels.DEMOTE,visible:()=>null!=this.selection.first()&&this.selection.first().primary}]}createRbdFromTaskImageSpec(t){const o=L.N.fromString(t);return this.createRbdFromTask(o.poolName,o.namespace,o.imageName)}createRbdFromTask(t,o,i){const _=new is;return _.id="-1",_.unique_id="-1",_.name=i,_.namespace=o,_.pool_name=t,_.image_format=Pe.V2,_}ngOnInit(){this.columns=[{name:"Name",prop:"name",flexGrow:2,cellTemplate:this.removingStatTpl},{name:"Pool",prop:"pool_name",flexGrow:2},{name:"Namespace",prop:"namespace",flexGrow:2},{name:"Size",prop:"size",flexGrow:1,cellClass:"text-right",sortable:!1,pipe:this.dimlessBinaryPipe},{name:"Objects",prop:"num_objs",flexGrow:1,cellClass:"text-right",sortable:!1,pipe:this.dimlessPipe},{name:"Object size",prop:"obj_size",flexGrow:1,cellClass:"text-right",sortable:!1,pipe:this.dimlessBinaryPipe},{name:"Provisioned",prop:"disk_usage",cellClass:"text-center",flexGrow:1,pipe:this.dimlessBinaryPipe,sortable:!1,cellTemplate:this.provisionedNotAvailableTooltipTpl},{name:"Total provisioned",prop:"total_disk_usage",cellClass:"text-center",flexGrow:1,pipe:this.dimlessBinaryPipe,sortable:!1,cellTemplate:this.totalProvisionedNotAvailableTooltipTpl},{name:"Parent",prop:"parent",flexGrow:2,sortable:!1,cellTemplate:this.parentTpl},{name:"Mirroring",prop:"mirror_mode",flexGrow:3,sortable:!1,cellTemplate:this.mirroringTpl}],this.taskListService.init(i=>this.getRbdImages(i),i=>this.prepareResponse(i),i=>this.images=i,()=>this.onFetchError(),i=>["rbd/clone","rbd/copy","rbd/create","rbd/delete","rbd/edit","rbd/flatten","rbd/trash/move"].includes(i.name),(i,_)=>{let r;switch(_.name){case"rbd/copy":r=new 
L.N(_.metadata.dest_pool_name,_.metadata.dest_namespace,_.metadata.dest_image_name).toString();break;case"rbd/clone":r=new L.N(_.metadata.child_pool_name,_.metadata.child_namespace,_.metadata.child_image_name).toString();break;case"rbd/create":r=new L.N(_.metadata.pool_name,_.metadata.namespace,_.metadata.image_name).toString();break;default:r=_.metadata.image_spec}return r===new L.N(i.pool_name,i.namespace,i.name).toString()},this.builders)}onFetchError(){this.table.reset(),this.tableStatus=new bt.c("danger")}getRbdImages(t){var o;return null!==t&&(this.tableContext=t),null==this.tableContext&&(this.tableContext=new A.E(()=>{})),this.rbdService.list(null===(o=this.tableContext)||void 0===o?void 0:o.toParams())}prepareResponse(t){let o=[];return t.forEach(i=>{o=o.concat(i.value)}),o.forEach(i=>{if(void 0!==i.schedule_info){let _=[];const r="scheduled";let c=+new Date(i.schedule_info.schedule_time);const d=(new Date).getTimezoneOffset();c+=6e4*Math.abs(d),_.push(i.mirror_mode,r,c),i.mirror_mode=_,_=[]}}),this.count=o.length>0?n_.getCount(t[0]):0,o}updateSelection(t){this.selection=t}deleteRbdModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().name,_=new L.N(t,o,i);this.modalRef=this.modalService.show(pe.M,{itemDescription:"RBD",itemNames:[_],bodyTemplate:this.deleteTpl,bodyContext:{hasSnapshots:this.hasSnapshots(),snapshots:this.listProtectedSnapshots()},submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/delete",{image_spec:_.toString()}),call:this.rbdService.delete(_)})})}resyncRbdModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().name,_=new L.N(t,o,i);this.modalRef=this.modalService.show(pe.M,{itemDescription:"RBD",itemNames:[_],actionDescription:"resync",submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new 
R.R("rbd/edit",{image_spec:_.toString()}),call:this.rbdService.update(_,{resync:!0})})})}trashRbdModal(){const t={poolName:this.selection.first().pool_name,namespace:this.selection.first().namespace,imageName:this.selection.first().name,hasSnapshots:this.hasSnapshots()};this.modalRef=this.modalService.show(c_,t)}flattenRbd(t){this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/flatten",{image_spec:t.toString()}),call:this.rbdService.flatten(t)}).subscribe({complete:()=>{this.modalRef.close()}})}flattenRbdModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().name,_=this.selection.first().parent,r=new L.N(_.pool_name,_.pool_namespace,_.image_name),c=new L.N(t,o,i),d={titleText:"RBD flatten",buttonText:"Flatten",bodyTpl:this.flattenTpl,bodyData:{parent:`${r}@${_.snap_name}`,child:c.toString()},onSubmit:()=>{this.flattenRbd(c)}};this.modalRef=this.modalService.show(Nt.Y,d)}editRequest(){const t=new _t;return t.remove_scheduling=!t.remove_scheduling,t}removeSchedulingModal(){const t=this.selection.first().name,o=new L.N(this.selection.first().pool_name,this.selection.first().namespace,this.selection.first().name);this.modalRef=this.modalService.show(pe.M,{actionDescription:"remove scheduling on",itemDescription:"image",itemNames:[`${t}`],submitActionObservable:()=>new Rt.y(i=>{this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/edit",{image_spec:o.toString()}),call:this.rbdService.update(o,this.editRequest())}).subscribe({error:_=>i.error(_),complete:()=>{this.modalRef.close()}})})})}actionPrimary(t){const o=new _t;o.primary=t;const i=new L.N(this.selection.first().pool_name,this.selection.first().namespace,this.selection.first().name);this.taskWrapper.wrapTaskAroundCall({task:new 
R.R("rbd/edit",{image_spec:i.toString()}),call:this.rbdService.update(i,o)}).subscribe()}hasSnapshots(){return(this.selection.first().snapshots||[]).length>0}hasClonedSnapshots(t){return(t.snapshots||[]).some(i=>i.children&&i.children.length>0)}listProtectedSnapshots(){return this.selection.first().snapshots.reduce((i,_)=>(_.is_protected&&i.push(_.name),i),[])}getDeleteDisableDesc(t){const o=t.first();return o&&this.hasClonedSnapshots(o)?"This RBD has cloned snapshots. Please delete related RBDs before deleting this RBD.":this.getInvalidNameDisable(t)||this.hasClonedSnapshots(t.first())}getResyncDisableDesc(t){const o=t.first();return o&&this.imageIsPrimary(o)?"Primary RBD images cannot be resynced":this.getInvalidNameDisable(t)}imageIsPrimary(t){return t.primary}getInvalidNameDisable(t){var o;const i=t.first();return(null===(o=null==i?void 0:i.name)||void 0===o?void 0:o.match(/[@/]/))?"This RBD image has an invalid name and can't be managed by ceph.":!t.first()||!t.hasSingleSelection}getRemovingStatusDesc(t){const o=t.first();return"REMOVING"===(null==o?void 0:o.source)&&"Action not possible for an RBD in status 'Removing'"}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(q),e.Y36(Le.$),e.Y36(ot.n),e.Y36(de.Z),e.Y36(u.P),e.Y36(ce.j),e.Y36(at.F),e.Y36(v.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(W.a,7),e.Gf(X_,5),e.Gf(Q_,7),e.Gf(z_,5),e.Gf(J_,7),e.Gf(Y_,7),e.Gf(V_,7),e.Gf(U_,7),e.Gf(j_,7),e.Gf(W_,7)),2&t){let 
i;e.iGM(i=e.CRH())&&(o.table=i.first),e.iGM(i=e.CRH())&&(o.usageTpl=i.first),e.iGM(i=e.CRH())&&(o.parentTpl=i.first),e.iGM(i=e.CRH())&&(o.nameTpl=i.first),e.iGM(i=e.CRH())&&(o.mirroringTpl=i.first),e.iGM(i=e.CRH())&&(o.flattenTpl=i.first),e.iGM(i=e.CRH())&&(o.deleteTpl=i.first),e.iGM(i=e.CRH())&&(o.removingStatTpl=i.first),e.iGM(i=e.CRH())&&(o.provisionedNotAvailableTooltipTpl=i.first),e.iGM(i=e.CRH())&&(o.totalProvisionedNotAvailableTooltipTpl=i.first)}},features:[e._Bn([ce.j,{provide:at.F,useValue:new at.F("block/rbd")}]),e.qOj],decls:21,vars:13,consts:function(){let s,t,o,i,_,r,c,d,g;return s="N/A",t="" + "\ufffd0\ufffd" + "",o="N/A",i="" + "\ufffd0\ufffd" + "",_="primary",r="secondary",c="Deleting this image will also delete all its snapshots.",d="The following snapshots are currently protected and will be removed:",g="RBD in status 'Removing'",[["columnMode","flex","identifier","unique_id","forceIdentifier","true","selectionType","single",3,"data","columns","searchableObjects","serverSide","count","hasDetails","status","maxLimit","autoReload","fetchData","setExpandedRow","updateSelection"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["scheduleStatus",""],["provisionedNotAvailableTooltipTpl",""],["totalProvisionedNotAvailableTooltipTpl",""],["parentTpl",""],["mirroringTpl",""],["flattenTpl",""],["deleteTpl",""],["removingStatTpl",""],[3,"innerHtml"],["placement","top",3,"ngbTooltip",4,"ngIf","ngIfElse"],["provisioned",""],["placement","top",3,"ngbTooltip"],s,t,["totalProvisioned",""],o,i,[4,"ngIf"],["class","badge badge-info",4,"ngIf","ngIfElse"],["class","badge badge-info",3,"ngbTooltip",4,"ngIf"],["class","badge badge-info",4,"ngIf"],["probb",""],[1,"badge","badge-info"],[1,"badge","badge-info",3,"ngbTooltip"],_,r,["class","alert 
alert-warning","role","alert",4,"ngIf"],["role","alert",1,"alert","alert-warning"],c,d,[4,"ngFor","ngForOf"],[3,"ngClass",4,"ngIf"],[3,"ngClass"],["title",g,3,"class",4,"ngIf"],["title",g]]},template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0,1),e.NdJ("fetchData",function(_){return o.taskListService.fetch(_)})("setExpandedRow",function(_){return o.setExpandedRow(_)})("updateSelection",function(_){return o.updateSelection(_)}),e._UZ(3,"cd-table-actions",2),e._UZ(4,"cd-rbd-details",3),e.qZA(),e.YNc(5,ea,1,1,"ng-template",null,4,e.W1O),e.YNc(7,na,3,2,"ng-template",null,5,e.W1O),e.YNc(9,_a,3,2,"ng-template",null,6,e.W1O),e.YNc(11,ca,2,2,"ng-template",null,7,e.W1O),e.YNc(13,Ta,7,5,"ng-template",null,8,e.W1O),e.YNc(15,fa,13,3,"ng-template",null,9,e.W1O),e.YNc(17,Ra,1,1,"ng-template",null,10,e.W1O),e.YNc(19,Pa,5,5,"ng-template",null,11,e.W1O)),2&t&&(e.xp6(1),e.Q6J("data",o.images)("columns",o.columns)("searchableObjects",!0)("serverSide",!0)("count",o.count)("hasDetails",!0)("status",o.tableStatus)("maxLimit",25)("autoReload",-1),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions),e.xp6(1),e.Q6J("selection",o.expandedRow))},directives:[We,W.a,Ee.K,K_,l.O5,N._L,l.sg,l.mk],pipes:[Le.$,Ze.N],styles:[".warn[_ngcontent-%COMP%]{color:#ffc200}"]}),n})();function Na(n,s){1&n&&e._UZ(0,"input",19)}function Fa(n,s){1&n&&(e.TgZ(0,"option",23),e.SDv(1,24),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Da(n,s){1&n&&(e.TgZ(0,"option",23),e.SDv(1,25),e.qZA()),2&n&&e.Q6J("ngValue",null)}function va(n,s){1&n&&(e.TgZ(0,"option",23),e.SDv(1,26),e.qZA()),2&n&&e.Q6J("ngValue",null)}function La(n,s){if(1&n&&(e.TgZ(0,"option",27),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function $a(n,s){if(1&n&&(e.TgZ(0,"select",20),e.YNc(1,Fa,2,1,"option",21),e.YNc(2,Da,2,1,"option",21),e.YNc(3,va,2,1,"option",21),e.YNc(4,La,2,2,"option",22),e.qZA()),2&n){const 
t=e.oxw();e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.pools)}}function Za(n,s){1&n&&(e.TgZ(0,"span",28),e.SDv(1,29),e.qZA())}function Ba(n,s){1&n&&(e.TgZ(0,"span",28),e.SDv(1,30),e.qZA())}function Ga(n,s){1&n&&(e.TgZ(0,"span",28),e.SDv(1,31),e.qZA())}let ya=(()=>{class n{constructor(t,o,i,_,r,c){this.activeModal=t,this.actionLabels=o,this.authStorageService=i,this.notificationService=_,this.poolService=r,this.rbdService=c,this.pools=null,this.editing=!1,this.poolPermission=this.authStorageService.getPermissions().pool,this.createForm()}createForm(){this.namespaceForm=new x.d({pool:new a.NI(""),namespace:new a.NI("")},this.validator(),this.asyncValidator())}validator(){return t=>{const o=t.get("pool"),i=t.get("namespace");let _=null;o.value||(_={required:!0}),o.setErrors(_);let r=null;return i.value||(r={required:!0}),i.setErrors(r),null}}asyncValidator(){return t=>new Promise(o=>{const i=t.get("pool"),_=t.get("namespace");this.rbdService.listNamespaces(i.value).subscribe(r=>{if(r.some(c=>c.namespace===_.value)){const c={namespaceExists:!0};_.setErrors(c),o(c)}else o(null)})})}ngOnInit(){this.onSubmit=new vt.xQ,this.poolPermission.read&&this.poolService.list(["pool_name","type","application_metadata"]).then(t=>{const o=[];for(const i of t)this.rbdService.isRBDPool(i)&&"replicated"===i.type&&o.push(i);if(this.pools=o,1===this.pools.length){const i=this.pools[0].pool_name;this.namespaceForm.get("pool").setValue(i)}})}submit(){const t=this.namespaceForm.getValue("pool"),o=this.namespaceForm.getValue("namespace"),i=new R.R;i.name="rbd/namespace/create",i.metadata={pool:t,namespace:o},this.rbdService.createNamespace(t,o).toPromise().then(()=>{this.notificationService.show(tt.k.success,"Created namespace '" + t + "/" + o + 
"'"),this.activeModal.close(),this.onSubmit.next()}).catch(()=>{this.namespaceForm.setErrors({cdSubmitButton:!0})})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(N.Kz),e.Y36(v.p4),e.Y36(oe.j),e.Y36(ve.g),e.Y36(Ve.q),e.Y36(q))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-namespace-form-modal"]],decls:23,vars:9,consts:function(){let s,t,o,i,_,r,c,d,g;return s="Create Namespace",t="Pool",o="Name",i="Loading...",_="-- No rbd pools available --",r="-- Select a pool --",c="This field is required.",d="This field is required.",g="Namespace already exists.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","namespaceForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","pool",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["class","form-control","type","text","placeholder","Pool name...","id","pool","name","pool","formControlName","pool",4,"ngIf"],["id","pool","name","pool","class","form-control","formControlName","pool",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["for","namespace",1,"cd-col-form-label","required"],o,["type","text","placeholder","Namespace name...","id","namespace","name","namespace","formControlName","namespace","autofocus","",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["type","text","placeholder","Pool 
name...","id","pool","name","pool","formControlName","pool",1,"form-control"],["id","pool","name","pool","formControlName","pool",1,"form-control"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],[3,"ngValue"],i,_,r,[3,"value"],[1,"invalid-feedback"],c,d,g]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"div",7),e.TgZ(8,"label",8),e.SDv(9,9),e.qZA(),e.TgZ(10,"div",10),e.YNc(11,Na,1,0,"input",11),e.YNc(12,$a,5,4,"select",12),e.YNc(13,Za,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(14,"div",7),e.TgZ(15,"label",14),e.SDv(16,15),e.qZA(),e.TgZ(17,"div",10),e._UZ(18,"input",16),e.YNc(19,Ba,2,0,"span",13),e.YNc(20,Ga,2,0,"span",13),e.qZA(),e.qZA(),e.qZA(),e.TgZ(21,"div",17),e.TgZ(22,"cd-form-button-panel",18),e.NdJ("submitActionEvent",function(){return o.submit()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.namespaceForm),e.xp6(7),e.Q6J("ngIf",!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.namespaceForm.showError("pool",i,"required")),e.xp6(6),e.Q6J("ngIf",o.namespaceForm.showError("namespace",i,"required")),e.xp6(1),e.Q6J("ngIf",o.namespaceForm.showError("namespace",i,"namespaceExists")),e.xp6(2),e.Q6J("form",o.namespaceForm)("submitText",o.actionLabels.CREATE)}},directives:[C.z,a._Y,a.JL,a.sg,h.V,O.P,l.O5,$.o,a.Fj,k.b,a.JJ,a.u,Re.U,j.p,a.EJ,l.sg,a.YN,a.Kr],styles:[""]}),n})(),xa=(()=>{class n{constructor(t,o,i,_,r,c){this.authStorageService=t,this.rbdService=o,this.poolService=i,this.modalService=_,this.notificationService=r,this.actionLabels=c,this.selection=new 
Se.r,this.permission=this.authStorageService.getPermissions().rbdImage,this.tableActions=[{permission:"create",icon:T.P.add,click:()=>this.createModal(),name:this.actionLabels.CREATE},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteModal(),name:this.actionLabels.DELETE,disable:()=>this.getDeleteDisableDesc()}]}ngOnInit(){this.columns=[{name:"Namespace",prop:"namespace",flexGrow:1},{name:"Pool",prop:"pool",flexGrow:1},{name:"Total images",prop:"num_images",flexGrow:1}],this.refresh()}refresh(){this.poolService.list(["pool_name","type","application_metadata"]).then(t=>{t=t.filter(i=>this.rbdService.isRBDPool(i)&&"replicated"===i.type);const o=[];t.forEach(i=>{o.push(this.rbdService.listNamespaces(i.pool_name))}),o.length>0?(0,le.D)(o).subscribe(i=>{const _=[];for(let r=0;r{_.push({id:`${d}/${g.namespace}`,pool:d,namespace:g.namespace,num_images:g.num_images})})}this.namespaces=_}):this.namespaces=[]})}updateSelection(t){this.selection=t}createModal(){this.modalRef=this.modalService.show(ya),this.modalRef.componentInstance.onSubmit.subscribe(()=>{this.refresh()})}deleteModal(){const t=this.selection.first().pool,o=this.selection.first().namespace;this.modalRef=this.modalService.show(pe.M,{itemDescription:"Namespace",itemNames:[`${t}/${o}`],submitAction:()=>this.rbdService.deleteNamespace(t,o).subscribe(()=>{this.notificationService.show(tt.k.success,"Deleted namespace '" + t + "/" + o + "'"),this.modalRef.close(),this.refresh()},()=>{this.modalRef.componentInstance.stopLoadingSpinner()})})}getDeleteDisableDesc(){var t;const o=this.selection.first();return(null==o?void 0:o.num_images)>0?"Namespace contains images":!(null===(t=this.selection)||void 0===t?void 0:t.first())}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(oe.j),e.Y36(q),e.Y36(Ve.q),e.Y36(de.Z),e.Y36(ve.g),e.Y36(v.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-namespace-list"]],features:[e._Bn([ce.j])],decls:4,vars:5,consts:[["columnMode","flex","identifier","id","forceIdentifier","true","selectionType","single",3,"data","columns","fetchData","updateSelection"],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"]],template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()})("updateSelection",function(_){return o.updateSelection(_)}),e.TgZ(2,"div",1),e._UZ(3,"cd-table-actions",2),e.qZA(),e.qZA()),2&t&&(e.xp6(1),e.Q6J("data",o.namespaces)("columns",o.columns),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[We,W.a,Ee.K],styles:[""]}),n})(),wa=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-performance"]],decls:2,vars:2,consts:[["uid","41FrpeUiz","grafanaStyle","two",3,"grafanaPath","type"]],template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e._UZ(1,"cd-grafana",0)),2&t&&(e.xp6(1),e.Q6J("grafanaPath","rbd-overview?")("type","metrics"))},directives:[We,Lt.F],styles:[""]}),n})();var qa=p(91801);function Ha(n,s){1&n&&e._UZ(0,"input",15)}function ka(n,s){if(1&n&&(e.TgZ(0,"option",20),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function Ka(n,s){if(1&n&&(e.TgZ(0,"select",16),e.TgZ(1,"option",17),e.SDv(2,18),e.qZA(),e.YNc(3,ka,2,2,"option",19),e.qZA()),2&n){const t=e.oxw();e.xp6(3),e.Q6J("ngForOf",t.pools)}}let Xa=(()=>{class 
n{constructor(t,o,i,_,r,c,d){this.authStorageService=t,this.rbdService=o,this.activeModal=i,this.actionLabels=_,this.fb=r,this.poolService=c,this.taskWrapper=d,this.poolPermission=this.authStorageService.getPermissions().pool}createForm(){this.purgeForm=this.fb.group({poolName:""})}ngOnInit(){this.poolPermission.read&&this.poolService.list(["pool_name","application_metadata"]).then(t=>{this.pools=t.filter(o=>o.application_metadata.includes("rbd")).map(o=>o.pool_name)}),this.createForm()}purge(){const t=this.purgeForm.getValue("poolName")||"";this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/trash/purge",{pool_name:t}),call:this.rbdService.purgeTrash(t)}).subscribe({error:()=>{this.purgeForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(q),e.Y36(N.Kz),e.Y36(v.p4),e.Y36(rt.O),e.Y36(Ve.q),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-purge-modal"]],decls:18,vars:6,consts:function(){let s,t,o,i,_;return s="Purge Trash",t="To purge, select\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "All" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + "\xA0 or one pool and click\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "Purge" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ".\xA0",t=e.Zx4(t),o="Pool:",i="Pool 
name...",_="All",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","purgeForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],[1,"col-form-label","mx-auto"],o,["class","form-control","type","text","placeholder",i,"formControlName","poolName",4,"ngIf"],["id","poolName","name","poolName","class","form-control","formControlName","poolName",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["type","text","placeholder",i,"formControlName","poolName",1,"form-control"],["id","poolName","name","poolName","formControlName","poolName",1,"form-control"],["value",""],_,[3,"value",4,"ngFor","ngForOf"],[3,"value"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.tHW(8,7),e._UZ(9,"kbd"),e._UZ(10,"kbd"),e.N_p(),e.qZA(),e.TgZ(11,"div",8),e.TgZ(12,"label",9),e.SDv(13,10),e.qZA(),e.YNc(14,Ha,1,0,"input",11),e.YNc(15,Ka,4,1,"select",12),e.qZA(),e.qZA(),e.TgZ(16,"div",13),e.TgZ(17,"cd-form-button-panel",14),e.NdJ("submitActionEvent",function(){return o.purge()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.purgeForm),e.xp6(10),e.Q6J("ngIf",!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.poolPermission.read),e.xp6(2),e.Q6J("form",o.purgeForm)("submitText",o.actionLabels.PURGE))},directives:[C.z,a._Y,a.JL,a.sg,h.V,O.P,l.O5,j.p,$.o,a.Fj,k.b,a.JJ,a.u,a.EJ,a.YN,a.Kr,l.sg],styles:[""]}),n})();function Qa(n,s){1&n&&(e.TgZ(0,"span",15),e.SDv(1,16),e.qZA())}let za=(()=>{class n{constructor(t,o,i,_,r){this.rbdService=t,this.activeModal=o,this.actionLabels=i,this.fb=_,this.taskWrapper=r}ngOnInit(){this.imageSpec=new L.N(this.poolName,this.namespace,this.imageName).toString(),this.restoreForm=this.fb.group({name:this.imageName})}restore(){const t=this.restoreForm.getValue("name"),o=new 
L.N(this.poolName,this.namespace,this.imageId);this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/trash/restore",{image_id_spec:o.toString(),new_image_name:t}),call:this.rbdService.restoreTrash(o,t)}).subscribe({error:()=>{this.restoreForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(q),e.Y36(N.Kz),e.Y36(v.p4),e.Y36(rt.O),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-restore-modal"]],decls:18,vars:7,consts:function(){let s,t,o,i;return s="Restore Image",t="To restore\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "" + "\ufffd0\ufffd" + "@" + "\ufffd1\ufffd" + "" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ",\xA0 type the image's new name and click\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "Restore" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ".",t=e.Zx4(t),o="New Name",i="This field is required.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","restoreForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","name",1,"col-form-label"],o,["type","text","name","name","id","name","autocomplete","off","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],i]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.TgZ(6,"div",6),e.TgZ(7,"p"),e.tHW(8,7),e._UZ(9,"kbd"),e._UZ(10,"kbd"),e.N_p(),e.qZA(),e.TgZ(11,"div",8),e.TgZ(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,Qa,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(16,"div",13),e.TgZ(17,"cd-form-button-panel",14),e.NdJ("submitActionEvent",function(){return o.restore()}),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&t){const 
i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.restoreForm),e.xp6(6),e.pQV(o.imageSpec)(o.imageId),e.QtT(8),e.xp6(5),e.Q6J("ngIf",o.restoreForm.showError("name",i,"required")),e.xp6(2),e.Q6J("form",o.restoreForm)("submitText",o.actionLabels.RESTORE)}},directives:[C.z,a._Y,a.JL,a.sg,h.V,O.P,$.o,a.Fj,k.b,a.JJ,a.u,Re.U,l.O5,j.p],styles:[""]}),n})();const Ja=["expiresTpl"],Ya=["deleteTpl"],Va=function(n){return[n]};function Ua(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"button",6),e.NdJ("click",function(){return e.CHM(t),e.oxw().purgeModal()}),e._UZ(1,"i",7),e.ynx(2),e.SDv(3,8),e.BQk(),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("disabled",t.disablePurgeBtn),e.xp6(1),e.Q6J("ngClass",e.VKq(2,Va,t.icons.destroy))}}function ja(n,s){1&n&&(e.ynx(0),e.SDv(1,10),e.BQk())}function Wa(n,s){1&n&&(e.ynx(0),e.SDv(1,11),e.BQk())}function er(n,s){if(1&n&&(e.YNc(0,ja,2,0,"ng-container",9),e.YNc(1,Wa,2,0,"ng-container",9),e._uU(2),e.ALo(3,"cdDate")),2&n){const t=s.row,o=s.value;e.Q6J("ngIf",t.cdIsExpired),e.xp6(1),e.Q6J("ngIf",!t.cdIsExpired),e.xp6(1),e.hij(" ",e.lcZ(3,3,o),"\n")}}function tr(n,s){if(1&n&&(e.TgZ(0,"p",13),e.TgZ(1,"strong"),e.ynx(2),e.SDv(3,14),e.ALo(4,"cdDate"),e.BQk(),e.qZA(),e.qZA()),2&n){const t=e.oxw().expiresAt;e.xp6(4),e.pQV(e.lcZ(4,1,t)),e.QtT(3)}}function or(n,s){1&n&&e.YNc(0,tr,5,3,"p",12),2&n&&e.Q6J("ngIf",!s.isExpired)}let nr=(()=>{class n{constructor(t,o,i,_,r,c,d){this.authStorageService=t,this.rbdService=o,this.modalService=i,this.cdDatePipe=_,this.taskListService=r,this.taskWrapper=c,this.actionLabels=d,this.icons=T.P,this.executingTasks=[],this.selection=new Se.r,this.tableStatus=new 
se.E,this.disablePurgeBtn=!0,this.permission=this.authStorageService.getPermissions().rbdImage,this.tableActions=[{permission:"update",icon:T.P.undo,click:()=>this.restoreModal(),name:this.actionLabels.RESTORE},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteModal(),name:this.actionLabels.DELETE}]}ngOnInit(){this.columns=[{name:"ID",prop:"id",flexGrow:1,cellTransformation:De.e.executing},{name:"Name",prop:"name",flexGrow:1},{name:"Pool",prop:"pool_name",flexGrow:1},{name:"Namespace",prop:"namespace",flexGrow:1},{name:"Status",prop:"deferment_end_time",flexGrow:1,cellTemplate:this.expiresTpl},{name:"Deleted At",prop:"deletion_time",flexGrow:1,pipe:this.cdDatePipe}],this.taskListService.init(()=>this.rbdService.listTrash(),i=>this.prepareResponse(i),i=>this.images=i,()=>this.onFetchError(),i=>["rbd/trash/remove","rbd/trash/restore"].includes(i.name),(i,_)=>new L.N(i.pool_name,i.namespace,i.id).toString()===_.metadata.image_id_spec,void 0)}prepareResponse(t){let o=[];const i={};let _;if(t.forEach(r=>{S().isUndefined(i[r.status])&&(i[r.status]=[]),i[r.status].push(r.pool_name),o=o.concat(r.value),this.disablePurgeBtn=!o.length}),i[3]?_=3:i[1]?_=1:i[2]&&(_=2),_){const r=(i[_].length>1?"pools ":"pool ")+i[_].join();this.tableStatus=new se.E(_,r)}else this.tableStatus=new se.E;return o.forEach(r=>{r.cdIsExpired=Me()().isAfter(r.deferment_end_time)}),o}onFetchError(){this.table.reset(),this.tableStatus=new se.E(qa.T.ValueException)}updateSelection(t){this.selection=t}restoreModal(){const t={poolName:this.selection.first().pool_name,namespace:this.selection.first().namespace,imageName:this.selection.first().name,imageId:this.selection.first().id};this.modalRef=this.modalService.show(za,t)}deleteModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().id,_=this.selection.first().deferment_end_time,r=Me()().isAfter(_),c=new 
L.N(t,o,i);this.modalRef=this.modalService.show(pe.M,{itemDescription:"RBD",itemNames:[c],bodyTemplate:this.deleteTpl,bodyContext:{expiresAt:_,isExpired:r},submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new R.R("rbd/trash/remove",{image_id_spec:c.toString()}),call:this.rbdService.removeTrash(c,!0)})})}purgeModal(){this.modalService.show(Xa)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(q),e.Y36(de.Z),e.Y36(Ze.N),e.Y36(ce.j),e.Y36(u.P),e.Y36(v.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(W.a,7),e.Gf(Ja,7),e.Gf(Ya,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.table=i.first),e.iGM(i=e.CRH())&&(o.expiresTpl=i.first),e.iGM(i=e.CRH())&&(o.deleteTpl=i.first)}},features:[e._Bn([ce.j])],decls:9,vars:8,consts:function(){let s,t,o,i;return s="Purge Trash",t="Expired at",o="Protected until",i="This image is protected until " + "\ufffd0\ufffd" + ".",[["columnMode","flex","identifier","id","forceIdentifier","true","selectionType","single",3,"data","columns","status","autoReload","fetchData","updateSelection"],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"],["class","btn btn-light","type","button",3,"disabled","click",4,"ngIf"],["expiresTpl",""],["deleteTpl",""],["type","button",1,"btn","btn-light",3,"disabled","click"],["aria-hidden","true",3,"ngClass"],s,[4,"ngIf"],t,o,["class","text-danger",4,"ngIf"],[1,"text-danger"],i]},template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0),e.NdJ("fetchData",function(){return o.taskListService.fetch()})("updateSelection",function(_){return 
o.updateSelection(_)}),e.TgZ(2,"div",1),e._UZ(3,"cd-table-actions",2),e.YNc(4,Ua,4,4,"button",3),e.qZA(),e.qZA(),e.YNc(5,er,4,5,"ng-template",null,4,e.W1O),e.YNc(7,or,1,1,"ng-template",null,5,e.W1O)),2&t&&(e.xp6(1),e.Q6J("data",o.images)("columns",o.columns)("status",o.tableStatus)("autoReload",-1),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions),e.xp6(1),e.Q6J("ngIf",o.permission.delete))},directives:[We,W.a,Ee.K,l.O5,$.o,l.mk],pipes:[Ze.N],styles:[""]}),n})(),$t=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[[l.ez,ni,a.u5,a.UX,N.Oz,N.dT,N.HK,Ie.b,Ae.m,m.Bz,ne.xc]]}),n})();const ir=[{path:"",redirectTo:"rbd",pathMatch:"full"},{path:"rbd",canActivate:[U.T,re.P],data:{moduleStatusGuardConfig:{uiApiPath:"block/rbd",redirectTo:"error",header:"No RBD pools available",button_name:"Create RBD pool",button_route:"/pool/create"},breadcrumbs:"Images"},children:[{path:"",component:ba},{path:"namespaces",component:xa,data:{breadcrumbs:"Namespaces"}},{path:"trash",component:nr,data:{breadcrumbs:"Trash"}},{path:"performance",component:wa,data:{breadcrumbs:"Overall Performance"}},{path:v.MQ.CREATE,component:$e,data:{breadcrumbs:v.Qn.CREATE}},{path:`${v.MQ.EDIT}/:image_spec`,component:$e,data:{breadcrumbs:v.Qn.EDIT}},{path:`${v.MQ.CLONE}/:image_spec/:snap`,component:$e,data:{breadcrumbs:v.Qn.CLONE}},{path:`${v.MQ.COPY}/:image_spec`,component:$e,data:{breadcrumbs:v.Qn.COPY}},{path:`${v.MQ.COPY}/:image_spec/:snap`,component:$e,data:{breadcrumbs:v.Qn.COPY}}]},{path:"mirroring",component:Vi,canActivate:[U.T,re.P],data:{moduleStatusGuardConfig:{uiApiPath:"block/mirroring",redirectTo:"error",header:"RBD mirroring is not configured",button_name:"Configure RBD Mirroring",button_title:"This will create rbd-mirror service and a replicated RBD pool",component:"RBD 
Mirroring",uiConfig:!0},breadcrumbs:"Mirroring"},children:[{path:`${v.MQ.EDIT}/:pool_name`,component:es,outlet:"modal"}]},{path:"iscsi",canActivate:[U.T],data:{breadcrumbs:"iSCSI"},children:[{path:"",redirectTo:"overview",pathMatch:"full"},{path:"overview",component:oi,data:{breadcrumbs:"Overview"}},{path:"targets",data:{breadcrumbs:"Targets"},children:[{path:"",component:qn},{path:v.MQ.CREATE,component:ut,data:{breadcrumbs:v.Qn.CREATE}},{path:`${v.MQ.EDIT}/:target_iqn`,component:ut,data:{breadcrumbs:v.Qn.EDIT}}]}]}];let sr=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[[$t,m.Bz.forChild(ir)]]}),n})()},54555:(ct,Oe,p)=>{p.d(Oe,{d:()=>z});var l=p(74788),a=p(24751),m=p(23815),ne=p.n(m),N=p(80226),Ie=p(65862),v=p(95463),U=p(30633),re=p(28211),Ae=p(34089),be=p(41582),S=p(12057),le=p(56310),D=p(18372),ie=p(87925),V=p(94276);let e=(()=>{class A{constructor(u,C){this.control=u,this.formatter=C}setValue(u){const C=this.formatter.toMilliseconds(u);this.control.control.setValue(`${C} ms`)}ngOnInit(){this.setValue(this.control.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.control.value))}onUpdate(u){this.setValue(u)}}return A.\u0275fac=function(u){return new(u||A)(l.Y36(a.a5),l.Y36(re.H))},A.\u0275dir=l.lG2({type:A,selectors:[["","cdMilliseconds",""]],hostBindings:function(u,C){1&u&&l.NdJ("blur",function(O){return C.onUpdate(O.target.value)})},inputs:{ngDataReady:"ngDataReady"}}),A})();var X=p(20044);let Ne=(()=>{class A{constructor(u,C,h,O){this.elementRef=u,this.control=C,this.dimlessBinaryPerSecondPipe=h,this.formatter=O,this.ngModelChange=new l.vpe,this.el=this.elementRef.nativeElement}ngOnInit(){this.setValue(this.el.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.el.value))}setValue(u){/^[\d.]+$/.test(u)&&(u+=this.defaultUnit||"m");const 
C=this.formatter.toBytes(u,0),h=this.round(C);this.el.value=this.dimlessBinaryPerSecondPipe.transform(h),null!==C?(this.ngModelChange.emit(this.el.value),this.control.control.setValue(this.el.value)):(this.ngModelChange.emit(null),this.control.control.setValue(null))}round(u){if(null!==u&&0!==u){if(!ne().isUndefined(this.minBytes)&&uthis.maxBytes)return this.maxBytes;if(!ne().isUndefined(this.roundPower)){const C=Math.round(Math.log(u)/Math.log(this.roundPower));return Math.pow(this.roundPower,C)}}return u}onBlur(u){this.setValue(u)}}return A.\u0275fac=function(u){return new(u||A)(l.Y36(l.SBq),l.Y36(a.a5),l.Y36(X.O),l.Y36(re.H))},A.\u0275dir=l.lG2({type:A,selectors:[["","cdDimlessBinaryPerSecond",""]],hostBindings:function(u,C){1&u&&l.NdJ("blur",function(O){return C.onBlur(O.target.value)})},inputs:{ngDataReady:"ngDataReady",minBytes:"minBytes",maxBytes:"maxBytes",roundPower:"roundPower",defaultUnit:"defaultUnit"},outputs:{ngModelChange:"ngModelChange"}}),A})(),Fe=(()=>{class A{constructor(u,C){this.formatter=u,this.ngControl=C}setValue(u){const C=this.formatter.toIops(u);this.ngControl.control.setValue(`${C} IOPS`)}ngOnInit(){this.setValue(this.ngControl.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.ngControl.value))}onUpdate(u){this.setValue(u)}}return A.\u0275fac=function(u){return new(u||A)(l.Y36(re.H),l.Y36(a.a5))},A.\u0275dir=l.lG2({type:A,selectors:[["","cdIops",""]],hostBindings:function(u,C){1&u&&l.NdJ("blur",function(O){return C.onUpdate(O.target.value)})},inputs:{ngDataReady:"ngDataReady"}}),A})();function L(A,R){if(1&A&&(l.ynx(0),l._UZ(1,"input",18),l.BQk()),2&A){const u=l.oxw().$implicit,C=l.oxw(2);l.xp6(1),l.Q6J("id",u.name)("name",u.name)("formControlName",u.name)("ngDataReady",C.ngDataReady)}}function he(A,R){if(1&A&&(l.ynx(0),l._UZ(1,"input",19),l.BQk()),2&A){const u=l.oxw().$implicit,C=l.oxw(2);l.xp6(1),l.Q6J("id",u.name)("name",u.name)("formControlName",u.name)("ngDataReady",C.ngDataReady)}}function 
q(A,R){if(1&A&&(l.ynx(0),l._UZ(1,"input",20),l.BQk()),2&A){const u=l.oxw().$implicit,C=l.oxw(2);l.xp6(1),l.Q6J("id",u.name)("name",u.name)("formControlName",u.name)("ngDataReady",C.ngDataReady)}}function F(A,R){1&A&&(l.TgZ(0,"span",21),l.SDv(1,22),l.qZA())}const y=function(A){return{active:A}},T=function(A){return[A]};function H(A,R){if(1&A){const u=l.EpF();l.TgZ(0,"div",10),l.TgZ(1,"label",11),l._uU(2),l.TgZ(3,"cd-helper"),l._uU(4),l.qZA(),l.qZA(),l.TgZ(5,"div"),l.TgZ(6,"div",12),l.ynx(7,13),l.YNc(8,L,2,4,"ng-container",14),l.YNc(9,he,2,4,"ng-container",14),l.YNc(10,q,2,4,"ng-container",14),l.BQk(),l.TgZ(11,"span",15),l.TgZ(12,"button",16),l.NdJ("click",function(){const O=l.CHM(u).$implicit;return l.oxw(2).reset(O.name)}),l._UZ(13,"i",7),l.qZA(),l.qZA(),l.qZA(),l.YNc(14,F,2,0,"span",17),l.qZA(),l.qZA()}if(2&A){const u=R.$implicit,C=l.oxw().$implicit,h=l.oxw(),O=l.MAs(1);l.xp6(1),l.Q6J("for",u.name),l.xp6(1),l.Oqu(u.displayName),l.xp6(2),l.Oqu(u.description),l.xp6(1),l.Gre("cd-col-form-input ",C.heading,""),l.xp6(2),l.Q6J("ngSwitch",u.type),l.xp6(1),l.Q6J("ngSwitchCase",h.configurationType.milliseconds),l.xp6(1),l.Q6J("ngSwitchCase",h.configurationType.bps),l.xp6(1),l.Q6J("ngSwitchCase",h.configurationType.iops),l.xp6(2),l.Q6J("ngClass",l.VKq(13,y,h.isDisabled(u.name))),l.xp6(1),l.Q6J("ngClass",l.VKq(15,T,h.icons.erase)),l.xp6(1),l.Q6J("ngIf",h.form.showError("configuration."+u.name,O,"min"))}}function x(A,R){if(1&A){const u=l.EpF();l.TgZ(0,"div",4),l.TgZ(1,"h4",5),l.TgZ(2,"span",6),l.NdJ("click",function(){const O=l.CHM(u).$implicit;return l.oxw().toggleSectionVisibility(O.class)}),l._uU(3),l._UZ(4,"i",7),l.qZA(),l.qZA(),l.TgZ(5,"div",8),l.YNc(6,H,15,17,"div",9),l.qZA(),l.qZA()}if(2&A){const u=R.$implicit,C=l.oxw();l.xp6(3),l.hij(" ",u.heading," "),l.xp6(1),l.Q6J("ngClass",C.sectionVisibility[u.class]?C.icons.minusCircle:C.icons.addCircle),l.xp6(1),l.Tol(u.class),l.Q6J("hidden",!C.sectionVisibility[u.class]),l.xp6(1),l.Q6J("ngForOf",u.options)}}let z=(()=>{class 
A{constructor(u,C){this.formatterService=u,this.rbdConfigurationService=C,this.initializeData=new N.t(1),this.changes=new l.vpe,this.icons=Ie.P,this.ngDataReady=new l.vpe,this.configurationType=U.r,this.sectionVisibility={}}ngOnInit(){const u=this.createConfigurationFormGroup();this.form.addControl("configuration",u),u.valueChanges.subscribe(()=>{this.changes.emit(this.getDirtyValues.bind(this))}),this.initializeData&&this.initializeData.subscribe(C=>{this.initialData=C.initialData;const h=C.sourceType;this.rbdConfigurationService.getWritableOptionFields().forEach(O=>{const $=C.initialData.filter(k=>k.name===O.name).pop();$&&$.source===h&&this.form.get(`configuration.${O.name}`).setValue($.value)}),this.ngDataReady.emit()}),this.rbdConfigurationService.getWritableSections().forEach(C=>this.sectionVisibility[C.class]=!1)}getDirtyValues(u=!1,C){if(u&&!C)throw new Error("ProgrammingError: If local values shall be included, a proper localFieldType argument has to be provided, too");const h={};return this.rbdConfigurationService.getWritableOptionFields().forEach(O=>{const $=this.form.get("configuration").get(O.name);this.initialData&&this.initialData[O.name]===$.value||($.dirty||u&&$.source===C)&&(h[O.name]=null===$.value?$.value:O.type===U.r.bps?this.formatterService.toBytes($.value):O.type===U.r.milliseconds?this.formatterService.toMilliseconds($.value):O.type===U.r.iops?this.formatterService.toIops($.value):$.value)}),h}createConfigurationFormGroup(){const u=new v.d({});return this.rbdConfigurationService.getWritableOptionFields().forEach(C=>{let h;if(C.type!==U.r.milliseconds&&C.type!==U.r.iops&&C.type!==U.r.bps)throw new Error(`Type ${C.type} is unknown, you may need to add it to RbdConfiguration class`);{let O=0;ne().forEach(this.initialData,$=>{$.name===C.name&&(O=$.value)}),h=new a.NI(O,a.kI.min(0))}u.addControl(C.name,h)}),u}reset(u){const 
C=this.form.get("configuration").get(u);C.disabled?(C.setValue(C.previousValue||0),C.enable(),C.previousValue||C.markAsPristine()):(C.previousValue=C.value,C.setValue(null),C.markAsDirty(),C.disable())}isDisabled(u){return this.form.get("configuration").get(u).disabled}toggleSectionVisibility(u){this.sectionVisibility[u]=!this.sectionVisibility[u]}}return A.\u0275fac=function(u){return new(u||A)(l.Y36(re.H),l.Y36(Ae.n))},A.\u0275cmp=l.Xpm({type:A,selectors:[["cd-rbd-configuration-form"]],inputs:{form:"form",initializeData:"initializeData"},outputs:{changes:"changes"},decls:5,vars:2,consts:function(){let R,u,C;return R="RBD Configuration",u="Remove the local configuration value. The parent configuration value will be inherited and used instead.",C="The minimum value is 0",[[3,"formGroup"],["cfgFormGroup",""],R,["class","col-12",4,"ngFor","ngForOf"],[1,"col-12"],[1,"cd-header"],[1,"collapsible",3,"click"],["aria-hidden","true",3,"ngClass"],[3,"hidden"],["class","form-group row",4,"ngFor","ngForOf"],[1,"form-group","row"],[1,"cd-col-form-label",3,"for"],[1,"input-group"],[3,"ngSwitch"],[4,"ngSwitchCase"],[1,"input-group-append"],["type","button","data-toggle","button","title",u,1,"btn","btn-light",3,"ngClass","click"],["class","invalid-feedback",4,"ngIf"],["type","text","cdMilliseconds","",1,"form-control",3,"id","name","formControlName","ngDataReady"],["type","text","defaultUnit","b","cdDimlessBinaryPerSecond","",1,"form-control",3,"id","name","formControlName","ngDataReady"],["type","text","cdIops","",1,"form-control",3,"id","name","formControlName","ngDataReady"],[1,"invalid-feedback"],C]},template:function(u,C){1&u&&(l.TgZ(0,"fieldset",0,1),l.TgZ(2,"legend"),l.SDv(3,2),l.qZA(),l.YNc(4,x,7,7,"div",3),l.qZA()),2&u&&(l.Q6J("formGroup",C.form.get("configuration")),l.xp6(4),l.Q6J("ngForOf",C.rbdConfigurationService.sections))},directives:[a.JL,a.sg,be.V,S.sg,S.mk,le.P,D.S,S.RF,S.n9,ie.o,S.O5,a.Fj,V.b,e,a.JJ,a.u,Ne,Fe],styles:[".collapsible[_ngcontent-%COMP%]{cursor:poin
ter;user-select:none}"]}),A})()},71752:(ct,Oe,p)=>{p.d(Oe,{P:()=>q});var l=p(35905),a=p(30633),m=p(74788);let ne=(()=>{class F{transform(T){return{0:"global",1:"pool",2:"image"}[T]}}return F.\u0275fac=function(T){return new(T||F)},F.\u0275pipe=m.Yjl({name:"rbdConfigurationSource",type:F,pure:!0}),F})();var N=p(28211),Ie=p(34089),v=p(12057),U=p(20044),re=p(48537),Ae=p(21766);const be=["configurationSourceTpl"],S=["configurationValueTpl"],le=["poolConfTable"];function D(F,y){1&F&&(m.TgZ(0,"span"),m.SDv(1,6),m.qZA())}function ie(F,y){1&F&&(m.TgZ(0,"strong"),m.SDv(1,7),m.qZA())}function V(F,y){1&F&&(m.TgZ(0,"strong"),m.SDv(1,8),m.qZA())}function e(F,y){1&F&&(m.TgZ(0,"div",4),m.YNc(1,D,2,0,"span",5),m.YNc(2,ie,2,0,"strong",5),m.YNc(3,V,2,0,"strong",5),m.qZA()),2&F&&(m.Q6J("ngSwitch",y.value),m.xp6(1),m.Q6J("ngSwitchCase","global"),m.xp6(1),m.Q6J("ngSwitchCase","image"),m.xp6(1),m.Q6J("ngSwitchCase","pool"))}function X(F,y){if(1&F&&(m.TgZ(0,"span"),m._uU(1),m.ALo(2,"dimlessBinaryPerSecond"),m.qZA()),2&F){const T=m.oxw().value;m.xp6(1),m.Oqu(m.lcZ(2,1,T))}}function Ne(F,y){if(1&F&&(m.TgZ(0,"span"),m._uU(1),m.ALo(2,"milliseconds"),m.qZA()),2&F){const T=m.oxw().value;m.xp6(1),m.Oqu(m.lcZ(2,1,T))}}function Fe(F,y){if(1&F&&(m.TgZ(0,"span"),m._uU(1),m.ALo(2,"iops"),m.qZA()),2&F){const T=m.oxw().value;m.xp6(1),m.Oqu(m.lcZ(2,1,T))}}function L(F,y){if(1&F&&(m.TgZ(0,"span"),m._uU(1),m.qZA()),2&F){const T=m.oxw().value;m.xp6(1),m.Oqu(T)}}function he(F,y){if(1&F&&(m.TgZ(0,"div",4),m.YNc(1,X,3,3,"span",5),m.YNc(2,Ne,3,3,"span",5),m.YNc(3,Fe,3,3,"span",5),m.YNc(4,L,2,1,"span",9),m.qZA()),2&F){const T=y.row,H=m.oxw();m.Q6J("ngSwitch",T.type),m.xp6(1),m.Q6J("ngSwitchCase",H.typeField.bps),m.xp6(1),m.Q6J("ngSwitchCase",H.typeField.milliseconds),m.xp6(1),m.Q6J("ngSwitchCase",H.typeField.iops)}}let q=(()=>{class 
F{constructor(T,H){this.formatterService=T,this.rbdConfigurationService=H,this.sourceField=a.h,this.typeField=a.r}ngOnInit(){this.poolConfigurationColumns=[{prop:"displayName",name:"Name"},{prop:"description",name:"Description"},{prop:"name",name:"Key"},{prop:"source",name:"Source",cellTemplate:this.configurationSourceTpl,pipe:new ne},{prop:"value",name:"Value",cellTemplate:this.configurationValueTpl}]}ngOnChanges(){!this.data||(this.data=this.data.filter(T=>this.rbdConfigurationService.getOptionFields().map(H=>H.name).includes(T.name)))}}return F.\u0275fac=function(T){return new(T||F)(m.Y36(N.H),m.Y36(Ie.n))},F.\u0275cmp=m.Xpm({type:F,selectors:[["cd-rbd-configuration-table"]],viewQuery:function(T,H){if(1&T&&(m.Gf(be,7),m.Gf(S,7),m.Gf(le,7)),2&T){let x;m.iGM(x=m.CRH())&&(H.configurationSourceTpl=x.first),m.iGM(x=m.CRH())&&(H.configurationValueTpl=x.first),m.iGM(x=m.CRH())&&(H.poolConfTable=x.first)}},inputs:{data:"data"},features:[m.TTD],decls:6,vars:2,consts:function(){let y,T,H;return y="Global",T="Image",H="Pool",[["identifier","name",3,"data","columns"],["poolConfTable",""],["configurationSourceTpl",""],["configurationValueTpl",""],[3,"ngSwitch"],[4,"ngSwitchCase"],y,T,H,[4,"ngSwitchDefault"]]},template:function(T,H){1&T&&(m._UZ(0,"cd-table",0,1),m.YNc(2,e,4,4,"ng-template",null,2,m.W1O),m.YNc(4,he,5,4,"ng-template",null,3,m.W1O)),2&T&&m.Q6J("data",H.data)("columns",H.poolConfigurationColumns)},directives:[l.a,v.RF,v.n9,v.ED],pipes:[U.O,re.J,Ae.A],styles:[""]}),F})()}}]); \ No newline at end of file diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/585.764bfab2e2f489fdfd7f.js b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/585.7d0bcf3a0ac0c40fef3b.js similarity index 87% rename from ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/585.764bfab2e2f489fdfd7f.js rename to ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/585.7d0bcf3a0ac0c40fef3b.js index 810294007..c43f640dc 100644 --- 
a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/585.764bfab2e2f489fdfd7f.js +++ b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/585.7d0bcf3a0ac0c40fef3b.js @@ -1 +1 @@ -"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[585],{24585:(Fi,Fe,r)=>{r.r(Fe),r.d(Fe,{PoolModule:()=>ze,RoutedPoolModule:()=>Ai});var C=r(12057),a=r(24751),Oe=r(6283),g=r(38549),M=r(79512),d_=r(44466),u_=r(91330),f_=r(370),P_=r(23815),u=r.n(P_),E_=r(80226),g_=r(26504),ue=r(80842);class S{constructor(){this.nodes=[],this.idTree={},this.allDevices=[],this.buckets=[],this.failureDomains={},this.failureDomainKeys=[],this.devices=[],this.deviceCount=0}static searchFailureDomains(i,_){return this.getFailureDomains(this.search(i,_))}static search(i,_){const[o,n]=_.split("~"),s=i.find(c=>["name","id","type"].some(d=>c[d]===o));return s?(i=this.getSubNodes(s,this.createIdTreeFromNodes(i)),n&&(i=this.filterNodesByDeviceType(i,n)),i):[]}static createIdTreeFromNodes(i){const _={};return i.forEach(o=>{_[o.id]=o}),_}static getSubNodes(i,_){let o=[i];return i.children&&i.children.forEach(n=>{o=o.concat(this.getSubNodes(_[n],_))}),o}static filterNodesByDeviceType(i,_){let n,o=i.filter(c=>c.device_class&&c.device_class!==_).map(c=>c.id),s=o;do{n=!1,i=i.filter(d=>!o.includes(d.id));const c=[];i.forEach(d=>{d.children&&d.children.every(P=>o.includes(P))&&(c.push(d.id),n=!0)}),n&&(o=c,s=s.concat(c))}while(n);return(i=u().cloneDeep(i)).map(c=>(c.children&&(c.children=c.children.filter(d=>!s.includes(d))),c))}static getFailureDomains(i){const _={};return i.forEach(o=>{const 
n=o.type;_[n]||(_[n]=[]),_[n].push(o)}),_}initCrushNodeSelection(i,_,o,n){this.nodes=i,this.idTree=S.createIdTreeFromNodes(i),i.forEach(s=>{this.idTree[s.id]=s}),this.buckets=u().sortBy(i.filter(s=>s.children),"name"),this.controls={root:_,failure:o,device:n},this.preSelectRoot(),this.controls.root.valueChanges.subscribe(()=>this.onRootChange()),this.controls.failure.valueChanges.subscribe(()=>this.onFailureDomainChange()),this.controls.device.valueChanges.subscribe(()=>this.onDeviceChange())}preSelectRoot(){const i=this.nodes.find(_=>"root"===_.type);this.silentSet(this.controls.root,i),this.onRootChange()}silentSet(i,_){i.setValue(_,{emitEvent:!1})}onRootChange(){const i=S.getSubNodes(this.controls.root.value,this.idTree),_=S.getFailureDomains(i);Object.keys(_).forEach(o=>{_[o].length<=1&&delete _[o]}),this.failureDomains=_,this.failureDomainKeys=Object.keys(_).sort(),this.updateFailureDomain()}updateFailureDomain(){let i=this.getIncludedCustomValue(this.controls.failure,Object.keys(this.failureDomains));""===i&&(i=this.setMostCommonDomain(this.controls.failure)),this.updateDevices(i)}getIncludedCustomValue(i,_){return i.dirty&&_.includes(i.value)?i.value:""}setMostCommonDomain(i){let _={n:0,type:""};return Object.keys(this.failureDomains).forEach(o=>{const n=this.failureDomains[o].length;_.nS.getSubNodes(n,this.idTree)));this.allDevices=_.filter(n=>n.device_class).map(n=>n.device_class),this.devices=u().uniq(this.allDevices).sort();const o=1===this.devices.length?this.devices[0]:this.getIncludedCustomValue(this.controls.device,this.devices);this.silentSet(this.controls.device,o),this.onDeviceChange(o)}onDeviceChange(i=this.controls.device.value){this.deviceCount=""===i?this.allDevices.length:this.allDevices.filter(_=>_===i).length}}var 
Ne=r(30982),p_=r(14745),b=r(65862),R_=r(93614),be=r(95463),E=r(77205),m_=r(30633),v=r(76111),C_=r(47557),M_=r(28211),de=r(32337),e=r(74788),ve=r(62862),Ie=r(83608),$e=r(60312),fe=r(41582),Pe=r(56310),Ee=r(87925),ge=r(94276),pe=r(82945),Re=r(18372),me=r(30839),Ce=r(10545);function h_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,31),e.qZA())}function T_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,32),e.qZA())}function S_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,33),e.qZA())}function L_(t,i){1&t&&(e.TgZ(0,"option",26),e.SDv(1,34),e.qZA())}function A_(t,i){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}function F_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,36),e.qZA())}function N_(t,i){1&t&&(e.TgZ(0,"option",26),e.SDv(1,37),e.qZA())}function b_(t,i){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const _=i.$implicit,o=e.oxw();e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function v_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,38),e.qZA())}function I_(t,i){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}let $_=(()=>{class t extends S{constructor(_,o,n,s,c){super(),this.formBuilder=_,this.activeModal=o,this.taskWrapper=n,this.crushRuleService=s,this.actionLabels=c,this.submitAction=new e.vpe,this.tooltips=this.crushRuleService.formTooltips,this.action=this.actionLabels.CREATE,this.resource="Crush Rule",this.createForm()}createForm(){this.form=this.formBuilder.group({name:["",[a.kI.required,a.kI.pattern("[A-Za-z0-9_-]+"),E.h.custom("uniqueName",_=>this.names&&-1!==this.names.indexOf(_))]],root:null,failure_domain:"",device_class:""})}ngOnInit(){this.crushRuleService.getInfo().subscribe(({names:_,nodes:o})=>{this.initCrushNodeSelection(o,this.form.get("root"),this.form.get("failure_domain"),this.form.get("device_class")),this.names=_})}onSubmit(){if(this.form.invalid)return void 
this.form.setErrors({cdSubmitButton:!0});const _=u().cloneDeep(this.form.value);_.root=_.root.name,""===_.device_class&&delete _.device_class,this.taskWrapper.wrapTaskAroundCall({task:new v.R("crushRule/create",_),call:this.crushRuleService.create(_)}).subscribe({error:()=>{this.form.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close(),this.submitAction.emit(_)}})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ve.O),e.Y36(g.Kz),e.Y36(de.P),e.Y36(Ie.H),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-crush-rule-form-modal"]],outputs:{submitAction:"submitAction"},features:[e.qOj],decls:55,vars:27,consts:function(){let i,_,o,n,s,c,d,P,p,R,h,T,m;return i="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Root",n="Failure domain type",s="Device class",c="Let Ceph decide",d="This field is required!",P="The name can only consist of alphanumeric characters, dashes and underscores.",p="The chosen erasure code profile name is already in use.",R="Loading...",h="This field is required!",T="Loading...",m="This field is 
required!",[[3,"modalRef"],[1,"modal-title"],i,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label"],_,[1,"required"],[1,"cd-col-form-input"],["type","text","id","name","name","name","placeholder","Name...","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","root",1,"cd-col-form-label"],o,[3,"html"],["id","root","name","root","formControlName","root",1,"form-control"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],["for","failure_domain",1,"cd-col-form-label"],n,["id","failure_domain","name","failure_domain","formControlName","failure_domain",1,"form-control"],["for","device_class",1,"cd-col-form-label"],s,["id","device_class","name","device_class","formControlName","device_class",1,"form-control"],["ngValue",""],c,[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],d,P,p,R,[3,"ngValue"],h,T,m]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.ynx(11),e.SDv(12,9),e.BQk(),e._UZ(13,"span",10),e.qZA(),e.TgZ(14,"div",11),e._UZ(15,"input",12),e.YNc(16,h_,2,0,"span",13),e.YNc(17,T_,2,0,"span",13),e.YNc(18,S_,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(19,"div",7),e.TgZ(20,"label",14),e.ynx(21),e.SDv(22,15),e.BQk(),e._UZ(23,"cd-helper",16),e._UZ(24,"span",10),e.qZA(),e.TgZ(25,"div",11),e.TgZ(26,"select",17),e.YNc(27,L_,2,0,"option",18),e.YNc(28,A_,2,2,"option",19),e.qZA(),e.YNc(29,F_,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(30,"div",7),e.TgZ(31,"label",20),e.ynx(32),e.SDv(33,21),e.BQk(),e._UZ(34,"cd-helper",16),e._UZ(35,"span",10),e.qZA(),e.TgZ(36,"div",11),e.TgZ(37,"select",22),e.YNc(38,N_,2,0,"option",18),e.YNc(39,b_,2,3,"option",19),e.qZA(),e.YNc(40,v_,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(41,"div",7),e.TgZ(42,"label",23),e.ynx(43),e.
SDv(44,24),e.BQk(),e._UZ(45,"cd-helper",16),e.qZA(),e.TgZ(46,"div",11),e.TgZ(47,"select",25),e.TgZ(48,"option",26),e.SDv(49,27),e.qZA(),e.YNc(50,I_,2,2,"option",19),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.TgZ(51,"div",28),e.TgZ(52,"cd-form-button-panel",29),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.ALo(53,"titlecase"),e.ALo(54,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_){const n=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,19,o.action))(e.lcZ(4,21,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.form),e.xp6(10),e.Q6J("ngIf",o.form.showError("name",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"pattern")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"uniqueName")),e.xp6(5),e.Q6J("html",o.tooltips.root),e.xp6(4),e.Q6J("ngIf",!o.buckets),e.xp6(1),e.Q6J("ngForOf",o.buckets),e.xp6(1),e.Q6J("ngIf",o.form.showError("root",n,"required")),e.xp6(5),e.Q6J("html",o.tooltips.failure_domain),e.xp6(4),e.Q6J("ngIf",!o.failureDomains),e.xp6(1),e.Q6J("ngForOf",o.failureDomainKeys),e.xp6(1),e.Q6J("ngIf",o.form.showError("failure_domain",n,"required")),e.xp6(5),e.Q6J("html",o.tooltips.device_class),e.xp6(5),e.Q6J("ngForOf",o.devices),e.xp6(2),e.Q6J("form",o.form)("submitText",e.lcZ(53,23,o.action)+" "+e.lcZ(54,25,o.resource))}},directives:[$e.z,a._Y,a.JL,fe.V,a.sg,Pe.P,Ee.o,a.Fj,ge.b,a.JJ,a.u,pe.U,C.O5,Re.S,a.EJ,C.sg,a.YN,a.Kr,me.p],pipes:[C.rS,Ce.m],styles:[""]}),t})();class D_{}var Z_=r(58497);let Me=(()=>{class t{constructor(_){this.http=_,this.apiPath="api/erasure_code_profile",this.formTooltips={k:"Each object is split in data-chunks parts, each stored on a different OSD.",m:"Compute coding chunks for each object and store them on different OSDs.\n The number of coding chunks is also the number of OSDs that can be down without losing data.",plugins:{jerasure:{description:"The jerasure plugin is the most generic and flexible plugin,\n it is also the default for Ceph erasure coded pools.",technique:"The more 
flexible technique is reed_sol_van : it is enough to set k\n and m. The cauchy_good technique can be faster but you need to chose the packetsize\n carefully. All of reed_sol_r6_op, liberation, blaum_roth, liber8tion are RAID6 equivalents\n in the sense that they can only be configured with m=2.",packetSize:"The encoding will be done on packets of bytes size at a time.\n Choosing the right packet size is difficult.\n The jerasure documentation contains extensive information on this topic."},lrc:{description:"With the jerasure plugin, when an erasure coded object is stored on\n multiple OSDs, recovering from the loss of one OSD requires reading from all the others.\n For instance if jerasure is configured with k=8 and m=4, losing one OSD requires reading\n from the eleven others to repair.\n\n The lrc erasure code plugin creates local parity chunks to be able to recover using\n less OSDs. For instance if lrc is configured with k=8, m=4 and l=4, it will create\n an additional parity chunk for every four OSDs. When a single OSD is lost, it can be\n recovered with only four OSDs instead of eleven.",l:"Group the coding and data chunks into sets of size locality. For instance,\n for k=4 and m=2, when locality=3 two groups of three are created. Each set can\n be recovered without reading chunks from another set.",crushLocality:"The type of the crush bucket in which each set of chunks defined\n by l will be stored. For instance, if it is set to rack, each group of l chunks will be\n placed in a different rack. It is used to create a CRUSH rule step such as step choose\n rack. If it is not set, no such grouping is done."},isa:{description:"The isa plugin encapsulates the ISA library. 
It only runs on Intel processors.",technique:"The ISA plugin comes in two Reed Solomon forms.\n If reed_sol_van is set, it is Vandermonde, if cauchy is set, it is Cauchy."},shec:{description:"The shec plugin encapsulates the multiple SHEC library.\n It allows ceph to recover data more efficiently than Reed Solomon codes.",c:"The number of parity chunks each of which includes each data chunk in its\n calculation range. The number is used as a durability estimator. For instance, if c=2,\n 2 OSDs can be down without losing data."},clay:{description:"CLAY (short for coupled-layer) codes are erasure codes designed to\n bring about significant savings in terms of network bandwidth and disk IO when a failed\n node/OSD/rack is being repaired.",d:"Number of OSDs requested to send data during recovery of a single chunk.\n d needs to be chosen such that k+1 <= d <= k+m-1. The larger the d, the better\n the savings.",scalar_mds:"scalar_mds specifies the plugin that is used as a building block\n in the layered construction. It can be one of jerasure, isa, shec.",technique:"technique specifies the technique that will be picked\n within the 'scalar_mds' plugin specified. Supported techniques\n are 'reed_sol_van', 'reed_sol_r6_op', 'cauchy_orig',\n 'cauchy_good', 'liber8tion' for jerasure, 'reed_sol_van',\n 'cauchy' for isa and 'single', 'multiple' for shec."}},crushRoot:"The name of the crush bucket used for the first step of the CRUSH rule.\n For instance step take default.",crushFailureDomain:"Ensure that no two chunks are in a bucket with the same failure\n domain. For instance, if the failure domain is host no two chunks will be stored on the same\n host. 
It is used to create a CRUSH rule step such as step chooseleaf host.",crushDeviceClass:"Restrict placement to devices of a specific class\n (e.g., ssd or hdd), using the crush device class names in the CRUSH map.",directory:"Set the directory name from which the erasure code plugin is loaded."}}list(){return this.http.get(this.apiPath)}create(_){return this.http.post(this.apiPath,_,{observe:"response"})}delete(_){return this.http.delete(`${this.apiPath}/${_}`,{observe:"response"})}getInfo(){return this.http.get(`ui-${this.apiPath}/info`)}}return t.\u0275fac=function(_){return new(_||t)(e.LFG(Z_.eN))},t.\u0275prov=e.Yz7({token:t,factory:t.\u0275fac,providedIn:"root"}),t})();function x_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,47),e.qZA())}function y_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,48),e.qZA())}function U_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,49),e.qZA())}function q_(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,50),e.qZA())}function H_(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}function G_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,52),e.qZA())}function z_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,53),e.qZA())}function X_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,54),e.qZA())}function w_(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,55),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.pQV(_.deviceCount),e.QtT(1)}}function Q_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,56),e.qZA())}function J_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,57),e.qZA())}function k_(t,i){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,58),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.pQV(_.lrcMultiK),e.QtT(1)}}function V_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,59),e.qZA())}function Y_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,60),e.qZA())}function B_(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,61),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.pQV(_.deviceCount),e.QtT(1)}}function j_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,65),e.qZA())}function 
K_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,66),e.qZA())}function W_(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",62),e.TgZ(2,"span",14),e.SDv(3,63),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",64),e.YNc(7,j_,2,0,"span",12),e.YNc(8,K_,2,0,"span",12),e.qZA(),e.qZA()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.shec.c),e.xp6(3),e.Q6J("ngIf",_.form.showError("c",o,"min")),e.xp6(1),e.Q6J("ngIf",_.form.showError("c",o,"cGreaterM"))}}function eo(t,i){1&t&&(e.TgZ(0,"span",39),e.SDv(1,75),e.qZA())}function _o(t,i){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,76),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.pQV(_.getDMin())(_.getDMax()),e.QtT(1)}}function oo(t,i){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,77),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.pQV(_.getDMax()),e.QtT(1)}}function to(t,i){if(1&t&&(e.ynx(0),e.YNc(1,_o,2,2,"span",23),e.YNc(2,oo,2,1,"span",23),e.BQk()),2&t){const _=e.oxw(2);e.xp6(1),e.Q6J("ngIf",_.getDMin()<_.getDMax()),e.xp6(1),e.Q6J("ngIf",_.getDMin()===_.getDMax())}}function io(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,78),e.qZA()),2&t){const _=e.oxw(2);e.xp6(1),e.pQV(_.getDMin()),e.QtT(1)}}function no(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,79),e.qZA()),2&t){const _=e.oxw(2);e.xp6(1),e.pQV(_.getDMax()),e.QtT(1)}}function so(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",7),e.TgZ(1,"label",67),e.TgZ(2,"span",14),e.SDv(3,68),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e.TgZ(6,"div",69),e._UZ(7,"input",70),e.TgZ(8,"span",71),e.TgZ(9,"button",72),e.NdJ("click",function(){return e.CHM(_),e.oxw().toggleDCalc()}),e._UZ(10,"i",73),e.qZA(),e.qZA(),e.qZA(),e.YNc(11,eo,2,0,"span",23),e.YNc(12,to,3,2,"ng-container",74),e.YNc(13,io,2,1,"span",12),e.YNc(14,no,2,1,"span",12),e.qZA(),e.qZA()}if(2&t){const 
_=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.clay.d),e.xp6(6),e.Q6J("ngClass",_.dCalc?_.icons.unlock:_.icons.lock),e.xp6(1),e.Q6J("ngIf",_.dCalc),e.xp6(1),e.Q6J("ngIf",!_.dCalc),e.xp6(1),e.Q6J("ngIf",_.form.showError("d",o,"dMin")),e.xp6(1),e.Q6J("ngIf",_.form.showError("d",o,"dMax"))}}function ao(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,84),e.qZA())}function lo(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,85),e.qZA())}function ro(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,86),e.qZA())}function co(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",80),e.TgZ(2,"span",14),e.SDv(3,81),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",82),e.YNc(7,ao,2,0,"span",12),e.YNc(8,lo,2,0,"span",12),e.YNc(9,ro,2,0,"span",12),e.TgZ(10,"span",39),e.SDv(11,83),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.lrc.l),e.xp6(3),e.Q6J("ngIf",_.form.showError("l",o,"required")),e.xp6(1),e.Q6J("ngIf",_.form.showError("l",o,"min")),e.xp6(1),e.Q6J("ngIf",_.form.showError("l",o,"unequal")),e.xp6(2),e.pQV(_.lrcGroups),e.QtT(11)}}function Oo(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,87),e.qZA())}function uo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit,o=e.oxw();e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function fo(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,91),e.qZA())}function Po(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,92),e.qZA())}function Eo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit,o=e.oxw(2);e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function go(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",88),e.ynx(2),e.SDv(3,89),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e.TgZ(6,"select",90),e.YNc(7,fo,2,0,"option",18),e.YNc(8,Po,2,0,"option",18),e.YNc(9,Eo,2,3,"option",19),e.qZA(),e.qZA(),e.qZA()),2&t){const 
_=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins.lrc.crushLocality),e.xp6(3),e.Q6J("ngIf",!_.failureDomains),e.xp6(1),e.Q6J("ngIf",_.failureDomainKeys.length>0),e.xp6(1),e.Q6J("ngForOf",_.failureDomainKeys)}}function po(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}const De=function(t,i,_){return[t,i,_]};function Ro(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",93),e.ynx(2),e.SDv(3,94),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e.TgZ(6,"select",95),e.YNc(7,po,2,2,"option",19),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins.clay.scalar_mds),e.xp6(3),e.Q6J("ngForOf",e.kEZ(2,De,_.PLUGIN.JERASURE,_.PLUGIN.ISA,_.PLUGIN.SHEC))}}function mo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}function Co(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",96),e.ynx(2),e.SDv(3,97),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e.TgZ(6,"select",98),e.YNc(7,mo,2,2,"option",19),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins[_.plugin].technique),e.xp6(3),e.Q6J("ngForOf",_.techniques)}}function Mo(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,102),e.qZA())}function ho(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",99),e.ynx(2),e.SDv(3,100),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",101),e.YNc(7,Mo,2,0,"span",12),e.qZA(),e.qZA()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.jerasure.packetSize),e.xp6(3),e.Q6J("ngIf",_.form.showError("packetSize",o,"min"))}}function To(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,103),e.qZA())}function So(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}function Lo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const 
_=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}let Ao=(()=>{class t extends S{constructor(_,o,n,s,c){super(),this.formBuilder=_,this.activeModal=o,this.taskWrapper=n,this.ecpService=s,this.actionLabels=c,this.submitAction=new e.vpe,this.tooltips=this.ecpService.formTooltips,this.PLUGIN={LRC:"lrc",SHEC:"shec",CLAY:"clay",JERASURE:"jerasure",ISA:"isa"},this.plugin=this.PLUGIN.JERASURE,this.icons=b.P,this.action=this.actionLabels.CREATE,this.resource="EC Profile",this.createForm(),this.setJerasureDefaults()}createForm(){this.form=this.formBuilder.group({name:[null,[a.kI.required,a.kI.pattern("[A-Za-z0-9_-]+"),E.h.custom("uniqueName",_=>this.names&&-1!==this.names.indexOf(_))]],plugin:[this.PLUGIN.JERASURE,[a.kI.required]],k:[4,[a.kI.required,E.h.custom("max",()=>this.baseValueValidation(!0)),E.h.custom("unequal",_=>this.lrcDataValidation(_)),E.h.custom("kLowerM",_=>this.shecDataValidation(_))]],m:[2,[a.kI.required,E.h.custom("max",()=>this.baseValueValidation())]],crushFailureDomain:"",crushRoot:null,crushDeviceClass:"",directory:"",technique:"reed_sol_van",packetSize:[2048],l:[3,[a.kI.required,E.h.custom("unequal",_=>this.lrcLocalityValidation(_))]],crushLocality:"",c:[2,[a.kI.required,E.h.custom("cGreaterM",_=>this.shecDurabilityValidation(_))]],d:[5,[a.kI.required,E.h.custom("dMin",_=>this.dMinValidation(_)),E.h.custom("dMax",_=>this.dMaxValidation(_))]],scalar_mds:[this.PLUGIN.JERASURE,[a.kI.required]]}),this.toggleDCalc(),this.form.get("k").valueChanges.subscribe(()=>this.updateValidityOnChange(["m","l","d"])),this.form.get("m").valueChanges.subscribe(()=>this.updateValidityOnChange(["k","l","c","d"])),this.form.get("l").valueChanges.subscribe(()=>this.updateValidityOnChange(["k","m"])),this.form.get("plugin").valueChanges.subscribe(_=>this.onPluginChange(_)),this.form.get("scalar_mds").valueChanges.subscribe(()=>this.setClayDefaultsForScalar())}baseValueValidation(_=!1){return 
this.validValidation(()=>this.getKMSum()>this.deviceCount&&this.form.getValue("k")>this.form.getValue("m")===_)}validValidation(_,o){return!((!this.form||o)&&this.plugin!==o)&&_()}getKMSum(){return this.form.getValue("k")+this.form.getValue("m")}lrcDataValidation(_){return this.validValidation(()=>{const o=this.form.getValue("m"),n=this.form.getValue("l"),s=_+o;return this.lrcMultiK=_/(s/n),_%(s/n)!=0},"lrc")}shecDataValidation(_){return this.validValidation(()=>this.form.getValue("m")>_,"shec")}lrcLocalityValidation(_){return this.validValidation(()=>{const o=this.getKMSum();return this.lrcGroups=_>0?o/_:0,_>0&&o%_!=0},"lrc")}shecDurabilityValidation(_){return this.validValidation(()=>{const o=this.form.getValue("m");return _>o},"shec")}dMinValidation(_){return this.validValidation(()=>this.getDMin()>_,"clay")}getDMin(){return this.form.getValue("k")+1}dMaxValidation(_){return this.validValidation(()=>_>this.getDMax(),"clay")}getDMax(){const _=this.form.getValue("m");return this.form.getValue("k")+_-1}toggleDCalc(){this.dCalc=!this.dCalc,this.form.get("d")[this.dCalc?"disable":"enable"](),this.calculateD()}calculateD(){this.plugin!==this.PLUGIN.CLAY||!this.dCalc||this.form.silentSet("d",this.getDMax())}updateValidityOnChange(_){_.forEach(o=>{"d"===o&&this.calculateD(),this.form.get(o).updateValueAndValidity({emitEvent:!1})})}onPluginChange(_){this.plugin=_,_===this.PLUGIN.JERASURE?this.setJerasureDefaults():_===this.PLUGIN.LRC?this.setLrcDefaults():_===this.PLUGIN.ISA?this.setIsaDefaults():_===this.PLUGIN.SHEC?this.setShecDefaults():_===this.PLUGIN.CLAY&&this.setClayDefaults(),this.updateValidityOnChange(["m"])}setJerasureDefaults(){this.techniques=["reed_sol_van","reed_sol_r6_op","cauchy_orig","cauchy_good","liberation","blaum_roth","liber8tion"],this.setDefaults({k:4,m:2,technique:"reed_sol_van"})}setLrcDefaults(){this.setDefaults({k:4,m:2,l:3})}setIsaDefaults(){this.techniques=["reed_sol_van","cauchy"],this.setDefaults({k:7,m:3,technique:"reed_sol_van"})}setShec
Defaults(){this.setDefaults({k:4,m:3,c:2})}setClayDefaults(){this.setDefaults({k:4,m:2,scalar_mds:this.PLUGIN.JERASURE}),this.setClayDefaultsForScalar()}setClayDefaultsForScalar(){const _=this.form.getValue("scalar_mds");let o="reed_sol_van";_===this.PLUGIN.JERASURE?this.techniques=["reed_sol_van","reed_sol_r6_op","cauchy_orig","cauchy_good","liber8tion"]:_===this.PLUGIN.ISA?this.techniques=["reed_sol_van","cauchy"]:(o="single",this.techniques=["single","multiple"]),this.setDefaults({technique:o})}setDefaults(_){Object.keys(_).forEach(o=>{const n=this.form.get(o),s=n.value;n.pristine||"technique"===o&&!this.techniques.includes(s)||"k"===o&&[4,7].includes(s)||"m"===o&&[2,3].includes(s)?n.setValue(_[o]):n.updateValueAndValidity()})}ngOnInit(){this.ecpService.getInfo().subscribe(({plugins:_,names:o,directory:n,nodes:s})=>{this.initCrushNodeSelection(s,this.form.get("crushRoot"),this.form.get("crushFailureDomain"),this.form.get("crushDeviceClass")),this.plugins=_,this.names=o,this.form.silentSet("directory",n),this.preValidateNumericInputFields()})}preValidateNumericInputFields(){const _=["k","m","l","c","d"].map(o=>this.form.get(o));_.forEach(o=>{o.markAsTouched(),o.markAsDirty()}),_[1].updateValueAndValidity()}onSubmit(){if(this.form.invalid)return void this.form.setErrors({cdSubmitButton:!0});const _=this.createJson();this.taskWrapper.wrapTaskAroundCall({task:new v.R("ecp/create",{name:_.name}),call:this.ecpService.create(_)}).subscribe({error:()=>{this.form.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close(),this.submitAction.emit(_)}})}createJson(){const _={technique:[this.PLUGIN.ISA,this.PLUGIN.JERASURE,this.PLUGIN.CLAY],packetSize:[this.PLUGIN.JERASURE],l:[this.PLUGIN.LRC],crushLocality:[this.PLUGIN.LRC],c:[this.PLUGIN.SHEC],d:[this.PLUGIN.CLAY],scalar_mds:[this.PLUGIN.CLAY]},o=new D_,n=this.form.getValue("plugin");return Object.keys(this.form.controls).filter(s=>{const 
c=_[s],d=this.form.getValue(s);return(c&&c.includes(n)||!c)&&d&&""!==d}).forEach(s=>{this.extendJson(s,o)}),o}extendJson(_,o){const s=this.form.getValue(_);o[{crushFailureDomain:"crush-failure-domain",crushRoot:"crush-root",crushDeviceClass:"crush-device-class",packetSize:"packetsize",crushLocality:"crush-locality"}[_]||_]="crushRoot"===_?s.name:s}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ve.O),e.Y36(g.Kz),e.Y36(de.P),e.Y36(Me),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-erasure-code-profile-form-modal"]],outputs:{submitAction:"submitAction"},features:[e.qOj],decls:98,vars:53,consts:function(){let i,_,o,n,s,c,d,P,p,R,h,T,m,f,A,I,$,D,Z,x,y,U,q,H,G,z,X,w,Q,J,k,V,Y,B,j,K,N,W,ee,_e,oe,te,ie,ne,se,ae,le,re,ce;return i="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Plugin",n="Data chunks (k)",s="Coding chunks (m)",c="Crush failure domain",d="Crush root",P="Crush device class",p="Let Ceph decide",R="Available OSDs: " + "\ufffd0\ufffd" + "",h="Directory",T="This field is required!",m="The name can only consist of alphanumeric characters, dashes and underscores.",f="The chosen erasure code profile name is already in use.",A="Loading...",I="This field is required!",$="This field is required!",D="Must be equal to or greater than 2.",Z="Chunks (k+m) have exceeded the available OSDs of " + "\ufffd0\ufffd" + ".",x="For an equal distribution k has to be a multiple of (k+m)/l.",y="K has to be equal to or greater than m in order to recover data correctly through c.",U="Distribution factor: " + "\ufffd0\ufffd" + "",q="This field is required!",H="Must be equal to or greater than 1.",G="Chunks (k+m) have exceeded the available OSDs of " + "\ufffd0\ufffd" + ".",z="Durability estimator (c)",X="Must be equal to or greater than 1.",w="C has to be equal to or lower than m as m defines the amount of chunks that can be used.",Q="Helper chunks (d)",J="Set d manually or use the plugin's default calculation that maximizes d.",k="D is automatically updated 
on k and m changes",V="D can be set from " + "\ufffd0\ufffd" + " to " + "\ufffd1\ufffd" + "",Y="D can only be set to " + "\ufffd0\ufffd" + "",B="D has to be greater than k (" + "\ufffd0\ufffd" + ").",j="D has to be lower than k + m (" + "\ufffd0\ufffd" + ").",K="Locality (l)",N="Locality groups: " + "\ufffd0\ufffd" + "",W="This field is required!",ee="Must be equal to or greater than 1.",_e="Can't split up chunks (k+m) correctly with the current locality.",oe="Loading...",te="Crush Locality",ie="Loading...",ne="None",se="Scalar mds",ae="Technique",le="Packetsize",re="Must be equal to or greater than 1.",ce="Loading...",[[3,"modalRef"],[1,"modal-title"],i,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["type","text","id","name","name","name","placeholder","Name...","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","plugin",1,"cd-col-form-label"],[1,"required"],o,[3,"html"],["id","plugin","name","plugin","formControlName","plugin",1,"form-control"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],["for","k",1,"cd-col-form-label"],n,["type","number","id","k","name","k","ng-model","$ctrl.erasureCodeProfile.k","placeholder","Data chunks...","formControlName","k","min","2",1,"form-control"],["class","form-text text-muted",4,"ngIf"],["for","m",1,"cd-col-form-label"],s,["type","number","id","m","name","m","placeholder","Coding chunks...","formControlName","m","min","1",1,"form-control"],["class","form-group 
row",4,"ngIf"],["for","crushFailureDomain",1,"cd-col-form-label"],c,["id","crushFailureDomain","name","crushFailureDomain","formControlName","crushFailureDomain",1,"form-control"],["for","crushRoot",1,"cd-col-form-label"],d,["id","crushRoot","name","crushRoot","formControlName","crushRoot",1,"form-control"],["for","crushDeviceClass",1,"cd-col-form-label"],P,["id","crushDeviceClass","name","crushDeviceClass","formControlName","crushDeviceClass",1,"form-control"],["ngValue",""],p,[1,"form-text","text-muted"],R,["for","directory",1,"cd-col-form-label"],h,["type","text","id","directory","name","directory","placeholder","Path...","formControlName","directory",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],T,m,f,A,[3,"ngValue"],I,$,D,Z,x,y,U,q,H,G,["for","c",1,"cd-col-form-label"],z,["type","number","id","c","name","c","placeholder","Coding chunks...","formControlName","c","min","1",1,"form-control"],X,w,["for","d",1,"cd-col-form-label"],Q,[1,"input-group"],["type","number","id","d","name","d","placeholder","Helper chunks...","formControlName","d",1,"form-control"],[1,"input-group-append"],["id","d-calc-btn","ngbTooltip",J,"type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],[4,"ngIf"],k,V,Y,B,j,["for","l",1,"cd-col-form-label"],K,["type","number","id","l","name","l","placeholder","Coding 
chunks...","formControlName","l","min","1",1,"form-control"],N,W,ee,_e,oe,["for","crushLocality",1,"cd-col-form-label"],te,["id","crushLocality","name","crushLocality","formControlName","crushLocality",1,"form-control"],ie,ne,["for","scalar_mds",1,"cd-col-form-label"],se,["id","scalar_mds","name","scalar_mds","formControlName","scalar_mds",1,"form-control"],["for","technique",1,"cd-col-form-label"],ae,["id","technique","name","technique","formControlName","technique",1,"form-control"],["for","packetSize",1,"cd-col-form-label"],le,["type","number","id","packetSize","name","packetSize","placeholder","Packetsize...","formControlName","packetSize","min","1",1,"form-control"],re,ce]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.YNc(14,x_,2,0,"span",12),e.YNc(15,y_,2,0,"span",12),e.YNc(16,U_,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(17,"div",7),e.TgZ(18,"label",13),e.TgZ(19,"span",14),e.SDv(20,15),e.qZA(),e._UZ(21,"cd-helper",16),e.qZA(),e.TgZ(22,"div",10),e.TgZ(23,"select",17),e.YNc(24,q_,2,0,"option",18),e.YNc(25,H_,2,2,"option",19),e.qZA(),e.YNc(26,G_,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(27,"div",7),e.TgZ(28,"label",20),e.TgZ(29,"span",14),e.SDv(30,21),e.qZA(),e._UZ(31,"cd-helper",16),e.qZA(),e.TgZ(32,"div",10),e._UZ(33,"input",22),e.YNc(34,z_,2,0,"span",12),e.YNc(35,X_,2,0,"span",12),e.YNc(36,w_,2,1,"span",12),e.YNc(37,Q_,2,0,"span",12),e.YNc(38,J_,2,0,"span",12),e.YNc(39,k_,2,1,"span",23),e.qZA(),e.qZA(),e.TgZ(40,"div",7),e.TgZ(41,"label",24),e.TgZ(42,"span",14),e.SDv(43,25),e.qZA(),e._UZ(44,"cd-helper",16),e.qZA(),e.TgZ(45,"div",10),e._UZ(46,"input",26),e.YNc(47,V_,2,0,"span",12),e.YNc(48,Y_,2,0,"span",12),e.YNc(49,B_,2,1,"span",12),e.qZA(),e.qZA(),e.YNc(50,W_,9,3,"div",27),e.YNc(51,so,15,6,"div",27),e.YNc(52,co,12,5,"div",27),e.TgZ(53,"div",7),e
.TgZ(54,"label",28),e.ynx(55),e.SDv(56,29),e.BQk(),e._UZ(57,"cd-helper",16),e.qZA(),e.TgZ(58,"div",10),e.TgZ(59,"select",30),e.YNc(60,Oo,2,0,"option",18),e.YNc(61,uo,2,3,"option",19),e.qZA(),e.qZA(),e.qZA(),e.YNc(62,go,10,4,"div",27),e.YNc(63,Ro,8,6,"div",27),e.YNc(64,Co,8,2,"div",27),e.YNc(65,ho,8,2,"div",27),e.TgZ(66,"div",7),e.TgZ(67,"label",31),e.ynx(68),e.SDv(69,32),e.BQk(),e._UZ(70,"cd-helper",16),e.qZA(),e.TgZ(71,"div",10),e.TgZ(72,"select",33),e.YNc(73,To,2,0,"option",18),e.YNc(74,So,2,2,"option",19),e.qZA(),e.qZA(),e.qZA(),e.TgZ(75,"div",7),e.TgZ(76,"label",34),e.ynx(77),e.SDv(78,35),e.BQk(),e._UZ(79,"cd-helper",16),e.qZA(),e.TgZ(80,"div",10),e.TgZ(81,"select",36),e.TgZ(82,"option",37),e.SDv(83,38),e.qZA(),e.YNc(84,Lo,2,2,"option",19),e.qZA(),e.TgZ(85,"span",39),e.SDv(86,40),e.qZA(),e.qZA(),e.qZA(),e.TgZ(87,"div",7),e.TgZ(88,"label",41),e.ynx(89),e.SDv(90,42),e.BQk(),e._UZ(91,"cd-helper",16),e.qZA(),e.TgZ(92,"div",10),e._UZ(93,"input",43),e.qZA(),e.qZA(),e.qZA(),e.TgZ(94,"div",44),e.TgZ(95,"cd-form-button-panel",45),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.ALo(96,"titlecase"),e.ALo(97,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_){const 
n=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,41,o.action))(e.lcZ(4,43,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.form),e.xp6(8),e.Q6J("ngIf",o.form.showError("name",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"pattern")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"uniqueName")),e.xp6(5),e.Q6J("html",o.tooltips.plugins[o.plugin].description),e.xp6(3),e.Q6J("ngIf",!o.plugins),e.xp6(1),e.Q6J("ngForOf",o.plugins),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"required")),e.xp6(5),e.Q6J("html",o.tooltips.k),e.xp6(3),e.Q6J("ngIf",o.form.showError("k",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"max")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"unequal")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"kLowerM")),e.xp6(1),e.Q6J("ngIf","lrc"===o.plugin),e.xp6(5),e.Q6J("html",o.tooltips.m),e.xp6(3),e.Q6J("ngIf",o.form.showError("m",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("m",n,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("m",n,"max")),e.xp6(1),e.Q6J("ngIf","shec"===o.plugin),e.xp6(1),e.Q6J("ngIf","clay"===o.plugin),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.LRC),e.xp6(5),e.Q6J("html",o.tooltips.crushFailureDomain),e.xp6(3),e.Q6J("ngIf",!o.failureDomains),e.xp6(1),e.Q6J("ngForOf",o.failureDomainKeys),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.LRC),e.xp6(1),e.Q6J("ngIf",o.PLUGIN.CLAY===o.plugin),e.xp6(1),e.Q6J("ngIf",e.kEZ(49,De,o.PLUGIN.JERASURE,o.PLUGIN.ISA,o.PLUGIN.CLAY).includes(o.plugin)),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.JERASURE),e.xp6(5),e.Q6J("html",o.tooltips.crushRoot),e.xp6(3),e.Q6J("ngIf",!o.buckets),e.xp6(1),e.Q6J("ngForOf",o.buckets),e.xp6(5),e.Q6J("html",o.tooltips.crushDeviceClass),e.xp6(5),e.Q6J("ngForOf",o.devices),e.xp6(2),e.pQV(o.deviceCount),e.QtT(86),e.xp6(5),e.Q6J("html",o.tooltips.directory),e.xp6(4),e.Q6J("form",o.form)("submitText",e.lcZ(96,45,o.action)+" 
"+e.lcZ(97,47,o.resource))}},directives:[$e.z,a._Y,a.JL,fe.V,a.sg,Pe.P,Ee.o,a.Fj,ge.b,a.JJ,a.u,pe.U,C.O5,Re.S,a.EJ,C.sg,a.wV,a.qQ,a.YN,a.Kr,me.p,g._L,C.mk],pipes:[C.rS,Ce.m],styles:[""]}),t})();var Fo=r(7022);class No{constructor(){this.erasureInfo=!1,this.crushInfo=!1,this.pgs=1,this.poolTypes=["erasure","replicated"],this.applications={selected:[],default:["cephfs","rbd","rgw"],available:[],validators:[a.kI.pattern("[A-Za-z0-9_]+"),a.kI.maxLength(128)],messages:new Fo.a({empty:"No applications added",selectionLimit:{text:"Applications limit reached",tooltip:"A pool can only have up to four applications definitions."},customValidations:{pattern:"Allowed characters '_a-zA-Z0-9'",maxlength:"Maximum length is 128 characters"},filter:"Filter or add applications'",add:"Add application"})}}}var Ze=r(63285),he=r(74937),bo=r(63622),vo=r(60192),Io=r(17932),$o=r(54555),Do=r(30490),xe=r(61350);const Zo=["crushInfoTabs"],xo=["crushDeletionBtn"],yo=["ecpInfoTabs"],Uo=["ecpDeletionBtn"];function qo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,42),e.qZA())}function Ho(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,43),e.qZA())}function Go(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,44),e.qZA())}function zo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,45),e.qZA())}function Xo(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function wo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,47),e.qZA())}function Qo(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Jo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,58),e.qZA())}function ko(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,59),e.qZA())}function Vo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,60),e.qZA())}function Yo(t,i){1&t&&(e.TgZ(0,"span",55),e.SDv(1,61),e.qZA())}function Bo(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",8),e.TgZ(1,"label",52),e.SDv(2,53),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"input",54),e.NdJ("focus",function(){return 
e.CHM(_),e.oxw(3).externalPgChange=!1})("blur",function(){return e.CHM(_),e.oxw(3).alignPgs()}),e.qZA(),e.YNc(5,Jo,2,0,"span",13),e.YNc(6,ko,2,0,"span",13),e.YNc(7,Vo,2,0,"span",13),e.TgZ(8,"span",55),e._UZ(9,"cd-doc",56),e.qZA(),e.YNc(10,Yo,2,0,"span",57),e.qZA(),e.qZA()}if(2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(5),e.Q6J("ngIf",o.form.showError("pgNum",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("pgNum",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("pgNum",_,"34")),e.xp6(3),e.Q6J("ngIf",o.externalPgChange)}}function jo(t,i){if(1&t&&(e.TgZ(0,"span",41),e.TgZ(1,"ul",66),e.TgZ(2,"li"),e.SDv(3,67),e.qZA(),e.TgZ(4,"li"),e.SDv(5,68),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(3),e.pQV(_.getMinSize()),e.QtT(3),e.xp6(2),e.pQV(_.getMaxSize()),e.QtT(5)}}function Ko(t,i){if(1&t&&(e.TgZ(0,"span",41),e.SDv(1,69),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.pQV(_.getMinSize())(_.getMaxSize()),e.QtT(1)}}function Wo(t,i){1&t&&(e.TgZ(0,"span",70),e.SDv(1,71),e.qZA())}function et(t,i){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",62),e.SDv(2,63),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",64),e.YNc(5,jo,6,2,"span",13),e.YNc(6,Ko,2,2,"span",13),e.YNc(7,Wo,2,0,"span",65),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(4),e.Q6J("max",o.getMaxSize())("min",o.getMinSize()),e.xp6(1),e.Q6J("ngIf",o.form.showError("size",_)),e.xp6(1),e.Q6J("ngIf",o.form.showError("size",_)),e.xp6(1),e.Q6J("ngIf",1===o.form.getValue("size"))}}function _t(t,i){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",72),e.SDv(2,73),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"div",74),e._UZ(5,"input",75),e.TgZ(6,"label",76),e.SDv(7,77),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function ot(t,i){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"div",8),e.TgZ(2,"label",48),e.SDv(3,49),e.qZA(),e.TgZ(4,"div",11),e.TgZ(5,"select",50),e.YNc(6,Qo,2,2,"option",19),e.qZA(),e.qZA(),e.qZA(),e.YNc(7,Bo,11,4,"div",51),e.YNc(8,et,8,5,"div",51),e.YNc(9,_t,8,0,"div",51),e.qZA()),2&t){const 
_=e.oxw(2);e.xp6(6),e.Q6J("ngForOf",_.pgAutoscaleModes),e.xp6(1),e.Q6J("ngIf","on"!==_.form.getValue("pgAutoscaleMode")),e.xp6(1),e.Q6J("ngIf",_.isReplicated),e.xp6(1),e.Q6J("ngIf",_.info.is_all_bluestore&&_.isErasure)}}function tt(t,i){if(1&t&&e._UZ(0,"i",78),2&t){const _=e.oxw(2);e.Gre("",_.icons.warning," icon-warning-color")}}function it(t,i){1&t&&(e.TgZ(0,"option",17),e.SDv(1,93),e.qZA())}function nt(t,i){1&t&&(e.TgZ(0,"option",94),e.SDv(1,95),e.qZA()),2&t&&e.Q6J("ngValue",null)}function st(t,i){1&t&&(e.TgZ(0,"option",94),e.SDv(1,96),e.qZA()),2&t&&e.Q6J("ngValue",null)}function at(t,i){if(1&t&&(e.TgZ(0,"option",94),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}const F=function(t){return[t]};function lt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",97),e.NdJ("click",function(){return e.CHM(_),e.oxw(4).addErasureCodeProfile()}),e._UZ(1,"i",89),e.qZA()}if(2&t){const _=e.oxw(4);e.xp6(1),e.Q6J("ngClass",e.VKq(1,F,_.icons.add))}}function rt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",98,99),e.NdJ("click",function(){return e.CHM(_),e.oxw(4).deleteErasureCodeProfile()}),e._UZ(2,"i",89),e.qZA()}if(2&t){const _=e.oxw(4);e.xp6(2),e.Q6J("ngClass",e.VKq(1,F,_.icons.trash))}}const ct=function(){return["name"]};function Ot(t,i){if(1&t&&e._UZ(0,"cd-table-key-value",110),2&t){const _=e.oxw(5);e.Q6J("renderObjects",!0)("hideKeys",e.DdM(4,ct))("data",_.form.getValue("erasureProfile"))("autoReload",!1)}}function dt(t,i){1&t&&(e.TgZ(0,"span"),e.SDv(1,113),e.qZA())}function ut(t,i){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.xp6(1),e.hij(" ",_," ")}}function ft(t,i){if(1&t&&(e.TgZ(0,"ul"),e.YNc(1,ut,2,1,"li",114),e.qZA()),2&t){const _=e.oxw(6);e.xp6(1),e.Q6J("ngForOf",_.ecpUsage)}}function Pt(t,i){if(1&t&&(e.YNc(0,dt,2,0,"ng-template",null,111,e.W1O),e.YNc(2,ft,2,1,"ul",112)),2&t){const _=e.MAs(1),o=e.oxw(5);e.xp6(2),e.Q6J("ngIf",o.ecpUsage)("ngIfElse",_)}}function 
Et(t,i){if(1&t&&(e.TgZ(0,"span",100),e.TgZ(1,"ul",101,102),e.TgZ(3,"li",103),e.TgZ(4,"a",104),e.SDv(5,105),e.qZA(),e.YNc(6,Ot,1,5,"ng-template",106),e.qZA(),e.TgZ(7,"li",107),e.TgZ(8,"a",104),e.SDv(9,108),e.qZA(),e.YNc(10,Pt,3,2,"ng-template",106),e.qZA(),e.qZA(),e._UZ(11,"div",109),e.qZA()),2&t){const _=e.MAs(2);e.xp6(11),e.Q6J("ngbNavOutlet",_)}}const ye=function(t){return{active:t}};function gt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",8),e.TgZ(1,"label",80),e.SDv(2,81),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"div",82),e.TgZ(5,"select",83),e.YNc(6,it,2,0,"option",84),e.YNc(7,nt,2,1,"option",85),e.YNc(8,st,2,1,"option",85),e.YNc(9,at,2,2,"option",86),e.qZA(),e.TgZ(10,"span",87),e.TgZ(11,"button",88),e.NdJ("click",function(){e.CHM(_);const n=e.oxw(3);return n.data.erasureInfo=!n.data.erasureInfo}),e._UZ(12,"i",89),e.qZA(),e.YNc(13,lt,2,3,"button",90),e.YNc(14,rt,3,3,"button",91),e.qZA(),e.qZA(),e.YNc(15,Et,12,1,"span",92),e.qZA(),e.qZA()}if(2&t){const _=e.oxw(3);e.xp6(6),e.Q6J("ngIf",!_.ecProfiles),e.xp6(1),e.Q6J("ngIf",_.ecProfiles&&0===_.ecProfiles.length),e.xp6(1),e.Q6J("ngIf",_.ecProfiles&&_.ecProfiles.length>0),e.xp6(1),e.Q6J("ngForOf",_.ecProfiles),e.xp6(2),e.Q6J("ngClass",e.VKq(9,ye,_.data.erasureInfo)),e.xp6(1),e.Q6J("ngClass",e.VKq(11,F,_.icons.questionCircle)),e.xp6(1),e.Q6J("ngIf",!_.editing),e.xp6(1),e.Q6J("ngIf",!_.editing),e.xp6(1),e.Q6J("ngIf",_.data.erasureInfo&&_.form.getValue("erasureProfile"))}}function pt(t,i){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",115),e.SDv(2,116),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"span",55),e.SDv(5,117),e.qZA(),e.qZA(),e.qZA())}function Rt(t,i){1&t&&(e.TgZ(0,"span",55),e.TgZ(1,"span"),e.SDv(2,120),e.qZA(),e._uU(3,"\xa0 "),e.qZA())}function mt(t,i){if(1&t&&(e.TgZ(0,"option",94),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.rule_name," ")}}function Ct(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",97),e.NdJ("click",function(){return 
e.CHM(_),e.oxw(5).addCrushRule()}),e._UZ(1,"i",89),e.qZA()}if(2&t){const _=e.oxw(5);e.xp6(1),e.Q6J("ngClass",e.VKq(1,F,_.icons.add))}}function Mt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",126,127),e.NdJ("click",function(){return e.CHM(_),e.oxw(5).deleteCrushRule()}),e._UZ(2,"i",89),e.qZA()}if(2&t){const _=e.oxw(5);e.xp6(2),e.Q6J("ngClass",e.VKq(1,F,_.icons.trash))}}const ht=function(){return["steps","type","rule_name"]};function Tt(t,i){if(1&t&&e._UZ(0,"cd-table-key-value",110),2&t){const _=e.oxw(6);e.Q6J("renderObjects",!1)("hideKeys",e.DdM(4,ht))("data",_.form.getValue("crushRule"))("autoReload",!1)}}function St(t,i){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=i.$implicit,o=e.oxw(7);e.xp6(1),e.hij(" ",o.describeCrushStep(_)," ")}}function Lt(t,i){if(1&t&&(e.TgZ(0,"ol"),e.YNc(1,St,2,1,"li",114),e.qZA()),2&t){const _=e.oxw(6);e.xp6(1),e.Q6J("ngForOf",_.form.get("crushRule").value.steps)}}function At(t,i){1&t&&(e.TgZ(0,"span"),e.SDv(1,136),e.qZA())}function Ft(t,i){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.xp6(1),e.hij(" ",_," ")}}function Nt(t,i){if(1&t&&(e.TgZ(0,"ul"),e.YNc(1,Ft,2,1,"li",114),e.qZA()),2&t){const _=e.oxw(7);e.xp6(1),e.Q6J("ngForOf",_.crushUsage)}}function bt(t,i){if(1&t&&(e.YNc(0,At,2,0,"ng-template",null,135,e.W1O),e.YNc(2,Nt,2,1,"ul",112)),2&t){const _=e.MAs(1),o=e.oxw(6);e.xp6(2),e.Q6J("ngIf",o.crushUsage)("ngIfElse",_)}}function vt(t,i){if(1&t&&(e.TgZ(0,"div",128),e.TgZ(1,"ul",101,129),e.TgZ(3,"li",130),e.TgZ(4,"a",104),e.SDv(5,131),e.qZA(),e.YNc(6,Tt,1,5,"ng-template",106),e.qZA(),e.TgZ(7,"li",132),e.TgZ(8,"a",104),e.SDv(9,133),e.qZA(),e.YNc(10,Lt,2,1,"ng-template",106),e.qZA(),e.TgZ(11,"li",107),e.TgZ(12,"a",104),e.SDv(13,134),e.qZA(),e.YNc(14,bt,3,2,"ng-template",106),e.qZA(),e.qZA(),e._UZ(15,"div",109),e.qZA()),2&t){const _=e.MAs(2);e.xp6(15),e.Q6J("ngbNavOutlet",_)}}function It(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,137),e.qZA())}function $t(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,138),e.qZA())}function 
Dt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div"),e.TgZ(1,"div",82),e.TgZ(2,"select",121),e.TgZ(3,"option",94),e.SDv(4,122),e.qZA(),e.YNc(5,mt,2,2,"option",86),e.qZA(),e.TgZ(6,"span",87),e.TgZ(7,"button",123),e.NdJ("click",function(){e.CHM(_);const n=e.oxw(4);return n.data.crushInfo=!n.data.crushInfo}),e._UZ(8,"i",89),e.qZA(),e.YNc(9,Ct,2,3,"button",90),e.YNc(10,Mt,3,3,"button",124),e.qZA(),e.qZA(),e.YNc(11,vt,16,1,"div",125),e.YNc(12,It,2,0,"span",13),e.YNc(13,$t,2,0,"span",13),e.qZA()}if(2&t){e.oxw(3);const _=e.MAs(2),o=e.oxw();e.xp6(3),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",o.current.rules),e.xp6(2),e.Q6J("ngClass",e.VKq(9,ye,o.data.crushInfo)),e.xp6(1),e.Q6J("ngClass",e.VKq(11,F,o.icons.questionCircle)),e.xp6(1),e.Q6J("ngIf",o.isReplicated&&!o.editing),e.xp6(1),e.Q6J("ngIf",o.isReplicated&&!o.editing),e.xp6(1),e.Q6J("ngIf",o.data.crushInfo&&o.form.getValue("crushRule")),e.xp6(1),e.Q6J("ngIf",o.form.showError("crushRule",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("crushRule",_,"tooFewOsds"))}}function Zt(t,i){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",115),e.SDv(2,118),e.qZA(),e.TgZ(3,"div",11),e.YNc(4,Rt,4,0,"ng-template",null,119,e.W1O),e.YNc(6,Dt,14,13,"div",112),e.qZA(),e.qZA()),2&t){const _=e.MAs(5),o=e.oxw(3);e.xp6(6),e.Q6J("ngIf",o.current.rules.length>0)("ngIfElse",_)}}function xt(t,i){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"legend"),e.SDv(2,79),e.qZA(),e.YNc(3,gt,16,13,"div",51),e.YNc(4,pt,6,0,"div",51),e.YNc(5,Zt,7,2,"div",51),e.qZA()),2&t){const _=e.oxw(2);e.xp6(3),e.Q6J("ngIf",_.isErasure),e.xp6(1),e.Q6J("ngIf",_.isErasure&&!_.editing),e.xp6(1),e.Q6J("ngIf",_.isReplicated||_.editing)}}function yt(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Ut(t,i){1&t&&(e.TgZ(0,"option",17),e.SDv(1,156),e.qZA())}function qt(t,i){1&t&&(e.TgZ(0,"option",17),e.SDv(1,157),e.qZA())}function Ht(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const 
_=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Gt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,158),e.qZA())}function zt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,159),e.qZA())}function Xt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,160),e.qZA())}function wt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,161),e.qZA())}function Qt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,162),e.qZA())}function Jt(t,i){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"div",8),e.TgZ(2,"label",144),e.SDv(3,145),e.qZA(),e.TgZ(4,"div",11),e.TgZ(5,"select",146),e.YNc(6,Ut,2,0,"option",84),e.YNc(7,qt,2,0,"option",84),e.YNc(8,Ht,2,2,"option",19),e.qZA(),e.qZA(),e.qZA(),e.TgZ(9,"div",8),e.TgZ(10,"label",147),e.SDv(11,148),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",149),e.YNc(14,Gt,2,0,"span",13),e.YNc(15,zt,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(16,"div",8),e.TgZ(17,"label",150),e.SDv(18,151),e.qZA(),e.TgZ(19,"div",11),e._UZ(20,"input",152),e.YNc(21,Xt,2,0,"span",13),e.YNc(22,wt,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(23,"div",8),e.TgZ(24,"label",153),e.SDv(25,154),e.qZA(),e.TgZ(26,"div",11),e._UZ(27,"input",155),e.YNc(28,Qt,2,0,"span",13),e.qZA(),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(6),e.Q6J("ngIf",!o.info.compression_algorithms),e.xp6(1),e.Q6J("ngIf",o.info.compression_algorithms&&0===o.info.compression_algorithms.length),e.xp6(1),e.Q6J("ngForOf",o.info.compression_algorithms),e.xp6(6),e.Q6J("ngIf",o.form.showError("minBlobSize",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("minBlobSize",_,"maximum")),e.xp6(6),e.Q6J("ngIf",o.form.showError("maxBlobSize",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("maxBlobSize",_,"minimum")),e.xp6(6),e.Q6J("ngIf",o.form.showError("ratio",_,"min")||o.form.showError("ratio",_,"max"))}}function 
kt(t,i){if(1&t&&(e.TgZ(0,"div",139),e.TgZ(1,"legend"),e.SDv(2,140),e.qZA(),e.TgZ(3,"div",8),e.TgZ(4,"label",141),e.SDv(5,142),e.qZA(),e.TgZ(6,"div",11),e.TgZ(7,"select",143),e.YNc(8,yt,2,2,"option",19),e.qZA(),e.qZA(),e.qZA(),e.YNc(9,Jt,29,8,"div",20),e.qZA()),2&t){const _=e.oxw(2);e.xp6(8),e.Q6J("ngForOf",_.info.compression_modes),e.xp6(1),e.Q6J("ngIf",_.hasCompressionEnabled())}}function Vt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,163),e.qZA())}function Yt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",1),e.TgZ(1,"form",2,3),e.TgZ(3,"div",4),e.TgZ(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.TgZ(9,"div",8),e.TgZ(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",12),e.YNc(14,qo,2,0,"span",13),e.YNc(15,Ho,2,0,"span",13),e.YNc(16,Go,2,0,"span",13),e.YNc(17,zo,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(18,"div",8),e.TgZ(19,"label",14),e.SDv(20,15),e.qZA(),e.TgZ(21,"div",11),e.TgZ(22,"select",16),e.TgZ(23,"option",17),e.SDv(24,18),e.qZA(),e.YNc(25,Xo,2,2,"option",19),e.qZA(),e.YNc(26,wo,2,0,"span",13),e.qZA(),e.qZA(),e.YNc(27,ot,10,4,"div",20),e.TgZ(28,"div",8),e.TgZ(29,"label",21),e.SDv(30,22),e.qZA(),e.TgZ(31,"div",11),e.TgZ(32,"cd-select-badges",23),e.NdJ("selection",function(){return 
e.CHM(_),e.oxw().appSelection()}),e.qZA(),e.YNc(33,tt,1,3,"i",24),e.qZA(),e.qZA(),e.YNc(34,xt,6,3,"div",20),e.YNc(35,kt,10,2,"div",25),e.TgZ(36,"div"),e.TgZ(37,"legend"),e.SDv(38,26),e.qZA(),e.TgZ(39,"div",8),e.TgZ(40,"label",27),e.ynx(41),e.SDv(42,28),e.BQk(),e.TgZ(43,"cd-helper"),e.TgZ(44,"span"),e.SDv(45,29),e.qZA(),e._UZ(46,"br"),e.TgZ(47,"span"),e.SDv(48,30),e.qZA(),e.qZA(),e.qZA(),e.TgZ(49,"div",11),e._UZ(50,"input",31),e.qZA(),e.qZA(),e.TgZ(51,"div",8),e.TgZ(52,"label",32),e.ynx(53),e.SDv(54,33),e.BQk(),e.TgZ(55,"cd-helper"),e.TgZ(56,"span"),e.SDv(57,34),e.qZA(),e._UZ(58,"br"),e.TgZ(59,"span"),e.SDv(60,35),e.qZA(),e.qZA(),e.qZA(),e.TgZ(61,"div",11),e._UZ(62,"input",36),e.YNc(63,Vt,2,0,"span",13),e.qZA(),e.qZA(),e.qZA(),e.TgZ(64,"div",37),e.TgZ(65,"cd-rbd-configuration-form",38),e.NdJ("changes",function(n){return e.CHM(_),e.oxw().currentConfigurationValues=n()}),e.qZA(),e.qZA(),e.qZA(),e.TgZ(66,"div",39),e.TgZ(67,"cd-form-button-panel",40),e.NdJ("submitActionEvent",function(){return e.CHM(_),e.oxw().submit()}),e.ALo(68,"titlecase"),e.ALo(69,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&t){const 
_=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.form),e.xp6(6),e.pQV(e.lcZ(6,25,o.action))(e.lcZ(7,27,o.resource)),e.QtT(5),e.xp6(7),e.Q6J("ngIf",o.form.showError("name",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"uniqueName")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"rbdPool")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"pattern")),e.xp6(8),e.Q6J("ngForOf",o.data.poolTypes),e.xp6(1),e.Q6J("ngIf",o.form.showError("poolType",_,"required")),e.xp6(1),e.Q6J("ngIf",o.isReplicated||o.isErasure),e.xp6(5),e.Q6J("customBadges",!0)("customBadgeValidators",o.data.applications.validators)("messages",o.data.applications.messages)("data",o.data.applications.selected)("options",o.data.applications.available)("selectionLimit",4),e.xp6(1),e.Q6J("ngIf",o.data.applications.selected<=0),e.xp6(1),e.Q6J("ngIf",o.isErasure||o.isReplicated),e.xp6(1),e.Q6J("ngIf",o.info.is_all_bluestore),e.xp6(28),e.Q6J("ngIf",o.form.showError("max_objects",_,"min")),e.xp6(1),e.Q6J("hidden",o.isErasure||-1===o.data.applications.selected.indexOf("rbd")),e.xp6(1),e.Q6J("form",o.form)("initializeData",o.initializeConfigData),e.xp6(2),e.Q6J("form",o.form)("submitText",e.lcZ(68,29,o.action)+" "+e.lcZ(69,31,o.resource))}}let Ue=(()=>{class t extends R_.E{constructor(_,o,n,s,c,d,P,p,R,h,T){super(),this.dimlessBinaryPipe=_,this.route=o,this.router=n,this.modalService=s,this.poolService=c,this.authStorageService=d,this.formatter=P,this.taskWrapper=p,this.ecpService=R,this.crushRuleService=h,this.actionLabels=T,this.editing=!1,this.isReplicated=!1,this.isErasure=!1,this.data=new No,this.externalPgChange=!1,this.current={rules:[]},this.initializeConfigData=new E_.t(1),this.currentConfigurationValues={},this.icons=b.P,this.crushUsage=void 0,this.ecpUsage=void 
0,this.crushRuleMaxSize=10,this.editing=this.router.url.startsWith(`/pool/${M.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.resource="pool",this.authenticate(),this.createForm()}authenticate(){if(this.permission=this.authStorageService.getPermissions().pool,!this.permission.read||!this.permission.update&&this.editing||!this.permission.create&&!this.editing)throw new g_._2}createForm(){const _=new be.d({mode:new a.NI("none"),algorithm:new a.NI(""),minBlobSize:new a.NI("",{updateOn:"blur"}),maxBlobSize:new a.NI("",{updateOn:"blur"}),ratio:new a.NI("",{updateOn:"blur"})});this.form=new be.d({name:new a.NI("",{validators:[a.kI.pattern(/^[.A-Za-z0-9_/-]+$/),a.kI.required,E.h.custom("rbdPool",()=>this.form&&this.form.getValue("name").includes("/")&&this.data&&-1!==this.data.applications.selected.indexOf("rbd"))]}),poolType:new a.NI("",{validators:[a.kI.required]}),crushRule:new a.NI(null,{validators:[E.h.custom("tooFewOsds",o=>this.info&&o&&this.info.osd_count<1),E.h.custom("required",o=>this.isReplicated&&this.info.crush_rules_replicated.length>0&&!o)]}),size:new a.NI("",{updateOn:"blur"}),erasureProfile:new a.NI(null),pgNum:new a.NI("",{validators:[a.kI.required]}),pgAutoscaleMode:new a.NI(null),ecOverwrites:new a.NI(!1),compression:_,max_bytes:new a.NI(""),max_objects:new a.NI(0)},[E.h.custom("form",()=>null)])}ngOnInit(){this.poolService.getInfo().subscribe(_=>{this.initInfo(_),this.editing?this.initEditMode():(this.setAvailableApps(),this.loadingReady()),this.listenToChanges(),this.setComplexValidators()})}initInfo(_){this.pgAutoscaleModes=_.pg_autoscale_modes,this.form.silentSet("pgAutoscaleMode",_.pg_autoscale_default_mode),this.form.silentSet("algorithm",_.bluestore_compression_algorithm),this.info=_,this.initEcp(_.erasure_code_profiles)}initEcp(_){this.setListControlStatus("erasureProfile",_),this.ecProfiles=_}setListControlStatus(_,o){const 
n=this.form.get(_),s=n.value;1!==o.length||s&&u().isEqual(s,o[0])?0===o.length&&s&&n.setValue(null):n.setValue(o[0]),o.length<=1?n.enabled&&n.disable():n.disabled&&n.enable()}initEditMode(){this.disableForEdit(),this.routeParamsSubscribe=this.route.params.subscribe(_=>this.poolService.get(_.name).subscribe(o=>{this.data.pool=o,this.initEditFormData(o),this.loadingReady()}))}disableForEdit(){["poolType","crushRule","size","erasureProfile","ecOverwrites"].forEach(_=>this.form.get(_).disable())}initEditFormData(_){this.initializeConfigData.next({initialData:_.configuration,sourceType:m_.h.pool}),this.poolTypeChange(_.type);const o=this.info.crush_rules_replicated.concat(this.info.crush_rules_erasure),n={name:_.pool_name,poolType:_.type,crushRule:o.find(s=>s.rule_name===_.crush_rule),size:_.size,erasureProfile:this.ecProfiles.find(s=>s.name===_.erasure_code_profile),pgAutoscaleMode:_.pg_autoscale_mode,pgNum:_.pg_num,ecOverwrites:_.flags_names.includes("ec_overwrites"),mode:_.options.compression_mode,algorithm:_.options.compression_algorithm,minBlobSize:this.dimlessBinaryPipe.transform(_.options.compression_min_blob_size),maxBlobSize:this.dimlessBinaryPipe.transform(_.options.compression_max_blob_size),ratio:_.options.compression_required_ratio,max_bytes:this.dimlessBinaryPipe.transform(_.quota_max_bytes),max_objects:_.quota_max_objects};Object.keys(n).forEach(s=>{const c=n[s];!u().isUndefined(c)&&""!==c&&this.form.silentSet(s,c)}),this.data.pgs=this.form.getValue("pgNum"),this.setAvailableApps(this.data.applications.default.concat(_.application_metadata)),this.data.applications.selected=_.application_metadata}setAvailableApps(_=this.data.applications.default){this.data.applications.available=u().uniq(_.sort()).map(o=>new p_.$(!1,o,""))}listenToChanges(){this.listenToChangesDuringAddEdit(),this.editing||this.listenToChangesDuringAdd()}listenToChangesDuringAddEdit(){this.form.get("pgNum").valueChanges.subscribe(_=>{const 
o=_-this.data.pgs;1===Math.abs(o)&&2!==_?this.doPgPowerJump(o):this.data.pgs=_})}doPgPowerJump(_){const o=this.calculatePgPower()+_;this.setPgs(-1===_?Math.round(o):Math.floor(o))}calculatePgPower(_=this.form.getValue("pgNum")){return Math.log(_)/Math.log(2)}setPgs(_){const o=Math.pow(2,_<0?0:_);this.data.pgs=o,this.form.silentSet("pgNum",o)}listenToChangesDuringAdd(){this.form.get("poolType").valueChanges.subscribe(_=>{this.poolTypeChange(_)}),this.form.get("crushRule").valueChanges.subscribe(_=>{this.crushDeletionBtn&&this.crushDeletionBtn.isOpen()&&this.crushDeletionBtn.close(),_&&(this.setCorrectMaxSize(_),this.crushRuleIsUsedBy(_.rule_name),this.replicatedRuleChange(),this.pgCalc())}),this.form.get("size").valueChanges.subscribe(()=>{this.pgCalc()}),this.form.get("erasureProfile").valueChanges.subscribe(_=>{this.ecpDeletionBtn&&this.ecpDeletionBtn.isOpen()&&this.ecpDeletionBtn.close(),_&&(this.ecpIsUsedBy(_.name),this.pgCalc())}),this.form.get("mode").valueChanges.subscribe(()=>{["minBlobSize","maxBlobSize","ratio"].forEach(_=>{this.form.get(_).updateValueAndValidity({emitEvent:!1})})}),this.form.get("minBlobSize").valueChanges.subscribe(()=>{this.form.get("maxBlobSize").updateValueAndValidity({emitEvent:!1})}),this.form.get("maxBlobSize").valueChanges.subscribe(()=>{this.form.get("minBlobSize").updateValueAndValidity({emitEvent:!1})})}poolTypeChange(_){if("replicated"===_?this.setTypeBooleans(!0,!1):this.setTypeBooleans(!1,"erasure"===_),!_||!this.info)return void(this.current.rules=[]);const o=this.info["crush_rules_"+_]||[];this.current.rules=o,!this.editing&&(this.isReplicated&&this.setListControlStatus("crushRule",o),this.replicatedRuleChange(),this.pgCalc())}setTypeBooleans(_,o){this.isReplicated=_,this.isErasure=o}replicatedRuleChange(){if(!this.isReplicated)return;const _=this.form.get("size");let o=this.form.getValue("size")||3;const 
n=this.getMinSize(),s=this.getMaxSize();os&&(o=s),o!==_.value&&this.form.silentSet("size",o)}getMinSize(){return!this.info||this.info.osd_count<1?0:1}getMaxSize(){const _=this.form.getValue("crushRule");return this.info?_?_.usable_size:Math.min(this.info.osd_count,3):0}pgCalc(){const _=this.form.getValue("poolType");if(!this.info||this.form.get("pgNum").dirty||!_)return;const o=100*this.info.osd_count,n=this.isReplicated?this.replicatedPgCalc(o):this.erasurePgCalc(o);if(!n)return;const s=this.data.pgs;this.alignPgs(n),this.externalPgChange||(this.externalPgChange=s!==this.data.pgs)}setCorrectMaxSize(_=this.form.getValue("crushRule")){if(!_)return;const n=S.searchFailureDomains(this.info.nodes,_.steps[0].item_name)[_.steps[1].type];_.usable_size=Math.min(n?n.length:this.crushRuleMaxSize,this.crushRuleMaxSize)}replicatedPgCalc(_){const o=this.form.get("size"),n=o.value;return o.valid&&n>0?_/n:0}erasurePgCalc(_){const o=this.form.get("erasureProfile"),n=o.value;return(o.valid||o.disabled)&&n?_/(n.k+n.m):0}alignPgs(_=this.form.getValue("pgNum")){this.setPgs(Math.round(this.calculatePgPower(_<1?1:_)))}setComplexValidators(){this.editing?this.form.get("name").setValidators([this.form.get("name").validator,E.h.custom("uniqueName",_=>this.data.pool&&this.info&&-1!==this.info.pool_names.indexOf(_)&&this.info.pool_names.indexOf(_)!==this.info.pool_names.indexOf(this.data.pool.pool_name))]):(E.h.validateIf(this.form.get("size"),()=>this.isReplicated,[E.h.custom("min",_=>this.form.getValue("size")&&_this.form.getValue("size")&&this.getMaxSize()<_)]),this.form.get("name").setValidators([this.form.get("name").validator,E.h.custom("uniqueName",_=>this.info&&-1!==this.info.pool_names.indexOf(_))])),this.setCompressionValidators()}setCompressionValidators(){E.h.validateIf(this.form.get("minBlobSize"),()=>this.hasCompressionEnabled(),[a.kI.min(0),E.h.custom("maximum",_=>this.oddBlobSize(_,this.form.getValue("maxBlobSize")))]),E.h.validateIf(this.form.get("maxBlobSize"),()=>this.hasCo
mpressionEnabled(),[a.kI.min(0),E.h.custom("minimum",_=>this.oddBlobSize(this.form.getValue("minBlobSize"),_))]),E.h.validateIf(this.form.get("ratio"),()=>this.hasCompressionEnabled(),[a.kI.min(0),a.kI.max(1)])}oddBlobSize(_,o){const n=this.formatter.toBytes(_),s=this.formatter.toBytes(o);return Boolean(n&&s&&n>=s)}hasCompressionEnabled(){return this.form.getValue("mode")&&"none"!==this.form.get("mode").value.toLowerCase()}describeCrushStep(_){return[_.op.replace("_"," "),_.item_name||"",_.type?_.num+" type "+_.type:""].join(" ")}addErasureCodeProfile(){this.addModal(Ao,_=>this.reloadECPs(_))}addModal(_,o){this.hideOpenTooltips(),this.modalService.show(_).componentInstance.submitAction.subscribe(s=>{o(s.name)})}hideOpenTooltips(){const _=o=>o&&o.isOpen()&&o.close();_(this.ecpDeletionBtn),_(this.crushDeletionBtn)}reloadECPs(_){this.reloadList({newItemName:_,getInfo:()=>this.ecpService.list(),initInfo:o=>this.initEcp(o),findNewItem:()=>this.ecProfiles.find(o=>o.name===_),controlName:"erasureProfile"})}reloadList({newItemName:_,getInfo:o,initInfo:n,findNewItem:s,controlName:c}){this.modalSubscription&&this.modalSubscription.unsubscribe(),o().subscribe(d=>{if(n(d),!_)return;const P=s();P&&this.form.get(c).setValue(P)})}deleteErasureCodeProfile(){this.deletionModal({value:this.form.getValue("erasureProfile"),usage:this.ecpUsage,deletionBtn:this.ecpDeletionBtn,dataName:"erasureInfo",getTabs:()=>this.ecpInfoTabs,tabPosition:"used-by-pools",nameAttribute:"name",itemDescription:"erasure code profile",reloadFn:()=>this.reloadECPs(),deleteFn:_=>this.ecpService.delete(_),taskName:"ecp/delete"})}deletionModal({value:_,usage:o,deletionBtn:n,dataName:s,getTabs:c,tabPosition:d,nameAttribute:P,itemDescription:p,reloadFn:R,deleteFn:h,taskName:T}){if(!_)return;if(o)return n.animation=!1,n.toggle(),this.data[s]=!0,void setTimeout(()=>{const f=c();f&&f.select(d)},50);const m=_[P];this.modalService.show(Ne.M,{itemDescription:p,itemNames:[m],submitActionObservable:()=>{const 
f=h(m);return f.subscribe(()=>R()),this.taskWrapper.wrapTaskAroundCall({task:new v.R(T,{name:m}),call:f})}})}addCrushRule(){this.addModal($_,_=>this.reloadCrushRules(_))}reloadCrushRules(_){this.reloadList({newItemName:_,getInfo:()=>this.poolService.getInfo(),initInfo:o=>{this.initInfo(o),this.poolTypeChange("replicated")},findNewItem:()=>this.info.crush_rules_replicated.find(o=>o.rule_name===_),controlName:"crushRule"})}deleteCrushRule(){this.deletionModal({value:this.form.getValue("crushRule"),usage:this.crushUsage,deletionBtn:this.crushDeletionBtn,dataName:"crushInfo",getTabs:()=>this.crushInfoTabs,tabPosition:"used-by-pools",nameAttribute:"rule_name",itemDescription:"crush rule",reloadFn:()=>this.reloadCrushRules(),deleteFn:_=>this.crushRuleService.delete(_),taskName:"crushRule/delete"})}crushRuleIsUsedBy(_){this.crushUsage=_?this.info.used_rules[_]:void 0}ecpIsUsedBy(_){this.ecpUsage=_?this.info.used_profiles[_]:void 0}submit(){if(this.form.invalid)return void this.form.setErrors({cdSubmitButton:!0});const _={pool:this.form.getValue("name")};this.assignFormFields(_,[{externalFieldName:"pool_type",formControlName:"poolType"},{externalFieldName:"pg_autoscale_mode",formControlName:"pgAutoscaleMode",editable:!0},{externalFieldName:"pg_num",formControlName:"pgNum",replaceFn:n=>"on"===this.form.getValue("pgAutoscaleMode")?1:n,editable:!0},this.isReplicated?{externalFieldName:"size",formControlName:"size"}:{externalFieldName:"erasure_code_profile",formControlName:"erasureProfile",attr:"name"},{externalFieldName:"rule_name",formControlName:"crushRule",replaceFn:n=>this.isReplicated?n&&n.rule_name:void 0},{externalFieldName:"quota_max_bytes",formControlName:"max_bytes",replaceFn:this.formatter.toBytes,editable:!0,resetValue:this.editing?0:void 0},{externalFieldName:"quota_max_objects",formControlName:"max_objects",editable:!0,resetValue:this.editing?0:void 
0}]),this.info.is_all_bluestore&&(this.assignFormField(_,{externalFieldName:"flags",formControlName:"ecOverwrites",replaceFn:()=>this.isErasure?["ec_overwrites"]:void 0}),"none"!==this.form.getValue("mode")?this.assignFormFields(_,[{externalFieldName:"compression_mode",formControlName:"mode",editable:!0,replaceFn:n=>this.hasCompressionEnabled()&&n},{externalFieldName:"compression_algorithm",formControlName:"algorithm",editable:!0},{externalFieldName:"compression_min_blob_size",formControlName:"minBlobSize",replaceFn:this.formatter.toBytes,editable:!0,resetValue:0},{externalFieldName:"compression_max_blob_size",formControlName:"maxBlobSize",replaceFn:this.formatter.toBytes,editable:!0,resetValue:0},{externalFieldName:"compression_required_ratio",formControlName:"ratio",editable:!0,resetValue:0}]):this.editing&&this.assignFormFields(_,[{externalFieldName:"compression_mode",formControlName:"mode",editable:!0,replaceFn:()=>"unset"},{externalFieldName:"srcpool",formControlName:"name",editable:!0,replaceFn:()=>this.data.pool.pool_name}]));const o=this.data.applications.selected;(o.length>0||this.editing)&&(_.application_metadata=o),this.isReplicated&&!u().isEmpty(this.currentConfigurationValues)&&(_.configuration=this.currentConfigurationValues),this.triggerApiTask(_)}assignFormFields(_,o){o.forEach(n=>this.assignFormField(_,n))}assignFormField(_,{externalFieldName:o,formControlName:n,attr:s,replaceFn:c,editable:d,resetValue:P}){if(this.editing&&(!d||this.form.get(n).pristine))return;const p=this.form.getValue(n);let R=c?c(p):s?u().get(p,s):p;if(!p||!R){if(!d||u().isUndefined(P))return;R=P}_[o]=R}triggerApiTask(_){this.taskWrapper.wrapTaskAroundCall({task:new 
v.R("pool/"+(this.editing?M.MQ.EDIT:M.MQ.CREATE),{pool_name:_.hasOwnProperty("srcpool")?_.srcpool:_.pool}),call:this.poolService[this.editing?M.MQ.UPDATE:M.MQ.CREATE](_)}).subscribe({error:o=>{u().isObject(o.error)&&"34"===o.error.code&&this.form.get("pgNum").setErrors({34:!0}),this.form.setErrors({cdSubmitButton:!0})},complete:()=>this.router.navigate(["/pool"])})}appSelection(){this.form.get("name").updateValueAndValidity({emitEvent:!1,onlySelf:!0})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(C_.$),e.Y36(Oe.gz),e.Y36(Oe.F0),e.Y36(Ze.Z),e.Y36(ue.q),e.Y36(he.j),e.Y36(M_.H),e.Y36(de.P),e.Y36(Me),e.Y36(Ie.H),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-form"]],viewQuery:function(_,o){if(1&_&&(e.Gf(Zo,5),e.Gf(xo,5),e.Gf(yo,5),e.Gf(Uo,5)),2&_){let n;e.iGM(n=e.CRH())&&(o.crushInfoTabs=n.first),e.iGM(n=e.CRH())&&(o.crushDeletionBtn=n.first),e.iGM(n=e.CRH())&&(o.ecpInfoTabs=n.first),e.iGM(n=e.CRH())&&(o.ecpDeletionBtn=n.first)}},features:[e.qOj],decls:1,vars:1,consts:function(){let i,_,o,n,s,c,d,P,p,R,h,T,m,f,A,I,$,D,Z,x,y,U,q,H,G,z,X,w,Q,J,k,V,Y,B,j,K,N,W,ee,_e,oe,te,ie,ne,se,ae,le,re,ce,O,Xe,we,Qe,Je,ke,Ve,Ye,Be,je,Ke,We,e_,__,o_,t_,i_,n_,s_,a_,l_,r_,c_,O_;return i="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Name...",n="Pool type",s="-- Select a pool type --",c="Applications",d="Pools should be associated with an application tag",P="Quotas",p="Max bytes",R="Leave it blank or specify 0 to disable this quota.",h="A valid quota should be greater than 0.",T="e.g., 10GiB",m="Max objects",f="Leave it blank or specify 0 to disable this quota.",A="A valid quota should be greater than 0.",I="This field is required!",$="The chosen Ceph pool name is already in use.",D="It's not possible to create an RBD pool with '/' in the name. 
Please change the name or remove 'rbd' from the applications list.",Z="Pool name can only contain letters, numbers, '.', '-', '_' or '/'.",x="This field is required!",y="PG Autoscale",U="Placement groups",q="Calculation help",H="This field is required!",G="At least one placement group is needed!",z="Your cluster can't handle this many PGs. Please recalculate the PG amount needed.",X="The current PGs settings were calculated for you, you should make sure the values suit your needs before submit.",w="Replicated size",Q="Minimum: " + "\ufffd0\ufffd" + "",J="Maximum: " + "\ufffd0\ufffd" + "",k="The size specified is out of range. A value from " + "\ufffd0\ufffd" + " to " + "\ufffd1\ufffd" + " is usable.",V="A size of 1 will not create a replication of the object. The 'Replicated size' includes the object itself.",Y="Flags",B="EC Overwrites",j="CRUSH",K="Erasure code profile",N="This profile can't be deleted as it is in use.",W="Loading...",ee="-- No erasure code profile available --",_e="-- Select an erasure code profile --",oe="Profile",te="Used by pools",ie="Profile is not in use.",ne="Crush ruleset",se="A new crush ruleset will be implicitly created.",ae="Crush ruleset",le="There are no rules.",re="-- Select a crush rule --",ce="Placement and\n replication strategies or distribution policies that allow to\n specify how CRUSH places data replicas.",O="This rule can't be deleted as it is in use.",Xe="Crush rule",we="Crush steps",Qe="Used by pools",Je="Rule is not in use.",ke="This field is required!",Ve="The rule can't be used in the current cluster as it has too few OSDs to meet the minimum required OSD by this rule.",Ye="Compression",Be="Mode",je="Algorithm",Ke="Minimum blob size",We="e.g., 128KiB",e_="Maximum blob size",__="e.g., 512KiB",o_="Ratio",t_="Compression ratio",i_="Loading...",n_="-- No erasure compression algorithm available --",s_="Value should be greater than 0",a_="Value should be less than the maximum blob size",l_="Value should be greater than 
0",r_="Value should be greater than the minimum blob size",c_="Value should be between 0.0 and 1.0",O_="The value should be greater or equal to 0",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","form","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],i,[1,"card-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label","required"],_,[1,"cd-col-form-input"],["id","name","name","name","type","text","placeholder",o,"formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","poolType",1,"cd-col-form-label","required"],n,["id","poolType","formControlName","poolType","name","poolType",1,"form-control"],["ngValue",""],s,[3,"value",4,"ngFor","ngForOf"],[4,"ngIf"],["for","applications",1,"cd-col-form-label"],c,["id","applications",3,"customBadges","customBadgeValidators","messages","data","options","selectionLimit","selection"],["title",d,3,"class",4,"ngIf"],["formGroupName","compression",4,"ngIf"],P,["for","max_bytes",1,"cd-col-form-label"],p,R,h,["id","max_bytes","name","max_bytes","type","text","formControlName","max_bytes","placeholder",T,"defaultUnit","GiB","cdDimlessBinary","",1,"form-control"],["for","max_objects",1,"cd-col-form-label"],m,f,A,["id","max_objects","min","0","name","max_objects","type","number","formControlName","max_objects",1,"form-control"],[3,"hidden"],[3,"form","initializeData","changes"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],I,$,D,Z,[3,"value"],x,["for","pgAutoscaleMode",1,"cd-col-form-label"],y,["id","pgAutoscaleMode","name","pgAutoscaleMode","formControlName","pgAutoscaleMode",1,"form-control"],["class","form-group 
row",4,"ngIf"],["for","pgNum",1,"cd-col-form-label","required"],U,["id","pgNum","name","pgNum","formControlName","pgNum","min","1","type","number","required","",1,"form-control",3,"focus","blur"],[1,"form-text","text-muted"],["section","pgs","docText",q],["class","form-text text-muted",4,"ngIf"],H,G,z,X,["for","size",1,"cd-col-form-label","required"],w,["id","size","name","size","type","number","formControlName","size",1,"form-control",3,"max","min"],["class","text-warning-dark",4,"ngIf"],[1,"list-inline"],Q,J,k,[1,"text-warning-dark"],V,[1,"cd-col-form-label"],Y,[1,"custom-control","custom-checkbox"],["type","checkbox","id","ec-overwrites","formControlName","ecOverwrites",1,"custom-control-input"],["for","ec-overwrites",1,"custom-control-label"],B,["title",d],j,["for","erasureProfile",1,"cd-col-form-label"],K,[1,"input-group"],["id","erasureProfile","name","erasureProfile","formControlName","erasureProfile",1,"form-control"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],[1,"input-group-append"],["id","ecp-info-button","type","button",1,"btn","btn-light",3,"ngClass","click"],["aria-hidden","true",3,"ngClass"],["class","btn btn-light","type","button",3,"click",4,"ngIf"],["class","btn btn-light","type","button","ngbTooltip",N,"triggers","manual",3,"click",4,"ngIf"],["class","form-text 
text-muted","id","ecp-info-block",4,"ngIf"],W,[3,"ngValue"],ee,_e,["type","button",1,"btn","btn-light",3,"click"],["type","button","ngbTooltip",N,"triggers","manual",1,"btn","btn-light",3,"click"],["ecpDeletionBtn","ngbTooltip"],["id","ecp-info-block",1,"form-text","text-muted"],["ngbNav","",1,"nav-tabs"],["ecpInfoTabs","ngbNav"],["ngbNavItem","ecp-info"],["ngbNavLink",""],oe,["ngbNavContent",""],["ngbNavItem","used-by-pools"],te,[3,"ngbNavOutlet"],[3,"renderObjects","hideKeys","data","autoReload"],["ecpIsNotUsed",""],[4,"ngIf","ngIfElse"],ie,[4,"ngFor","ngForOf"],["for","crushRule",1,"cd-col-form-label"],ne,se,ae,["noRules",""],le,["id","crushRule","formControlName","crushRule","name","crushSet",1,"form-control"],re,["id","crush-info-button","type","button","ngbTooltip",ce,1,"btn","btn-light",3,"ngClass","click"],["class","btn btn-light","type","button","ngbTooltip",O,"triggers","manual",3,"click",4,"ngIf"],["class","form-text text-muted","id","crush-info-block",4,"ngIf"],["type","button","ngbTooltip",O,"triggers","manual",1,"btn","btn-light",3,"click"],["crushDeletionBtn","ngbTooltip"],["id","crush-info-block",1,"form-text","text-muted"],["crushInfoTabs","ngbNav"],["ngbNavItem","crush-rule-info"],Xe,["ngbNavItem","crush-rule-steps"],we,Qe,["ruleIsNotUsed",""],Je,ke,Ve,["formGroupName","compression"],Ye,["for","mode",1,"cd-col-form-label"],Be,["id","mode","name","mode","formControlName","mode",1,"form-control"],["for","algorithm",1,"cd-col-form-label"],je,["id","algorithm","name","algorithm","formControlName","algorithm",1,"form-control"],["for","minBlobSize",1,"cd-col-form-label"],Ke,["id","minBlobSize","name","minBlobSize","formControlName","minBlobSize","type","text","min","0","placeholder",We,"defaultUnit","KiB","cdDimlessBinary","",1,"form-control"],["for","maxBlobSize",1,"cd-col-form-label"],e_,["id","maxBlobSize","type","text","min","0","formControlName","maxBlobSize","placeholder",__,"defaultUnit","KiB","cdDimlessBinary","",1,"form-control"],["for","ratio",
1,"cd-col-form-label"],o_,["id","ratio","name","ratio","formControlName","ratio","type","number","min","0","max","1","step","0.1","placeholder",t_,1,"form-control"],i_,n_,s_,a_,l_,r_,c_,O_]},template:function(_,o){1&_&&e.YNc(0,Yt,70,33,"div",0),2&_&&e.Q6J("cdFormLoading",o.loading)},directives:[bo.y,a._Y,a.JL,fe.V,a.sg,Pe.P,Ee.o,a.Fj,ge.b,a.JJ,a.u,pe.U,C.O5,a.EJ,a.YN,a.Kr,C.sg,vo.m,Re.S,Io.Q,a.qQ,a.wV,$o.d,me.p,a.Q7,Do.K,a.Fd,a.Wl,C.mk,g._L,g.Pz,g.nv,g.Vx,g.uN,g.tO,xe.b,a.x0],pipes:[C.rS,Ce.m],styles:[".icon-warning-color[_ngcontent-%COMP%]{margin-left:3px}"]}),t})();var Bt=r(19773),jt=r(49671),Kt=r(68136),Te=r(69158),Se=r(64337),L=r(99466),Wt=r(91801),ei=r(68774),_i=r(66369),qe=r(38047),Le=r(51847);class oi{constructor(i){this.pool_name=i}}var ti=r(64724);let ii=(()=>{class t{constructor(_,o,n){this.templateRef=_,this.viewContainer=o,this.authStorageService=n,this.cdScopeMatchAll=!0}set cdScope(_){this.permissions=this.authStorageService.getPermissions(),this.isAuthorized(_)?this.viewContainer.createEmbeddedView(this.templateRef):this.viewContainer.clear()}isAuthorized(_){const o=this.cdScopeMatchAll?u().every:u().some;return u().isString(_)?u().get(this.permissions,[_,"read"],!1):u().isArray(_)?o(_,n=>this.permissions[n].read):!!u().isObject(_)&&o(_,(n,s)=>o(n,c=>this.permissions[s][c]))}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(e.Rgc),e.Y36(e.s_b),e.Y36(he.j))},t.\u0275dir=e.lG2({type:t,selectors:[["","cdScope",""]],inputs:{cdScope:"cdScope",cdScopeMatchAll:"cdScopeMatchAll"}}),t})();var ni=r(94928),He=r(51295),si=r(59376),Ge=r(76317),ai=r(71752);function li(t,i){if(1&t&&e._UZ(0,"cd-table-key-value",12),2&t){const _=e.oxw(2);e.Q6J("renderObjects",!0)("data",_.poolDetails)("autoReload",!1)}}function ri(t,i){if(1&t&&e._UZ(0,"cd-grafana",15),2&t){const _=e.oxw(3);e.MGl("grafanaPath","ceph-pool-detail?var-pool_name=",_.selection.pool_name,"")}}function 
ci(t,i){1&t&&(e.TgZ(0,"li",13),e.TgZ(1,"a",5),e.SDv(2,14),e.qZA(),e.YNc(3,ri,1,1,"ng-template",7),e.qZA())}function Oi(t,i){if(1&t&&e._UZ(0,"cd-rbd-configuration-table",18),2&t){const _=e.oxw(3);e.Q6J("data",_.selectedPoolConfiguration)}}function di(t,i){1&t&&(e.TgZ(0,"li",16),e.TgZ(1,"a",5),e.SDv(2,17),e.qZA(),e.YNc(3,Oi,1,1,"ng-template",7),e.qZA())}function ui(t,i){if(1&t&&e._UZ(0,"cd-table",21),2&t){const _=e.oxw(3);e.Q6J("data",_.cacheTiers)("columns",_.cacheTierColumns)("autoSave",!1)}}function fi(t,i){1&t&&(e.TgZ(0,"li",19),e.TgZ(1,"a",5),e.SDv(2,20),e.qZA(),e.YNc(3,ui,1,3,"ng-template",7),e.qZA())}function Pi(t,i){if(1&t&&(e.ynx(0,1),e.TgZ(1,"ul",2,3),e.TgZ(3,"li",4),e.TgZ(4,"a",5),e.SDv(5,6),e.qZA(),e.YNc(6,li,1,3,"ng-template",7),e.qZA(),e.YNc(7,ci,4,0,"li",8),e.YNc(8,di,4,0,"li",9),e.YNc(9,fi,4,0,"li",10),e.qZA(),e._UZ(10,"div",11),e.BQk()),2&t){const _=e.MAs(2),o=e.oxw();e.xp6(7),e.Q6J("ngIf",o.permissions.grafana.read),e.xp6(1),e.Q6J("ngIf","replicated"===o.selection.type),e.xp6(1),e.Q6J("ngIf",(null==o.selection.tiers?null:o.selection.tiers.length)>0),e.xp6(1),e.Q6J("ngbNavOutlet",_)}}let Ei=(()=>{class t{constructor(_){this.poolService=_,this.cacheTierColumns=[],this.omittedPoolAttributes=["cdExecuting","cdIsBinary","stats"],this.cacheTierColumns=[{prop:"pool_name",name:"Name",flexGrow:3},{prop:"cache_mode",name:"Cache Mode",flexGrow:2},{prop:"cache_min_evict_age",name:"Min Evict Age",flexGrow:2},{prop:"cache_min_flush_age",name:"Min Flush Age",flexGrow:2},{prop:"target_max_bytes",name:"Target Max Bytes",flexGrow:2},{prop:"target_max_objects",name:"Target Max Objects",flexGrow:2}]}ngOnChanges(){this.selection&&(this.poolService.getConfiguration(this.selection.pool_name).subscribe(_=>{He.T.updateChanged(this,{selectedPoolConfiguration:_})}),He.T.updateChanged(this,{poolDetails:u().omit(this.selection,this.omittedPoolAttributes)}))}}return t.\u0275fac=function(_){return 
new(_||t)(e.Y36(ue.q))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-details"]],inputs:{cacheTiers:"cacheTiers",permissions:"permissions",selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let i,_,o,n;return i="Details",_="Performance Details",o="Configuration",n="Cache Tiers Details",[["cdTableDetail","",4,"ngIf"],["cdTableDetail",""],["ngbNav","","cdStatefulTab","pool-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],i,["ngbNavContent",""],["ngbNavItem","performance-details",4,"ngIf"],["ngbNavItem","configuration",4,"ngIf"],["ngbNavItem","cache-tiers-details",4,"ngIf"],[3,"ngbNavOutlet"],[3,"renderObjects","data","autoReload"],["ngbNavItem","performance-details"],_,["uid","-xyV8KCiz","grafanaStyle","three",3,"grafanaPath"],["ngbNavItem","configuration"],o,[3,"data"],["ngbNavItem","cache-tiers-details"],n,["columnMode","flex",3,"data","columns","autoSave"]]},template:function(_,o){1&_&&e.YNc(0,Pi,11,4,"ng-container",0),2&_&&e.Q6J("ngIf",o.selection)},directives:[C.O5,g.Pz,si.m,g.nv,g.Vx,g.uN,g.tO,xe.b,Ge.F,ai.P,Se.a],styles:[""],changeDetection:0}),t})();var gi=r(60251);const pi=["poolUsageTpl"],Ri=["poolConfigurationSourceTpl"];function mi(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table",9,10),e.NdJ("fetchData",function(){return e.CHM(_),e.oxw().taskListService.fetch()})("setExpandedRow",function(n){return e.CHM(_),e.oxw().setExpandedRow(n)})("updateSelection",function(n){return e.CHM(_),e.oxw().updateSelection(n)}),e._UZ(2,"cd-table-actions",11),e._UZ(3,"cd-pool-details",12),e.qZA()}if(2&t){const _=e.oxw();e.Q6J("data",_.pools)("columns",_.columns)("hasDetails",!0)("status",_.tableStatus)("autoReload",-1),e.xp6(2),e.Q6J("permission",_.permissions.pool)("selection",_.selection)("tableActions",_.tableActions),e.xp6(1),e.Q6J("selection",_.expandedRow)("permissions",_.permissions)("cacheTiers",_.cacheTiers)}}function Ci(t,i){1&t&&e._UZ(0,"cd-grafana",14),2&t&&e.Q6J("grafanaPath","ceph-pools-overview?")}function 
Mi(t,i){1&t&&(e.TgZ(0,"li",2),e.TgZ(1,"a",3),e.SDv(2,13),e.qZA(),e.YNc(3,Ci,1,1,"ng-template",5),e.qZA())}function hi(t,i){if(1&t&&e._UZ(0,"cd-usage-bar",16),2&t){const _=e.oxw().row;e.Q6J("total",_.stats.bytes_used.latest+_.stats.avail_raw.latest)("used",_.stats.bytes_used.latest)}}function Ti(t,i){if(1&t&&e.YNc(0,hi,1,2,"cd-usage-bar",15),2&t){const _=i.row;e.Q6J("ngIf",null==_.stats||null==_.stats.avail_raw?null:_.stats.avail_raw.latest)}}let Si=(()=>{class t extends Kt.o{constructor(_,o,n,s,c,d,P,p,R,h,T){super(),this.poolService=_,this.taskWrapper=o,this.ecpService=n,this.authStorageService=s,this.taskListService=c,this.modalService=d,this.pgCategoryService=P,this.dimlessPipe=p,this.urlBuilder=R,this.configurationService=h,this.actionLabels=T,this.selection=new ei.r,this.executingTasks=[],this.tableStatus=new Te.E,this.cacheTiers=[],this.monAllowPoolDelete=!1,this.permissions=this.authStorageService.getPermissions(),this.tableActions=[{permission:"create",icon:b.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE},{permission:"update",icon:b.P.edit,routerLink:()=>this.urlBuilder.getEdit(encodeURIComponent(this.selection.first().pool_name)),name:this.actionLabels.EDIT},{permission:"delete",icon:b.P.destroy,click:()=>this.deletePoolModal(),name:this.actionLabels.DELETE,disable:this.getDisableDesc.bind(this)}],this.permissions.configOpt.read&&this.configurationService.get("mon_allow_pool_delete").subscribe(m=>{if(u().has(m,"value")){const f=u().find(m.value,A=>"mon"===A.section)||{value:!1};this.monAllowPoolDelete="true"===f.value}})}ngOnInit(){const _=(o,n,s)=>u().get(n,o)>u().get(s,o)?1:-1;this.columns=[{prop:"pool_name",name:"Name",flexGrow:4,cellTransformation:L.e.executing},{prop:"data_protection",name:"Data 
Protection",cellTransformation:L.e.badge,customTemplateConfig:{class:"badge-background-gray"},flexGrow:1.3},{prop:"application_metadata",name:"Applications",cellTransformation:L.e.badge,customTemplateConfig:{class:"badge-background-primary"},flexGrow:1.5},{prop:"pg_status",name:"PG Status",flexGrow:1.2,cellClass:({row:o,column:n,value:s})=>this.getPgStatusCellClass(o,n,s)},{prop:"crush_rule",name:"Crush Ruleset",isHidden:!0,flexGrow:2},{name:"Usage",prop:"usage",cellTemplate:this.poolUsageTpl,flexGrow:1.2},{prop:"stats.rd_bytes.rates",name:"Read bytes",comparator:(o,n,s,c)=>_("stats.rd_bytes.latest",s,c),cellTransformation:L.e.sparkline,flexGrow:1.5},{prop:"stats.wr_bytes.rates",name:"Write bytes",comparator:(o,n,s,c)=>_("stats.wr_bytes.latest",s,c),cellTransformation:L.e.sparkline,flexGrow:1.5},{prop:"stats.rd.rate",name:"Read ops",flexGrow:1,pipe:this.dimlessPipe,cellTransformation:L.e.perSecond},{prop:"stats.wr.rate",name:"Write ops",flexGrow:1,pipe:this.dimlessPipe,cellTransformation:L.e.perSecond}],this.taskListService.init(()=>this.ecpService.list().pipe((0,Bt.zg)(o=>(this.ecProfileList=o,this.poolService.getList()))),void 0,o=>{this.pools=this.transformPoolsData(o),this.tableStatus=new Te.E},()=>{this.table.reset(),this.tableStatus=new Te.E(Wt.T.ValueException)},o=>o.name.startsWith("pool/"),(o,n)=>n.metadata.pool_name===o.pool_name,{default:o=>new oi(o.pool_name)})}updateSelection(_){this.selection=_}deletePoolModal(){const _=this.selection.first().pool_name;this.modalService.show(Ne.M,{itemDescription:"Pool",itemNames:[_],submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new v.R(`pool/${M.MQ.DELETE}`,{pool_name:_}),call:this.poolService.delete(_)})})}getPgStatusCellClass(_,o,n){return{"text-right":!0,[`pg-${this.pgCategoryService.getTypeByStates(n)}`]:!0}}getErasureCodeProfile(_){let o="";return u().forEach(this.ecProfileList,n=>{n.name===_&&(o=`EC: ${n.k}+${n.m}`)}),o}transformPoolsData(_){const 
o=["bytes_used","max_avail","avail_raw","percent_used","rd_bytes","wr_bytes","rd","wr"],n={latest:0,rate:0,rates:[]};return u().forEach(_,s=>{s.pg_status=this.transformPgStatus(s.pg_status);const c={};u().forEach(o,d=>{c[d]=s.stats&&s.stats[d]?s.stats[d]:n}),s.stats=c,s.usage=c.percent_used.latest,!s.cdExecuting&&s.pg_num+s.pg_placement_num!==s.pg_num_target+s.pg_placement_num_target&&(s.cdExecuting="Updating"),["rd_bytes","wr_bytes"].forEach(d=>{s.stats[d].rates=s.stats[d].rates.map(P=>P[1])}),s.cdIsBinary=!0,"erasure"===s.type&&(s.data_protection=this.getErasureCodeProfile(s.erasure_code_profile)),"replicated"===s.type&&(s.data_protection=`replica: \xd7${s.size}`)}),_}transformPgStatus(_){const o=[];return u().forEach(_,(n,s)=>{o.push(`${n} ${s}`)}),o.join(", ")}getSelectionTiers(){if(void 0!==this.expandedRow){const _=this.expandedRow.tiers;this.cacheTiers=this.pools.filter(o=>_.includes(o.pool))}}getDisableDesc(){var _;return!(null===(_=this.selection)||void 0===_?void 0:_.hasSelection)||!this.monAllowPoolDelete&&"Pool deletion is disabled by the mon_allow_pool_delete configuration setting."}setExpandedRow(_){super.setExpandedRow(_),this.getSelectionTiers()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ue.q),e.Y36(de.P),e.Y36(Me),e.Y36(he.j),e.Y36(qe.j),e.Y36(Ze.Z),e.Y36(jt.j),e.Y36(_i.n),e.Y36(Le.F),e.Y36(ti.e),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-list"]],viewQuery:function(_,o){if(1&_&&(e.Gf(Se.a,5),e.Gf(pi,7),e.Gf(Ri,5)),2&_){let n;e.iGM(n=e.CRH())&&(o.table=n.first),e.iGM(n=e.CRH())&&(o.poolUsageTpl=n.first),e.iGM(n=e.CRH())&&(o.poolConfigurationSourceTpl=n.first)}},features:[e._Bn([qe.j,{provide:Le.F,useValue:new Le.F("pool")}]),e.qOj],decls:10,vars:2,consts:function(){let i,_;return i="Pools List",_="Overall 
Performance",[["ngbNav","",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem",""],["ngbNavLink",""],i,["ngbNavContent",""],["ngbNavItem","",4,"cdScope"],[3,"ngbNavOutlet"],["poolUsageTpl",""],["id","pool-list","selectionType","single",3,"data","columns","hasDetails","status","autoReload","fetchData","setExpandedRow","updateSelection"],["table",""],["id","pool-list-actions",1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","","id","pool-list-details",3,"selection","permissions","cacheTiers"],_,["uid","z99hzWtmk","grafanaStyle","two",3,"grafanaPath"],["decimals","2",3,"total","used",4,"ngIf"],["decimals","2",3,"total","used"]]},template:function(_,o){if(1&_&&(e.TgZ(0,"ul",0,1),e.TgZ(2,"li",2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,mi,4,11,"ng-template",5),e.qZA(),e.YNc(6,Mi,4,0,"li",6),e.qZA(),e._UZ(7,"div",7),e.YNc(8,Ti,1,1,"ng-template",null,8,e.W1O)),2&_){const n=e.MAs(1);e.xp6(6),e.Q6J("cdScope","grafana"),e.xp6(1),e.Q6J("ngbNavOutlet",n)}},directives:[g.Pz,g.nv,g.Vx,g.uN,ii,g.tO,Se.a,ni.K,Ei,Ge.F,C.O5,gi.O],styles:["cd-pool-list .pg-clean{color:#0b0} cd-pool-list .pg-working{color:#2b99a8} cd-pool-list .pg-warning{color:#ffc200} cd-pool-list .pg-unknown{color:#ef5c55}"]}),t})(),ze=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[f_.t,C.ez,g.Oz,d_.m,Oe.Bz,a.UX,g.HK,u_.BlockModule]]}),t})();const Li=[{path:"",component:Si},{path:M.MQ.CREATE,component:Ue,data:{breadcrumbs:M.Qn.CREATE}},{path:`${M.MQ.EDIT}/:name`,component:Ue,data:{breadcrumbs:M.Qn.EDIT}}];let Ai=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[ze,Oe.Bz.forChild(Li)]]}),t})()}}]); \ No newline at end of file +"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[585],{24585:(Fi,Fe,r)=>{r.r(Fe),r.d(Fe,{PoolModule:()=>ze,RoutedPoolModule:()=>Ai});var 
C=r(12057),a=r(24751),Oe=r(6283),g=r(38549),M=r(79512),d_=r(44466),u_=r(70483),f_=r(370),P_=r(23815),u=r.n(P_),E_=r(80226),g_=r(26504),ue=r(80842);class S{constructor(){this.nodes=[],this.idTree={},this.allDevices=[],this.buckets=[],this.failureDomains={},this.failureDomainKeys=[],this.devices=[],this.deviceCount=0}static searchFailureDomains(i,_){return this.getFailureDomains(this.search(i,_))}static search(i,_){const[o,n]=_.split("~"),s=i.find(c=>["name","id","type"].some(d=>c[d]===o));return s?(i=this.getSubNodes(s,this.createIdTreeFromNodes(i)),n&&(i=this.filterNodesByDeviceType(i,n)),i):[]}static createIdTreeFromNodes(i){const _={};return i.forEach(o=>{_[o.id]=o}),_}static getSubNodes(i,_){let o=[i];return i.children&&i.children.forEach(n=>{o=o.concat(this.getSubNodes(_[n],_))}),o}static filterNodesByDeviceType(i,_){let n,o=i.filter(c=>c.device_class&&c.device_class!==_).map(c=>c.id),s=o;do{n=!1,i=i.filter(d=>!o.includes(d.id));const c=[];i.forEach(d=>{d.children&&d.children.every(P=>o.includes(P))&&(c.push(d.id),n=!0)}),n&&(o=c,s=s.concat(c))}while(n);return(i=u().cloneDeep(i)).map(c=>(c.children&&(c.children=c.children.filter(d=>!s.includes(d))),c))}static getFailureDomains(i){const _={};return i.forEach(o=>{const n=o.type;_[n]||(_[n]=[]),_[n].push(o)}),_}initCrushNodeSelection(i,_,o,n){this.nodes=i,this.idTree=S.createIdTreeFromNodes(i),i.forEach(s=>{this.idTree[s.id]=s}),this.buckets=u().sortBy(i.filter(s=>s.children),"name"),this.controls={root:_,failure:o,device:n},this.preSelectRoot(),this.controls.root.valueChanges.subscribe(()=>this.onRootChange()),this.controls.failure.valueChanges.subscribe(()=>this.onFailureDomainChange()),this.controls.device.valueChanges.subscribe(()=>this.onDeviceChange())}preSelectRoot(){const i=this.nodes.find(_=>"root"===_.type);this.silentSet(this.controls.root,i),this.onRootChange()}silentSet(i,_){i.setValue(_,{emitEvent:!1})}onRootChange(){const 
i=S.getSubNodes(this.controls.root.value,this.idTree),_=S.getFailureDomains(i);Object.keys(_).forEach(o=>{_[o].length<=1&&delete _[o]}),this.failureDomains=_,this.failureDomainKeys=Object.keys(_).sort(),this.updateFailureDomain()}updateFailureDomain(){let i=this.getIncludedCustomValue(this.controls.failure,Object.keys(this.failureDomains));""===i&&(i=this.setMostCommonDomain(this.controls.failure)),this.updateDevices(i)}getIncludedCustomValue(i,_){return i.dirty&&_.includes(i.value)?i.value:""}setMostCommonDomain(i){let _={n:0,type:""};return Object.keys(this.failureDomains).forEach(o=>{const n=this.failureDomains[o].length;_.nS.getSubNodes(n,this.idTree)));this.allDevices=_.filter(n=>n.device_class).map(n=>n.device_class),this.devices=u().uniq(this.allDevices).sort();const o=1===this.devices.length?this.devices[0]:this.getIncludedCustomValue(this.controls.device,this.devices);this.silentSet(this.controls.device,o),this.onDeviceChange(o)}onDeviceChange(i=this.controls.device.value){this.deviceCount=""===i?this.allDevices.length:this.allDevices.filter(_=>_===i).length}}var Ne=r(30982),p_=r(14745),b=r(65862),R_=r(93614),be=r(95463),E=r(77205),m_=r(30633),v=r(76111),C_=r(47557),M_=r(28211),de=r(32337),e=r(74788),ve=r(62862),Ie=r(83608),$e=r(60312),fe=r(41582),Pe=r(56310),Ee=r(87925),ge=r(94276),pe=r(82945),Re=r(18372),me=r(30839),Ce=r(10545);function h_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,31),e.qZA())}function T_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,32),e.qZA())}function S_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,33),e.qZA())}function L_(t,i){1&t&&(e.TgZ(0,"option",26),e.SDv(1,34),e.qZA())}function A_(t,i){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}function F_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,36),e.qZA())}function N_(t,i){1&t&&(e.TgZ(0,"option",26),e.SDv(1,37),e.qZA())}function b_(t,i){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const 
_=i.$implicit,o=e.oxw();e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function v_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,38),e.qZA())}function I_(t,i){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}let $_=(()=>{class t extends S{constructor(_,o,n,s,c){super(),this.formBuilder=_,this.activeModal=o,this.taskWrapper=n,this.crushRuleService=s,this.actionLabels=c,this.submitAction=new e.vpe,this.tooltips=this.crushRuleService.formTooltips,this.action=this.actionLabels.CREATE,this.resource="Crush Rule",this.createForm()}createForm(){this.form=this.formBuilder.group({name:["",[a.kI.required,a.kI.pattern("[A-Za-z0-9_-]+"),E.h.custom("uniqueName",_=>this.names&&-1!==this.names.indexOf(_))]],root:null,failure_domain:"",device_class:""})}ngOnInit(){this.crushRuleService.getInfo().subscribe(({names:_,nodes:o})=>{this.initCrushNodeSelection(o,this.form.get("root"),this.form.get("failure_domain"),this.form.get("device_class")),this.names=_})}onSubmit(){if(this.form.invalid)return void this.form.setErrors({cdSubmitButton:!0});const _=u().cloneDeep(this.form.value);_.root=_.root.name,""===_.device_class&&delete _.device_class,this.taskWrapper.wrapTaskAroundCall({task:new v.R("crushRule/create",_),call:this.crushRuleService.create(_)}).subscribe({error:()=>{this.form.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close(),this.submitAction.emit(_)}})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ve.O),e.Y36(g.Kz),e.Y36(de.P),e.Y36(Ie.H),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-crush-rule-form-modal"]],outputs:{submitAction:"submitAction"},features:[e.qOj],decls:55,vars:27,consts:function(){let i,_,o,n,s,c,d,P,p,R,h,T,m;return i="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Root",n="Failure domain type",s="Device class",c="Let Ceph decide",d="This field is required!",P="The name can only consist of alphanumeric characters, 
dashes and underscores.",p="The chosen erasure code profile name is already in use.",R="Loading...",h="This field is required!",T="Loading...",m="This field is required!",[[3,"modalRef"],[1,"modal-title"],i,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label"],_,[1,"required"],[1,"cd-col-form-input"],["type","text","id","name","name","name","placeholder","Name...","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","root",1,"cd-col-form-label"],o,[3,"html"],["id","root","name","root","formControlName","root",1,"form-control"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],["for","failure_domain",1,"cd-col-form-label"],n,["id","failure_domain","name","failure_domain","formControlName","failure_domain",1,"form-control"],["for","device_class",1,"cd-col-form-label"],s,["id","device_class","name","device_class","formControlName","device_class",1,"form-control"],["ngValue",""],c,[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],d,P,p,R,[3,"ngValue"],h,T,m]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.ynx(11),e.SDv(12,9),e.BQk(),e._UZ(13,"span",10),e.qZA(),e.TgZ(14,"div",11),e._UZ(15,"input",12),e.YNc(16,h_,2,0,"span",13),e.YNc(17,T_,2,0,"span",13),e.YNc(18,S_,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(19,"div",7),e.TgZ(20,"label",14),e.ynx(21),e.SDv(22,15),e.BQk(),e._UZ(23,"cd-helper",16),e._UZ(24,"span",10),e.qZA(),e.TgZ(25,"div",11),e.TgZ(26,"select",17),e.YNc(27,L_,2,0,"option",18),e.YNc(28,A_,2,2,"option",19),e.qZA(),e.YNc(29,F_,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(30,"div",7),e.TgZ(31,"label",20),e.ynx(32),e.SDv(33,21),e.BQk(),e._UZ(34,"cd-helper",16),e._UZ(35,"span",10),e.qZA(),e.TgZ(36,"div",11),e.TgZ(37,"select",22),
e.YNc(38,N_,2,0,"option",18),e.YNc(39,b_,2,3,"option",19),e.qZA(),e.YNc(40,v_,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(41,"div",7),e.TgZ(42,"label",23),e.ynx(43),e.SDv(44,24),e.BQk(),e._UZ(45,"cd-helper",16),e.qZA(),e.TgZ(46,"div",11),e.TgZ(47,"select",25),e.TgZ(48,"option",26),e.SDv(49,27),e.qZA(),e.YNc(50,I_,2,2,"option",19),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.TgZ(51,"div",28),e.TgZ(52,"cd-form-button-panel",29),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.ALo(53,"titlecase"),e.ALo(54,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_){const n=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,19,o.action))(e.lcZ(4,21,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.form),e.xp6(10),e.Q6J("ngIf",o.form.showError("name",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"pattern")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"uniqueName")),e.xp6(5),e.Q6J("html",o.tooltips.root),e.xp6(4),e.Q6J("ngIf",!o.buckets),e.xp6(1),e.Q6J("ngForOf",o.buckets),e.xp6(1),e.Q6J("ngIf",o.form.showError("root",n,"required")),e.xp6(5),e.Q6J("html",o.tooltips.failure_domain),e.xp6(4),e.Q6J("ngIf",!o.failureDomains),e.xp6(1),e.Q6J("ngForOf",o.failureDomainKeys),e.xp6(1),e.Q6J("ngIf",o.form.showError("failure_domain",n,"required")),e.xp6(5),e.Q6J("html",o.tooltips.device_class),e.xp6(5),e.Q6J("ngForOf",o.devices),e.xp6(2),e.Q6J("form",o.form)("submitText",e.lcZ(53,23,o.action)+" "+e.lcZ(54,25,o.resource))}},directives:[$e.z,a._Y,a.JL,fe.V,a.sg,Pe.P,Ee.o,a.Fj,ge.b,a.JJ,a.u,pe.U,C.O5,Re.S,a.EJ,C.sg,a.YN,a.Kr,me.p],pipes:[C.rS,Ce.m],styles:[""]}),t})();class D_{}var Z_=r(58497);let Me=(()=>{class t{constructor(_){this.http=_,this.apiPath="api/erasure_code_profile",this.formTooltips={k:"Each object is split in data-chunks parts, each stored on a different OSD.",m:"Compute coding chunks for each object and store them on different OSDs.\n The number of coding chunks is also the number of OSDs that can be down without losing 
data.",plugins:{jerasure:{description:"The jerasure plugin is the most generic and flexible plugin,\n it is also the default for Ceph erasure coded pools.",technique:"The more flexible technique is reed_sol_van : it is enough to set k\n and m. The cauchy_good technique can be faster but you need to chose the packetsize\n carefully. All of reed_sol_r6_op, liberation, blaum_roth, liber8tion are RAID6 equivalents\n in the sense that they can only be configured with m=2.",packetSize:"The encoding will be done on packets of bytes size at a time.\n Choosing the right packet size is difficult.\n The jerasure documentation contains extensive information on this topic."},lrc:{description:"With the jerasure plugin, when an erasure coded object is stored on\n multiple OSDs, recovering from the loss of one OSD requires reading from all the others.\n For instance if jerasure is configured with k=8 and m=4, losing one OSD requires reading\n from the eleven others to repair.\n\n The lrc erasure code plugin creates local parity chunks to be able to recover using\n less OSDs. For instance if lrc is configured with k=8, m=4 and l=4, it will create\n an additional parity chunk for every four OSDs. When a single OSD is lost, it can be\n recovered with only four OSDs instead of eleven.",l:"Group the coding and data chunks into sets of size locality. For instance,\n for k=4 and m=2, when locality=3 two groups of three are created. Each set can\n be recovered without reading chunks from another set.",crushLocality:"The type of the crush bucket in which each set of chunks defined\n by l will be stored. For instance, if it is set to rack, each group of l chunks will be\n placed in a different rack. It is used to create a CRUSH rule step such as step choose\n rack. If it is not set, no such grouping is done."},isa:{description:"The isa plugin encapsulates the ISA library. 
It only runs on Intel processors.",technique:"The ISA plugin comes in two Reed Solomon forms.\n If reed_sol_van is set, it is Vandermonde, if cauchy is set, it is Cauchy."},shec:{description:"The shec plugin encapsulates the multiple SHEC library.\n It allows ceph to recover data more efficiently than Reed Solomon codes.",c:"The number of parity chunks each of which includes each data chunk in its\n calculation range. The number is used as a durability estimator. For instance, if c=2,\n 2 OSDs can be down without losing data."},clay:{description:"CLAY (short for coupled-layer) codes are erasure codes designed to\n bring about significant savings in terms of network bandwidth and disk IO when a failed\n node/OSD/rack is being repaired.",d:"Number of OSDs requested to send data during recovery of a single chunk.\n d needs to be chosen such that k+1 <= d <= k+m-1. The larger the d, the better\n the savings.",scalar_mds:"scalar_mds specifies the plugin that is used as a building block\n in the layered construction. It can be one of jerasure, isa, shec.",technique:"technique specifies the technique that will be picked\n within the 'scalar_mds' plugin specified. Supported techniques\n are 'reed_sol_van', 'reed_sol_r6_op', 'cauchy_orig',\n 'cauchy_good', 'liber8tion' for jerasure, 'reed_sol_van',\n 'cauchy' for isa and 'single', 'multiple' for shec."}},crushRoot:"The name of the crush bucket used for the first step of the CRUSH rule.\n For instance step take default.",crushFailureDomain:"Ensure that no two chunks are in a bucket with the same failure\n domain. For instance, if the failure domain is host no two chunks will be stored on the same\n host. 
It is used to create a CRUSH rule step such as step chooseleaf host.",crushDeviceClass:"Restrict placement to devices of a specific class\n (e.g., ssd or hdd), using the crush device class names in the CRUSH map.",directory:"Set the directory name from which the erasure code plugin is loaded."}}list(){return this.http.get(this.apiPath)}create(_){return this.http.post(this.apiPath,_,{observe:"response"})}delete(_){return this.http.delete(`${this.apiPath}/${_}`,{observe:"response"})}getInfo(){return this.http.get(`ui-${this.apiPath}/info`)}}return t.\u0275fac=function(_){return new(_||t)(e.LFG(Z_.eN))},t.\u0275prov=e.Yz7({token:t,factory:t.\u0275fac,providedIn:"root"}),t})();function x_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,47),e.qZA())}function y_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,48),e.qZA())}function U_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,49),e.qZA())}function q_(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,50),e.qZA())}function H_(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}function G_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,52),e.qZA())}function z_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,53),e.qZA())}function X_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,54),e.qZA())}function w_(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,55),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.pQV(_.deviceCount),e.QtT(1)}}function Q_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,56),e.qZA())}function J_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,57),e.qZA())}function k_(t,i){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,58),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.pQV(_.lrcMultiK),e.QtT(1)}}function V_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,59),e.qZA())}function Y_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,60),e.qZA())}function B_(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,61),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.pQV(_.deviceCount),e.QtT(1)}}function j_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,65),e.qZA())}function 
K_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,66),e.qZA())}function W_(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",62),e.TgZ(2,"span",14),e.SDv(3,63),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",64),e.YNc(7,j_,2,0,"span",12),e.YNc(8,K_,2,0,"span",12),e.qZA(),e.qZA()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.shec.c),e.xp6(3),e.Q6J("ngIf",_.form.showError("c",o,"min")),e.xp6(1),e.Q6J("ngIf",_.form.showError("c",o,"cGreaterM"))}}function eo(t,i){1&t&&(e.TgZ(0,"span",39),e.SDv(1,75),e.qZA())}function _o(t,i){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,76),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.pQV(_.getDMin())(_.getDMax()),e.QtT(1)}}function oo(t,i){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,77),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.pQV(_.getDMax()),e.QtT(1)}}function to(t,i){if(1&t&&(e.ynx(0),e.YNc(1,_o,2,2,"span",23),e.YNc(2,oo,2,1,"span",23),e.BQk()),2&t){const _=e.oxw(2);e.xp6(1),e.Q6J("ngIf",_.getDMin()<_.getDMax()),e.xp6(1),e.Q6J("ngIf",_.getDMin()===_.getDMax())}}function io(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,78),e.qZA()),2&t){const _=e.oxw(2);e.xp6(1),e.pQV(_.getDMin()),e.QtT(1)}}function no(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,79),e.qZA()),2&t){const _=e.oxw(2);e.xp6(1),e.pQV(_.getDMax()),e.QtT(1)}}function so(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",7),e.TgZ(1,"label",67),e.TgZ(2,"span",14),e.SDv(3,68),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e.TgZ(6,"div",69),e._UZ(7,"input",70),e.TgZ(8,"span",71),e.TgZ(9,"button",72),e.NdJ("click",function(){return e.CHM(_),e.oxw().toggleDCalc()}),e._UZ(10,"i",73),e.qZA(),e.qZA(),e.qZA(),e.YNc(11,eo,2,0,"span",23),e.YNc(12,to,3,2,"ng-container",74),e.YNc(13,io,2,1,"span",12),e.YNc(14,no,2,1,"span",12),e.qZA(),e.qZA()}if(2&t){const 
_=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.clay.d),e.xp6(6),e.Q6J("ngClass",_.dCalc?_.icons.unlock:_.icons.lock),e.xp6(1),e.Q6J("ngIf",_.dCalc),e.xp6(1),e.Q6J("ngIf",!_.dCalc),e.xp6(1),e.Q6J("ngIf",_.form.showError("d",o,"dMin")),e.xp6(1),e.Q6J("ngIf",_.form.showError("d",o,"dMax"))}}function ao(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,84),e.qZA())}function lo(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,85),e.qZA())}function ro(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,86),e.qZA())}function co(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",80),e.TgZ(2,"span",14),e.SDv(3,81),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",82),e.YNc(7,ao,2,0,"span",12),e.YNc(8,lo,2,0,"span",12),e.YNc(9,ro,2,0,"span",12),e.TgZ(10,"span",39),e.SDv(11,83),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.lrc.l),e.xp6(3),e.Q6J("ngIf",_.form.showError("l",o,"required")),e.xp6(1),e.Q6J("ngIf",_.form.showError("l",o,"min")),e.xp6(1),e.Q6J("ngIf",_.form.showError("l",o,"unequal")),e.xp6(2),e.pQV(_.lrcGroups),e.QtT(11)}}function Oo(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,87),e.qZA())}function uo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit,o=e.oxw();e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function fo(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,91),e.qZA())}function Po(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,92),e.qZA())}function Eo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit,o=e.oxw(2);e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function go(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",88),e.ynx(2),e.SDv(3,89),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e.TgZ(6,"select",90),e.YNc(7,fo,2,0,"option",18),e.YNc(8,Po,2,0,"option",18),e.YNc(9,Eo,2,3,"option",19),e.qZA(),e.qZA(),e.qZA()),2&t){const 
_=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins.lrc.crushLocality),e.xp6(3),e.Q6J("ngIf",!_.failureDomains),e.xp6(1),e.Q6J("ngIf",_.failureDomainKeys.length>0),e.xp6(1),e.Q6J("ngForOf",_.failureDomainKeys)}}function po(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}const De=function(t,i,_){return[t,i,_]};function Ro(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",93),e.ynx(2),e.SDv(3,94),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e.TgZ(6,"select",95),e.YNc(7,po,2,2,"option",19),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins.clay.scalar_mds),e.xp6(3),e.Q6J("ngForOf",e.kEZ(2,De,_.PLUGIN.JERASURE,_.PLUGIN.ISA,_.PLUGIN.SHEC))}}function mo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}function Co(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",96),e.ynx(2),e.SDv(3,97),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e.TgZ(6,"select",98),e.YNc(7,mo,2,2,"option",19),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins[_.plugin].technique),e.xp6(3),e.Q6J("ngForOf",_.techniques)}}function Mo(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,102),e.qZA())}function ho(t,i){if(1&t&&(e.TgZ(0,"div",7),e.TgZ(1,"label",99),e.ynx(2),e.SDv(3,100),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",101),e.YNc(7,Mo,2,0,"span",12),e.qZA(),e.qZA()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.jerasure.packetSize),e.xp6(3),e.Q6J("ngIf",_.form.showError("packetSize",o,"min"))}}function To(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,103),e.qZA())}function So(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}function Lo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const 
_=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}let Ao=(()=>{class t extends S{constructor(_,o,n,s,c){super(),this.formBuilder=_,this.activeModal=o,this.taskWrapper=n,this.ecpService=s,this.actionLabels=c,this.submitAction=new e.vpe,this.tooltips=this.ecpService.formTooltips,this.PLUGIN={LRC:"lrc",SHEC:"shec",CLAY:"clay",JERASURE:"jerasure",ISA:"isa"},this.plugin=this.PLUGIN.JERASURE,this.icons=b.P,this.action=this.actionLabels.CREATE,this.resource="EC Profile",this.createForm(),this.setJerasureDefaults()}createForm(){this.form=this.formBuilder.group({name:[null,[a.kI.required,a.kI.pattern("[A-Za-z0-9_-]+"),E.h.custom("uniqueName",_=>this.names&&-1!==this.names.indexOf(_))]],plugin:[this.PLUGIN.JERASURE,[a.kI.required]],k:[4,[a.kI.required,E.h.custom("max",()=>this.baseValueValidation(!0)),E.h.custom("unequal",_=>this.lrcDataValidation(_)),E.h.custom("kLowerM",_=>this.shecDataValidation(_))]],m:[2,[a.kI.required,E.h.custom("max",()=>this.baseValueValidation())]],crushFailureDomain:"",crushRoot:null,crushDeviceClass:"",directory:"",technique:"reed_sol_van",packetSize:[2048],l:[3,[a.kI.required,E.h.custom("unequal",_=>this.lrcLocalityValidation(_))]],crushLocality:"",c:[2,[a.kI.required,E.h.custom("cGreaterM",_=>this.shecDurabilityValidation(_))]],d:[5,[a.kI.required,E.h.custom("dMin",_=>this.dMinValidation(_)),E.h.custom("dMax",_=>this.dMaxValidation(_))]],scalar_mds:[this.PLUGIN.JERASURE,[a.kI.required]]}),this.toggleDCalc(),this.form.get("k").valueChanges.subscribe(()=>this.updateValidityOnChange(["m","l","d"])),this.form.get("m").valueChanges.subscribe(()=>this.updateValidityOnChange(["k","l","c","d"])),this.form.get("l").valueChanges.subscribe(()=>this.updateValidityOnChange(["k","m"])),this.form.get("plugin").valueChanges.subscribe(_=>this.onPluginChange(_)),this.form.get("scalar_mds").valueChanges.subscribe(()=>this.setClayDefaultsForScalar())}baseValueValidation(_=!1){return 
this.validValidation(()=>this.getKMSum()>this.deviceCount&&this.form.getValue("k")>this.form.getValue("m")===_)}validValidation(_,o){return!((!this.form||o)&&this.plugin!==o)&&_()}getKMSum(){return this.form.getValue("k")+this.form.getValue("m")}lrcDataValidation(_){return this.validValidation(()=>{const o=this.form.getValue("m"),n=this.form.getValue("l"),s=_+o;return this.lrcMultiK=_/(s/n),_%(s/n)!=0},"lrc")}shecDataValidation(_){return this.validValidation(()=>this.form.getValue("m")>_,"shec")}lrcLocalityValidation(_){return this.validValidation(()=>{const o=this.getKMSum();return this.lrcGroups=_>0?o/_:0,_>0&&o%_!=0},"lrc")}shecDurabilityValidation(_){return this.validValidation(()=>{const o=this.form.getValue("m");return _>o},"shec")}dMinValidation(_){return this.validValidation(()=>this.getDMin()>_,"clay")}getDMin(){return this.form.getValue("k")+1}dMaxValidation(_){return this.validValidation(()=>_>this.getDMax(),"clay")}getDMax(){const _=this.form.getValue("m");return this.form.getValue("k")+_-1}toggleDCalc(){this.dCalc=!this.dCalc,this.form.get("d")[this.dCalc?"disable":"enable"](),this.calculateD()}calculateD(){this.plugin!==this.PLUGIN.CLAY||!this.dCalc||this.form.silentSet("d",this.getDMax())}updateValidityOnChange(_){_.forEach(o=>{"d"===o&&this.calculateD(),this.form.get(o).updateValueAndValidity({emitEvent:!1})})}onPluginChange(_){this.plugin=_,_===this.PLUGIN.JERASURE?this.setJerasureDefaults():_===this.PLUGIN.LRC?this.setLrcDefaults():_===this.PLUGIN.ISA?this.setIsaDefaults():_===this.PLUGIN.SHEC?this.setShecDefaults():_===this.PLUGIN.CLAY&&this.setClayDefaults(),this.updateValidityOnChange(["m"])}setJerasureDefaults(){this.techniques=["reed_sol_van","reed_sol_r6_op","cauchy_orig","cauchy_good","liberation","blaum_roth","liber8tion"],this.setDefaults({k:4,m:2,technique:"reed_sol_van"})}setLrcDefaults(){this.setDefaults({k:4,m:2,l:3})}setIsaDefaults(){this.techniques=["reed_sol_van","cauchy"],this.setDefaults({k:7,m:3,technique:"reed_sol_van"})}setShec
Defaults(){this.setDefaults({k:4,m:3,c:2})}setClayDefaults(){this.setDefaults({k:4,m:2,scalar_mds:this.PLUGIN.JERASURE}),this.setClayDefaultsForScalar()}setClayDefaultsForScalar(){const _=this.form.getValue("scalar_mds");let o="reed_sol_van";_===this.PLUGIN.JERASURE?this.techniques=["reed_sol_van","reed_sol_r6_op","cauchy_orig","cauchy_good","liber8tion"]:_===this.PLUGIN.ISA?this.techniques=["reed_sol_van","cauchy"]:(o="single",this.techniques=["single","multiple"]),this.setDefaults({technique:o})}setDefaults(_){Object.keys(_).forEach(o=>{const n=this.form.get(o),s=n.value;n.pristine||"technique"===o&&!this.techniques.includes(s)||"k"===o&&[4,7].includes(s)||"m"===o&&[2,3].includes(s)?n.setValue(_[o]):n.updateValueAndValidity()})}ngOnInit(){this.ecpService.getInfo().subscribe(({plugins:_,names:o,directory:n,nodes:s})=>{this.initCrushNodeSelection(s,this.form.get("crushRoot"),this.form.get("crushFailureDomain"),this.form.get("crushDeviceClass")),this.plugins=_,this.names=o,this.form.silentSet("directory",n),this.preValidateNumericInputFields()})}preValidateNumericInputFields(){const _=["k","m","l","c","d"].map(o=>this.form.get(o));_.forEach(o=>{o.markAsTouched(),o.markAsDirty()}),_[1].updateValueAndValidity()}onSubmit(){if(this.form.invalid)return void this.form.setErrors({cdSubmitButton:!0});const _=this.createJson();this.taskWrapper.wrapTaskAroundCall({task:new v.R("ecp/create",{name:_.name}),call:this.ecpService.create(_)}).subscribe({error:()=>{this.form.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close(),this.submitAction.emit(_)}})}createJson(){const _={technique:[this.PLUGIN.ISA,this.PLUGIN.JERASURE,this.PLUGIN.CLAY],packetSize:[this.PLUGIN.JERASURE],l:[this.PLUGIN.LRC],crushLocality:[this.PLUGIN.LRC],c:[this.PLUGIN.SHEC],d:[this.PLUGIN.CLAY],scalar_mds:[this.PLUGIN.CLAY]},o=new D_,n=this.form.getValue("plugin");return Object.keys(this.form.controls).filter(s=>{const 
c=_[s],d=this.form.getValue(s);return(c&&c.includes(n)||!c)&&d&&""!==d}).forEach(s=>{this.extendJson(s,o)}),o}extendJson(_,o){const s=this.form.getValue(_);o[{crushFailureDomain:"crush-failure-domain",crushRoot:"crush-root",crushDeviceClass:"crush-device-class",packetSize:"packetsize",crushLocality:"crush-locality"}[_]||_]="crushRoot"===_?s.name:s}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ve.O),e.Y36(g.Kz),e.Y36(de.P),e.Y36(Me),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-erasure-code-profile-form-modal"]],outputs:{submitAction:"submitAction"},features:[e.qOj],decls:98,vars:53,consts:function(){let i,_,o,n,s,c,d,P,p,R,h,T,m,f,A,I,$,D,Z,x,y,U,q,H,G,z,X,w,Q,J,k,V,Y,B,j,K,N,W,ee,_e,oe,te,ie,ne,se,ae,le,re,ce;return i="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Plugin",n="Data chunks (k)",s="Coding chunks (m)",c="Crush failure domain",d="Crush root",P="Crush device class",p="Let Ceph decide",R="Available OSDs: " + "\ufffd0\ufffd" + "",h="Directory",T="This field is required!",m="The name can only consist of alphanumeric characters, dashes and underscores.",f="The chosen erasure code profile name is already in use.",A="Loading...",I="This field is required!",$="This field is required!",D="Must be equal to or greater than 2.",Z="Chunks (k+m) have exceeded the available OSDs of " + "\ufffd0\ufffd" + ".",x="For an equal distribution k has to be a multiple of (k+m)/l.",y="K has to be equal to or greater than m in order to recover data correctly through c.",U="Distribution factor: " + "\ufffd0\ufffd" + "",q="This field is required!",H="Must be equal to or greater than 1.",G="Chunks (k+m) have exceeded the available OSDs of " + "\ufffd0\ufffd" + ".",z="Durability estimator (c)",X="Must be equal to or greater than 1.",w="C has to be equal to or lower than m as m defines the amount of chunks that can be used.",Q="Helper chunks (d)",J="Set d manually or use the plugin's default calculation that maximizes d.",k="D is automatically updated 
on k and m changes",V="D can be set from " + "\ufffd0\ufffd" + " to " + "\ufffd1\ufffd" + "",Y="D can only be set to " + "\ufffd0\ufffd" + "",B="D has to be greater than k (" + "\ufffd0\ufffd" + ").",j="D has to be lower than k + m (" + "\ufffd0\ufffd" + ").",K="Locality (l)",N="Locality groups: " + "\ufffd0\ufffd" + "",W="This field is required!",ee="Must be equal to or greater than 1.",_e="Can't split up chunks (k+m) correctly with the current locality.",oe="Loading...",te="Crush Locality",ie="Loading...",ne="None",se="Scalar mds",ae="Technique",le="Packetsize",re="Must be equal to or greater than 1.",ce="Loading...",[[3,"modalRef"],[1,"modal-title"],i,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["type","text","id","name","name","name","placeholder","Name...","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","plugin",1,"cd-col-form-label"],[1,"required"],o,[3,"html"],["id","plugin","name","plugin","formControlName","plugin",1,"form-control"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],["for","k",1,"cd-col-form-label"],n,["type","number","id","k","name","k","ng-model","$ctrl.erasureCodeProfile.k","placeholder","Data chunks...","formControlName","k","min","2",1,"form-control"],["class","form-text text-muted",4,"ngIf"],["for","m",1,"cd-col-form-label"],s,["type","number","id","m","name","m","placeholder","Coding chunks...","formControlName","m","min","1",1,"form-control"],["class","form-group 
row",4,"ngIf"],["for","crushFailureDomain",1,"cd-col-form-label"],c,["id","crushFailureDomain","name","crushFailureDomain","formControlName","crushFailureDomain",1,"form-control"],["for","crushRoot",1,"cd-col-form-label"],d,["id","crushRoot","name","crushRoot","formControlName","crushRoot",1,"form-control"],["for","crushDeviceClass",1,"cd-col-form-label"],P,["id","crushDeviceClass","name","crushDeviceClass","formControlName","crushDeviceClass",1,"form-control"],["ngValue",""],p,[1,"form-text","text-muted"],R,["for","directory",1,"cd-col-form-label"],h,["type","text","id","directory","name","directory","placeholder","Path...","formControlName","directory",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],T,m,f,A,[3,"ngValue"],I,$,D,Z,x,y,U,q,H,G,["for","c",1,"cd-col-form-label"],z,["type","number","id","c","name","c","placeholder","Coding chunks...","formControlName","c","min","1",1,"form-control"],X,w,["for","d",1,"cd-col-form-label"],Q,[1,"input-group"],["type","number","id","d","name","d","placeholder","Helper chunks...","formControlName","d",1,"form-control"],[1,"input-group-append"],["id","d-calc-btn","ngbTooltip",J,"type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],[4,"ngIf"],k,V,Y,B,j,["for","l",1,"cd-col-form-label"],K,["type","number","id","l","name","l","placeholder","Coding 
chunks...","formControlName","l","min","1",1,"form-control"],N,W,ee,_e,oe,["for","crushLocality",1,"cd-col-form-label"],te,["id","crushLocality","name","crushLocality","formControlName","crushLocality",1,"form-control"],ie,ne,["for","scalar_mds",1,"cd-col-form-label"],se,["id","scalar_mds","name","scalar_mds","formControlName","scalar_mds",1,"form-control"],["for","technique",1,"cd-col-form-label"],ae,["id","technique","name","technique","formControlName","technique",1,"form-control"],["for","packetSize",1,"cd-col-form-label"],le,["type","number","id","packetSize","name","packetSize","placeholder","Packetsize...","formControlName","packetSize","min","1",1,"form-control"],re,ce]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5),e.TgZ(8,"div",6),e.TgZ(9,"div",7),e.TgZ(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.YNc(14,x_,2,0,"span",12),e.YNc(15,y_,2,0,"span",12),e.YNc(16,U_,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(17,"div",7),e.TgZ(18,"label",13),e.TgZ(19,"span",14),e.SDv(20,15),e.qZA(),e._UZ(21,"cd-helper",16),e.qZA(),e.TgZ(22,"div",10),e.TgZ(23,"select",17),e.YNc(24,q_,2,0,"option",18),e.YNc(25,H_,2,2,"option",19),e.qZA(),e.YNc(26,G_,2,0,"span",12),e.qZA(),e.qZA(),e.TgZ(27,"div",7),e.TgZ(28,"label",20),e.TgZ(29,"span",14),e.SDv(30,21),e.qZA(),e._UZ(31,"cd-helper",16),e.qZA(),e.TgZ(32,"div",10),e._UZ(33,"input",22),e.YNc(34,z_,2,0,"span",12),e.YNc(35,X_,2,0,"span",12),e.YNc(36,w_,2,1,"span",12),e.YNc(37,Q_,2,0,"span",12),e.YNc(38,J_,2,0,"span",12),e.YNc(39,k_,2,1,"span",23),e.qZA(),e.qZA(),e.TgZ(40,"div",7),e.TgZ(41,"label",24),e.TgZ(42,"span",14),e.SDv(43,25),e.qZA(),e._UZ(44,"cd-helper",16),e.qZA(),e.TgZ(45,"div",10),e._UZ(46,"input",26),e.YNc(47,V_,2,0,"span",12),e.YNc(48,Y_,2,0,"span",12),e.YNc(49,B_,2,1,"span",12),e.qZA(),e.qZA(),e.YNc(50,W_,9,3,"div",27),e.YNc(51,so,15,6,"div",27),e.YNc(52,co,12,5,"div",27),e.TgZ(53,"div",7),e
.TgZ(54,"label",28),e.ynx(55),e.SDv(56,29),e.BQk(),e._UZ(57,"cd-helper",16),e.qZA(),e.TgZ(58,"div",10),e.TgZ(59,"select",30),e.YNc(60,Oo,2,0,"option",18),e.YNc(61,uo,2,3,"option",19),e.qZA(),e.qZA(),e.qZA(),e.YNc(62,go,10,4,"div",27),e.YNc(63,Ro,8,6,"div",27),e.YNc(64,Co,8,2,"div",27),e.YNc(65,ho,8,2,"div",27),e.TgZ(66,"div",7),e.TgZ(67,"label",31),e.ynx(68),e.SDv(69,32),e.BQk(),e._UZ(70,"cd-helper",16),e.qZA(),e.TgZ(71,"div",10),e.TgZ(72,"select",33),e.YNc(73,To,2,0,"option",18),e.YNc(74,So,2,2,"option",19),e.qZA(),e.qZA(),e.qZA(),e.TgZ(75,"div",7),e.TgZ(76,"label",34),e.ynx(77),e.SDv(78,35),e.BQk(),e._UZ(79,"cd-helper",16),e.qZA(),e.TgZ(80,"div",10),e.TgZ(81,"select",36),e.TgZ(82,"option",37),e.SDv(83,38),e.qZA(),e.YNc(84,Lo,2,2,"option",19),e.qZA(),e.TgZ(85,"span",39),e.SDv(86,40),e.qZA(),e.qZA(),e.qZA(),e.TgZ(87,"div",7),e.TgZ(88,"label",41),e.ynx(89),e.SDv(90,42),e.BQk(),e._UZ(91,"cd-helper",16),e.qZA(),e.TgZ(92,"div",10),e._UZ(93,"input",43),e.qZA(),e.qZA(),e.qZA(),e.TgZ(94,"div",44),e.TgZ(95,"cd-form-button-panel",45),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.ALo(96,"titlecase"),e.ALo(97,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.BQk(),e.qZA()),2&_){const 
n=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,41,o.action))(e.lcZ(4,43,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.form),e.xp6(8),e.Q6J("ngIf",o.form.showError("name",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"pattern")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"uniqueName")),e.xp6(5),e.Q6J("html",o.tooltips.plugins[o.plugin].description),e.xp6(3),e.Q6J("ngIf",!o.plugins),e.xp6(1),e.Q6J("ngForOf",o.plugins),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"required")),e.xp6(5),e.Q6J("html",o.tooltips.k),e.xp6(3),e.Q6J("ngIf",o.form.showError("k",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"max")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"unequal")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"kLowerM")),e.xp6(1),e.Q6J("ngIf","lrc"===o.plugin),e.xp6(5),e.Q6J("html",o.tooltips.m),e.xp6(3),e.Q6J("ngIf",o.form.showError("m",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("m",n,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("m",n,"max")),e.xp6(1),e.Q6J("ngIf","shec"===o.plugin),e.xp6(1),e.Q6J("ngIf","clay"===o.plugin),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.LRC),e.xp6(5),e.Q6J("html",o.tooltips.crushFailureDomain),e.xp6(3),e.Q6J("ngIf",!o.failureDomains),e.xp6(1),e.Q6J("ngForOf",o.failureDomainKeys),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.LRC),e.xp6(1),e.Q6J("ngIf",o.PLUGIN.CLAY===o.plugin),e.xp6(1),e.Q6J("ngIf",e.kEZ(49,De,o.PLUGIN.JERASURE,o.PLUGIN.ISA,o.PLUGIN.CLAY).includes(o.plugin)),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.JERASURE),e.xp6(5),e.Q6J("html",o.tooltips.crushRoot),e.xp6(3),e.Q6J("ngIf",!o.buckets),e.xp6(1),e.Q6J("ngForOf",o.buckets),e.xp6(5),e.Q6J("html",o.tooltips.crushDeviceClass),e.xp6(5),e.Q6J("ngForOf",o.devices),e.xp6(2),e.pQV(o.deviceCount),e.QtT(86),e.xp6(5),e.Q6J("html",o.tooltips.directory),e.xp6(4),e.Q6J("form",o.form)("submitText",e.lcZ(96,45,o.action)+" 
"+e.lcZ(97,47,o.resource))}},directives:[$e.z,a._Y,a.JL,fe.V,a.sg,Pe.P,Ee.o,a.Fj,ge.b,a.JJ,a.u,pe.U,C.O5,Re.S,a.EJ,C.sg,a.wV,a.qQ,a.YN,a.Kr,me.p,g._L,C.mk],pipes:[C.rS,Ce.m],styles:[""]}),t})();var Fo=r(7022);class No{constructor(){this.erasureInfo=!1,this.crushInfo=!1,this.pgs=1,this.poolTypes=["erasure","replicated"],this.applications={selected:[],default:["cephfs","rbd","rgw"],available:[],validators:[a.kI.pattern("[A-Za-z0-9_]+"),a.kI.maxLength(128)],messages:new Fo.a({empty:"No applications added",selectionLimit:{text:"Applications limit reached",tooltip:"A pool can only have up to four applications definitions."},customValidations:{pattern:"Allowed characters '_a-zA-Z0-9'",maxlength:"Maximum length is 128 characters"},filter:"Filter or add applications'",add:"Add application"})}}}var Ze=r(63285),he=r(74937),bo=r(63622),vo=r(60192),Io=r(17932),$o=r(54555),Do=r(30490),xe=r(61350);const Zo=["crushInfoTabs"],xo=["crushDeletionBtn"],yo=["ecpInfoTabs"],Uo=["ecpDeletionBtn"];function qo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,42),e.qZA())}function Ho(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,43),e.qZA())}function Go(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,44),e.qZA())}function zo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,45),e.qZA())}function Xo(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function wo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,47),e.qZA())}function Qo(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Jo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,58),e.qZA())}function ko(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,59),e.qZA())}function Vo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,60),e.qZA())}function Yo(t,i){1&t&&(e.TgZ(0,"span",55),e.SDv(1,61),e.qZA())}function Bo(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",8),e.TgZ(1,"label",52),e.SDv(2,53),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"input",54),e.NdJ("focus",function(){return 
e.CHM(_),e.oxw(3).externalPgChange=!1})("blur",function(){return e.CHM(_),e.oxw(3).alignPgs()}),e.qZA(),e.YNc(5,Jo,2,0,"span",13),e.YNc(6,ko,2,0,"span",13),e.YNc(7,Vo,2,0,"span",13),e.TgZ(8,"span",55),e._UZ(9,"cd-doc",56),e.qZA(),e.YNc(10,Yo,2,0,"span",57),e.qZA(),e.qZA()}if(2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(5),e.Q6J("ngIf",o.form.showError("pgNum",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("pgNum",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("pgNum",_,"34")),e.xp6(3),e.Q6J("ngIf",o.externalPgChange)}}function jo(t,i){if(1&t&&(e.TgZ(0,"span",41),e.TgZ(1,"ul",66),e.TgZ(2,"li"),e.SDv(3,67),e.qZA(),e.TgZ(4,"li"),e.SDv(5,68),e.qZA(),e.qZA(),e.qZA()),2&t){const _=e.oxw(4);e.xp6(3),e.pQV(_.getMinSize()),e.QtT(3),e.xp6(2),e.pQV(_.getMaxSize()),e.QtT(5)}}function Ko(t,i){if(1&t&&(e.TgZ(0,"span",41),e.SDv(1,69),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.pQV(_.getMinSize())(_.getMaxSize()),e.QtT(1)}}function Wo(t,i){1&t&&(e.TgZ(0,"span",70),e.SDv(1,71),e.qZA())}function et(t,i){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",62),e.SDv(2,63),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",64),e.YNc(5,jo,6,2,"span",13),e.YNc(6,Ko,2,2,"span",13),e.YNc(7,Wo,2,0,"span",65),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(4),e.Q6J("max",o.getMaxSize())("min",o.getMinSize()),e.xp6(1),e.Q6J("ngIf",o.form.showError("size",_)),e.xp6(1),e.Q6J("ngIf",o.form.showError("size",_)),e.xp6(1),e.Q6J("ngIf",1===o.form.getValue("size"))}}function _t(t,i){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",72),e.SDv(2,73),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"div",74),e._UZ(5,"input",75),e.TgZ(6,"label",76),e.SDv(7,77),e.qZA(),e.qZA(),e.qZA(),e.qZA())}function ot(t,i){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"div",8),e.TgZ(2,"label",48),e.SDv(3,49),e.qZA(),e.TgZ(4,"div",11),e.TgZ(5,"select",50),e.YNc(6,Qo,2,2,"option",19),e.qZA(),e.qZA(),e.qZA(),e.YNc(7,Bo,11,4,"div",51),e.YNc(8,et,8,5,"div",51),e.YNc(9,_t,8,0,"div",51),e.qZA()),2&t){const 
_=e.oxw(2);e.xp6(6),e.Q6J("ngForOf",_.pgAutoscaleModes),e.xp6(1),e.Q6J("ngIf","on"!==_.form.getValue("pgAutoscaleMode")),e.xp6(1),e.Q6J("ngIf",_.isReplicated),e.xp6(1),e.Q6J("ngIf",_.info.is_all_bluestore&&_.isErasure)}}function tt(t,i){if(1&t&&e._UZ(0,"i",78),2&t){const _=e.oxw(2);e.Gre("",_.icons.warning," icon-warning-color")}}function it(t,i){1&t&&(e.TgZ(0,"option",17),e.SDv(1,93),e.qZA())}function nt(t,i){1&t&&(e.TgZ(0,"option",94),e.SDv(1,95),e.qZA()),2&t&&e.Q6J("ngValue",null)}function st(t,i){1&t&&(e.TgZ(0,"option",94),e.SDv(1,96),e.qZA()),2&t&&e.Q6J("ngValue",null)}function at(t,i){if(1&t&&(e.TgZ(0,"option",94),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}const F=function(t){return[t]};function lt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",97),e.NdJ("click",function(){return e.CHM(_),e.oxw(4).addErasureCodeProfile()}),e._UZ(1,"i",89),e.qZA()}if(2&t){const _=e.oxw(4);e.xp6(1),e.Q6J("ngClass",e.VKq(1,F,_.icons.add))}}function rt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",98,99),e.NdJ("click",function(){return e.CHM(_),e.oxw(4).deleteErasureCodeProfile()}),e._UZ(2,"i",89),e.qZA()}if(2&t){const _=e.oxw(4);e.xp6(2),e.Q6J("ngClass",e.VKq(1,F,_.icons.trash))}}const ct=function(){return["name"]};function Ot(t,i){if(1&t&&e._UZ(0,"cd-table-key-value",110),2&t){const _=e.oxw(5);e.Q6J("renderObjects",!0)("hideKeys",e.DdM(4,ct))("data",_.form.getValue("erasureProfile"))("autoReload",!1)}}function dt(t,i){1&t&&(e.TgZ(0,"span"),e.SDv(1,113),e.qZA())}function ut(t,i){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.xp6(1),e.hij(" ",_," ")}}function ft(t,i){if(1&t&&(e.TgZ(0,"ul"),e.YNc(1,ut,2,1,"li",114),e.qZA()),2&t){const _=e.oxw(6);e.xp6(1),e.Q6J("ngForOf",_.ecpUsage)}}function Pt(t,i){if(1&t&&(e.YNc(0,dt,2,0,"ng-template",null,111,e.W1O),e.YNc(2,ft,2,1,"ul",112)),2&t){const _=e.MAs(1),o=e.oxw(5);e.xp6(2),e.Q6J("ngIf",o.ecpUsage)("ngIfElse",_)}}function 
Et(t,i){if(1&t&&(e.TgZ(0,"span",100),e.TgZ(1,"ul",101,102),e.TgZ(3,"li",103),e.TgZ(4,"a",104),e.SDv(5,105),e.qZA(),e.YNc(6,Ot,1,5,"ng-template",106),e.qZA(),e.TgZ(7,"li",107),e.TgZ(8,"a",104),e.SDv(9,108),e.qZA(),e.YNc(10,Pt,3,2,"ng-template",106),e.qZA(),e.qZA(),e._UZ(11,"div",109),e.qZA()),2&t){const _=e.MAs(2);e.xp6(11),e.Q6J("ngbNavOutlet",_)}}const ye=function(t){return{active:t}};function gt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",8),e.TgZ(1,"label",80),e.SDv(2,81),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"div",82),e.TgZ(5,"select",83),e.YNc(6,it,2,0,"option",84),e.YNc(7,nt,2,1,"option",85),e.YNc(8,st,2,1,"option",85),e.YNc(9,at,2,2,"option",86),e.qZA(),e.TgZ(10,"span",87),e.TgZ(11,"button",88),e.NdJ("click",function(){e.CHM(_);const n=e.oxw(3);return n.data.erasureInfo=!n.data.erasureInfo}),e._UZ(12,"i",89),e.qZA(),e.YNc(13,lt,2,3,"button",90),e.YNc(14,rt,3,3,"button",91),e.qZA(),e.qZA(),e.YNc(15,Et,12,1,"span",92),e.qZA(),e.qZA()}if(2&t){const _=e.oxw(3);e.xp6(6),e.Q6J("ngIf",!_.ecProfiles),e.xp6(1),e.Q6J("ngIf",_.ecProfiles&&0===_.ecProfiles.length),e.xp6(1),e.Q6J("ngIf",_.ecProfiles&&_.ecProfiles.length>0),e.xp6(1),e.Q6J("ngForOf",_.ecProfiles),e.xp6(2),e.Q6J("ngClass",e.VKq(9,ye,_.data.erasureInfo)),e.xp6(1),e.Q6J("ngClass",e.VKq(11,F,_.icons.questionCircle)),e.xp6(1),e.Q6J("ngIf",!_.editing),e.xp6(1),e.Q6J("ngIf",!_.editing),e.xp6(1),e.Q6J("ngIf",_.data.erasureInfo&&_.form.getValue("erasureProfile"))}}function pt(t,i){1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",115),e.SDv(2,116),e.qZA(),e.TgZ(3,"div",11),e.TgZ(4,"span",55),e.SDv(5,117),e.qZA(),e.qZA(),e.qZA())}function Rt(t,i){1&t&&(e.TgZ(0,"span",55),e.TgZ(1,"span"),e.SDv(2,120),e.qZA(),e._uU(3,"\xa0 "),e.qZA())}function mt(t,i){if(1&t&&(e.TgZ(0,"option",94),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.rule_name," ")}}function Ct(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",97),e.NdJ("click",function(){return 
e.CHM(_),e.oxw(5).addCrushRule()}),e._UZ(1,"i",89),e.qZA()}if(2&t){const _=e.oxw(5);e.xp6(1),e.Q6J("ngClass",e.VKq(1,F,_.icons.add))}}function Mt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",126,127),e.NdJ("click",function(){return e.CHM(_),e.oxw(5).deleteCrushRule()}),e._UZ(2,"i",89),e.qZA()}if(2&t){const _=e.oxw(5);e.xp6(2),e.Q6J("ngClass",e.VKq(1,F,_.icons.trash))}}const ht=function(){return["steps","type","rule_name"]};function Tt(t,i){if(1&t&&e._UZ(0,"cd-table-key-value",110),2&t){const _=e.oxw(6);e.Q6J("renderObjects",!1)("hideKeys",e.DdM(4,ht))("data",_.form.getValue("crushRule"))("autoReload",!1)}}function St(t,i){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=i.$implicit,o=e.oxw(7);e.xp6(1),e.hij(" ",o.describeCrushStep(_)," ")}}function Lt(t,i){if(1&t&&(e.TgZ(0,"ol"),e.YNc(1,St,2,1,"li",114),e.qZA()),2&t){const _=e.oxw(6);e.xp6(1),e.Q6J("ngForOf",_.form.get("crushRule").value.steps)}}function At(t,i){1&t&&(e.TgZ(0,"span"),e.SDv(1,136),e.qZA())}function Ft(t,i){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.xp6(1),e.hij(" ",_," ")}}function Nt(t,i){if(1&t&&(e.TgZ(0,"ul"),e.YNc(1,Ft,2,1,"li",114),e.qZA()),2&t){const _=e.oxw(7);e.xp6(1),e.Q6J("ngForOf",_.crushUsage)}}function bt(t,i){if(1&t&&(e.YNc(0,At,2,0,"ng-template",null,135,e.W1O),e.YNc(2,Nt,2,1,"ul",112)),2&t){const _=e.MAs(1),o=e.oxw(6);e.xp6(2),e.Q6J("ngIf",o.crushUsage)("ngIfElse",_)}}function vt(t,i){if(1&t&&(e.TgZ(0,"div",128),e.TgZ(1,"ul",101,129),e.TgZ(3,"li",130),e.TgZ(4,"a",104),e.SDv(5,131),e.qZA(),e.YNc(6,Tt,1,5,"ng-template",106),e.qZA(),e.TgZ(7,"li",132),e.TgZ(8,"a",104),e.SDv(9,133),e.qZA(),e.YNc(10,Lt,2,1,"ng-template",106),e.qZA(),e.TgZ(11,"li",107),e.TgZ(12,"a",104),e.SDv(13,134),e.qZA(),e.YNc(14,bt,3,2,"ng-template",106),e.qZA(),e.qZA(),e._UZ(15,"div",109),e.qZA()),2&t){const _=e.MAs(2);e.xp6(15),e.Q6J("ngbNavOutlet",_)}}function It(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,137),e.qZA())}function $t(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,138),e.qZA())}function 
Dt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div"),e.TgZ(1,"div",82),e.TgZ(2,"select",121),e.TgZ(3,"option",94),e.SDv(4,122),e.qZA(),e.YNc(5,mt,2,2,"option",86),e.qZA(),e.TgZ(6,"span",87),e.TgZ(7,"button",123),e.NdJ("click",function(){e.CHM(_);const n=e.oxw(4);return n.data.crushInfo=!n.data.crushInfo}),e._UZ(8,"i",89),e.qZA(),e.YNc(9,Ct,2,3,"button",90),e.YNc(10,Mt,3,3,"button",124),e.qZA(),e.qZA(),e.YNc(11,vt,16,1,"div",125),e.YNc(12,It,2,0,"span",13),e.YNc(13,$t,2,0,"span",13),e.qZA()}if(2&t){e.oxw(3);const _=e.MAs(2),o=e.oxw();e.xp6(3),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",o.current.rules),e.xp6(2),e.Q6J("ngClass",e.VKq(9,ye,o.data.crushInfo)),e.xp6(1),e.Q6J("ngClass",e.VKq(11,F,o.icons.questionCircle)),e.xp6(1),e.Q6J("ngIf",o.isReplicated&&!o.editing),e.xp6(1),e.Q6J("ngIf",o.isReplicated&&!o.editing),e.xp6(1),e.Q6J("ngIf",o.data.crushInfo&&o.form.getValue("crushRule")),e.xp6(1),e.Q6J("ngIf",o.form.showError("crushRule",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("crushRule",_,"tooFewOsds"))}}function Zt(t,i){if(1&t&&(e.TgZ(0,"div",8),e.TgZ(1,"label",115),e.SDv(2,118),e.qZA(),e.TgZ(3,"div",11),e.YNc(4,Rt,4,0,"ng-template",null,119,e.W1O),e.YNc(6,Dt,14,13,"div",112),e.qZA(),e.qZA()),2&t){const _=e.MAs(5),o=e.oxw(3);e.xp6(6),e.Q6J("ngIf",o.current.rules.length>0)("ngIfElse",_)}}function xt(t,i){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"legend"),e.SDv(2,79),e.qZA(),e.YNc(3,gt,16,13,"div",51),e.YNc(4,pt,6,0,"div",51),e.YNc(5,Zt,7,2,"div",51),e.qZA()),2&t){const _=e.oxw(2);e.xp6(3),e.Q6J("ngIf",_.isErasure),e.xp6(1),e.Q6J("ngIf",_.isErasure&&!_.editing),e.xp6(1),e.Q6J("ngIf",_.isReplicated||_.editing)}}function yt(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Ut(t,i){1&t&&(e.TgZ(0,"option",17),e.SDv(1,156),e.qZA())}function qt(t,i){1&t&&(e.TgZ(0,"option",17),e.SDv(1,157),e.qZA())}function Ht(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const 
_=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Gt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,158),e.qZA())}function zt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,159),e.qZA())}function Xt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,160),e.qZA())}function wt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,161),e.qZA())}function Qt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,162),e.qZA())}function Jt(t,i){if(1&t&&(e.TgZ(0,"div"),e.TgZ(1,"div",8),e.TgZ(2,"label",144),e.SDv(3,145),e.qZA(),e.TgZ(4,"div",11),e.TgZ(5,"select",146),e.YNc(6,Ut,2,0,"option",84),e.YNc(7,qt,2,0,"option",84),e.YNc(8,Ht,2,2,"option",19),e.qZA(),e.qZA(),e.qZA(),e.TgZ(9,"div",8),e.TgZ(10,"label",147),e.SDv(11,148),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",149),e.YNc(14,Gt,2,0,"span",13),e.YNc(15,zt,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(16,"div",8),e.TgZ(17,"label",150),e.SDv(18,151),e.qZA(),e.TgZ(19,"div",11),e._UZ(20,"input",152),e.YNc(21,Xt,2,0,"span",13),e.YNc(22,wt,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(23,"div",8),e.TgZ(24,"label",153),e.SDv(25,154),e.qZA(),e.TgZ(26,"div",11),e._UZ(27,"input",155),e.YNc(28,Qt,2,0,"span",13),e.qZA(),e.qZA(),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(6),e.Q6J("ngIf",!o.info.compression_algorithms),e.xp6(1),e.Q6J("ngIf",o.info.compression_algorithms&&0===o.info.compression_algorithms.length),e.xp6(1),e.Q6J("ngForOf",o.info.compression_algorithms),e.xp6(6),e.Q6J("ngIf",o.form.showError("minBlobSize",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("minBlobSize",_,"maximum")),e.xp6(6),e.Q6J("ngIf",o.form.showError("maxBlobSize",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("maxBlobSize",_,"minimum")),e.xp6(6),e.Q6J("ngIf",o.form.showError("ratio",_,"min")||o.form.showError("ratio",_,"max"))}}function 
kt(t,i){if(1&t&&(e.TgZ(0,"div",139),e.TgZ(1,"legend"),e.SDv(2,140),e.qZA(),e.TgZ(3,"div",8),e.TgZ(4,"label",141),e.SDv(5,142),e.qZA(),e.TgZ(6,"div",11),e.TgZ(7,"select",143),e.YNc(8,yt,2,2,"option",19),e.qZA(),e.qZA(),e.qZA(),e.YNc(9,Jt,29,8,"div",20),e.qZA()),2&t){const _=e.oxw(2);e.xp6(8),e.Q6J("ngForOf",_.info.compression_modes),e.xp6(1),e.Q6J("ngIf",_.hasCompressionEnabled())}}function Vt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,163),e.qZA())}function Yt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",1),e.TgZ(1,"form",2,3),e.TgZ(3,"div",4),e.TgZ(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.TgZ(9,"div",8),e.TgZ(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",12),e.YNc(14,qo,2,0,"span",13),e.YNc(15,Ho,2,0,"span",13),e.YNc(16,Go,2,0,"span",13),e.YNc(17,zo,2,0,"span",13),e.qZA(),e.qZA(),e.TgZ(18,"div",8),e.TgZ(19,"label",14),e.SDv(20,15),e.qZA(),e.TgZ(21,"div",11),e.TgZ(22,"select",16),e.TgZ(23,"option",17),e.SDv(24,18),e.qZA(),e.YNc(25,Xo,2,2,"option",19),e.qZA(),e.YNc(26,wo,2,0,"span",13),e.qZA(),e.qZA(),e.YNc(27,ot,10,4,"div",20),e.TgZ(28,"div",8),e.TgZ(29,"label",21),e.SDv(30,22),e.qZA(),e.TgZ(31,"div",11),e.TgZ(32,"cd-select-badges",23),e.NdJ("selection",function(){return 
e.CHM(_),e.oxw().appSelection()}),e.qZA(),e.YNc(33,tt,1,3,"i",24),e.qZA(),e.qZA(),e.YNc(34,xt,6,3,"div",20),e.YNc(35,kt,10,2,"div",25),e.TgZ(36,"div"),e.TgZ(37,"legend"),e.SDv(38,26),e.qZA(),e.TgZ(39,"div",8),e.TgZ(40,"label",27),e.ynx(41),e.SDv(42,28),e.BQk(),e.TgZ(43,"cd-helper"),e.TgZ(44,"span"),e.SDv(45,29),e.qZA(),e._UZ(46,"br"),e.TgZ(47,"span"),e.SDv(48,30),e.qZA(),e.qZA(),e.qZA(),e.TgZ(49,"div",11),e._UZ(50,"input",31),e.qZA(),e.qZA(),e.TgZ(51,"div",8),e.TgZ(52,"label",32),e.ynx(53),e.SDv(54,33),e.BQk(),e.TgZ(55,"cd-helper"),e.TgZ(56,"span"),e.SDv(57,34),e.qZA(),e._UZ(58,"br"),e.TgZ(59,"span"),e.SDv(60,35),e.qZA(),e.qZA(),e.qZA(),e.TgZ(61,"div",11),e._UZ(62,"input",36),e.YNc(63,Vt,2,0,"span",13),e.qZA(),e.qZA(),e.qZA(),e.TgZ(64,"div",37),e.TgZ(65,"cd-rbd-configuration-form",38),e.NdJ("changes",function(n){return e.CHM(_),e.oxw().currentConfigurationValues=n()}),e.qZA(),e.qZA(),e.qZA(),e.TgZ(66,"div",39),e.TgZ(67,"cd-form-button-panel",40),e.NdJ("submitActionEvent",function(){return e.CHM(_),e.oxw().submit()}),e.ALo(68,"titlecase"),e.ALo(69,"upperFirst"),e.qZA(),e.qZA(),e.qZA(),e.qZA(),e.qZA()}if(2&t){const 
_=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.form),e.xp6(6),e.pQV(e.lcZ(6,25,o.action))(e.lcZ(7,27,o.resource)),e.QtT(5),e.xp6(7),e.Q6J("ngIf",o.form.showError("name",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"uniqueName")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"rbdPool")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"pattern")),e.xp6(8),e.Q6J("ngForOf",o.data.poolTypes),e.xp6(1),e.Q6J("ngIf",o.form.showError("poolType",_,"required")),e.xp6(1),e.Q6J("ngIf",o.isReplicated||o.isErasure),e.xp6(5),e.Q6J("customBadges",!0)("customBadgeValidators",o.data.applications.validators)("messages",o.data.applications.messages)("data",o.data.applications.selected)("options",o.data.applications.available)("selectionLimit",4),e.xp6(1),e.Q6J("ngIf",o.data.applications.selected<=0),e.xp6(1),e.Q6J("ngIf",o.isErasure||o.isReplicated),e.xp6(1),e.Q6J("ngIf",o.info.is_all_bluestore),e.xp6(28),e.Q6J("ngIf",o.form.showError("max_objects",_,"min")),e.xp6(1),e.Q6J("hidden",o.isErasure||-1===o.data.applications.selected.indexOf("rbd")),e.xp6(1),e.Q6J("form",o.form)("initializeData",o.initializeConfigData),e.xp6(2),e.Q6J("form",o.form)("submitText",e.lcZ(68,29,o.action)+" "+e.lcZ(69,31,o.resource))}}let Ue=(()=>{class t extends R_.E{constructor(_,o,n,s,c,d,P,p,R,h,T){super(),this.dimlessBinaryPipe=_,this.route=o,this.router=n,this.modalService=s,this.poolService=c,this.authStorageService=d,this.formatter=P,this.taskWrapper=p,this.ecpService=R,this.crushRuleService=h,this.actionLabels=T,this.editing=!1,this.isReplicated=!1,this.isErasure=!1,this.data=new No,this.externalPgChange=!1,this.current={rules:[]},this.initializeConfigData=new E_.t(1),this.currentConfigurationValues={},this.icons=b.P,this.crushUsage=void 0,this.ecpUsage=void 
0,this.crushRuleMaxSize=10,this.editing=this.router.url.startsWith(`/pool/${M.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.resource="pool",this.authenticate(),this.createForm()}authenticate(){if(this.permission=this.authStorageService.getPermissions().pool,!this.permission.read||!this.permission.update&&this.editing||!this.permission.create&&!this.editing)throw new g_._2}createForm(){const _=new be.d({mode:new a.NI("none"),algorithm:new a.NI(""),minBlobSize:new a.NI("",{updateOn:"blur"}),maxBlobSize:new a.NI("",{updateOn:"blur"}),ratio:new a.NI("",{updateOn:"blur"})});this.form=new be.d({name:new a.NI("",{validators:[a.kI.pattern(/^[.A-Za-z0-9_/-]+$/),a.kI.required,E.h.custom("rbdPool",()=>this.form&&this.form.getValue("name").includes("/")&&this.data&&-1!==this.data.applications.selected.indexOf("rbd"))]}),poolType:new a.NI("",{validators:[a.kI.required]}),crushRule:new a.NI(null,{validators:[E.h.custom("tooFewOsds",o=>this.info&&o&&this.info.osd_count<1),E.h.custom("required",o=>this.isReplicated&&this.info.crush_rules_replicated.length>0&&!o)]}),size:new a.NI("",{updateOn:"blur"}),erasureProfile:new a.NI(null),pgNum:new a.NI("",{validators:[a.kI.required]}),pgAutoscaleMode:new a.NI(null),ecOverwrites:new a.NI(!1),compression:_,max_bytes:new a.NI(""),max_objects:new a.NI(0)},[E.h.custom("form",()=>null)])}ngOnInit(){this.poolService.getInfo().subscribe(_=>{this.initInfo(_),this.editing?this.initEditMode():(this.setAvailableApps(),this.loadingReady()),this.listenToChanges(),this.setComplexValidators()})}initInfo(_){this.pgAutoscaleModes=_.pg_autoscale_modes,this.form.silentSet("pgAutoscaleMode",_.pg_autoscale_default_mode),this.form.silentSet("algorithm",_.bluestore_compression_algorithm),this.info=_,this.initEcp(_.erasure_code_profiles)}initEcp(_){this.setListControlStatus("erasureProfile",_),this.ecProfiles=_}setListControlStatus(_,o){const 
n=this.form.get(_),s=n.value;1!==o.length||s&&u().isEqual(s,o[0])?0===o.length&&s&&n.setValue(null):n.setValue(o[0]),o.length<=1?n.enabled&&n.disable():n.disabled&&n.enable()}initEditMode(){this.disableForEdit(),this.routeParamsSubscribe=this.route.params.subscribe(_=>this.poolService.get(_.name).subscribe(o=>{this.data.pool=o,this.initEditFormData(o),this.loadingReady()}))}disableForEdit(){["poolType","crushRule","size","erasureProfile","ecOverwrites"].forEach(_=>this.form.get(_).disable())}initEditFormData(_){this.initializeConfigData.next({initialData:_.configuration,sourceType:m_.h.pool}),this.poolTypeChange(_.type);const o=this.info.crush_rules_replicated.concat(this.info.crush_rules_erasure),n={name:_.pool_name,poolType:_.type,crushRule:o.find(s=>s.rule_name===_.crush_rule),size:_.size,erasureProfile:this.ecProfiles.find(s=>s.name===_.erasure_code_profile),pgAutoscaleMode:_.pg_autoscale_mode,pgNum:_.pg_num,ecOverwrites:_.flags_names.includes("ec_overwrites"),mode:_.options.compression_mode,algorithm:_.options.compression_algorithm,minBlobSize:this.dimlessBinaryPipe.transform(_.options.compression_min_blob_size),maxBlobSize:this.dimlessBinaryPipe.transform(_.options.compression_max_blob_size),ratio:_.options.compression_required_ratio,max_bytes:this.dimlessBinaryPipe.transform(_.quota_max_bytes),max_objects:_.quota_max_objects};Object.keys(n).forEach(s=>{const c=n[s];!u().isUndefined(c)&&""!==c&&this.form.silentSet(s,c)}),this.data.pgs=this.form.getValue("pgNum"),this.setAvailableApps(this.data.applications.default.concat(_.application_metadata)),this.data.applications.selected=_.application_metadata}setAvailableApps(_=this.data.applications.default){this.data.applications.available=u().uniq(_.sort()).map(o=>new p_.$(!1,o,""))}listenToChanges(){this.listenToChangesDuringAddEdit(),this.editing||this.listenToChangesDuringAdd()}listenToChangesDuringAddEdit(){this.form.get("pgNum").valueChanges.subscribe(_=>{const 
o=_-this.data.pgs;1===Math.abs(o)&&2!==_?this.doPgPowerJump(o):this.data.pgs=_})}doPgPowerJump(_){const o=this.calculatePgPower()+_;this.setPgs(-1===_?Math.round(o):Math.floor(o))}calculatePgPower(_=this.form.getValue("pgNum")){return Math.log(_)/Math.log(2)}setPgs(_){const o=Math.pow(2,_<0?0:_);this.data.pgs=o,this.form.silentSet("pgNum",o)}listenToChangesDuringAdd(){this.form.get("poolType").valueChanges.subscribe(_=>{this.poolTypeChange(_)}),this.form.get("crushRule").valueChanges.subscribe(_=>{this.crushDeletionBtn&&this.crushDeletionBtn.isOpen()&&this.crushDeletionBtn.close(),_&&(this.setCorrectMaxSize(_),this.crushRuleIsUsedBy(_.rule_name),this.replicatedRuleChange(),this.pgCalc())}),this.form.get("size").valueChanges.subscribe(()=>{this.pgCalc()}),this.form.get("erasureProfile").valueChanges.subscribe(_=>{this.ecpDeletionBtn&&this.ecpDeletionBtn.isOpen()&&this.ecpDeletionBtn.close(),_&&(this.ecpIsUsedBy(_.name),this.pgCalc())}),this.form.get("mode").valueChanges.subscribe(()=>{["minBlobSize","maxBlobSize","ratio"].forEach(_=>{this.form.get(_).updateValueAndValidity({emitEvent:!1})})}),this.form.get("minBlobSize").valueChanges.subscribe(()=>{this.form.get("maxBlobSize").updateValueAndValidity({emitEvent:!1})}),this.form.get("maxBlobSize").valueChanges.subscribe(()=>{this.form.get("minBlobSize").updateValueAndValidity({emitEvent:!1})})}poolTypeChange(_){if("replicated"===_?this.setTypeBooleans(!0,!1):this.setTypeBooleans(!1,"erasure"===_),!_||!this.info)return void(this.current.rules=[]);const o=this.info["crush_rules_"+_]||[];this.current.rules=o,!this.editing&&(this.isReplicated&&this.setListControlStatus("crushRule",o),this.replicatedRuleChange(),this.pgCalc())}setTypeBooleans(_,o){this.isReplicated=_,this.isErasure=o}replicatedRuleChange(){if(!this.isReplicated)return;const _=this.form.get("size");let o=this.form.getValue("size")||3;const 
n=this.getMinSize(),s=this.getMaxSize();os&&(o=s),o!==_.value&&this.form.silentSet("size",o)}getMinSize(){return!this.info||this.info.osd_count<1?0:1}getMaxSize(){const _=this.form.getValue("crushRule");return this.info?_?_.usable_size:Math.min(this.info.osd_count,3):0}pgCalc(){const _=this.form.getValue("poolType");if(!this.info||this.form.get("pgNum").dirty||!_)return;const o=100*this.info.osd_count,n=this.isReplicated?this.replicatedPgCalc(o):this.erasurePgCalc(o);if(!n)return;const s=this.data.pgs;this.alignPgs(n),this.externalPgChange||(this.externalPgChange=s!==this.data.pgs)}setCorrectMaxSize(_=this.form.getValue("crushRule")){if(!_)return;const n=S.searchFailureDomains(this.info.nodes,_.steps[0].item_name)[_.steps[1].type];_.usable_size=Math.min(n?n.length:this.crushRuleMaxSize,this.crushRuleMaxSize)}replicatedPgCalc(_){const o=this.form.get("size"),n=o.value;return o.valid&&n>0?_/n:0}erasurePgCalc(_){const o=this.form.get("erasureProfile"),n=o.value;return(o.valid||o.disabled)&&n?_/(n.k+n.m):0}alignPgs(_=this.form.getValue("pgNum")){this.setPgs(Math.round(this.calculatePgPower(_<1?1:_)))}setComplexValidators(){this.editing?this.form.get("name").setValidators([this.form.get("name").validator,E.h.custom("uniqueName",_=>this.data.pool&&this.info&&-1!==this.info.pool_names.indexOf(_)&&this.info.pool_names.indexOf(_)!==this.info.pool_names.indexOf(this.data.pool.pool_name))]):(E.h.validateIf(this.form.get("size"),()=>this.isReplicated,[E.h.custom("min",_=>this.form.getValue("size")&&_this.form.getValue("size")&&this.getMaxSize()<_)]),this.form.get("name").setValidators([this.form.get("name").validator,E.h.custom("uniqueName",_=>this.info&&-1!==this.info.pool_names.indexOf(_))])),this.setCompressionValidators()}setCompressionValidators(){E.h.validateIf(this.form.get("minBlobSize"),()=>this.hasCompressionEnabled(),[a.kI.min(0),E.h.custom("maximum",_=>this.oddBlobSize(_,this.form.getValue("maxBlobSize")))]),E.h.validateIf(this.form.get("maxBlobSize"),()=>this.hasCo
mpressionEnabled(),[a.kI.min(0),E.h.custom("minimum",_=>this.oddBlobSize(this.form.getValue("minBlobSize"),_))]),E.h.validateIf(this.form.get("ratio"),()=>this.hasCompressionEnabled(),[a.kI.min(0),a.kI.max(1)])}oddBlobSize(_,o){const n=this.formatter.toBytes(_),s=this.formatter.toBytes(o);return Boolean(n&&s&&n>=s)}hasCompressionEnabled(){return this.form.getValue("mode")&&"none"!==this.form.get("mode").value.toLowerCase()}describeCrushStep(_){return[_.op.replace("_"," "),_.item_name||"",_.type?_.num+" type "+_.type:""].join(" ")}addErasureCodeProfile(){this.addModal(Ao,_=>this.reloadECPs(_))}addModal(_,o){this.hideOpenTooltips(),this.modalService.show(_).componentInstance.submitAction.subscribe(s=>{o(s.name)})}hideOpenTooltips(){const _=o=>o&&o.isOpen()&&o.close();_(this.ecpDeletionBtn),_(this.crushDeletionBtn)}reloadECPs(_){this.reloadList({newItemName:_,getInfo:()=>this.ecpService.list(),initInfo:o=>this.initEcp(o),findNewItem:()=>this.ecProfiles.find(o=>o.name===_),controlName:"erasureProfile"})}reloadList({newItemName:_,getInfo:o,initInfo:n,findNewItem:s,controlName:c}){this.modalSubscription&&this.modalSubscription.unsubscribe(),o().subscribe(d=>{if(n(d),!_)return;const P=s();P&&this.form.get(c).setValue(P)})}deleteErasureCodeProfile(){this.deletionModal({value:this.form.getValue("erasureProfile"),usage:this.ecpUsage,deletionBtn:this.ecpDeletionBtn,dataName:"erasureInfo",getTabs:()=>this.ecpInfoTabs,tabPosition:"used-by-pools",nameAttribute:"name",itemDescription:"erasure code profile",reloadFn:()=>this.reloadECPs(),deleteFn:_=>this.ecpService.delete(_),taskName:"ecp/delete"})}deletionModal({value:_,usage:o,deletionBtn:n,dataName:s,getTabs:c,tabPosition:d,nameAttribute:P,itemDescription:p,reloadFn:R,deleteFn:h,taskName:T}){if(!_)return;if(o)return n.animation=!1,n.toggle(),this.data[s]=!0,void setTimeout(()=>{const f=c();f&&f.select(d)},50);const m=_[P];this.modalService.show(Ne.M,{itemDescription:p,itemNames:[m],submitActionObservable:()=>{const 
f=h(m);return f.subscribe(()=>R()),this.taskWrapper.wrapTaskAroundCall({task:new v.R(T,{name:m}),call:f})}})}addCrushRule(){this.addModal($_,_=>this.reloadCrushRules(_))}reloadCrushRules(_){this.reloadList({newItemName:_,getInfo:()=>this.poolService.getInfo(),initInfo:o=>{this.initInfo(o),this.poolTypeChange("replicated")},findNewItem:()=>this.info.crush_rules_replicated.find(o=>o.rule_name===_),controlName:"crushRule"})}deleteCrushRule(){this.deletionModal({value:this.form.getValue("crushRule"),usage:this.crushUsage,deletionBtn:this.crushDeletionBtn,dataName:"crushInfo",getTabs:()=>this.crushInfoTabs,tabPosition:"used-by-pools",nameAttribute:"rule_name",itemDescription:"crush rule",reloadFn:()=>this.reloadCrushRules(),deleteFn:_=>this.crushRuleService.delete(_),taskName:"crushRule/delete"})}crushRuleIsUsedBy(_){this.crushUsage=_?this.info.used_rules[_]:void 0}ecpIsUsedBy(_){this.ecpUsage=_?this.info.used_profiles[_]:void 0}submit(){if(this.form.invalid)return void this.form.setErrors({cdSubmitButton:!0});const _={pool:this.form.getValue("name")};this.assignFormFields(_,[{externalFieldName:"pool_type",formControlName:"poolType"},{externalFieldName:"pg_autoscale_mode",formControlName:"pgAutoscaleMode",editable:!0},{externalFieldName:"pg_num",formControlName:"pgNum",replaceFn:n=>"on"===this.form.getValue("pgAutoscaleMode")?1:n,editable:!0},this.isReplicated?{externalFieldName:"size",formControlName:"size"}:{externalFieldName:"erasure_code_profile",formControlName:"erasureProfile",attr:"name"},{externalFieldName:"rule_name",formControlName:"crushRule",replaceFn:n=>this.isReplicated?n&&n.rule_name:void 0},{externalFieldName:"quota_max_bytes",formControlName:"max_bytes",replaceFn:this.formatter.toBytes,editable:!0,resetValue:this.editing?0:void 0},{externalFieldName:"quota_max_objects",formControlName:"max_objects",editable:!0,resetValue:this.editing?0:void 
0}]),this.info.is_all_bluestore&&(this.assignFormField(_,{externalFieldName:"flags",formControlName:"ecOverwrites",replaceFn:()=>this.isErasure?["ec_overwrites"]:void 0}),"none"!==this.form.getValue("mode")?this.assignFormFields(_,[{externalFieldName:"compression_mode",formControlName:"mode",editable:!0,replaceFn:n=>this.hasCompressionEnabled()&&n},{externalFieldName:"compression_algorithm",formControlName:"algorithm",editable:!0},{externalFieldName:"compression_min_blob_size",formControlName:"minBlobSize",replaceFn:this.formatter.toBytes,editable:!0,resetValue:0},{externalFieldName:"compression_max_blob_size",formControlName:"maxBlobSize",replaceFn:this.formatter.toBytes,editable:!0,resetValue:0},{externalFieldName:"compression_required_ratio",formControlName:"ratio",editable:!0,resetValue:0}]):this.editing&&this.assignFormFields(_,[{externalFieldName:"compression_mode",formControlName:"mode",editable:!0,replaceFn:()=>"unset"},{externalFieldName:"srcpool",formControlName:"name",editable:!0,replaceFn:()=>this.data.pool.pool_name}]));const o=this.data.applications.selected;(o.length>0||this.editing)&&(_.application_metadata=o),this.isReplicated&&!u().isEmpty(this.currentConfigurationValues)&&(_.configuration=this.currentConfigurationValues),this.triggerApiTask(_)}assignFormFields(_,o){o.forEach(n=>this.assignFormField(_,n))}assignFormField(_,{externalFieldName:o,formControlName:n,attr:s,replaceFn:c,editable:d,resetValue:P}){if(this.editing&&(!d||this.form.get(n).pristine))return;const p=this.form.getValue(n);let R=c?c(p):s?u().get(p,s):p;if(!p||!R){if(!d||u().isUndefined(P))return;R=P}_[o]=R}triggerApiTask(_){this.taskWrapper.wrapTaskAroundCall({task:new 
v.R("pool/"+(this.editing?M.MQ.EDIT:M.MQ.CREATE),{pool_name:_.hasOwnProperty("srcpool")?_.srcpool:_.pool}),call:this.poolService[this.editing?M.MQ.UPDATE:M.MQ.CREATE](_)}).subscribe({error:o=>{u().isObject(o.error)&&"34"===o.error.code&&this.form.get("pgNum").setErrors({34:!0}),this.form.setErrors({cdSubmitButton:!0})},complete:()=>this.router.navigate(["/pool"])})}appSelection(){this.form.get("name").updateValueAndValidity({emitEvent:!1,onlySelf:!0})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(C_.$),e.Y36(Oe.gz),e.Y36(Oe.F0),e.Y36(Ze.Z),e.Y36(ue.q),e.Y36(he.j),e.Y36(M_.H),e.Y36(de.P),e.Y36(Me),e.Y36(Ie.H),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-form"]],viewQuery:function(_,o){if(1&_&&(e.Gf(Zo,5),e.Gf(xo,5),e.Gf(yo,5),e.Gf(Uo,5)),2&_){let n;e.iGM(n=e.CRH())&&(o.crushInfoTabs=n.first),e.iGM(n=e.CRH())&&(o.crushDeletionBtn=n.first),e.iGM(n=e.CRH())&&(o.ecpInfoTabs=n.first),e.iGM(n=e.CRH())&&(o.ecpDeletionBtn=n.first)}},features:[e.qOj],decls:1,vars:1,consts:function(){let i,_,o,n,s,c,d,P,p,R,h,T,m,f,A,I,$,D,Z,x,y,U,q,H,G,z,X,w,Q,J,k,V,Y,B,j,K,N,W,ee,_e,oe,te,ie,ne,se,ae,le,re,ce,O,Xe,we,Qe,Je,ke,Ve,Ye,Be,je,Ke,We,e_,__,o_,t_,i_,n_,s_,a_,l_,r_,c_,O_;return i="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Name...",n="Pool type",s="-- Select a pool type --",c="Applications",d="Pools should be associated with an application tag",P="Quotas",p="Max bytes",R="Leave it blank or specify 0 to disable this quota.",h="A valid quota should be greater than 0.",T="e.g., 10GiB",m="Max objects",f="Leave it blank or specify 0 to disable this quota.",A="A valid quota should be greater than 0.",I="This field is required!",$="The chosen Ceph pool name is already in use.",D="It's not possible to create an RBD pool with '/' in the name. 
Please change the name or remove 'rbd' from the applications list.",Z="Pool name can only contain letters, numbers, '.', '-', '_' or '/'.",x="This field is required!",y="PG Autoscale",U="Placement groups",q="Calculation help",H="This field is required!",G="At least one placement group is needed!",z="Your cluster can't handle this many PGs. Please recalculate the PG amount needed.",X="The current PGs settings were calculated for you, you should make sure the values suit your needs before submit.",w="Replicated size",Q="Minimum: " + "\ufffd0\ufffd" + "",J="Maximum: " + "\ufffd0\ufffd" + "",k="The size specified is out of range. A value from " + "\ufffd0\ufffd" + " to " + "\ufffd1\ufffd" + " is usable.",V="A size of 1 will not create a replication of the object. The 'Replicated size' includes the object itself.",Y="Flags",B="EC Overwrites",j="CRUSH",K="Erasure code profile",N="This profile can't be deleted as it is in use.",W="Loading...",ee="-- No erasure code profile available --",_e="-- Select an erasure code profile --",oe="Profile",te="Used by pools",ie="Profile is not in use.",ne="Crush ruleset",se="A new crush ruleset will be implicitly created.",ae="Crush ruleset",le="There are no rules.",re="-- Select a crush rule --",ce="Placement and\n replication strategies or distribution policies that allow to\n specify how CRUSH places data replicas.",O="This rule can't be deleted as it is in use.",Xe="Crush rule",we="Crush steps",Qe="Used by pools",Je="Rule is not in use.",ke="This field is required!",Ve="The rule can't be used in the current cluster as it has too few OSDs to meet the minimum required OSD by this rule.",Ye="Compression",Be="Mode",je="Algorithm",Ke="Minimum blob size",We="e.g., 128KiB",e_="Maximum blob size",__="e.g., 512KiB",o_="Ratio",t_="Compression ratio",i_="Loading...",n_="-- No erasure compression algorithm available --",s_="Value should be greater than 0",a_="Value should be less than the maximum blob size",l_="Value should be greater than 
0",r_="Value should be greater than the minimum blob size",c_="Value should be between 0.0 and 1.0",O_="The value should be greater or equal to 0",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","form","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],i,[1,"card-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label","required"],_,[1,"cd-col-form-input"],["id","name","name","name","type","text","placeholder",o,"formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","poolType",1,"cd-col-form-label","required"],n,["id","poolType","formControlName","poolType","name","poolType",1,"form-control"],["ngValue",""],s,[3,"value",4,"ngFor","ngForOf"],[4,"ngIf"],["for","applications",1,"cd-col-form-label"],c,["id","applications",3,"customBadges","customBadgeValidators","messages","data","options","selectionLimit","selection"],["title",d,3,"class",4,"ngIf"],["formGroupName","compression",4,"ngIf"],P,["for","max_bytes",1,"cd-col-form-label"],p,R,h,["id","max_bytes","name","max_bytes","type","text","formControlName","max_bytes","placeholder",T,"defaultUnit","GiB","cdDimlessBinary","",1,"form-control"],["for","max_objects",1,"cd-col-form-label"],m,f,A,["id","max_objects","min","0","name","max_objects","type","number","formControlName","max_objects",1,"form-control"],[3,"hidden"],[3,"form","initializeData","changes"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],I,$,D,Z,[3,"value"],x,["for","pgAutoscaleMode",1,"cd-col-form-label"],y,["id","pgAutoscaleMode","name","pgAutoscaleMode","formControlName","pgAutoscaleMode",1,"form-control"],["class","form-group 
row",4,"ngIf"],["for","pgNum",1,"cd-col-form-label","required"],U,["id","pgNum","name","pgNum","formControlName","pgNum","min","1","type","number","required","",1,"form-control",3,"focus","blur"],[1,"form-text","text-muted"],["section","pgs","docText",q],["class","form-text text-muted",4,"ngIf"],H,G,z,X,["for","size",1,"cd-col-form-label","required"],w,["id","size","name","size","type","number","formControlName","size",1,"form-control",3,"max","min"],["class","text-warning-dark",4,"ngIf"],[1,"list-inline"],Q,J,k,[1,"text-warning-dark"],V,[1,"cd-col-form-label"],Y,[1,"custom-control","custom-checkbox"],["type","checkbox","id","ec-overwrites","formControlName","ecOverwrites",1,"custom-control-input"],["for","ec-overwrites",1,"custom-control-label"],B,["title",d],j,["for","erasureProfile",1,"cd-col-form-label"],K,[1,"input-group"],["id","erasureProfile","name","erasureProfile","formControlName","erasureProfile",1,"form-control"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],[1,"input-group-append"],["id","ecp-info-button","type","button",1,"btn","btn-light",3,"ngClass","click"],["aria-hidden","true",3,"ngClass"],["class","btn btn-light","type","button",3,"click",4,"ngIf"],["class","btn btn-light","type","button","ngbTooltip",N,"triggers","manual",3,"click",4,"ngIf"],["class","form-text 
text-muted","id","ecp-info-block",4,"ngIf"],W,[3,"ngValue"],ee,_e,["type","button",1,"btn","btn-light",3,"click"],["type","button","ngbTooltip",N,"triggers","manual",1,"btn","btn-light",3,"click"],["ecpDeletionBtn","ngbTooltip"],["id","ecp-info-block",1,"form-text","text-muted"],["ngbNav","",1,"nav-tabs"],["ecpInfoTabs","ngbNav"],["ngbNavItem","ecp-info"],["ngbNavLink",""],oe,["ngbNavContent",""],["ngbNavItem","used-by-pools"],te,[3,"ngbNavOutlet"],[3,"renderObjects","hideKeys","data","autoReload"],["ecpIsNotUsed",""],[4,"ngIf","ngIfElse"],ie,[4,"ngFor","ngForOf"],["for","crushRule",1,"cd-col-form-label"],ne,se,ae,["noRules",""],le,["id","crushRule","formControlName","crushRule","name","crushSet",1,"form-control"],re,["id","crush-info-button","type","button","ngbTooltip",ce,1,"btn","btn-light",3,"ngClass","click"],["class","btn btn-light","type","button","ngbTooltip",O,"triggers","manual",3,"click",4,"ngIf"],["class","form-text text-muted","id","crush-info-block",4,"ngIf"],["type","button","ngbTooltip",O,"triggers","manual",1,"btn","btn-light",3,"click"],["crushDeletionBtn","ngbTooltip"],["id","crush-info-block",1,"form-text","text-muted"],["crushInfoTabs","ngbNav"],["ngbNavItem","crush-rule-info"],Xe,["ngbNavItem","crush-rule-steps"],we,Qe,["ruleIsNotUsed",""],Je,ke,Ve,["formGroupName","compression"],Ye,["for","mode",1,"cd-col-form-label"],Be,["id","mode","name","mode","formControlName","mode",1,"form-control"],["for","algorithm",1,"cd-col-form-label"],je,["id","algorithm","name","algorithm","formControlName","algorithm",1,"form-control"],["for","minBlobSize",1,"cd-col-form-label"],Ke,["id","minBlobSize","name","minBlobSize","formControlName","minBlobSize","type","text","min","0","placeholder",We,"defaultUnit","KiB","cdDimlessBinary","",1,"form-control"],["for","maxBlobSize",1,"cd-col-form-label"],e_,["id","maxBlobSize","type","text","min","0","formControlName","maxBlobSize","placeholder",__,"defaultUnit","KiB","cdDimlessBinary","",1,"form-control"],["for","ratio",
1,"cd-col-form-label"],o_,["id","ratio","name","ratio","formControlName","ratio","type","number","min","0","max","1","step","0.1","placeholder",t_,1,"form-control"],i_,n_,s_,a_,l_,r_,c_,O_]},template:function(_,o){1&_&&e.YNc(0,Yt,70,33,"div",0),2&_&&e.Q6J("cdFormLoading",o.loading)},directives:[bo.y,a._Y,a.JL,fe.V,a.sg,Pe.P,Ee.o,a.Fj,ge.b,a.JJ,a.u,pe.U,C.O5,a.EJ,a.YN,a.Kr,C.sg,vo.m,Re.S,Io.Q,a.qQ,a.wV,$o.d,me.p,a.Q7,Do.K,a.Fd,a.Wl,C.mk,g._L,g.Pz,g.nv,g.Vx,g.uN,g.tO,xe.b,a.x0],pipes:[C.rS,Ce.m],styles:[".icon-warning-color[_ngcontent-%COMP%]{margin-left:3px}"]}),t})();var Bt=r(19773),jt=r(49671),Kt=r(68136),Te=r(69158),Se=r(35905),L=r(99466),Wt=r(91801),ei=r(68774),_i=r(66369),qe=r(38047),Le=r(51847);class oi{constructor(i){this.pool_name=i}}var ti=r(64724);let ii=(()=>{class t{constructor(_,o,n){this.templateRef=_,this.viewContainer=o,this.authStorageService=n,this.cdScopeMatchAll=!0}set cdScope(_){this.permissions=this.authStorageService.getPermissions(),this.isAuthorized(_)?this.viewContainer.createEmbeddedView(this.templateRef):this.viewContainer.clear()}isAuthorized(_){const o=this.cdScopeMatchAll?u().every:u().some;return u().isString(_)?u().get(this.permissions,[_,"read"],!1):u().isArray(_)?o(_,n=>this.permissions[n].read):!!u().isObject(_)&&o(_,(n,s)=>o(n,c=>this.permissions[s][c]))}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(e.Rgc),e.Y36(e.s_b),e.Y36(he.j))},t.\u0275dir=e.lG2({type:t,selectors:[["","cdScope",""]],inputs:{cdScope:"cdScope",cdScopeMatchAll:"cdScopeMatchAll"}}),t})();var ni=r(94928),He=r(51295),si=r(59376),Ge=r(76317),ai=r(71752);function li(t,i){if(1&t&&e._UZ(0,"cd-table-key-value",12),2&t){const _=e.oxw(2);e.Q6J("renderObjects",!0)("data",_.poolDetails)("autoReload",!1)}}function ri(t,i){if(1&t&&e._UZ(0,"cd-grafana",15),2&t){const _=e.oxw(3);e.MGl("grafanaPath","ceph-pool-detail?var-pool_name=",_.selection.pool_name,""),e.Q6J("type","metrics")}}function 
ci(t,i){1&t&&(e.TgZ(0,"li",13),e.TgZ(1,"a",5),e.SDv(2,14),e.qZA(),e.YNc(3,ri,1,2,"ng-template",7),e.qZA())}function Oi(t,i){if(1&t&&e._UZ(0,"cd-rbd-configuration-table",18),2&t){const _=e.oxw(3);e.Q6J("data",_.selectedPoolConfiguration)}}function di(t,i){1&t&&(e.TgZ(0,"li",16),e.TgZ(1,"a",5),e.SDv(2,17),e.qZA(),e.YNc(3,Oi,1,1,"ng-template",7),e.qZA())}function ui(t,i){if(1&t&&e._UZ(0,"cd-table",21),2&t){const _=e.oxw(3);e.Q6J("data",_.cacheTiers)("columns",_.cacheTierColumns)("autoSave",!1)}}function fi(t,i){1&t&&(e.TgZ(0,"li",19),e.TgZ(1,"a",5),e.SDv(2,20),e.qZA(),e.YNc(3,ui,1,3,"ng-template",7),e.qZA())}function Pi(t,i){if(1&t&&(e.ynx(0,1),e.TgZ(1,"ul",2,3),e.TgZ(3,"li",4),e.TgZ(4,"a",5),e.SDv(5,6),e.qZA(),e.YNc(6,li,1,3,"ng-template",7),e.qZA(),e.YNc(7,ci,4,0,"li",8),e.YNc(8,di,4,0,"li",9),e.YNc(9,fi,4,0,"li",10),e.qZA(),e._UZ(10,"div",11),e.BQk()),2&t){const _=e.MAs(2),o=e.oxw();e.xp6(7),e.Q6J("ngIf",o.permissions.grafana.read),e.xp6(1),e.Q6J("ngIf","replicated"===o.selection.type),e.xp6(1),e.Q6J("ngIf",(null==o.selection.tiers?null:o.selection.tiers.length)>0),e.xp6(1),e.Q6J("ngbNavOutlet",_)}}let Ei=(()=>{class t{constructor(_){this.poolService=_,this.cacheTierColumns=[],this.omittedPoolAttributes=["cdExecuting","cdIsBinary","stats"],this.cacheTierColumns=[{prop:"pool_name",name:"Name",flexGrow:3},{prop:"cache_mode",name:"Cache Mode",flexGrow:2},{prop:"cache_min_evict_age",name:"Min Evict Age",flexGrow:2},{prop:"cache_min_flush_age",name:"Min Flush Age",flexGrow:2},{prop:"target_max_bytes",name:"Target Max Bytes",flexGrow:2},{prop:"target_max_objects",name:"Target Max Objects",flexGrow:2}]}ngOnChanges(){this.selection&&(this.poolService.getConfiguration(this.selection.pool_name).subscribe(_=>{He.T.updateChanged(this,{selectedPoolConfiguration:_})}),He.T.updateChanged(this,{poolDetails:u().omit(this.selection,this.omittedPoolAttributes)}))}}return t.\u0275fac=function(_){return 
new(_||t)(e.Y36(ue.q))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-details"]],inputs:{cacheTiers:"cacheTiers",permissions:"permissions",selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let i,_,o,n;return i="Details",_="Performance Details",o="Configuration",n="Cache Tiers Details",[["cdTableDetail","",4,"ngIf"],["cdTableDetail",""],["ngbNav","","cdStatefulTab","pool-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],i,["ngbNavContent",""],["ngbNavItem","performance-details",4,"ngIf"],["ngbNavItem","configuration",4,"ngIf"],["ngbNavItem","cache-tiers-details",4,"ngIf"],[3,"ngbNavOutlet"],[3,"renderObjects","data","autoReload"],["ngbNavItem","performance-details"],_,["uid","-xyV8KCiz","grafanaStyle","three",3,"grafanaPath","type"],["ngbNavItem","configuration"],o,[3,"data"],["ngbNavItem","cache-tiers-details"],n,["columnMode","flex",3,"data","columns","autoSave"]]},template:function(_,o){1&_&&e.YNc(0,Pi,11,4,"ng-container",0),2&_&&e.Q6J("ngIf",o.selection)},directives:[C.O5,g.Pz,si.m,g.nv,g.Vx,g.uN,g.tO,xe.b,Ge.F,ai.P,Se.a],styles:[""],changeDetection:0}),t})();var gi=r(60251);const pi=["poolUsageTpl"],Ri=["poolConfigurationSourceTpl"];function mi(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table",9,10),e.NdJ("fetchData",function(){return e.CHM(_),e.oxw().taskListService.fetch()})("setExpandedRow",function(n){return e.CHM(_),e.oxw().setExpandedRow(n)})("updateSelection",function(n){return e.CHM(_),e.oxw().updateSelection(n)}),e._UZ(2,"cd-table-actions",11),e._UZ(3,"cd-pool-details",12),e.qZA()}if(2&t){const _=e.oxw();e.Q6J("data",_.pools)("columns",_.columns)("hasDetails",!0)("status",_.tableStatus)("autoReload",-1),e.xp6(2),e.Q6J("permission",_.permissions.pool)("selection",_.selection)("tableActions",_.tableActions),e.xp6(1),e.Q6J("selection",_.expandedRow)("permissions",_.permissions)("cacheTiers",_.cacheTiers)}}function 
Ci(t,i){1&t&&e._UZ(0,"cd-grafana",14),2&t&&e.Q6J("grafanaPath","ceph-pools-overview?")("type","metrics")}function Mi(t,i){1&t&&(e.TgZ(0,"li",2),e.TgZ(1,"a",3),e.SDv(2,13),e.qZA(),e.YNc(3,Ci,1,2,"ng-template",5),e.qZA())}function hi(t,i){if(1&t&&e._UZ(0,"cd-usage-bar",16),2&t){const _=e.oxw().row;e.Q6J("total",_.stats.bytes_used.latest+_.stats.avail_raw.latest)("used",_.stats.bytes_used.latest)}}function Ti(t,i){if(1&t&&e.YNc(0,hi,1,2,"cd-usage-bar",15),2&t){const _=i.row;e.Q6J("ngIf",null==_.stats||null==_.stats.avail_raw?null:_.stats.avail_raw.latest)}}let Si=(()=>{class t extends Kt.o{constructor(_,o,n,s,c,d,P,p,R,h,T){super(),this.poolService=_,this.taskWrapper=o,this.ecpService=n,this.authStorageService=s,this.taskListService=c,this.modalService=d,this.pgCategoryService=P,this.dimlessPipe=p,this.urlBuilder=R,this.configurationService=h,this.actionLabels=T,this.selection=new ei.r,this.executingTasks=[],this.tableStatus=new Te.E,this.cacheTiers=[],this.monAllowPoolDelete=!1,this.permissions=this.authStorageService.getPermissions(),this.tableActions=[{permission:"create",icon:b.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE},{permission:"update",icon:b.P.edit,routerLink:()=>this.urlBuilder.getEdit(encodeURIComponent(this.selection.first().pool_name)),name:this.actionLabels.EDIT},{permission:"delete",icon:b.P.destroy,click:()=>this.deletePoolModal(),name:this.actionLabels.DELETE,disable:this.getDisableDesc.bind(this)}],this.permissions.configOpt.read&&this.configurationService.get("mon_allow_pool_delete").subscribe(m=>{if(u().has(m,"value")){const f=u().find(m.value,A=>"mon"===A.section)||{value:!1};this.monAllowPoolDelete="true"===f.value}})}ngOnInit(){const _=(o,n,s)=>u().get(n,o)>u().get(s,o)?1:-1;this.columns=[{prop:"pool_name",name:"Name",flexGrow:4,cellTransformation:L.e.executing},{prop:"data_protection",name:"Data 
Protection",cellTransformation:L.e.badge,customTemplateConfig:{class:"badge-background-gray"},flexGrow:1.3},{prop:"application_metadata",name:"Applications",cellTransformation:L.e.badge,customTemplateConfig:{class:"badge-background-primary"},flexGrow:1.5},{prop:"pg_status",name:"PG Status",flexGrow:1.2,cellClass:({row:o,column:n,value:s})=>this.getPgStatusCellClass(o,n,s)},{prop:"crush_rule",name:"Crush Ruleset",isHidden:!0,flexGrow:2},{name:"Usage",prop:"usage",cellTemplate:this.poolUsageTpl,flexGrow:1.2},{prop:"stats.rd_bytes.rates",name:"Read bytes",comparator:(o,n,s,c)=>_("stats.rd_bytes.latest",s,c),cellTransformation:L.e.sparkline,flexGrow:1.5},{prop:"stats.wr_bytes.rates",name:"Write bytes",comparator:(o,n,s,c)=>_("stats.wr_bytes.latest",s,c),cellTransformation:L.e.sparkline,flexGrow:1.5},{prop:"stats.rd.rate",name:"Read ops",flexGrow:1,pipe:this.dimlessPipe,cellTransformation:L.e.perSecond},{prop:"stats.wr.rate",name:"Write ops",flexGrow:1,pipe:this.dimlessPipe,cellTransformation:L.e.perSecond}],this.taskListService.init(()=>this.ecpService.list().pipe((0,Bt.zg)(o=>(this.ecProfileList=o,this.poolService.getList()))),void 0,o=>{this.pools=this.transformPoolsData(o),this.tableStatus=new Te.E},()=>{this.table.reset(),this.tableStatus=new Te.E(Wt.T.ValueException)},o=>o.name.startsWith("pool/"),(o,n)=>n.metadata.pool_name===o.pool_name,{default:o=>new oi(o.pool_name)})}updateSelection(_){this.selection=_}deletePoolModal(){const _=this.selection.first().pool_name;this.modalService.show(Ne.M,{itemDescription:"Pool",itemNames:[_],submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new v.R(`pool/${M.MQ.DELETE}`,{pool_name:_}),call:this.poolService.delete(_)})})}getPgStatusCellClass(_,o,n){return{"text-right":!0,[`pg-${this.pgCategoryService.getTypeByStates(n)}`]:!0}}getErasureCodeProfile(_){let o="";return u().forEach(this.ecProfileList,n=>{n.name===_&&(o=`EC: ${n.k}+${n.m}`)}),o}transformPoolsData(_){const 
o=["bytes_used","max_avail","avail_raw","percent_used","rd_bytes","wr_bytes","rd","wr"],n={latest:0,rate:0,rates:[]};return u().forEach(_,s=>{s.pg_status=this.transformPgStatus(s.pg_status);const c={};u().forEach(o,d=>{c[d]=s.stats&&s.stats[d]?s.stats[d]:n}),s.stats=c,s.usage=c.percent_used.latest,!s.cdExecuting&&s.pg_num+s.pg_placement_num!==s.pg_num_target+s.pg_placement_num_target&&(s.cdExecuting="Updating"),["rd_bytes","wr_bytes"].forEach(d=>{s.stats[d].rates=s.stats[d].rates.map(P=>P[1])}),s.cdIsBinary=!0,"erasure"===s.type&&(s.data_protection=this.getErasureCodeProfile(s.erasure_code_profile)),"replicated"===s.type&&(s.data_protection=`replica: \xd7${s.size}`)}),_}transformPgStatus(_){const o=[];return u().forEach(_,(n,s)=>{o.push(`${n} ${s}`)}),o.join(", ")}getSelectionTiers(){if(void 0!==this.expandedRow){const _=this.expandedRow.tiers;this.cacheTiers=this.pools.filter(o=>_.includes(o.pool))}}getDisableDesc(){var _;return!(null===(_=this.selection)||void 0===_?void 0:_.hasSelection)||!this.monAllowPoolDelete&&"Pool deletion is disabled by the mon_allow_pool_delete configuration setting."}setExpandedRow(_){super.setExpandedRow(_),this.getSelectionTiers()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ue.q),e.Y36(de.P),e.Y36(Me),e.Y36(he.j),e.Y36(qe.j),e.Y36(Ze.Z),e.Y36(jt.j),e.Y36(_i.n),e.Y36(Le.F),e.Y36(ti.e),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-list"]],viewQuery:function(_,o){if(1&_&&(e.Gf(Se.a,5),e.Gf(pi,7),e.Gf(Ri,5)),2&_){let n;e.iGM(n=e.CRH())&&(o.table=n.first),e.iGM(n=e.CRH())&&(o.poolUsageTpl=n.first),e.iGM(n=e.CRH())&&(o.poolConfigurationSourceTpl=n.first)}},features:[e._Bn([qe.j,{provide:Le.F,useValue:new Le.F("pool")}]),e.qOj],decls:10,vars:2,consts:function(){let i,_;return i="Pools List",_="Overall 
Performance",[["ngbNav","",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem",""],["ngbNavLink",""],i,["ngbNavContent",""],["ngbNavItem","",4,"cdScope"],[3,"ngbNavOutlet"],["poolUsageTpl",""],["id","pool-list","selectionType","single",3,"data","columns","hasDetails","status","autoReload","fetchData","setExpandedRow","updateSelection"],["table",""],["id","pool-list-actions",1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","","id","pool-list-details",3,"selection","permissions","cacheTiers"],_,["uid","z99hzWtmk","grafanaStyle","two",3,"grafanaPath","type"],["decimals","2",3,"total","used",4,"ngIf"],["decimals","2",3,"total","used"]]},template:function(_,o){if(1&_&&(e.TgZ(0,"ul",0,1),e.TgZ(2,"li",2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,mi,4,11,"ng-template",5),e.qZA(),e.YNc(6,Mi,4,0,"li",6),e.qZA(),e._UZ(7,"div",7),e.YNc(8,Ti,1,1,"ng-template",null,8,e.W1O)),2&_){const n=e.MAs(1);e.xp6(6),e.Q6J("cdScope","grafana"),e.xp6(1),e.Q6J("ngbNavOutlet",n)}},directives:[g.Pz,g.nv,g.Vx,g.uN,ii,g.tO,Se.a,ni.K,Ei,Ge.F,C.O5,gi.O],styles:["cd-pool-list .pg-clean{color:#0b0} cd-pool-list .pg-working{color:#2b99a8} cd-pool-list .pg-warning{color:#ffc200} cd-pool-list .pg-unknown{color:#ef5c55}"]}),t})(),ze=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[f_.t,C.ez,g.Oz,d_.m,Oe.Bz,a.UX,g.HK,u_.BlockModule]]}),t})();const Li=[{path:"",component:Si},{path:M.MQ.CREATE,component:Ue,data:{breadcrumbs:M.Qn.CREATE}},{path:`${M.MQ.EDIT}/:name`,component:Ue,data:{breadcrumbs:M.Qn.EDIT}}];let Ai=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[ze,Oe.Bz.forChild(Li)]]}),t})()}}]); \ No newline at end of file diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/index.html b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/index.html index 80c67410d..d37b61f38 100644 --- 
a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/index.html +++ b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/index.html @@ -4,7 +4,7 @@ - +